id (int64) | label (string) | text (string) |
---|---|---|
2,300 |
tear down
|
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for php_generator."""
__author__ = '[email protected] (Chirag Shah)'
from absl.testing import absltest
from googleapis.codegen import api
from googleapis.codegen import php_generator
from googleapis.codegen import schema
class PHPApiTest(absltest.TestCase):
def setUp(self):
gen_params = {'name': 'test', 'version': 'v1', 'resources': {}}
self.api = php_generator.PHPApi(gen_params)
self.generator = php_generator.PHPGenerator(gen_params)
self.language_model = php_generator.PhpLanguageModel()
# TODO(user): Do what we did for template_helpers and allow language
# model to come from global state. Then we don't need this stuff.
self.api.VisitAll(lambda o: o.SetLanguageModel(self.language_model))
def METHOD_NAME(self):
self.api = None
def testAnnotateMethod(self):
param_dict = {'httpMethod': 'GET',
'id': 'myservice.foo.count',
'parameters': {'alt': {}}}
method = api.Method(self.api, 'count', param_dict)
self.generator.AnnotateMethod(self.api, method)
self.assertEqual('myservice.foo.count', method.values['id'])
self.assertEqual('count', method.values['name'])
self.assertEqual('count', method.values['wireName'])
self.assertEqual('Count', method.values['className'])
def testSetTypeHint(self):
"""Test creating safe class names from object names."""
test_schema = api.Schema(self.api, 'testSchema', {})
type_to_hint = [
({'$ref': 'Activity'}, 'Activity'),
({'type': 'boolean'}, ''),
({'type': 'integer'}, ''),
({'type': 'string'}, ''), # PHP doesn't support primitive type hints.
({'type': 'StRing'}, ''),  # PHP type names are matched case-insensitively.
({'$ref': 'Photo'}, 'Photo'),
({'type': 'array', 'items': {'type': 'string'}}, ''),
({'type': 'object', 'properties': {'p1': {'type': 'string'}}},
'TestSchemaTest'),
]
for type_dict, expected_hint in type_to_hint:
test_property = schema.Property(self.api, test_schema, 'test', type_dict)
test_property.SetLanguageModel(self.language_model)
self.generator._SetTypeHint(test_property)
self.assertEqual(expected_hint, test_property.values['typeHint'])
def testToMethodName(self):
"""Test creating safe method names from wire names."""
method = {'wireName': 'foo'}
method_name = self.generator._ToMethodName(method, None)
self.assertEqual('foo', method_name)
# Method name that doesn't conflict with a PHP keyword.
method['wireName'] = 'get'
resource = {'className': 'ResourceClassName'}
method_name = self.generator._ToMethodName(method, resource)
self.assertEqual('get', method_name)
# Method name that conflicts with a PHP keyword.
method['wireName'] = 'as'
resource['className'] = 'Class'
method_name = self.generator._ToMethodName(method, resource)
self.assertEqual('asClass', method_name)
# Method name that conflicts with a canonical PHP keyword.
method['wireName'] = 'aS'
method_name = self.generator._ToMethodName(method, resource)
self.assertEqual('aSClass', method_name)
def testToClassName(self):
"""Test creating safe class names from object names."""
self.assertEqual('Foo', self.api.ToClassName('foo', None))
self.assertEqual('TestObject', self.api.ToClassName('object', None))
self.assertEqual('TestString', self.api.ToClassName('string', None))
def testGetCodeTypeFromDictionary(self):
"""Test mapping of JSON schema types to PHP class names."""
php_type_to_schema = [('object', {'type': 'object'}),
('string', {'type': 'string'}),
('array', {'type': 'any'}),
('bool', {'type': 'boolean'}),
('int', {'type': 'integer'}),
('string', {'type': 'number', 'format': 'uint32'}),
('string', {'type': 'integer', 'format': 'uint32'}),
('string', {'type': 'string', 'format': 'uint32'}),
('string', {'type': 'number', 'format': 'uint64'}),
('string', {'type': 'integer', 'format': 'uint64'}),
('string', {'type': 'string', 'format': 'uint64'}),
('int', {'type': 'number', 'format': 'int32'}),
('int', {'type': 'integer', 'format': 'int32'}),
('int', {'type': 'string', 'format': 'int32'}),
('string', {'type': 'number', 'format': 'int64'}),
('string', {'type': 'integer', 'format': 'int64'}),
('string', {'type': 'string', 'format': 'int64'}),
('string', {'type': 'string',
'format': 'date-time'}),
('double', {'type': 'number', 'format': 'double'}),
('double', {'type': 'string', 'format': 'double'}),
('float', {'type': 'number', 'format': 'float'}),
('float', {'type': 'string', 'format': 'float'})]
for schema_obj in php_type_to_schema:
php_type = schema_obj[0]
s = schema_obj[1]
self.assertEqual(php_type,
self.language_model.GetCodeTypeFromDictionary(s))
if __name__ == '__main__':
absltest.main()
|
2,301 |
test process empty filing
|
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Test Suites to ensure that the worker is operating correctly."""
import json
import random
import pytest
# from tests.unit import create_business, create_director, create_filing # noqa I001, E501;
from tests.unit import create_business, create_filing # noqa I001, E501;
def test_extract_payment_token():
"""Assert that the payment token can be extracted from the Queue delivered Msg."""
import stan.pb.protocol_pb2 as protocol
from stan.aio.client import Msg
from entity_pay.worker import extract_payment_token
# setup
token = {'paymentToken': {'id': 1234, 'statusCode': 'COMPLETED'}}
msg = Msg()
msg.proto = protocol.MsgProto
msg.proto.data = json.dumps(token).encode('utf-8')
# test and verify
assert extract_payment_token(msg) == token
def test_get_filing_by_payment_id(app, session):
"""Assert that a unique filling gets retrieved for a filing id."""
from entity_pay.worker import get_filing_by_payment_id
payment_id = str(random.SystemRandom().getrandbits(0x58))
create_filing(payment_id)
filing = get_filing_by_payment_id(int(payment_id))
assert filing
assert filing.payment_token == payment_id
async def test_process_payment_missing_app(app, session):
"""Assert that a filling will fail with no flask app supplied."""
from legal_api.models import Filing
from entity_pay.worker import process_payment
# vars
payment_id = str(random.SystemRandom().getrandbits(0x58))
identifier = 'CP1234567'
# setup
business = create_business(identifier)
create_filing(payment_id, None, business.id)
payment_token = {'paymentToken': {'id': payment_id, 'statusCode': Filing.Status.COMPLETED.value}}
# TEST
with pytest.raises(Exception):
await process_payment(payment_token, flask_app=None)
async def METHOD_NAME(app, session):
"""Assert that an AR filling can be applied to the model correctly."""
from legal_api.models import Filing
from entity_pay.worker import get_filing_by_payment_id, process_payment
# vars
payment_id = str(random.SystemRandom().getrandbits(0x58))
identifier = 'CP1234567'
# setup
business = create_business(identifier)
business_id = business.id
create_filing(payment_id, None, business.id)
payment_token = {'paymentToken': {'id': payment_id, 'statusCode': Filing.Status.COMPLETED.value}}
# TEST
await process_payment(payment_token, app)
# Get modified data
filing = get_filing_by_payment_id(int(payment_id))
# check it out
assert filing.business_id == business_id
assert filing.status == Filing.Status.PAID.value
async def test_process_payment_failed(app, session):
"""Assert that an AR filling status is set to error if payment transaction failed."""
from legal_api.models import Business, Filing
from entity_pay.worker import get_filing_by_payment_id, process_payment
# vars
payment_id = str(random.SystemRandom().getrandbits(0x58))
identifier = 'CP1234567'
# setup
business = create_business(identifier)
business_id = business.id
create_filing(payment_id, None, business.id)
payment_token = {'paymentToken': {'id': payment_id,
'statusCode': 'TRANSACTION_FAILED'}}
# TEST
await process_payment(payment_token, app)
# Get modified data
filing = get_filing_by_payment_id(int(payment_id))
business = Business.find_by_internal_id(business_id)
# check it out
assert filing.business_id == business_id
assert filing.status == Filing.Status.PENDING.value
assert not business.last_agm_date
assert not business.last_ar_date
@pytest.mark.parametrize('name,filing_id,corp_type_code,expected_result', [
('success', '1', 'BEN', True),
('success', '1', 'CP', True),
('success', '1', 'SP', True),
('success', '1', 'GP', True),
('success', '1', 'BC', True),
('success', '1', 'ULC', True),
('success', '1', 'CC', True),
('fail_invalid_corp_type', '1', None, False),
('fail_invalid_corp_type', '1', 'CSO', False),
('fail_no_payment_token', '1', 'BC', False),
])
def test_is_processable_message(app, session, name, filing_id, corp_type_code, expected_result):
"""Assert that the queue message is processable only when msg meets required criteria."""
from entity_pay.worker import is_processable_message
# setup
if name == 'fail_no_payment_token':
msg = {'paymentToken': None}
else:
msg = {'paymentToken': {'id': 1234,
'statusCode': 'COMPLETED',
'corpTypeCode': corp_type_code}}
# test and verify
assert is_processable_message(msg) == expected_result
|
2,302 |
update loaded pyrevit referenced modules
|
"""Manage information about pyRevit sessions."""
import sys
from collections import namedtuple
from pyrevit import HOST_APP, HOME_DIR
from pyrevit import versionmgr
from pyrevit.compat import safe_strtype
from pyrevit.versionmgr import about
from pyrevit import coreutils
from pyrevit.coreutils.logger import get_logger
from pyrevit.coreutils import envvars
from pyrevit.userconfig import user_config
from pyrevit import runtime
from pyrevit.loader.systemdiag import system_diag
#pylint: disable=W0703,C0302,C0103
mlogger = get_logger(__name__)
RuntimeInfo = namedtuple('RuntimeInfo', ['pyrevit_version',
'engine_version',
'host_version'])
"""Session runtime information tuple.
Args:
pyrevit_version (str): formatted pyRevit version
engine_version (int): active IronPython engine version
host_version (str): Current Revit version
"""
def setup_runtime_vars():
"""Setup runtime environment variables with session information."""
# set pyrevit version
pyrvt_ver = versionmgr.get_pyrevit_version().get_formatted()
envvars.set_pyrevit_env_var(envvars.VERSION_ENVVAR, pyrvt_ver)
# set app version env var
if HOST_APP.is_newer_than(2017):
envvars.set_pyrevit_env_var(envvars.APPVERSION_ENVVAR,
HOST_APP.subversion)
else:
envvars.set_pyrevit_env_var(envvars.APPVERSION_ENVVAR,
HOST_APP.version)
# set ironpython engine version env var
attachment = user_config.get_current_attachment()
if attachment and attachment.Clone:
envvars.set_pyrevit_env_var(envvars.CLONENAME_ENVVAR,
attachment.Clone.Name)
envvars.set_pyrevit_env_var(envvars.IPYVERSION_ENVVAR,
str(attachment.Engine.Version))
else:
mlogger.debug('Can not determine attachment.')
envvars.set_pyrevit_env_var(envvars.CLONENAME_ENVVAR, "Unknown")
envvars.set_pyrevit_env_var(envvars.IPYVERSION_ENVVAR, "0")
# set cpython engine version env var
cpyengine = user_config.get_active_cpython_engine()
if cpyengine:
envvars.set_pyrevit_env_var(envvars.CPYVERSION_ENVVAR,
str(cpyengine.Version))
else:
envvars.set_pyrevit_env_var(envvars.CPYVERSION_ENVVAR, "0")
# set a list of important assemblies
# this is required for dotnet script execution
set_loaded_pyrevit_referenced_modules(
runtime.get_references()
)
def get_runtime_info():
"""Return runtime information tuple.
Returns:
:obj:`RuntimeInfo`: runtime info tuple
Example:
>>> sessioninfo.get_runtime_info()
"""
# FIXME: add example output
return RuntimeInfo(
pyrevit_version=envvars.get_pyrevit_env_var(envvars.VERSION_ENVVAR),
engine_version=envvars.get_pyrevit_env_var(envvars.IPYVERSION_ENVVAR),
host_version=envvars.get_pyrevit_env_var(envvars.APPVERSION_ENVVAR)
)
def set_session_uuid(uuid_str):
"""Set session uuid on environment variable.
Args:
uuid_str (str): session uuid string
"""
envvars.set_pyrevit_env_var(envvars.SESSIONUUID_ENVVAR, uuid_str)
def get_session_uuid():
"""Read session uuid from environment variable.
Returns:
str: session uuid string
"""
return envvars.get_pyrevit_env_var(envvars.SESSIONUUID_ENVVAR)
def new_session_uuid():
"""Create a new uuid for a pyRevit session.
Returns:
str: session uuid string
"""
uuid_str = safe_strtype(coreutils.new_uuid())
set_session_uuid(uuid_str)
return uuid_str
def get_loaded_pyrevit_assemblies():
"""Return list of loaded pyRevit assemblies from environment variable.
Returns:
list[str]: list of loaded assemblies
"""
# FIXME: verify and document return type
loaded_assms_str = envvars.get_pyrevit_env_var(envvars.LOADEDASSMS_ENVVAR)
if loaded_assms_str:
return loaded_assms_str.split(coreutils.DEFAULT_SEPARATOR)
else:
return []
def set_loaded_pyrevit_assemblies(loaded_assm_name_list):
"""Set the environment variable with list of loaded assemblies.
Args:
loaded_assm_name_list (list[str]): list of assembly names
"""
envvars.set_pyrevit_env_var(
envvars.LOADEDASSMS_ENVVAR,
coreutils.DEFAULT_SEPARATOR.join(loaded_assm_name_list)
)
def get_loaded_pyrevit_referenced_modules():
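"""Return set of pyRevit referenced modules from environment variable.
Returns:
set[str]: set of referenced module names
"""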
loaded_assms_str = envvars.get_pyrevit_env_var(envvars.REFEDASSMS_ENVVAR)
if loaded_assms_str:
return set(loaded_assms_str.split(coreutils.DEFAULT_SEPARATOR))
else:
return set()
def set_loaded_pyrevit_referenced_modules(loaded_assm_name_list):
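"""Set the environment variable with list of referenced modules.
Args:
loaded_assm_name_list (list[str]): list of referenced module names
"""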
envvars.set_pyrevit_env_var(
envvars.REFEDASSMS_ENVVAR,
coreutils.DEFAULT_SEPARATOR.join(loaded_assm_name_list)
)
def METHOD_NAME(loaded_assm_name_list):
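"""Add new module names to the set of referenced modules.
Args:
loaded_assm_name_list (list[str]): list of module names to add
"""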
loaded_modules = get_loaded_pyrevit_referenced_modules()
loaded_modules.update(loaded_assm_name_list)
set_loaded_pyrevit_referenced_modules(loaded_modules)
def report_env():
"""Report python version, home directory, config file, etc."""
# run diagnostics
system_diag()
# get python version that includes last commit hash
mlogger.info('pyRevit version: %s - </> with :growing_heart: in %s',
envvars.get_pyrevit_env_var(envvars.VERSION_ENVVAR),
about.get_pyrevit_about().madein)
if user_config.rocket_mode:
mlogger.info('pyRevit Rocket Mode enabled. :rocket:')
mlogger.info('Host is %s pid: %s', HOST_APP.pretty_name, HOST_APP.proc_id)
# ipy 2.7.10 has a new line in its sys.version :rolling-eyes-emoji:
mlogger.info('Running on: %s', sys.version.replace('\n', ' '))
mlogger.info('User is: %s', HOST_APP.username)
mlogger.info('Home Directory is: %s', HOME_DIR)
mlogger.info('Session uuid is: %s', get_session_uuid())
mlogger.info('Runtime assembly is: %s', runtime.RUNTIME_ASSM_NAME)
mlogger.info('Config file is (%s): %s',
user_config.config_type, user_config.config_file)
|
2,303 |
read eventfile
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow as tf
# poc workspace
client_results_root = "../workspace_brats"
# All sites used the same validation set, so only 1 site's record is needed
site_num = 1
site_pre = "site-"
# Central vs. FedAvg vs. FedAvg_DP
experiments = {
"brats_central": {"tag": "val_metric_global_model", "site": "All"},
"brats_fedavg": {"tag": "val_metric_global_model"},
"brats_fedavg_dp": {"tag": "val_metric_global_model"},
}
weight = 0.8
def smooth(scalars, weight): # Weight between 0 and 1
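"""Exponentially smooth scalars: s[i] = weight * s[i-1] + (1 - weight) * x[i]."""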
last = scalars[0] # First value in the plot (first timestep)
smoothed = list()
for point in scalars:
smoothed_val = last * weight + (1 - weight) * point # Calculate smoothed value
smoothed.append(smoothed_val) # Save it
last = smoothed_val # Anchor the last smoothed value
return smoothed
def find_job_id(workdir, fl_app_name="prostate_central"):
"""Find the first matching experiment"""
target_path = os.path.join(workdir, "*", "fl_app.txt")
fl_app_files = glob.glob(target_path, recursive=True)
assert len(fl_app_files) > 0, f"No `fl_app.txt` files found in workdir={workdir}."
for fl_app_file in fl_app_files:
with open(fl_app_file, "r") as f:
_fl_app_name = f.read()
if fl_app_name == _fl_app_name: # alpha will be matched based on value in config file
job_id = os.path.basename(os.path.dirname(fl_app_file))
return job_id
raise ValueError(f"No job id found for fl_app_name={fl_app_name} in workdir={workdir}")
def METHOD_NAME(filepath, tags=["val_metric_global_model"]):
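"""Read [step, value] pairs for the given tags from a TensorBoard event file."""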
data = {}
for summary in tf.compat.v1.train.summary_iterator(filepath):
for v in summary.summary.value:
if v.tag in tags:
if v.tag in data.keys():
data[v.tag].append([summary.step, v.simple_value])
else:
data[v.tag] = [[summary.step, v.simple_value]]
return data
def add_eventdata(data, config, filepath, tag="val_metric_global_model"):
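"""Append smoothed metric values from one event file to the plotting data dict."""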
event_data = METHOD_NAME(filepath, tags=[tag])
assert len(event_data[tag]) > 0, f"No data for key {tag}"
metric = []
for e in event_data[tag]:
# print(e)
data["Config"].append(config)
data["Epoch"].append(e[0])
metric.append(e[1])
metric = smooth(metric, weight)
for entry in metric:
data["Dice"].append(entry)
print(f"added {len(event_data[tag])} entries for {tag}")
def main():
plt.figure()
i = 1
# add event files
data = {"Config": [], "Epoch": [], "Dice": []}
for site in range(site_num):
# clear data for each site
site = site + 1
data = {"Config": [], "Epoch": [], "Dice": []}
for config, exp in experiments.items():
job_id = find_job_id(workdir=client_results_root + "/site-1", fl_app_name=config)
print(f"Found run {job_id} for {config}")
spec_site = exp.get("site", None)
if spec_site is not None:
record_path = os.path.join(client_results_root, site_pre + spec_site, job_id, "*", "events.*")
else:
record_path = os.path.join(client_results_root, site_pre + str(site), job_id, "*", "events.*")
eventfile = glob.glob(record_path, recursive=True)
assert len(eventfile) == 1, "No unique event file found!"
eventfile = eventfile[0]
print("adding", eventfile)
add_eventdata(data, config, eventfile, tag=exp["tag"])
ax = plt.subplot(1, site_num, i)
ax.set_title(site)
sns.lineplot(x="Epoch", y="Dice", hue="Config", data=data)
i = i + 1
plt.subplots_adjust(hspace=0.3)
plt.show()
if __name__ == "__main__":
main()
|
2,304 |
raw concepts
|
import pytest
from coding_systems.ctv3.coding_system import CodingSystem
from coding_systems.ctv3.models import (
RawConcept,
RawConceptTermMapping,
RawTerm,
TPPConcept,
TPPRelationship,
)
@pytest.fixture
def coding_system():
yield CodingSystem(database_alias="ctv3_test_20200101")
@pytest.fixture
def tpp_concepts():
for code in [".....", "11111", "22222"]:
TPPConcept.objects.using("ctv3_test_20200101").create(
read_code=code, description=f"Concept {code}"
)
@pytest.fixture
def METHOD_NAME():
for raw_code in ["33333", "44444"]:
rawconcept = RawConcept.objects.using("ctv3_test_20200101").create(
read_code=raw_code,
status="C",
unknown_field_2="A",
another_concept_id=raw_code,
)
rawterm = RawTerm.objects.using("ctv3_test_20200101").create(
term_id=raw_code, status="C", name_1=f"raw_concept_{raw_code}"
)
RawConceptTermMapping.objects.using("ctv3_test_20200101").create(
concept=rawconcept, term=rawterm, term_type="P"
)
def test_lookup_names(coding_system, tpp_concepts):
assert coding_system.lookup_names(["11111", "22222", "99999"]) == {
"11111": "Concept 11111",
"22222": "Concept 22222",
}
def test_code_to_term(coding_system, tpp_concepts):
assert coding_system.code_to_term(["11111", "22222", "99999"]) == {
"11111": "Concept 11111",
"22222": "Concept 22222",
"99999": "Unknown",
}
def test_search_by_term(coding_system, tpp_concepts, METHOD_NAME):
# searching by "concept" matches all concepts, both raw and tpp
assert coding_system.search_by_term("concept") == {
".....",
"11111",
"22222",
"33333",
"44444",
}
# searching by "raw_concept" matches only the raw ones
assert coding_system.search_by_term("raw_concept") == {"33333", "44444"}
# search by an unknown term
assert coding_system.search_by_term("unk") == set()
def test_search_by_code(coding_system, tpp_concepts, METHOD_NAME):
# search by a TPP concept code
assert coding_system.search_by_code("22222") == {"22222"}
# search by a raw code
assert coding_system.search_by_code("44444") == {"44444"}
# search by an unknown code
assert coding_system.search_by_code("55555") == set()
def test_relationships(coding_system):
r"""Hierarchy has this structure:
.
/ \
1 2
/ \ / \
3 4 5
"""
records = [
[".....", "11111", 1],
[".....", "22222", 1],
[".....", "33333", 2],
[".....", "44444", 2],
[".....", "44444", 2], # There are two routes from ..... to 44444
[".....", "55555", 2],
["11111", "33333", 1],
["11111", "44444", 1],
["22222", "44444", 1],
["22222", "55555", 1],
]
for ancestor_code, descendant_code, distance in records:
ancestor, _ = TPPConcept.objects.using("ctv3_test_20200101").get_or_create(
read_code=ancestor_code, defaults={"description": ancestor_code}
)
descendant, _ = TPPConcept.objects.using("ctv3_test_20200101").get_or_create(
read_code=descendant_code, defaults={"description": descendant_code}
)
TPPRelationship.objects.using("ctv3_test_20200101").create(
ancestor=ancestor,
descendant=descendant,
distance=distance,
)
assert set(coding_system.ancestor_relationships(["....."])) == set()
assert set(coding_system.ancestor_relationships(["11111"])) == {
(".....", "11111"),
}
assert set(coding_system.ancestor_relationships(["33333", "55555"])) == {
(".....", "11111"),
(".....", "22222"),
("11111", "33333"),
("22222", "55555"),
}
assert set(coding_system.descendant_relationships(["....."])) == {
(".....", "11111"),
(".....", "22222"),
("11111", "33333"),
("11111", "44444"),
("22222", "44444"),
("22222", "55555"),
}
assert set(coding_system.descendant_relationships(["11111"])) == {
("11111", "33333"),
("11111", "44444"),
}
assert set(coding_system.descendant_relationships(["33333", "55555"])) == set()
def test_matching_codes(coding_system, tpp_concepts, METHOD_NAME):
assert coding_system.matching_codes(["11111", "33333", "99999"]) == {
"11111",
"33333",
}
|
2,305 |
fit
|
"""Baseline classifier."""
import numpy as np
import pandas as pd
from evalml.model_family import ModelFamily
from evalml.pipelines.components.estimators import Estimator
from evalml.problem_types import ProblemTypes
from evalml.utils import get_random_state, infer_feature_types
class BaselineClassifier(Estimator):
"""Classifier that predicts using the specified strategy.
This is useful as a simple baseline classifier to compare with other classifiers.
Args:
strategy (str): Method used to predict. Valid options are "mode", "random" and "random_weighted". Defaults to "mode".
random_seed (int): Seed for the random number generator. Defaults to 0.
"""
name = "Baseline Classifier"
hyperparameter_ranges = {}
"""{}"""
model_family = ModelFamily.BASELINE
"""ModelFamily.BASELINE"""
supported_problem_types = [ProblemTypes.BINARY, ProblemTypes.MULTICLASS]
"""[ProblemTypes.BINARY, ProblemTypes.MULTICLASS]"""
def __init__(self, strategy="mode", random_seed=0, **kwargs):
if strategy not in ["mode", "random", "random_weighted"]:
raise ValueError(
"'strategy' parameter must equal either 'mode', 'random', or 'random_weighted'",
)
parameters = {"strategy": strategy}
parameters.update(kwargs)
self._classes = None
self._percentage_freq = None
self._num_features = None
self._num_unique = None
self._mode = None
super().__init__(
parameters=parameters,
component_obj=None,
random_seed=random_seed,
)
def METHOD_NAME(self, X, y=None):
"""Fits baseline classifier component to data.
Args:
X (pd.DataFrame): The input training data of shape [n_samples, n_features].
y (pd.Series): The target training data of length [n_samples].
Returns:
self
Raises:
ValueError: If y is None.
"""
if y is None:
raise ValueError("Cannot fit Baseline classifier if y is None")
X = infer_feature_types(X)
y = infer_feature_types(y)
vals, counts = np.unique(y, return_counts=True)
self._classes = list(vals)
self._percentage_freq = counts.astype(float) / len(y)
self._num_unique = len(self._classes)
self._num_features = X.shape[1]
if self.parameters["strategy"] == "mode":
self._mode = y.mode()[0]
return self
def predict(self, X):
"""Make predictions using the baseline classification strategy.
Args:
X (pd.DataFrame): Data of shape [n_samples, n_features].
Returns:
pd.Series: Predicted values.
"""
X = infer_feature_types(X)
strategy = self.parameters["strategy"]
if strategy == "mode":
predictions = pd.Series([self._mode] * len(X))
elif strategy == "random":
predictions = get_random_state(self.random_seed).choice(
self._classes,
len(X),
)
else:
predictions = get_random_state(self.random_seed).choice(
self._classes,
len(X),
p=self._percentage_freq,
)
return infer_feature_types(predictions)
def predict_proba(self, X):
"""Make prediction probabilities using the baseline classification strategy.
Args:
X (pd.DataFrame): Data of shape [n_samples, n_features].
Returns:
pd.DataFrame: Predicted probability values.
"""
X = infer_feature_types(X)
strategy = self.parameters["strategy"]
if strategy == "mode":
mode_index = self._classes.index(self._mode)
proba_arr = np.array(
[[1.0 if i == mode_index else 0.0 for i in range(self._num_unique)]]
* len(X),
)
elif strategy == "random":
proba_arr = np.array(
[[1.0 / self._num_unique for i in range(self._num_unique)]] * len(X),
)
else:
proba_arr = np.array(
[[self._percentage_freq[i] for i in range(self._num_unique)]] * len(X),
)
predictions = pd.DataFrame(proba_arr, columns=self._classes)
return infer_feature_types(predictions)
@property
def feature_importance(self):
"""Returns importance associated with each feature. Since baseline classifiers do not use input features to calculate predictions, returns an array of zeroes.
Returns:
pd.Series: An array of zeroes
"""
return pd.Series(np.zeros(self._num_features))
@property
def classes_(self):
"""Returns class labels. Will return None before fitting.
Returns:
list[str] or list[float]: Class names
"""
return self._classes
|
2,306 |
test checkout shipping address update both token
|
from unittest import mock
import graphene
from .....checkout.error_codes import CheckoutErrorCode
from .....checkout.fetch import fetch_checkout_info, fetch_checkout_lines
from .....plugins.manager import get_plugins_manager
from ....tests.utils import get_graphql_content
from ...mutations.utils import update_checkout_shipping_method_if_invalid
MUTATION_CHECKOUT_SHIPPING_ADDRESS_UPDATE = """
mutation checkoutShippingAddressUpdate(
$checkoutId: ID, $token: UUID, $shippingAddress: AddressInput!) {
checkoutShippingAddressUpdate(
checkoutId: $checkoutId,
token: $token,
shippingAddress: $shippingAddress
) {
checkout {
token,
id
},
errors {
field
message
code
}
}
}"""
@mock.patch(
"saleor.graphql.checkout.mutations.checkout_shipping_address_update."
"update_checkout_shipping_method_if_invalid",
wraps=update_checkout_shipping_method_if_invalid,
)
def test_checkout_shipping_address_update_by_id(
mocked_update_shipping_method,
user_api_client,
checkout_with_item,
graphql_address_data,
):
checkout = checkout_with_item
assert checkout.shipping_address is None
checkout_id = graphene.Node.to_global_id("Checkout", checkout.pk)
shipping_address = graphql_address_data
variables = {"checkoutId": checkout_id, "shippingAddress": shipping_address}
response = user_api_client.post_graphql(
MUTATION_CHECKOUT_SHIPPING_ADDRESS_UPDATE, variables
)
content = get_graphql_content(response)
data = content["data"]["checkoutShippingAddressUpdate"]
assert not data["errors"]
checkout.refresh_from_db()
assert checkout.shipping_address is not None
assert checkout.shipping_address.first_name == shipping_address["firstName"]
assert checkout.shipping_address.last_name == shipping_address["lastName"]
assert (
checkout.shipping_address.street_address_1 == shipping_address["streetAddress1"]
)
assert (
checkout.shipping_address.street_address_2 == shipping_address["streetAddress2"]
)
assert checkout.shipping_address.postal_code == shipping_address["postalCode"]
assert checkout.shipping_address.country == shipping_address["country"]
assert checkout.shipping_address.city == shipping_address["city"].upper()
manager = get_plugins_manager()
lines, _ = fetch_checkout_lines(checkout)
checkout_info = fetch_checkout_info(checkout, lines, manager)
mocked_update_shipping_method.assert_called_once_with(checkout_info, lines)
@mock.patch(
"saleor.graphql.checkout.mutations.checkout_shipping_address_update."
"update_checkout_shipping_method_if_invalid",
wraps=update_checkout_shipping_method_if_invalid,
)
def test_checkout_shipping_address_update_by_token(
mocked_update_shipping_method,
user_api_client,
checkout_with_item,
graphql_address_data,
):
# given
checkout = checkout_with_item
assert checkout.shipping_address is None
shipping_address = graphql_address_data
variables = {"token": checkout.token, "shippingAddress": shipping_address}
# when
response = user_api_client.post_graphql(
MUTATION_CHECKOUT_SHIPPING_ADDRESS_UPDATE, variables
)
# then
content = get_graphql_content(response)
data = content["data"]["checkoutShippingAddressUpdate"]
assert not data["errors"]
checkout.refresh_from_db()
assert checkout.shipping_address is not None
assert checkout.shipping_address.first_name == shipping_address["firstName"]
assert checkout.shipping_address.last_name == shipping_address["lastName"]
assert (
checkout.shipping_address.street_address_1 == shipping_address["streetAddress1"]
)
assert (
checkout.shipping_address.street_address_2 == shipping_address["streetAddress2"]
)
assert checkout.shipping_address.postal_code == shipping_address["postalCode"]
assert checkout.shipping_address.country == shipping_address["country"]
assert checkout.shipping_address.city == shipping_address["city"].upper()
manager = get_plugins_manager()
lines, _ = fetch_checkout_lines(checkout)
checkout_info = fetch_checkout_info(checkout, lines, manager)
mocked_update_shipping_method.assert_called_once_with(checkout_info, lines)
def test_checkout_shipping_address_update_neither_token_and_id_given(
user_api_client,
checkout_with_item,
graphql_address_data,
):
checkout = checkout_with_item
assert checkout.shipping_address is None
shipping_address = graphql_address_data
variables = {"shippingAddress": shipping_address}
response = user_api_client.post_graphql(
MUTATION_CHECKOUT_SHIPPING_ADDRESS_UPDATE, variables
)
content = get_graphql_content(response)
data = content["data"]["checkoutShippingAddressUpdate"]
assert len(data["errors"]) == 1
assert not data["checkout"]
assert data["errors"][0]["code"] == CheckoutErrorCode.GRAPHQL_ERROR.name
def METHOD_NAME(
user_api_client,
checkout_with_item,
graphql_address_data,
):
checkout = checkout_with_item
assert checkout.shipping_address is None
checkout_id = graphene.Node.to_global_id("Checkout", checkout.pk)
shipping_address = graphql_address_data
variables = {
"checkoutId": checkout_id,
"token": checkout.token,
"shippingAddress": shipping_address,
}
response = user_api_client.post_graphql(
MUTATION_CHECKOUT_SHIPPING_ADDRESS_UPDATE, variables
)
content = get_graphql_content(response)
data = content["data"]["checkoutShippingAddressUpdate"]
assert len(data["errors"]) == 1
assert not data["checkout"]
assert data["errors"][0]["code"] == CheckoutErrorCode.GRAPHQL_ERROR.name
|
2,307 |
run
|
#
# -*- coding: utf-8 -*-
#
# Copyright (c) 2023 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
from __future__ import division
import time
import tensorflow as tf
import numpy as np
from argparse import ArgumentParser
arg_parser = ArgumentParser(description='Parse args')
arg_parser.add_argument('-g', "--input-graph",
help='Specify the input graph for the transform tool',
dest='input_graph')
arg_parser.add_argument("--output-graph",
help='Specify tune result model save dir',
dest='output_graph')
arg_parser.add_argument('--benchmark', dest='benchmark', action='store_true', help='run benchmark')
arg_parser.add_argument('--mode', dest='mode', default='performance', help='benchmark mode')
arg_parser.add_argument('--export', dest='export', action='store_true', help='use neural_compressor to export.')
arg_parser.add_argument('--tune', dest='tune', action='store_true', help='use neural_compressor to tune.')
arg_parser.add_argument('--dataset_location', dest='dataset_location',
help='location of calibration dataset and evaluate dataset')
arg_parser.add_argument('--batch_size', type=int, default=32, dest='batch_size', help='batch_size of evaluation')
arg_parser.add_argument('--iters', type=int, default=100, dest='iters', help='iterations')
args = arg_parser.parse_args()
def evaluate(model):
"""Custom evaluate function to estimate the accuracy of the model.
Args:
model (tf.Graph_def): The input model graph
Returns:
accuracy (float): evaluation result, the larger is better.
"""
infer = model.signatures["serving_default"]
output_dict_keys = infer.structured_outputs.keys()
output_name = list(output_dict_keys)[0]
from neural_compressor import METRICS
metrics = METRICS('tensorflow')
metric = metrics['topk']()
def eval_func(dataloader, metric):
warmup = 5
iteration = None
latency_list = []
if args.benchmark and args.mode == 'performance':
iteration = args.iters
for idx, (inputs, labels) in enumerate(dataloader):
inputs = np.array(inputs)
input_tensor = tf.constant(inputs)
start = time.time()
predictions = infer(input_tensor)[output_name]
end = time.time()
predictions = predictions.numpy()
metric.update(predictions, labels)
latency_list.append(end - start)
if iteration and idx >= iteration:
break
latency = np.array(latency_list[warmup:]).mean() / eval_dataloader.batch_size
return latency
from neural_compressor.utils.create_obj_from_config import create_dataloader
dataloader_args = {
'batch_size': args.batch_size,
'dataset': {"ImageRecord": {'root': args.dataset_location}},
'transform': {'BilinearImagenet': {'height': 224, 'width': 224}},
'filter': None
}
eval_dataloader = create_dataloader('tensorflow', dataloader_args)
latency = eval_func(eval_dataloader, metric)
if args.benchmark and args.mode == 'performance':
print("Batch size = {}".format(eval_dataloader.batch_size))
print("Latency: {:.3f} ms".format(latency * 1000))
print("Throughput: {:.3f} images/sec".format(1. / latency))
acc = metric.result()
return acc
class eval_object_detection_optimized_graph(object):
def METHOD_NAME(self):
from neural_compressor import set_random_seed
set_random_seed(9527)
if args.tune:
from neural_compressor import quantization
from neural_compressor.config import PostTrainingQuantConfig
from neural_compressor.utils.create_obj_from_config import create_dataloader
calib_dataloader_args = {
'batch_size': 10,
'dataset': {"ImageRecord": {'root':args.dataset_location}},
'transform': {'BilinearImagenet':
{'height': 224, 'width': 224}},
'filter': None
}
calib_dataloader = create_dataloader('tensorflow', calib_dataloader_args)
conf = PostTrainingQuantConfig(calibration_sampling_size=[20, 50])
q_model = quantization.fit(model=args.input_graph, conf=conf,
calib_dataloader=calib_dataloader, eval_func=evaluate)
q_model.save(args.output_graph)
if args.benchmark:
from neural_compressor.benchmark import fit
from neural_compressor.config import BenchmarkConfig
if args.mode == 'performance':
conf = BenchmarkConfig(cores_per_instance=4, num_of_instance=1)
from neural_compressor.utils.create_obj_from_config import create_dataloader
dataloader_args = {
'batch_size': args.batch_size,
'dataset': {"ImageRecord": {'root': args.dataset_location}},
'transform': {'BilinearImagenet': {'height': 224, 'width': 224}},
'filter': None
}
eval_dataloader = create_dataloader('tensorflow', dataloader_args)
fit(model=args.input_graph, conf=conf, b_dataloader=eval_dataloader)
else:
from neural_compressor.model import Model
model = Model(args.input_graph).model
accuracy = evaluate(model)
print('Batch size = %d' % args.batch_size)
print("Accuracy: %.5f" % accuracy)
if __name__ == "__main__":
evaluate_opt_graph = eval_object_detection_optimized_graph()
evaluate_opt_graph.METHOD_NAME()
|
2,308 |
test 2022
|
# python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Authors: dr-prodigy <[email protected]> (c) 2017-2023
# ryanss <[email protected]> (c) 2014-2017
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
from holidays.countries.norway import Norway, NO, NOR
from tests.common import SundayHolidays
class TestNorway(SundayHolidays):
@classmethod
def setUpClass(cls):
super().setUpClass(Norway)
def test_country_aliases(self):
self.assertCountryAliases(Norway, NO, NOR)
def test_new_years(self):
self.assertHoliday("1900-01-01", "2017-01-01", "2023-01-01")
def test_easter(self):
self.assertHoliday(
"2000-04-20",
"2000-04-21",
"2000-04-23",
"2000-04-24",
"2010-04-01",
"2010-04-02",
"2010-04-04",
"2010-04-05",
"2021-04-01",
"2021-04-02",
"2021-04-04",
"2021-04-05",
"2024-03-28",
"2024-03-29",
"2024-03-31",
"2024-04-01",
)
def test_workers_day(self):
self.assertHoliday("1947-05-01", "2017-05-01", "2023-05-01")
self.assertNoHoliday("1946-05-01")
self.assertNoHolidayName("Arbeidernes dag", Norway(years=1946))
def test_constitution_day(self):
self.assertHoliday("1947-05-17", "2017-05-17", "2023-05-17")
self.assertNoHoliday("1946-05-17")
self.assertNoHolidayName("Grunnlovsdag", Norway(years=1946))
def test_pentecost(self):
self.assertHoliday(
"2000-06-11",
"2000-06-12",
"2010-05-23",
"2010-05-24",
"2023-05-28",
"2023-05-29",
)
def test_christmas(self):
self.assertHoliday(
"1901-12-25",
"1901-12-26",
"2016-12-25",
"2016-12-26",
)
def test_sundays(self):
self.assertSundays(Norway) # Sundays are considered holidays in Norway.
def test_not_holiday(self):
# TODO: Add more dates that are often confused for being a holiday.
# Sundays in Norway are considered holidays,
# so make sure none of these are actually Sundays.
self.assertNoHoliday(
"2017-02-06",
"2017-02-07",
"2017-02-08",
"2017-02-09",
"2017-02-10",
"2001-12-24",
"2001-05-16",
"2001-05-18",
"1999-12-31",
"2016-12-31",
"2016-12-27",
"2016-12-28",
)
def METHOD_NAME(self):
self.assertHolidays(
("2022-01-01", "Første nyttårsdag"),
("2022-04-14", "Skjærtorsdag"),
("2022-04-15", "Langfredag"),
("2022-04-17", "Første påskedag"),
("2022-04-18", "Andre påskedag"),
("2022-05-01", "Arbeidernes dag"),
("2022-05-17", "Grunnlovsdag"),
("2022-05-26", "Kristi himmelfartsdag"),
("2022-06-05", "Første pinsedag"),
("2022-06-06", "Andre pinsedag"),
("2022-12-25", "Første juledag"),
("2022-12-26", "Andre juledag"),
)
def test_l10n_default(self):
self.assertLocalizedHolidays(
("2022-01-01", "Første nyttårsdag"),
("2022-04-14", "Skjærtorsdag"),
("2022-04-15", "Langfredag"),
("2022-04-17", "Første påskedag"),
("2022-04-18", "Andre påskedag"),
("2022-05-01", "Arbeidernes dag"),
("2022-05-17", "Grunnlovsdag"),
("2022-05-26", "Kristi himmelfartsdag"),
("2022-06-05", "Første pinsedag"),
("2022-06-06", "Andre pinsedag"),
("2022-12-25", "Første juledag"),
("2022-12-26", "Andre juledag"),
)
def test_l10n_en_us(self):
self.assertLocalizedHolidays(
"en_US",
("2022-01-01", "New Year's Day"),
("2022-04-14", "Maundy Thursday"),
("2022-04-15", "Good Friday"),
("2022-04-17", "Easter Sunday"),
("2022-04-18", "Easter Monday"),
("2022-05-01", "Labor Day"),
("2022-05-17", "Constitution Day"),
("2022-05-26", "Ascension Day"),
("2022-06-05", "Whit Sunday"),
("2022-06-06", "Whit Monday"),
("2022-12-25", "Christmas Day"),
("2022-12-26", "Second Day of Christmas"),
)
def test_l10n_uk(self):
self.assertLocalizedHolidays(
"uk",
("2022-01-01", "Новий рік"),
("2022-04-14", "Великий четвер"),
("2022-04-15", "Страсна пʼятниця"),
("2022-04-17", "Великдень"),
("2022-04-18", "Великодній понеділок"),
("2022-05-01", "День праці"),
("2022-05-17", "День Конституції"),
("2022-05-26", "Вознесіння Господнє"),
("2022-06-05", "Трійця"),
("2022-06-06", "День Святого Духа"),
("2022-12-25", "Різдво Христове"),
("2022-12-26", "Другий день Різдва"),
)
|
2,309 |
build local cluster
|
# Copyright (c) 2023 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Neural Solution backend utils."""
import json
import os
from urllib.parse import urlparse
from neural_solution.utils import logger
def serialize(request: dict) -> bytes:
"""Serialize a dict object to bytes for inter-process communication."""
return json.dumps(request).encode()
def deserialize(request: bytes) -> dict:
"""Deserialize the received bytes to a dict object."""
return json.loads(request)
def dump_elapsed_time(customized_msg=""):
"""Get the elapsed time for decorated functions.
Args:
customized_msg (str, optional): Message prefix for the elapsed-time log. Defaults to an empty string.
"""
import time
def f(func):
def fi(*args, **kwargs):
start = time.time()
res = func(*args, **kwargs)
end = time.time()
logger.info(
"%s elapsed time: %s ms"
% (customized_msg if customized_msg else func.__qualname__, round((end - start) * 1000, 2))
)
return res
return fi
return f
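# Illustrative usage sketch (not part of the original module):
#
#     @dump_elapsed_time("Deserialize request")
#     def handle(raw):
#         return deserialize(raw)
#
# Each call to handle(...) then logs e.g. "Deserialize request elapsed time: 0.05 ms".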
def get_task_log_path(log_path, task_id):
"""Get the path of task log according id.
Args:
log_path (str): the log path of task
task_id (str): the task id
Returns:
str: the path of task log file
"""
if not os.path.exists(log_path):
os.makedirs(log_path)
log_file_path = "{}/task_{}.txt".format(log_path, task_id)
return log_file_path
def get_db_path(workspace="./"):
"""Get the database path.
Args:
workspace (str, optional): the workspace for Neural Solution. Defaults to "./".
Returns:
str: the path of database
"""
return os.path.join(workspace, "db", "task.db")
def get_task_workspace(workspace="./"):
"""Get the workspace of task.
Args:
workspace (str, optional): the workspace for Neural Solution. Defaults to "./".
Returns:
str: the workspace of task
"""
return os.path.join(workspace, "task_workspace")
def get_task_log_workspace(workspace="./"):
"""Get the log workspace for task.
Args:
workspace (str, optional): the workspace for Neural Solution. Defaults to "./".
Returns:
str: the log workspace for task
"""
return os.path.join(workspace, "task_log")
def get_serve_log_workspace(workspace="./"):
"""Get log workspace for service.
Args:
workspace (str, optional): the workspace for Neural Solution. Defaults to "./".
Returns:
str: log workspace for service
"""
return os.path.join(workspace, "serve_log")
def METHOD_NAME(db_path):
"""Build a local cluster.
Args:
db_path (str): database path
Returns:
(Cluster, int): cluster and num threads per process
"""
from neural_solution.backend.cluster import Cluster, Node
hostname = "localhost"
node1 = Node(name=hostname, num_sockets=2, num_cores_per_socket=5)
node2 = Node(name=hostname, num_sockets=2, num_cores_per_socket=5)
node3 = Node(name=hostname, num_sockets=2, num_cores_per_socket=5)
node_lst = [node1, node2, node3]
cluster = Cluster(node_lst=node_lst, db_path=db_path)
return cluster, 5
def build_cluster(file_path, db_path):
"""Build cluster according to the host file.
Args:
file_path (str): the path of the host file.
db_path (str): database path.
Returns:
Cluster: return cluster object.
"""
from neural_solution.backend.cluster import Cluster, Node
# If no file is specified, build a local cluster
if file_path == "None" or file_path is None:
return METHOD_NAME(db_path)
if not os.path.exists(file_path):
raise Exception(f"Please check the path of host file: {file_path}.")
node_lst = []
num_threads_per_process = 5
with open(file_path, "r") as f:
for line in f:
hostname, num_sockets, num_cores_per_socket = line.strip().split(" ")
num_sockets, num_cores_per_socket = int(num_sockets), int(num_cores_per_socket)
node = Node(name=hostname, num_sockets=num_sockets, num_cores_per_socket=num_cores_per_socket)
node_lst.append(node)
num_threads_per_process = num_cores_per_socket
cluster = Cluster(node_lst=node_lst, db_path=db_path)
return cluster, num_threads_per_process
def get_current_time():
"""Get current time.
Returns:
str: the current time in hours, minutes, and seconds.
"""
from datetime import datetime
return datetime.now().strftime("%H:%M:%S")
def synchronized(func):
"""Locking for synchronization.
Args:
func (function): decorative function
"""
def wrapper(self, *args, **kwargs):
with self.lock:
return func(self, *args, **kwargs)
return wrapper
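# Illustrative usage sketch (not part of the original module); the wrapper
# acquires `self.lock`, so the decorated method's class must define one:
#
#     import threading
#
#     class TaskRegistry:
#         def __init__(self):
#             self.lock = threading.Lock()
#
#         @synchronized
#         def add(self, task):
#             ...  # runs with self.lock held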
def build_workspace(path, task_id=""):
"""Build workspace of running tasks.
Args:
path: master work directory for all tasks.
task_id: the id of task
"""
task_path = "{}/{}".format(path, task_id)
if not os.path.exists(task_path):
os.makedirs(task_path)
return os.path.abspath(task_path)
def is_remote_url(url_or_filename):
"""Check if input is a URL.
Args:
url_or_filename (str): url_or_filename
Returns:
bool: True or False
"""
parsed = urlparse(url_or_filename)
return parsed.scheme in ("http", "https")
def create_dir(path):
"""Create the (nested) path if not exist."""
if not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
def get_q_model_path(log_path):
"""Get the quantized model path from task log.
Args:
log_path (str): log path for task
Returns:
str: quantized model path
"""
import re
for line in reversed(open(log_path).readlines()):
match = re.search(r"(Save quantized model to|Save config file and weights of quantized model to) (.+?)\.", line)
if match:
q_model_path = match.group(2)
return q_model_path
return "quantized model path not found"
|
2,310 |
roles
|
# SPDX-License-Identifier: MIT
from __future__ import annotations
import asyncio
from typing import TYPE_CHECKING, Callable, Generic, List, Optional, Tuple, TypeVar
from ...components import MentionableSelectMenu
from ...enums import ComponentType
from ...interactions import ClientT
from ...member import Member
from ...role import Role
from ...user import User
from ...utils import MISSING
from ..item import ItemCallbackType
from ..view import View
from .base import SelectBase, SelectValuesBase
if TYPE_CHECKING:
from typing_extensions import Self
from ...guild import Guild
from ...state import ConnectionState
from ...types.components import MentionableSelectMenu as MentionableSelectMenuPayload
from ...types.interactions import ComponentInteractionData
__all__ = ("MentionableSelect", "mentionable_select", "MentionableSelectValues")
V = TypeVar("V", bound="View", covariant=True)
class MentionableSelectValues(SelectValuesBase):
"""Represents the values of a :class:`.ui.MentionableSelect`."""
@property
def members(self) -> List[Member]:
"""List[:class:`.Member`]: A list of members that were selected."""
return [v for v in self.data if isinstance(v, Member)]
@property
def users(self) -> List[User]:
"""List[:class:`nextcord.User`]: A list of users that were selected."""
return [v for v in self.data if isinstance(v, User)]
@property
def METHOD_NAME(self) -> List[Role]:
"""List[:class:`.Role`]: A list of roles that were selected."""
return [v for v in self.data if isinstance(v, Role)]
class MentionableSelect(SelectBase, Generic[V]):
"""Represents a UI mentionable select menu.
This is usually represented as a drop down menu.
In order to get the selected items that the user has chosen,
use :attr:`MentionableSelect.values`.
.. versionadded:: 2.3
Parameters
------------
custom_id: :class:`str`
The ID of the select menu that gets received during an interaction.
If not given then one is generated for you.
placeholder: Optional[:class:`str`]
The placeholder text that is shown if nothing is selected, if any.
min_values: :class:`int`
The minimum number of items that must be chosen for this select menu.
Defaults to 1 and must be between 1 and 25.
max_values: :class:`int`
The maximum number of items that must be chosen for this select menu.
Defaults to 1 and must be between 1 and 25.
disabled: :class:`bool`
Whether the select is disabled or not. Defaults to ``False``.
row: Optional[:class:`int`]
The relative row this select menu belongs to. A Discord component can only have 5
rows. By default, items are arranged automatically into those 5 rows. If you'd
like to control the relative positioning of the row then passing an index is advised.
For example, row=1 will show up before row=2. Defaults to ``None``, which is automatic
ordering. The row number must be between 0 and 4 (i.e. zero indexed).
"""
__item_repr_attributes__: Tuple[str, ...] = (
"placeholder",
"min_values",
"max_values",
"disabled",
)
def __init__(
self,
*,
custom_id: str = MISSING,
placeholder: Optional[str] = None,
min_values: int = 1,
max_values: int = 1,
disabled: bool = False,
row: Optional[int] = None,
) -> None:
super().__init__(
custom_id=custom_id,
min_values=min_values,
max_values=max_values,
disabled=disabled,
row=row,
placeholder=placeholder,
)
self._selected_values: MentionableSelectValues = MentionableSelectValues()
self._underlying = MentionableSelectMenu._raw_construct(
custom_id=self.custom_id,
type=ComponentType.mentionable_select,
placeholder=self.placeholder,
min_values=self.min_values,
max_values=self.max_values,
disabled=self.disabled,
)
@property
def values(self) -> MentionableSelectValues:
""":class:`.ui.MentionableSelectValues`: A list of Union[:class:`.Member`, :class:`nextcord.User`, :class:`.Role`] that have been selected by the user."""
return self._selected_values
def to_component_dict(self) -> MentionableSelectMenuPayload:
return self._underlying.to_dict()
@classmethod
def from_component(cls, component: MentionableSelectMenu) -> Self:
return cls(
custom_id=component.custom_id,
placeholder=component.placeholder,
min_values=component.min_values,
max_values=component.max_values,
disabled=component.disabled,
row=None,
)
def refresh_state(
self, data: ComponentInteractionData, state: ConnectionState, guild: Optional[Guild]
) -> None:
self._selected_values = MentionableSelectValues.construct(
data.get("values", []),
data.get("resolved", {}),
state,
guild,
)
def mentionable_select(
*,
placeholder: Optional[str] = None,
custom_id: str = MISSING,
min_values: int = 1,
max_values: int = 1,
disabled: bool = False,
row: Optional[int] = None,
) -> Callable[
[ItemCallbackType[MentionableSelect[V], ClientT]],
ItemCallbackType[MentionableSelect[V], ClientT],
]:
"""A decorator that attaches a mentionable select menu to a component.
The function being decorated should have three parameters, ``self`` representing
the :class:`.ui.View`, the :class:`.ui.MentionableSelect` being pressed and
the :class:`.Interaction` you receive.
In order to get the selected items that the user has chosen within the callback
use :attr:`MentionableSelect.values`.
.. versionadded:: 2.3
Parameters
------------
placeholder: Optional[:class:`str`]
The placeholder text that is shown if nothing is selected, if any.
custom_id: :class:`str`
The ID of the select menu that gets received during an interaction.
It is recommended not to set this parameter to prevent conflicts.
row: Optional[:class:`int`]
The relative row this select menu belongs to. A Discord component can only have 5
rows. By default, items are arranged automatically into those 5 rows. If you'd
like to control the relative positioning of the row then passing an index is advised.
For example, row=1 will show up before row=2. Defaults to ``None``, which is automatic
ordering. The row number must be between 0 and 4 (i.e. zero indexed).
min_values: :class:`int`
The minimum number of items that must be chosen for this select menu.
Defaults to 1 and must be between 1 and 25.
max_values: :class:`int`
The maximum number of items that must be chosen for this select menu.
Defaults to 1 and must be between 1 and 25.
disabled: :class:`bool`
Whether the select is disabled or not. Defaults to ``False``.
"""
def decorator(func: ItemCallbackType) -> ItemCallbackType:
if not asyncio.iscoroutinefunction(func):
raise TypeError("Select function must be a coroutine function")
func.__discord_ui_model_type__ = MentionableSelect
func.__discord_ui_model_kwargs__ = {
"placeholder": placeholder,
"custom_id": custom_id,
"row": row,
"min_values": min_values,
"max_values": max_values,
"disabled": disabled,
}
return func
return decorator
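# Illustrative usage sketch (not part of this module); follows the callback
# signature documented above (self, select, interaction):
#
#     class MyView(View):
#         @mentionable_select(placeholder="Pick users or roles", max_values=5)
#         async def picker(self, select: MentionableSelect, interaction) -> None:
#             picked = select.values.members + select.values.users
#             await interaction.response.send_message(f"Picked {len(picked)} user(s)/member(s)")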
|
2,311 |
on prerelease state change
|
# noinspection PyPackageRequirements
import wx
from gui.preferenceView import PreferenceView
from gui.bitmap_loader import BitmapLoader
from service.settings import UpdateSettings
_t = wx.GetTranslation
class PFUpdatePref(PreferenceView):
def populatePanel(self, panel):
self.title = _t("Updates")
self.desc = _t("Pyfa can automatically check and notify you of new releases. "
"This feature is toggled in the Network settings. "
"Here, you may allow pre-release notifications and view "
"suppressed release notifications, if any.")
self.UpdateSettings = UpdateSettings.getInstance()
self.dirtySettings = False
dlgWidth = panel.GetParent().GetParent().ClientSize.width
mainSizer = wx.BoxSizer(wx.VERTICAL)
self.stTitle = wx.StaticText(panel, wx.ID_ANY, self.title, wx.DefaultPosition, wx.DefaultSize, 0)
self.stTitle.Wrap(-1)
self.stTitle.SetFont(wx.Font(12, 70, 90, 90, False, wx.EmptyString))
mainSizer.Add(self.stTitle, 0, wx.EXPAND | wx.ALL, 5)
self.m_staticline1 = wx.StaticLine(panel, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.LI_HORIZONTAL)
mainSizer.Add(self.m_staticline1, 0, wx.EXPAND | wx.TOP | wx.BOTTOM, 5)
self.stDesc = wx.StaticText(panel, wx.ID_ANY, self.desc, wx.DefaultPosition, wx.DefaultSize, 0)
self.stDesc.Wrap(dlgWidth - 50)
mainSizer.Add(self.stDesc, 0, wx.ALL, 5)
self.suppressPrerelease = wx.CheckBox(panel, wx.ID_ANY, _t("Allow pre-release notifications"), wx.DefaultPosition,
wx.DefaultSize, 0)
self.suppressPrerelease.Bind(wx.EVT_CHECKBOX, self.METHOD_NAME)
self.suppressPrerelease.SetValue(not self.UpdateSettings.get('prerelease'))
mainSizer.Add(self.suppressPrerelease, 0, wx.ALL | wx.EXPAND, 5)
if self.UpdateSettings.get('version'):
self.versionSizer = wx.BoxSizer(wx.VERTICAL)
self.versionTitle = wx.StaticText(panel, wx.ID_ANY, _t("Suppressing {0} Notifications").format(
self.UpdateSettings.get('version')), wx.DefaultPosition, wx.DefaultSize, 0)
self.versionTitle.Wrap(-1)
self.versionTitle.SetFont(wx.Font(12, 70, 90, 90, False, wx.EmptyString))
self.versionInfo = _t("There is a release available which you have chosen to suppress. "
"You can choose to reset notification suppression for this release, "
"or download the new release from GitHub.")
self.versionSizer.AddStretchSpacer()
self.versionSizer.Add(wx.StaticLine(panel, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.LI_HORIZONTAL),
0, wx.EXPAND, 5)
self.versionSizer.AddStretchSpacer()
self.versionSizer.Add(self.versionTitle, 0, wx.EXPAND, 5)
self.versionDesc = wx.StaticText(panel, wx.ID_ANY, self.versionInfo, wx.DefaultPosition, wx.DefaultSize, 0)
self.versionDesc.Wrap(dlgWidth - 50)
self.versionSizer.Add(self.versionDesc, 0, wx.ALL, 5)
actionSizer = wx.BoxSizer(wx.HORIZONTAL)
resetSizer = wx.BoxSizer(wx.VERTICAL)
self.downloadButton = wx.Button(panel, wx.ID_ANY, _t("Download"), wx.DefaultPosition, wx.DefaultSize, 0)
self.downloadButton.Bind(wx.EVT_BUTTON, self.OnDownload)
resetSizer.Add(self.downloadButton, 0, wx.ALL, 5)
actionSizer.Add(resetSizer, 1, wx.EXPAND, 5)
self.resetButton = wx.Button(panel, wx.ID_ANY, _t("Reset Suppression"), wx.DefaultPosition, wx.DefaultSize, 0)
self.resetButton.Bind(wx.EVT_BUTTON, self.ResetSuppression)
actionSizer.Add(self.resetButton, 0, wx.ALL, 5)
self.versionSizer.Add(actionSizer, 0, wx.EXPAND, 5)
mainSizer.Add(self.versionSizer, 0, wx.EXPAND, 5)
panel.SetSizer(mainSizer)
panel.Layout()
def METHOD_NAME(self, event):
self.UpdateSettings.set('prerelease', not self.suppressPrerelease.IsChecked())
def ResetSuppression(self, event):
self.UpdateSettings.set('version', None)
# Todo: Find a way to hide the entire panel in one go
self.versionSizer.Hide(True)
self.versionTitle.Hide()
self.versionDesc.Hide()
self.downloadButton.Hide()
self.resetButton.Hide()
def OnDownload(self, event):
wx.LaunchDefaultBrowser('https://github.com/pyfa-org/Pyfa/releases/tag/' + self.UpdateSettings.get('version'))
def getImage(self):
return BitmapLoader.getBitmap("prefs_update", "gui")
PFUpdatePref.register()
|
2,312 |
test hash
|
# Copyright (c) 2022, 2022, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# The Universal Permissive License (UPL), Version 1.0
#
# Subject to the condition set forth below, permission is hereby granted to any
# person obtaining a copy of this software, associated documentation and/or
# data (collectively the "Software"), free of charge and under any and all
# copyright rights in the Software, and any and all patent rights owned or
# freely licensable by each licensor hereunder covering either (i) the
# unmodified Software as contributed to or provided by such licensor, or (ii)
# the Larger Works (as defined below), to deal in both
#
# (a) the Software, and
#
# (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if
# one is included with the Software each a "Larger Work" to which the Software
# is contributed by such licensors),
#
# without restriction, including without limitation the rights to copy, create
# derivative works of, display, perform, and distribute the Software and make,
# use, sell, offer for sale, import, export, have made, and have sold the
# Software and the Larger Work(s), and to sublicense the foregoing rights on
# either these or other terms.
#
# This license is subject to the following condition:
#
# The above copyright notice and either this complete permission notice or at a
# minimum a reference to the UPL must be included in all copies or substantial
# portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import unittest
class ss(str):
pass
ts1 = 'abc'
ts2 = 'x☏yz'
ts3 = '\U00010400' # surrogates: D801 DC00
ss1 = ss('abc')
ss2 = ss('x☏yz')
ss3 = ss('\U00010400')
class TruffleStringTests(unittest.TestCase):
def METHOD_NAME(self):
self.assertEqual(hash(ts3), hash(ss3))
self.assertEqual(hash(ts1), ts1.__hash__())
self.assertEqual(hash(ss1), ss1.__hash__())
def test_len(self):
for exp_len, slist in {3: (ts1, ss1), 4: (ts2, ss2), 1: (ts3, ss3)}.items():
for s in slist:
self.assertEqual(exp_len, len(s))
self.assertEqual(exp_len, s.__len__())
def test_getnewargs(self):
self.assertEqual(('abc',), ts1.__getnewargs__())
self.assertEqual(('abc',), ss1.__getnewargs__())
self.assertIsNot(ts1, ts1.__getnewargs__()[0])
self.assertIsNot(ss1, ss1.__getnewargs__()[0])
def test_contains(self):
self.assertIn('b', ts1)
self.assertIn(ss('b'), ts1)
self.assertIn('b', ss1)
self.assertIn(ss('b'), ss1)
self.assertFalse('\udc00' in ts3)
self.assertFalse('\udc00' in ss3)
def test_dict(self):
key = "\x00"
self.assertEqual(1, {ss(key): 1}[key])
def test_lst(self):
self.assertEqual([ts3], list(ts3))
def test_compare(self):
s1 = '\ufb00'
s2 = ts3
self.assertTrue(s1 < s2)
self.assertTrue((s1, ) < (s2, ))
self.assertTrue(s1.__lt__(s2))
def test_collections(self):
self.assertEqual(3, len(dict.fromkeys("a\U00010400b")))
self.assertEqual(3, len(list("a\U00010400b")))
self.assertEqual(3, len(tuple("a\U00010400b")))
self.assertEqual(ts3, list("a\U00010400b")[1])
self.assertEqual(ts3, tuple("a\U00010400b")[1])
def test_str_iter(self):
self.assertEqual(ts3, next(iter(ts3)))
self.assertEqual(ts3, next(reversed(ts3)))
def test_surrogates(self):
self.assertFalse('\ud801' + '\udc00' == '\U00010400')
if __name__ == '__main__':
unittest.main()
|
2,313 |
check formhandler
|
import gramex.cache
from . import TestGramex
from nose.tools import eq_, ok_
class TestOpenAPIHandler(TestGramex):
expected = gramex.cache.open('openapiresponse.yaml', rel=True)
def has_param(self, params, **kwargs):
items = kwargs.items()
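        # dict .items() views support subset comparison: this is True when some
        # param dict contains every key/value pair passed via kwargs.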
return any(items <= param.items() for param in params)
def test_openapi(self):
# OpenAPI spec is a JSON response
spec = self.check('/openapi/spec').json()
# OpenAPI spec version matches
eq_(spec['openapi'], '3.0.2')
# spec.info comes from gramex.yaml OpenAPIHandler kwargs
# .info is from gramex.yaml > openapi/spec
eq_(
spec['info'],
{
'title': 'OpenAPI-title',
'version': 'OpenAPI-version',
'description': 'OpenAPI-description',
},
)
# spec.servers comes from gramex.yaml OpenAPIHandler kwargs
eq_(spec['servers'], [{'url': '..', 'description': 'Server-description'}])
self.check_functionhandler(spec)
self.METHOD_NAME(spec)
def check_functionhandler(self, spec):
# /openapi/func path exists
ok_('/openapi/func' in spec['paths'])
path = spec['paths']['/openapi/func']
for request in ('post', 'put'):
ok_(request in path)
conf = path[request]
# Summary is based on function name
eq_(conf['summary'], 'Openapi Func: FunctionHandler')
# Description is as per utils.test_function
eq_(conf['description'], '\nThis is a **Markdown** docstring.\n')
# Argument types, defaults, required as per utils.test_function
params = {param['name']: param for param in conf['parameters']}
self.assertDictContainsSubset(
{
'in': 'query',
'description': '',
'required': True,
'schema': {'type': 'array', 'items': {'type': 'integer'}},
},
params['li1'],
)
self.assertDictContainsSubset(
{
'in': 'query',
'description': '',
'required': True,
'schema': {'type': 'array', 'items': {'type': 'number'}},
},
params['lf1'],
)
self.assertDictContainsSubset(
{
'in': 'query',
'description': 'List of ints',
'required': True,
'schema': {'type': 'array', 'items': {'type': 'integer'}},
},
params['li2'],
)
self.assertDictContainsSubset(
{
'in': 'query',
'description': 'List of floats',
'required': True,
'schema': {'type': 'array', 'items': {'type': 'number'}},
},
params['lf2'],
)
self.assertDictContainsSubset(
{
'in': 'query',
'description': '',
'schema': {'type': 'array', 'items': {'type': 'integer'}, 'default': [0]},
},
params['li3'],
)
self.assertDictContainsSubset(
{
'in': 'query',
'description': '',
'schema': {'type': 'array', 'items': {'type': 'number'}, 'default': [0.0]},
},
params['lf3'],
)
self.assertDictContainsSubset(
{
'in': 'query',
'description': '',
'schema': {'type': 'string', 'default': []},
},
params['l1'],
)
self.assertDictContainsSubset(
{
'in': 'query',
'description': 'First value',
'schema': {'type': 'integer', 'default': 0},
},
params['i1'],
)
self.assertDictContainsSubset(
{
'in': 'query',
'description': 'Second value',
'schema': {'type': 'integer', 'default': 0},
},
params['i2'],
)
self.assertDictContainsSubset(
{
'in': 'query',
'description': '',
'schema': {'type': 'string', 'default': 'Total'},
},
params['s1'],
)
self.assertDictContainsSubset(
{
'in': 'query',
'description': '',
'schema': {'type': 'integer', 'default': 0},
},
params['n1'],
)
self.assertDictContainsSubset(
{
'in': 'query',
'description': '',
'schema': {'type': 'string', 'default': 0},
},
params['n2'],
)
self.assertDictContainsSubset(
{
'in': 'header',
'description': '',
'schema': {'type': 'string', 'default': ''},
},
params['h'],
)
self.assertDictContainsSubset(
{
'in': 'query',
'description': '',
'schema': {'type': 'integer', 'default': 200},
},
params['code'],
)
resp = conf['responses']
# 400 response has description from gramex.yaml
self.assertDictContainsSubset(
{
'description': 'Bad request',
'content': {'text/html': {'example': 'Bad request'}},
},
resp['400'],
)
# Rest should have default error responses
self.assertDictContainsSubset(
{'description': 'Successful Response', 'content': {'application/json': {}}},
resp['200'],
)
self.assertDictContainsSubset(
{
'description': 'Not authorized',
'content': {'text/html': {'example': 'Not authorized'}},
},
resp['401'],
)
            # 429 is added only to PUT, not POST
if request == 'put':
self.assertDictContainsSubset({'description': 'Rate limited'}, resp['429'])
else:
self.assertNotIn('429', resp)
def METHOD_NAME(self, spec):
ok_('/openapi/form' in spec['paths'])
eq_(spec['paths']['/openapi/form'], self.expected['/openapi/form'])
|
2,314 |
connection made
|
# twisted is optional and self-contained in this module.
# We don't want to force it as a dependency but that means we also can't test it with type-checkers given the current setup.
from _typeshed import Incomplete
from typing import Generic, NamedTuple, TypeVar
import pika.connection
from pika.adapters.utils import nbio_interface
from twisted.internet.base import DelayedCall # type: ignore[import] # pyright: ignore[reportMissingImports]
from twisted.internet.defer import Deferred, DeferredQueue # type: ignore[import] # pyright: ignore[reportMissingImports]
from twisted.internet.interfaces import ITransport # type: ignore[import] # pyright: ignore[reportMissingImports]
from twisted.internet.protocol import Protocol # type: ignore[import] # pyright: ignore[reportMissingImports]
from twisted.python.failure import Failure # type: ignore[import] # pyright: ignore[reportMissingImports]
_T = TypeVar("_T")
LOGGER: Incomplete
class ClosableDeferredQueue(DeferredQueue[_T], Generic[_T]): # pyright: ignore[reportUntypedBaseClass]
closed: Failure | BaseException | None
def __init__(self, size: Incomplete | None = ..., backlog: Incomplete | None = ...) -> None: ...
    # Returns a Deferred with an error if it fails, None on success
def put(self, obj: _T) -> Deferred[Failure | BaseException] | None: ... # type: ignore[override]
def get(self) -> Deferred[Failure | BaseException | _T]: ... # type: ignore[override]
pending: Incomplete
def close(self, reason: BaseException | None) -> None: ...
class ReceivedMessage(NamedTuple):
channel: Incomplete
method: Incomplete
properties: Incomplete
body: Incomplete
class TwistedChannel:
on_closed: Deferred[Incomplete | Failure | BaseException | None]
def __init__(self, channel) -> None: ...
@property
def channel_number(self): ...
@property
def connection(self): ...
@property
def is_closed(self): ...
@property
def is_closing(self): ...
@property
def is_open(self): ...
@property
def flow_active(self): ...
@property
def consumer_tags(self): ...
def callback_deferred(self, deferred, replies) -> None: ...
def add_on_return_callback(self, callback): ...
def basic_ack(self, delivery_tag: int = ..., multiple: bool = ...): ...
def basic_cancel(self, consumer_tag: str = ...) -> Deferred[Incomplete | Failure | BaseException | None]: ...
def basic_consume(
self,
queue,
auto_ack: bool = ...,
exclusive: bool = ...,
consumer_tag: Incomplete | None = ...,
arguments: Incomplete | None = ...,
) -> Deferred[Incomplete | Failure | BaseException]: ...
def basic_get(self, queue, auto_ack: bool = ...) -> Deferred[Incomplete | Failure | BaseException | None]: ...
def basic_nack(self, delivery_tag: Incomplete | None = ..., multiple: bool = ..., requeue: bool = ...): ...
def basic_publish(
self, exchange, routing_key, body, properties: Incomplete | None = ..., mandatory: bool = ...
) -> Deferred[Incomplete | Failure | BaseException]: ...
def basic_qos(
self, prefetch_size: int = ..., prefetch_count: int = ..., global_qos: bool = ...
) -> Deferred[Incomplete | Failure | BaseException | None]: ...
def basic_reject(self, delivery_tag, requeue: bool = ...): ...
def basic_recover(self, requeue: bool = ...) -> Deferred[Incomplete | Failure | BaseException | None]: ...
def close(self, reply_code: int = ..., reply_text: str = ...): ...
def confirm_delivery(self) -> Deferred[Incomplete | None]: ...
def exchange_bind(
self, destination, source, routing_key: str = ..., arguments: Incomplete | None = ...
) -> Deferred[Incomplete | Failure | BaseException | None]: ...
def exchange_declare(
self,
exchange,
exchange_type=...,
passive: bool = ...,
durable: bool = ...,
auto_delete: bool = ...,
internal: bool = ...,
arguments: Incomplete | None = ...,
) -> Deferred[Incomplete | Failure | BaseException | None]: ...
def exchange_delete(
self, exchange: Incomplete | None = ..., if_unused: bool = ...
) -> Deferred[Incomplete | Failure | BaseException | None]: ...
def exchange_unbind(
self,
destination: Incomplete | None = ...,
source: Incomplete | None = ...,
routing_key: str = ...,
arguments: Incomplete | None = ...,
) -> Deferred[Incomplete | Failure | BaseException | None]: ...
def flow(self, active) -> Deferred[Incomplete | Failure | BaseException | None]: ...
def open(self): ...
def queue_bind(
self, queue, exchange, routing_key: Incomplete | None = ..., arguments: Incomplete | None = ...
) -> Deferred[Incomplete | Failure | BaseException | None]: ...
def queue_declare(
self,
queue,
passive: bool = ...,
durable: bool = ...,
exclusive: bool = ...,
auto_delete: bool = ...,
arguments: Incomplete | None = ...,
) -> Deferred[Incomplete | Failure | BaseException | None]: ...
def queue_delete(
self, queue, if_unused: bool = ..., if_empty: bool = ...
) -> Deferred[Incomplete | Failure | BaseException | None]: ...
def queue_purge(self, queue) -> Deferred[Incomplete | Failure | BaseException | None]: ...
def queue_unbind(
self, queue, exchange: Incomplete | None = ..., routing_key: Incomplete | None = ..., arguments: Incomplete | None = ...
) -> Deferred[Incomplete | Failure | BaseException | None]: ...
def tx_commit(self) -> Deferred[Incomplete | Failure | BaseException | None]: ...
def tx_rollback(self) -> Deferred[Incomplete | Failure | BaseException | None]: ...
def tx_select(self) -> Deferred[Incomplete | Failure | BaseException | None]: ...
class _TwistedConnectionAdapter(pika.connection.Connection):
def __init__(self, parameters, on_open_callback, on_open_error_callback, on_close_callback, custom_reactor) -> None: ...
def METHOD_NAME(self, transport: ITransport) -> None: ...
def connection_lost(self, error: Exception) -> None: ...
def data_received(self, data) -> None: ...
class TwistedProtocolConnection(Protocol): # pyright: ignore[reportUntypedBaseClass]
ready: Deferred[None] | None
closed: Deferred[None] | Failure | BaseException | None
def __init__(self, parameters: Incomplete | None = ..., custom_reactor: Incomplete | None = ...) -> None: ...
def channel(self, channel_number: Incomplete | None = ...): ...
@property
def is_open(self): ...
@property
def is_closed(self): ...
def close(self, reply_code: int = ..., reply_text: str = ...) -> Deferred[None] | Failure | BaseException | None: ...
def dataReceived(self, data) -> None: ...
def connectionLost(self, reason: Failure | BaseException = ...) -> None: ...
def makeConnection(self, transport: ITransport) -> None: ...
def connectionReady(self): ...
class _TimerHandle(nbio_interface.AbstractTimerReference):
def __init__(self, handle: DelayedCall) -> None: ...
def cancel(self) -> None: ...
|
2,315 |
test mongo fetch with filter
|
from typing import Optional
import pandas as pd
import pytest
from aqueduct.error import AqueductError
from aqueduct.resources.mongodb import MongoDBResource
from aqueduct import LoadUpdateMode, op
from sdk.data_resource_tests.flow_manager import FlowManager
from sdk.data_resource_tests.mongo_db_data_validator import MongoDBDataValidator
from sdk.data_resource_tests.save import save
from sdk.data_resource_tests.validation_helpers import check_hotel_reviews_table_artifact
from sdk.shared.naming import generate_table_name
from sdk.shared.validation import check_artifact_was_computed
@pytest.fixture(autouse=True)
def assert_data_resource_is_mongo_db(data_resource):
assert isinstance(data_resource, MongoDBResource)
def test_mongo_fetch(client, data_resource: MongoDBResource):
# Retrieve all rows with _id column excluded.
    # This makes sure the data checked by `check_hotel_reviews_table_artifact`
    # doesn't include the _id column generated by MongoDB.
hotel_reviews = data_resource.collection("hotel_reviews").find({}, {"_id": 0})
check_hotel_reviews_table_artifact(hotel_reviews)
def test_bad_fetch(client, data_resource: MongoDBResource):
# collection that doesn't exist
with pytest.raises(AqueductError, match="Preview Execution Failed"):
data_resource.collection("missing_table").find({})
    # valid collection, bad projection: "$or" is not a valid operator in a projection document.
with pytest.raises(AqueductError, match="Preview Execution Failed"):
data_resource.collection("hotel_reviews").find({}, {"$or": ["1", "2"]})
def test_mongo_fetch_column_selection(client, data_resource: MongoDBResource):
hotel_reviews = data_resource.collection("hotel_reviews").find({}, {"review": 1}).get()
assert list(hotel_reviews.columns) == ["_id", "review"]
hotel_reviews = (
data_resource.collection("hotel_reviews")
.find({}, {"_id": 0, "reviewer_nationality": 1})
.get()
)
assert list(hotel_reviews.columns) == ["reviewer_nationality"]
def METHOD_NAME(client, data_resource: MongoDBResource):
actual_data = (
data_resource.collection("hotel_reviews")
.find({"reviewer_nationality": " United Kingdom "})
.get()
)
all_data = data_resource.collection("hotel_reviews").find({}).get()
assert len(actual_data) == len(all_data[all_data["reviewer_nationality"] == " United Kingdom "])
def test_mongo_fetch_with_multiple_parametrized_filters(client, data_resource: MongoDBResource):
country1 = client.create_param("param_1", default=" United Kingdom ")
country2 = client.create_param("param_2", default=" Australia ")
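    # The "$1"/"$2" placeholders below are bound, in order, to the artifacts in
    # `parameters`; the assertions that follow check that they expand to the
    # params' values (their defaults first, then an overridden value).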
parameterized_results = data_resource.collection("hotel_reviews").find(
{
"reviewer_nationality": {
"$in": ["$1", "$2"],
}
},
parameters=[country1, country2],
)
expanded_results = data_resource.collection("hotel_reviews").find(
{
"reviewer_nationality": {
"$in": [" United Kingdom ", " Australia "],
}
}
)
assert parameterized_results.get().equals(expanded_results.get())
expanded_results = data_resource.collection("hotel_reviews").find(
{
"reviewer_nationality": {
"$in": [" Thailand ", " Australia "],
}
}
)
assert parameterized_results.get(parameters={"param_1": " Thailand "}).equals(
expanded_results.get()
)
def test_mongo_save_replace(flow_manager: FlowManager, data_resource: MongoDBResource):
# retrieve all rows with _id column.
hotel_reviews = data_resource.collection("hotel_reviews").find({})
save(data_resource, hotel_reviews, generate_table_name(), LoadUpdateMode.REPLACE)
flow = flow_manager.publish_flow_test(hotel_reviews)
MongoDBDataValidator(flow_manager._client, data_resource).check_saved_artifact_data(
flow, hotel_reviews.id(), expected_data=hotel_reviews.get()
)
def test_mongo_save_append(flow_manager: FlowManager, data_resource: MongoDBResource):
table_name = generate_table_name()
# saving twice with append mode
# Everything is done with `_id` excluded, as this field must be unique.
    # We rely on MongoDB to generate `_id`s when we upload copies. Otherwise,
    # append would fail if we tried to upload duplicated `_id`s.
hotel_reviews = data_resource.collection("hotel_reviews").find({}, {"_id": 0})
save(data_resource, hotel_reviews, table_name, LoadUpdateMode.REPLACE)
flow = flow_manager.publish_flow_test(hotel_reviews)
save(data_resource, hotel_reviews, table_name, LoadUpdateMode.APPEND)
flow = flow_manager.publish_flow_test(existing_flow=flow, artifacts=hotel_reviews)
reviews_data = hotel_reviews.get()
expected_data = pd.concat([reviews_data, reviews_data], ignore_index=True)
actual_data = data_resource.collection(table_name).find({}, {"_id": 0}).get()
assert expected_data.equals(actual_data)
def test_mongo_artifact_with_custom_metadata(
flow_manager: FlowManager, data_resource: MongoDBResource
):
# TODO: validate custom descriptions once we can fetch descriptions easily.
op_name = "test"
artf_name = "test artifact"
description = "test description"
hotel_reviews = data_resource.collection("hotel_reviews").find(
{}, {"_id": 0}, name=op_name, description=description
)
assert hotel_reviews.name() == artf_name
flow = flow_manager.publish_flow_test(artifacts=hotel_reviews)
check_artifact_was_computed(flow, artf_name)
def test_mongo_artifact_with_same_op_and_artf_names(
flow_manager: FlowManager, data_resource: MongoDBResource
):
# TODO: validate custom descriptions once we can fetch descriptions easily.
op_name = "test"
artf_name = "test"
description = "test description"
hotel_reviews = data_resource.collection("hotel_reviews").find(
{}, {"_id": 0}, name=op_name, output=artf_name, description=description
)
assert hotel_reviews.name() == artf_name
flow = flow_manager.publish_flow_test(artifacts=hotel_reviews)
check_artifact_was_computed(flow, artf_name)
def test_mongo_preserves_bson_table_even_with_pickled_collection_type(
flow_manager,
data_resource: MongoDBResource,
):
"""Test that bson table fidelity is preserved in the case where it is included
in a collection object (list, tuple).
"""
hotel_reviews = data_resource.collection("hotel_reviews").find({}, {"_id": 0})
@op
def select_first_object_of_input_tuple(mongo_table, another_param):
return mongo_table
output = select_first_object_of_input_tuple(hotel_reviews, 123)
# Saving the output back to Mongo will guarantee that the table maintained fidelity
# across function execution.
table_name = generate_table_name()
save(data_resource, output, table_name, LoadUpdateMode.REPLACE)
flow_manager.publish_flow_test(artifacts=output)
saved_data = data_resource.collection(table_name).find({}, {"_id": 0}).get()
assert hotel_reviews.get().equals(saved_data)
|
2,316 |
run rez bind
|
# SPDX-License-Identifier: Apache-2.0
# Copyright Contributors to the Rez Project
"""
Entry points.
"""
import os
import os.path
import sys
### Utility functions
def get_specifications():
"""Get entry point specifications
See:
* https://pythonhosted.org/distlib/reference.html#distlib.scripts.ScriptMaker.make_multiple
* https://setuptools.readthedocs.io/en/latest/setuptools.html#automatic-script-creation
Example return value:
{
"rez-env": "rez-env = rez.cli._entry_points.run_rez_env",
...
}
Returns:
dict (str, str): The specification string for each script name.
"""
specs = {}
for attr, obj in sys.modules[__name__].__dict__.items():
scriptname = getattr(obj, "__scriptname__", None)
if scriptname:
spec = "%s = rez.cli._entry_points:%s" % (scriptname, attr)
specs[scriptname] = spec
return specs
def scriptname(name):
def decorator(fn):
setattr(fn, "__scriptname__", name)
return fn
return decorator
def check_production_install():
path = os.path.dirname(sys.argv[0])
filepath = os.path.join(path, ".rez_production_install")
if not os.path.exists(filepath):
sys.stderr.write(
"Pip-based rez installation detected. Please be aware that rez command "
"line tools are not guaranteed to function correctly in this case. See "
"https://github.com/AcademySoftwareFoundation/rez/wiki/Installation#why-not-pip-for-production "
" for futher details.\n"
)
### Entry points
@scriptname("rez")
def run_rez():
check_production_install()
from rez.cli._main import run
return run()
@scriptname("rezolve")
def run_rezolve():
# alias for osx, where rez is a different tool
# https://www.unix.com/man-page/osx/1/REZ/
check_production_install()
from rez.cli._main import run
return run()
@scriptname("_rez-complete")
def run_rez_complete():
check_production_install()
from rez.cli._main import run
return run("complete")
@scriptname("_rez_fwd")
def run_rez_fwd():
check_production_install()
from rez.cli._main import run
return run("forward")
@scriptname("rez-bind")
def METHOD_NAME():
check_production_install()
from rez.cli._main import run
return run("bind")
@scriptname("rez-build")
def run_rez_build():
check_production_install()
from rez.cli._main import run
return run("build")
@scriptname("rez-config")
def run_rez_config():
check_production_install()
from rez.cli._main import run
return run("config")
@scriptname("rez-context")
def run_rez_context():
check_production_install()
from rez.cli._main import run
return run("context")
@scriptname("rez-cp")
def run_rez_cp():
check_production_install()
from rez.cli._main import run
return run("cp")
@scriptname("rez-depends")
def run_rez_depends():
check_production_install()
from rez.cli._main import run
return run("depends")
@scriptname("rez-diff")
def run_rez_diff():
check_production_install()
from rez.cli._main import run
return run("diff")
@scriptname("rez-env")
def run_rez_env():
check_production_install()
from rez.cli._main import run
return run("env")
@scriptname("rez-gui")
def run_rez_gui():
check_production_install()
from rez.cli._main import run
return run("gui")
@scriptname("rez-help")
def run_rez_help():
check_production_install()
from rez.cli._main import run
return run("help")
@scriptname("rez-interpret")
def run_rez_interpret():
check_production_install()
from rez.cli._main import run
return run("interpret")
@scriptname("rez-memcache")
def run_rez_memcache():
check_production_install()
from rez.cli._main import run
return run("memcache")
@scriptname("rez-pip")
def run_rez_pip():
check_production_install()
from rez.cli._main import run
return run("pip")
@scriptname("rez-pkg-cache")
def run_rez_pkg_cache():
check_production_install()
from rez.cli._main import run
return run("pkg-cache")
@scriptname("rez-plugins")
def run_rez_plugins():
check_production_install()
from rez.cli._main import run
return run("plugins")
@scriptname("rez-python")
def run_rez_python():
check_production_install()
from rez.cli._main import run
return run("python")
@scriptname("rez-release")
def run_rez_release():
check_production_install()
from rez.cli._main import run
return run("release")
@scriptname("rez-search")
def run_rez_search():
check_production_install()
from rez.cli._main import run
return run("search")
@scriptname("rez-selftest")
def run_rez_selftest():
check_production_install()
from rez.cli._main import run
return run("selftest")
@scriptname("rez-status")
def run_rez_status():
check_production_install()
from rez.cli._main import run
return run("status")
@scriptname("rez-suite")
def run_rez_suite():
check_production_install()
from rez.cli._main import run
return run("suite")
@scriptname("rez-test")
def run_rez_test():
check_production_install()
from rez.cli._main import run
return run("test")
@scriptname("rez-view")
def run_rez_view():
check_production_install()
from rez.cli._main import run
return run("view")
@scriptname("rez-yaml2py")
def run_rez_yaml2py():
check_production_install()
from rez.cli._main import run
return run("yaml2py")
@scriptname("rez-bundle")
def run_rez_bundle():
check_production_install()
from rez.cli._main import run
return run("bundle")
@scriptname("rez-benchmark")
def run_rez_benchmark():
check_production_install()
# Special case - we have to override config settings here, before rez is
# loaded. TODO this would be cleaner if we had an Application object, see #1043
#
# /start
import json
settings = {
"memcached_uri": [],
"package_filter": [],
"package_orderers": [],
"allow_unversioned_packages": False,
"resource_caching_maxsize": -1,
"cache_packages_path": None
}
for setting, value in settings.items():
os.environ.pop("REZ_" + setting.upper(), None)
os.environ["REZ_" + setting.upper() + "_JSON"] = json.dumps(value)
# /end
from rez.cli._main import run
return run("benchmark")
@scriptname("rez-pkg-ignore")
def run_rez_pkg_ignore():
check_production_install()
from rez.cli._main import run
return run("pkg-ignore")
@scriptname("rez-mv")
def run_rez_mv():
check_production_install()
from rez.cli._main import run
return run("mv")
@scriptname("rez-rm")
def run_rez_rm():
check_production_install()
from rez.cli._main import run
return run("rm")
|
2,317 |
create hyperpipe
|
import types
import unittest
from functools import reduce
import operator
from inspect import signature
from photonai.base import PipelineElement, Switch, Branch, Hyperpipe
from photonai.optimization import GridSearchOptimizer, RandomGridSearchOptimizer, IntegerRange
from photonai.optimization.base_optimizer import PhotonSlaveOptimizer, PhotonMasterOptimizer
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import KFold, ShuffleSplit
class GridSearchOptimizerTest(unittest.TestCase):
def setUp(self):
self.pipeline_elements = [PipelineElement("StandardScaler"),
PipelineElement('PCA', hyperparameters={'n_components': IntegerRange(5, 20)}),
PipelineElement("SVC")]
self.optimizer = GridSearchOptimizer()
self.optimizer_name = 'grid_search'
self.optimizer_params = None
def METHOD_NAME(self):
self.hyperpipe = Hyperpipe('optimizer_test',
project_folder='./tmp',
metrics=['accuracy'],
best_config_metric='accuracy',
inner_cv=KFold(n_splits=2),
outer_cv=ShuffleSplit(n_splits=2),
optimizer=self.optimizer_name,
optimizer_params=self.optimizer_params,
verbosity=0)
def test_run(self):
self.METHOD_NAME()
for p in self.pipeline_elements:
self.hyperpipe += p
X, y = load_breast_cancer(return_X_y=True)
self.hyperpipe.fit(X, y)
def test_all_functions_available(self):
"""Test existence of functions and parameters -> .ask() .tell() .prepare()."""
self.assertTrue(hasattr(self.optimizer, 'prepare'))
self.assertListEqual(list(signature(self.optimizer.prepare).parameters.keys()),
['pipeline_elements', 'maximize_metric'])
self.assertTrue(hasattr(self.optimizer, 'tell'))
self.assertListEqual(list(signature(self.optimizer.tell).parameters.keys()), ['config', 'performance'])
self.assertTrue(hasattr(self.optimizer, 'ask'))
def test_all_attributes_available(self):
"""Test for .ask and .param_grid attribute. .ask is important for next configuration that should be tested."""
self.optimizer.prepare(pipeline_elements=self.pipeline_elements, maximize_metric=True)
self.assertIsInstance(self.optimizer.ask, types.GeneratorType)
def test_ask(self):
"""Test general functionality of .ask()."""
self.optimizer.prepare(pipeline_elements=self.pipeline_elements, maximize_metric=True)
ask_list = list(self.optimizer.ask)
self.assertIsInstance(ask_list, list)
self.assertSetEqual(set([str(type(a)) for a in ask_list]), {"<class 'dict'>"})
generated_elements = reduce(operator.concat, [list(a.keys()) for a in ask_list])
self.assertIn("PCA__n_components", generated_elements)
return generated_elements
def test_ask_advanced(self):
"""Test advanced functionality of .ask()."""
branch = Branch('branch')
branch += PipelineElement('PCA')
branch += PipelineElement('SVC', {'C': [0.1, 1], 'kernel': ['rbf', 'sigmoid']})
pipe_switch = Switch('switch', [PipelineElement("StandardScaler"), PipelineElement("MaxAbsScaler")])
self.pipeline_elements = [PipelineElement("StandardScaler"),
PipelineElement('PCA', hyperparameters={'n_components': IntegerRange(5, 20)},
test_disabled=True),
pipe_switch,
branch,
Switch('Switch_in_switch', [branch, pipe_switch])]
generated_elements = self.test_ask()
self.assertIn("PCA__n_components", generated_elements)
self.assertIn("Switch_in_switch__current_element", generated_elements)
self.assertIn("branch__SVC__C", generated_elements)
self.assertIn("branch__SVC__kernel", generated_elements)
self.assertIn("switch__current_element", generated_elements)
class RandomGridSearchOptimizerTest(GridSearchOptimizerTest):
def setUp(self):
self.pipeline_elements = [PipelineElement("StandardScaler"),
PipelineElement('PCA', hyperparameters={'n_components': IntegerRange(5, 20)}),
PipelineElement("SVC")]
self.optimizer = RandomGridSearchOptimizer()
self.optimizer_name = 'random_grid_search'
self.optimizer_params = None
def test_parameter_k(self):
"""Test for parameter n_configuration and k."""
self.optimizer = RandomGridSearchOptimizer(n_configurations=3)
self.optimizer.prepare(pipeline_elements=self.pipeline_elements, maximize_metric=True)
self.assertEqual(len(self.optimizer.param_grid), 3)
self.optimizer = RandomGridSearchOptimizer(n_configurations=500)
self.optimizer.prepare(pipeline_elements=self.pipeline_elements, maximize_metric=True)
self.assertEqual(len(self.optimizer.param_grid), 15)
class BaseOptimizerTests(unittest.TestCase):
@staticmethod
def test_slave_interface():
opt = PhotonSlaveOptimizer()
opt.prepare(list(), True)
opt.ask()
opt.tell(dict(), float())
@staticmethod
def test_master_interface():
opt = PhotonMasterOptimizer()
opt.prepare(list(), True, None)
opt.optimize()
|
2,318 |
shutup
|
from http.server import HTTPServer, SimpleHTTPRequestHandler
import unittest
import os
import tempfile
from pykickstart import load
from pykickstart.errors import KickstartError
from signal import SIGTERM
class LoadTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
self._content = """
auth --enableshadow --passalgo=sha512
graphical
firstboot --enable
ignoredisk --only-use=vda
keyboard --vckeymap=cz --xlayouts='cz'
lang cs_CZ.UTF-8
network --bootproto=dhcp --device=ens3 --ipv6=auto --activate
network --hostname=test
rootpw password
timezone Europe/Prague --isUtc
user --groups=wheel --name=testuser --password=password --iscrypted --gecos="Test User"
xconfig --startxonboot
bootloader --location=mbr --boot-drive=vda
autopart --type=lvm
clearpart --all --initlabel --drives=vda
%packages
@^xfce-desktop-environment
@xfce-apps
@xfce-media
%end
%addon com_redhat_kdump --disable --reserve-mb='128'
%end
"""
def setUp(self):
(handle, self._path) = tempfile.mkstemp(prefix="testfile-", text=True)
os.write(handle, self._content.encode("utf-8"))
os.close(handle)
def tearDown(self):
os.unlink(self._path)
class Load_To_String_TestCase(LoadTest):
def runTest(self):
self.assertEqual(self._content, load.load_to_str(self._path))
class Load_To_File_TestCase(LoadTest):
def __init__(self, *args, **kwargs):
LoadTest.__init__(self, *args, **kwargs)
self._target_path = ""
def runTest(self):
(handle, self._target_path) = tempfile.mkstemp(prefix="testfile", text=True)
os.close(handle)
target_path = load.load_to_file(self._path, self._target_path)
self.assertEqual(target_path, self._target_path)
with open(self._target_path, 'r') as f:
self.assertEqual(self._content, f.read())
with self.assertRaises(KickstartError):
load.load_to_file("/tmp/foo", "/tmp/bar")
def tearDown(self):
super(Load_To_File_TestCase, self).tearDown()
os.unlink(self._target_path)
class Load_From_URL_Test(LoadTest):
def setUp(self):
super(Load_From_URL_Test, self).setUp()
# Disable logging in the handler, mostly to keep the HTTPS binary garbage off the screen
httphandler = SimpleHTTPRequestHandler
def METHOD_NAME(*args, **kwargs):
pass
httphandler.log_message = METHOD_NAME
self._server = HTTPServer(('127.0.0.1', 0), httphandler)
httpd_port = self._server.server_port
self._httpd_pid = os.fork()
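        # After the fork the child (pid == 0) serves the temp directory forever,
        # while the parent keeps the pid so tearDown() can SIGTERM the server.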
if self._httpd_pid == 0:
os.chdir(os.path.dirname(self._path))
self._server.serve_forever()
self._url = 'http://127.0.0.1:%d/%s' % (httpd_port, os.path.basename(self._path))
# wrong URL (HTTPS request won't be handled correctly by the HTTP server)
self._url_https = "https" + self._url.lstrip("http")
def tearDown(self):
super(Load_From_URL_Test, self).tearDown()
self._server.server_close()
os.kill(self._httpd_pid, SIGTERM)
class Load_From_URL_To_Str_TestCase(Load_From_URL_Test):
def runTest(self):
self.assertEqual(self._content, load.load_to_str(self._url))
self.assertRaises(KickstartError, load.load_to_str, self._url_https)
class Load_From_URL_To_File_TestCase(Load_From_URL_Test):
def setUp(self):
super(Load_From_URL_To_File_TestCase, self).setUp()
(handle, self._target_path) = tempfile.mkstemp(prefix="testfile", text=True)
os.close(handle)
def runTest(self):
target_path = load.load_to_file(self._url, self._target_path)
self.assertEqual(target_path, self._target_path)
with open(self._target_path, 'r') as f:
self.assertEqual(self._content, f.read())
self.assertEqual(self._content, load.load_to_str(self._url))
# raises SSLError in _load_url()
with self.assertRaises(KickstartError):
load.load_to_file(self._url_https, self._target_path)
# raises RequestException in _load_url()
with self.assertRaises(KickstartError):
load.load_to_file('http://test.local/ks.cfg', self._target_path)
# raises IOError in load_file()
with self.assertRaises(KickstartError):
load.load_to_file(self._url, '/no/exist')
# request.status_code == 404 in _load_url()
with self.assertRaises(KickstartError):
load.load_to_file(self._url+'.TEST', '/tmp/foo')
def tearDown(self):
super(Load_From_URL_To_File_TestCase, self).tearDown()
os.unlink(self._target_path)
if __name__ == "__main__":
unittest.main()
|
2,319 |
get mfr id
|
#
# psuutil.py
# Platform-specific PSU status interface for SONiC
#
import logging
import os.path
try:
from sonic_psu.psu_base import PsuBase
except ImportError as e:
raise ImportError(str(e) + "- required module not found")
class PsuUtil(PsuBase):
"""Platform-specific PSUutil class"""
HWMON_PATH = '/sys/class/hwmon/hwmon1/'
PSU1_PREFIX = 'power42_'
PSU2_PREFIX = 'power52_'
MAX_PSUS = 2
def __init__(self):
PsuBase.__init__(self)
# Get sysfs attribute
def get_attr_value(self, attr_path):
retval = 'ERR'
if (not os.path.isfile(attr_path)):
return retval
try:
with open(attr_path, 'r') as fd:
retval = fd.read()
except Exception:
logging.error("Unable to open ", attr_path, " file !")
retval = retval.rstrip('\r\n')
return retval
def get_attr_filename(self, index, attr):
if (index == 1):
attr_file = self.PSU1_PREFIX + attr
elif (index == 2):
attr_file = self.PSU2_PREFIX + attr
else:
logging.error("Invalid PSU number:", index)
return ''
return attr_file
def get_num_psus(self):
"""
Retrieves the number of PSUs available on the device
:return: An integer, the number of PSUs available on the device
"""
return self.MAX_PSUS
def get_psu_status(self, index):
"""
Retrieves the oprational status of power supply unit (PSU) defined
by index <index>
:param index: An integer, index of the PSU of which to query status
:return: Boolean, True if PSU is operating properly, False if PSU is\
faulty
"""
status = False
attr_filename = self.get_attr_filename(index, 'input')
if attr_filename == '':
return status
attr_path = self.HWMON_PATH + attr_filename
attr_value = self.get_attr_value(attr_path)
if (attr_value != 'ERR'):
attr_value = float(attr_value)
# Check PSU status
if (attr_value != 0.0):
status = True
return status
def get_psu_presence(self, index):
"""
Retrieves the presence status of power supply unit (PSU) defined
by index <index>
:param index: An integer, index of the PSU of which to query status
:return: Boolean, True if PSU is plugged, False if not
"""
status = False
attr_filename = self.get_attr_filename(index, 'present')
if attr_filename == '':
return status
attr_path = self.HWMON_PATH + attr_filename
attr_value = self.get_attr_value(attr_path)
if (attr_value != 'ERR'):
attr_value = int(attr_value, 16)
# Check PSU status
if (attr_value == 1):
status = True
return status
def get_powergood_status(self, index):
status = False
attr_filename = self.get_attr_filename(index, 'input')
if attr_filename == '':
return status
attr_path = self.HWMON_PATH + attr_filename
attr_value = self.get_attr_value(attr_path)
if (attr_value != 'ERR'):
attr_value = float(attr_value)
# Check PSU status
if (attr_value != 0.0):
status = True
return status
def get_model(self, index):
attr_filename = self.get_attr_filename(index, 'model')
if attr_filename == '':
return None
attr_path = self.HWMON_PATH + attr_filename
attr_value = self.get_attr_value(attr_path)
if (attr_value != 'ERR'):
return attr_value.rstrip()
def METHOD_NAME(self, index):
attr_filename = self.get_attr_filename(index, 'mfrid')
if attr_filename == '':
return None
attr_path = self.HWMON_PATH + attr_filename
attr_value = self.get_attr_value(attr_path)
if (attr_value != 'ERR'):
return attr_value.rstrip()
def get_serial(self, index):
attr_filename = self.get_attr_filename(index, 'sn')
if attr_filename == '':
return None
attr_path = self.HWMON_PATH + attr_filename
attr_value = self.get_attr_value(attr_path)
if (attr_value != 'ERR'):
return attr_value.rstrip()
def get_direction(self, index):
if (index == 1):
direction_file = 'fan40_direction'
elif (index == 2):
direction_file = 'fan50_direction'
else:
logging.error("Invalid PSU number:", index)
return None
direction = self.get_attr_value(self.HWMON_PATH + direction_file)
direction = direction.rstrip()
"""
1: FB 2: BF
        Since the fan is at the rear of the switch, FB means Exhaust; BF means Intake.
"""
if direction == '2':
return "INTAKE"
else:
return "EXHAUST"
def get_output_voltage(self, index):
if (index == 1):
attr_file = 'in47_input'
elif (index == 2):
attr_file = 'in57_input'
else:
logging.error("Invalid PSU number:", index)
return 0.0
voltage = self.get_attr_value(self.HWMON_PATH + attr_file)
voltage = voltage.rstrip()
if (voltage != 'ERR'):
voltage, dummy = voltage.split('.', 1)
else:
return 0.0
return float(voltage)/1000
def get_output_current(self, index):
if (index == 1):
attr_file = 'curr39_input'
elif (index == 2):
attr_file = 'curr49_input'
else:
logging.error("Invalid PSU number:", index)
return 0.0
current = self.get_attr_value(self.HWMON_PATH + attr_file)
current = current.rstrip()
if (current != 'ERR'):
current, dummy = current.split('.',1)
else:
return 0.0
return float(current)/1000
def get_output_power(self, index):
attr_filename = self.get_attr_filename(index, 'input')
if attr_filename == '':
return 0.0
attr_path = self.HWMON_PATH + attr_filename
attr_value = self.get_attr_value(attr_path)
if (attr_value != 'ERR'):
attr_value = float(attr_value)
else:
return 0.0
return float(attr_value/1000)
def get_fan_rpm(self, index, fan_idx):
if (index == 1):
rpm_file = 'fan40_input'
elif (index == 2):
rpm_file = 'fan50_input'
else:
logging.error("Invalid PSU number:", index)
return 0
rpm = self.get_attr_value(self.HWMON_PATH + rpm_file)
rpm = rpm.rstrip()
if (rpm != 'ERR'):
rpm = float(rpm)
else:
return 0
return int(rpm)
|
2,320 |
clean text data
|
# Generated by Django 3.2.16 on 2022-10-18 13:16
import re
import warnings
from typing import Dict, Optional
from django.db import migrations
from django.template.defaultfilters import truncatechars
from django.utils.html import strip_tags
from urllib3.util import parse_url
# Start copy of /saleor/core/utils/editorjs.py
BLACKLISTED_URL_SCHEMES = ("javascript",)
HYPERLINK_TAG_WITH_URL_PATTERN = r"(.*?<a\s+href=\\?\")(\w+://\S+[^\\])(\\?\">)"
def clean_editor_js(definitions: Optional[Dict], *, to_string: bool = False):
"""Sanitize a given EditorJS JSON definitions.
Look for not allowed URLs, replaced them with `invalid` value, and clean valid ones.
`to_string` flag is used for returning concatenated string from all blocks
instead of returning json object.
"""
if definitions is None:
return "" if to_string else definitions
blocks = definitions.get("blocks")
if not blocks or not isinstance(blocks, list):
return "" if to_string else definitions
plain_text_list = []
for index, block in enumerate(blocks):
block_type = block["type"]
data = block.get("data")
if not data or not isinstance(data, dict):
continue
if block_type == "list":
for item_index, item in enumerate(block["data"]["items"]):
if not item:
continue
new_text = METHOD_NAME(item)
if to_string:
plain_text_list.append(strip_tags(new_text))
else:
blocks[index]["data"]["items"][item_index] = new_text
else:
text = block["data"].get("text")
if not text:
continue
new_text = METHOD_NAME(text)
if to_string:
plain_text_list.append(strip_tags(new_text))
else:
blocks[index]["data"]["text"] = new_text
return " ".join(plain_text_list) if to_string else definitions
def METHOD_NAME(text: str):
"""Look for url in text, check if URL is allowed and return the cleaned URL.
By default, only the protocol ``javascript`` is denied.
"""
if not text:
return
end_of_match = 0
new_text = ""
for match in re.finditer(HYPERLINK_TAG_WITH_URL_PATTERN, text):
original_url = match.group(2)
        original_url = original_url.strip()
url = parse_url(original_url)
new_url = url.url
if url.scheme in BLACKLISTED_URL_SCHEMES:
warnings.warn(
f"An invalid url was sent: {original_url} "
f"-- Scheme: {url.scheme} is blacklisted"
)
new_url = "#invalid"
new_text += match.group(1) + new_url + match.group(3)
end_of_match = match.end()
if end_of_match:
new_text += text[end_of_match:]
return new_text if new_text else text
# End copy of /saleor/core/utils/editorjs.py
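# Illustrative sketch (not part of the original migration): for a block whose
# text contains '<a href="javascript://example">x</a>', the hyperlink pattern
# above matches, parse_url() reports the blacklisted "javascript" scheme, and
# the href is rewritten to "#invalid"; http(s) URLs are kept as-is, and calling
# clean_editor_js(definitions, to_string=True) returns tag-stripped plain text.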
def queryset_in_batches(queryset):
"""Slice a queryset into batches.
    Input queryset should be sorted by pk.
"""
start_pk = 0
while True:
qs = queryset.filter(pk__gt=start_pk)[:2000]
pks = list(qs.values_list("pk", flat=True))
if not pks:
break
yield pks
start_pk = pks[-1]
def propagate_names_for_plain_text_attribute_value_translations(apps, schema_editor):
AttributeValueTranslation = apps.get_model("attribute", "AttributeValueTranslation")
queryset = (
AttributeValueTranslation.objects.exclude(plain_text=None)
.filter(name="")
.order_by("pk")
)
for batch_pks in queryset_in_batches(queryset):
batch = AttributeValueTranslation.objects.filter(pk__in=batch_pks)
instances = []
for instance in batch:
instance.name = truncatechars(instance.plain_text, 100)
instances.append(instance)
AttributeValueTranslation.objects.bulk_update(instances, ["name"])
class Migration(migrations.Migration):
dependencies = [
("attribute", "0024_merge_20221018_1100"),
]
operations = [
migrations.RunPython(
propagate_names_for_plain_text_attribute_value_translations,
migrations.RunPython.noop,
),
]
|
2,321 |
async set preset mode
|
"""
Setup for different kinds of Tuya fan devices
"""
import logging
from homeassistant.components.fan import FanEntity, FanEntityFeature
from .device import TuyaLocalDevice
from .helpers.config import async_tuya_setup_platform
from .helpers.device_config import TuyaEntityConfig
from .helpers.mixin import TuyaLocalEntity
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_entities):
config = {**config_entry.data, **config_entry.options}
await async_tuya_setup_platform(
hass,
async_add_entities,
config,
"fan",
TuyaLocalFan,
)
class TuyaLocalFan(TuyaLocalEntity, FanEntity):
"""Representation of a Tuya Fan entity."""
def __init__(self, device: TuyaLocalDevice, config: TuyaEntityConfig):
"""
Initialise the fan device.
Args:
device (TuyaLocalDevice): The device API instance.
config (TuyaEntityConfig): The entity config.
"""
super().__init__()
dps_map = self._init_begin(device, config)
self._switch_dps = dps_map.pop("switch", None)
self._preset_dps = dps_map.pop("preset_mode", None)
self._speed_dps = dps_map.pop("speed", None)
self._oscillate_dps = dps_map.pop("oscillate", None)
self._direction_dps = dps_map.pop("direction", None)
self._init_end(dps_map)
self._support_flags = 0
if self._preset_dps:
self._support_flags |= FanEntityFeature.PRESET_MODE
if self._speed_dps:
self._support_flags |= FanEntityFeature.SET_SPEED
if self._oscillate_dps:
self._support_flags |= FanEntityFeature.OSCILLATE
if self._direction_dps:
self._support_flags |= FanEntityFeature.DIRECTION
@property
def supported_features(self):
"""Return the features supported by this climate device."""
return self._support_flags
@property
def is_on(self):
"""Return whether the switch is on or not."""
# If there is no switch, it is always on
if self._switch_dps is None:
return self.available
return self._switch_dps.get_value(self._device)
async def async_turn_on(self, **kwargs):
"""Turn the switch on"""
if self._switch_dps is None:
raise NotImplementedError()
await self._switch_dps.async_set_value(self._device, True)
async def async_turn_off(self, **kwargs):
"""Turn the switch off"""
if self._switch_dps is None:
raise NotImplementedError
await self._switch_dps.async_set_value(self._device, False)
@property
def percentage(self):
"""Return the currently set percentage."""
if self._speed_dps is None:
return None
return self._speed_dps.get_value(self._device)
@property
def percentage_step(self):
"""Return the step for percentage."""
if self._speed_dps is None:
return None
if self._speed_dps.values(self._device):
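            # e.g. four discrete speed values yield a step of 25% (illustrative only)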
return 100 / len(self._speed_dps.values(self._device))
return self._speed_dps.step(self._device)
@property
def speed_count(self):
"""Return the number of speeds supported by the fan."""
if self._speed_dps is None:
return 0
if self._speed_dps.values(self._device):
return len(self._speed_dps.values(self._device))
return int(round(100 / self.percentage_step))
async def async_set_percentage(self, percentage):
"""Set the fan speed as a percentage."""
# If speed is 0, turn the fan off
if percentage == 0 and self._switch_dps:
return await self.async_turn_off()
if self._speed_dps is None:
return None
# If there is a fixed list of values, snap to the closest one
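        # (illustration: with discrete values [33, 66, 99], a requested 50 snaps to 66)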
if self._speed_dps.values(self._device):
percentage = min(
self._speed_dps.values(self._device),
key=lambda x: abs(x - percentage),
)
values_to_set = self._speed_dps.get_values_to_set(self._device, percentage)
if not self.is_on and self._switch_dps:
values_to_set.update(self._switch_dps.get_values_to_set(self._device, True))
await self._device.async_set_properties(values_to_set)
@property
def preset_mode(self):
"""Return the current preset mode."""
if self._preset_dps is None:
return None
return self._preset_dps.get_value(self._device)
@property
def preset_modes(self):
"""Return the list of presets that this device supports."""
if self._preset_dps is None:
return []
return self._preset_dps.values(self._device)
async def METHOD_NAME(self, preset_mode):
"""Set the preset mode."""
if self._preset_dps is None:
raise NotImplementedError()
await self._preset_dps.async_set_value(self._device, preset_mode)
@property
def current_direction(self):
"""Return the current direction [forward or reverse]."""
if self._direction_dps is None:
return None
return self._direction_dps.get_value(self._device)
async def async_set_direction(self, direction):
"""Set the direction of the fan."""
if self._direction_dps is None:
raise NotImplementedError()
await self._direction_dps.async_set_value(self._device, direction)
@property
def oscillating(self):
"""Return whether or not the fan is oscillating."""
if self._oscillate_dps is None:
return None
return self._oscillate_dps.get_value(self._device)
async def async_oscillate(self, oscillating):
"""Oscillate the fan."""
if self._oscillate_dps is None:
raise NotImplementedError()
await self._oscillate_dps.async_set_value(self._device, oscillating)
|
2,322 |
test missing eof
|
import unittest
from drake.tools.lint.formatter import FormatterBase, IncludeFormatter
class TestFormatterBase(unittest.TestCase):
def test_essentials(self):
original_lines = [
'// Line 1\n',
'/* Line 2 */\n',
'\n',
]
dut = FormatterBase('filename.cc', readlines=original_lines)
# Everything starts out unchanged.
self.assertTrue(dut.is_same_as_original())
self.assertTrue(dut.is_permutation_of_original())
self.assertEqual(dut.get_all_lines(), original_lines)
self.assertTrue(dut.get_first_differing_original_index() is None)
# Basic getters.
self.assertEqual(dut.get_num_lines(), 3)
self.assertTrue(dut.is_blank_line(2))
self.assertEqual(dut.get_line(0), '// Line 1\n')
# Reverse it and end up with a permutation.
dut.set_all_lines(reversed(dut.get_all_lines()))
self.assertFalse(dut.is_same_as_original())
self.assertTrue(dut.is_permutation_of_original())
self.assertEqual(dut.get_first_differing_original_index(), 0)
# Rebuild it using insertion and removal.
dut.set_all_lines(['\n'] * 3)
dut.set_line(0, '/* Line 2 */\n')
dut.insert_lines(0, ['AAA\n', '// Line 1\n'])
dut.remove_all([0, 3])
self.assertEqual(dut.get_all_lines(), original_lines)
def test_format_ranges(self):
original_lines = [
'#include "line0"\n',
'// clang-format off\n',
'#include "line2"\n',
'// clang-format on\n',
'#include "line4"\n',
'#include "line5"\n',
'/* clang-format off */\n',
'#include "line7"\n',
'#include "line8"\n',
'/* clang-format on */\n',
'#include "line10"\n',
]
dut = FormatterBase("filename.cc", readlines=original_lines)
self.assertEqual(
dut.get_format_ranges(), [[0], [4, 5], [10]])
self.assertEqual(
dut.get_non_format_ranges(), [[1, 2, 3], [6, 7, 8, 9]])
def test_dos(self):
original_lines = [
'#include "line0"\r\n',
]
with self.assertRaisesRegex(Exception, "DOS newline"):
FormatterBase("filename.cc", readlines=original_lines)
def METHOD_NAME(self):
original_lines = [
'#include "line0"',
]
with self.assertRaisesRegex(Exception, "newline.*end of file"):
FormatterBase("filename.cc", readlines=original_lines)
class TestIncludeFormatter(unittest.TestCase):
def _split(self, triple_quoted_file_contents):
lines = triple_quoted_file_contents.split("\n")
assert len(lines) >= 2
assert lines[0] == "" # Detritus from first triple quote.
assert lines[-1] == "" # Detritus from last triple quote.
del lines[0]
del lines[-1]
return [line + "\n" for line in lines]
def _check(self, basename, original, expected, first_differing):
original_lines = self._split(original)
expected_lines = self._split(expected)
dut = IncludeFormatter(
"drake/dummy/" + basename,
readlines=original_lines)
dut.format_includes()
self.assertEqual(dut.get_all_lines(), expected_lines)
self.assertEqual(dut.get_first_differing_original_index(),
first_differing)
def test_basic(self):
# A pile of headers gets sorted per cppguide:
# - The related header
# - C system files
# - C++ system files
# - Other libraries' .h files
# - Your project's .h files
original = """
#include "drake/common/drake_assert.h"
#include "drake/dummy/bar.h"
#include "drake/dummy/dut.h"
#include <gtest/gtest.h>
#include <Eigen/Dense>
#include <algorithm>
#include <poll.h>
#include <sys/wait.h>
#include <vector>
"""
expected = """
#include "drake/dummy/dut.h"
#include <poll.h>
#include <sys/wait.h>
#include <algorithm>
#include <vector>
#include <Eigen/Dense>
#include <gtest/gtest.h>
#include "drake/common/drake_assert.h"
#include "drake/dummy/bar.h"
"""
self._check("dut.cc", original, expected, 0)
def test_nothing(self):
# A file with _no_ include statements.
original = """
namespace { }
"""
self._check("dut.cc", original, original, None)
def test_regroup(self):
# Wrongly grouped whitespace.
original = """
#include "drake/dummy/dut.h"
#include <Eigen/Dense>
#include <algorithm>
#include <vector>
#include "drake/common/drake_assert.h"
#include "drake/dummy/bar.h"
#include <gtest/gtest.h>
"""
expected = """
#include "drake/dummy/dut.h"
#include <algorithm>
#include <vector>
#include <Eigen/Dense>
#include <gtest/gtest.h>
#include "drake/common/drake_assert.h"
#include "drake/dummy/bar.h"
"""
self._check("dut.cc", original, expected, 2)
def test_format_off(self):
# "clang-format off".
original = """
#include "drake/dummy/dut.h"
// clang-format off
#ifdef FOO
#include <algorithm>
#include <vector>
#else
#include <vector>
#include <algorithm>
#endif
// clang-format on
#include "drake/common/drake_assert.h"
"""
self._check("dut.cc", original, original, None)
def test_target_is_header(self):
# A header file.
original = """
#include "drake/common/drake_assert.h"
#include <algorithm>
namespace { }
"""
expected = """
#include <algorithm>
#include "drake/common/drake_assert.h"
namespace { }
"""
self._check("dut.h", original, expected, 0)
def test_associated_comment(self):
# A comment prior to a line.
original = """
#include "drake/dummy/dut.h"
// Some comment describing the next line.
#include <vector>
namespace { }
"""
self._check("dut.cc", original, original, None)
def test_file_opening_comment(self):
# A comment atop the file with no blank line.
original = """
/// @file dut.cc
/// Mumble mumble
///
#include <string>
#include <vector>
"""
self._check("dut.cc", original, original, None)
def test_internal_related_header(self):
# Two related headers, guarded by "clang-format off".
original = """
/* clang-format off (with explanatory comment) */
#include "drake/dummy/dut.h"
#include "drake/dummy/dut_internal.h"
/* clang-format on (with explanatory comment) */
#include <vector>
#include <string>
#include "drake/dummy/drake_assert.h"
#include "drake/dummy/drake_deprecated.h"
"""
expected = """
/* clang-format off (with explanatory comment) */
#include "drake/dummy/dut.h"
#include "drake/dummy/dut_internal.h"
/* clang-format on (with explanatory comment) */
#include <string>
#include <vector>
#include "drake/dummy/drake_assert.h"
#include "drake/dummy/drake_deprecated.h"
"""
self._check("dut.cc", original, expected, 5)
def test_resort_solo_groups(self):
# Groups of one, but sorted incorrectly.
original = """
#include "drake/dummy/dut.h"
#include "drake/common/drake_assert.h"
#include <vector>
"""
expected = """
#include "drake/dummy/dut.h"
#include <vector>
#include "drake/common/drake_assert.h"
"""
self._check("dut.cc", original, expected, 2)
def test_nontrivial_reformatting(self):
# If clang-format changes any lines, we want to fail-fast.
# (Note the two spaces between #include and the double quote.)
        original_lines = ['#include  "nontrivial.h"\n']
dut = IncludeFormatter("nontrivial.cc", readlines=original_lines)
dut.format_includes()
with self.assertRaisesRegex(Exception, 'not just a shuffle'):
dut.rewrite_file()
|
2,323 |
gradrhofunc
|
import os, shutil, mpi
from Spheral2d import *
from SpheralTestUtilities import *
from centroidalRelaxNodes import *
from GenerateNodeDistribution2d import *
from siloPointmeshDump import *
from fieldStatistics import *
title("2-D test of centroidal relaxation.")
#-------------------------------------------------------------------------------
# Generic problem parameters
#-------------------------------------------------------------------------------
commandLine(KernelConstructor = NBSplineKernel,
order = 7,
nPerh = 1.01,
hmin = 1e-5,
hmax = 1.0,
hminratio = 1.0,
            # The initial density function coefficients: rho(x,y) = a + bx*x + by*y + cx2*x^2 + cxy*x*y + cy2*y^2
a = 1.0,
bx = 0.0,
by = 0.0,
cx2 = 0.0,
cxy = 0.0,
cy2 = 0.0,
# Initial geometry
nx = 50,
ny = 50,
x0 = 0.0,
x1 = 1.0,
y0 = 0.0,
y1 = 1.0,
ranfrac = 0.25,
seed = 14892042,
# Material properties
gamma = 5.0/3.0,
mu = 1.0,
# Simulation control
iterations = 100,
tol = 1.0e-3,
graphics = True,
baseName = "centroidal_relaxation_2d",
)
#-------------------------------------------------------------------------------
# Our density and gradient methods.
#-------------------------------------------------------------------------------
def rhofunc(posi):
return a + bx*posi.x + by*posi.y + cx2*posi.x**2 + cxy*posi.x*posi.y + cy2*posi.y**2
def METHOD_NAME(posi):
return Vector(bx + 2.0*cx2*posi.x + cxy*posi.y,
by + 2.0*cy2*posi.y + cxy*posi.x)
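#-------------------------------------------------------------------------------
# Added hedged sanity check (illustration only; the helper name is arbitrary and
# it is defined but never invoked, so the test behaviour is unchanged). It
# compares the analytic gradient above with central finite differences of
# rhofunc at one interior point, using a tiny attribute holder in place of a
# Spheral Vector argument.
#-------------------------------------------------------------------------------
def checkGradRho(xp = 0.5, yp = 0.5, h = 1.0e-6, tol = 1.0e-8):
    class _Pos:
        def __init__(self, x, y):
            self.x, self.y = x, y
    ddx = (rhofunc(_Pos(xp + h, yp)) - rhofunc(_Pos(xp - h, yp)))/(2.0*h)
    ddy = (rhofunc(_Pos(xp, yp + h)) - rhofunc(_Pos(xp, yp - h)))/(2.0*h)
    grad = METHOD_NAME(_Pos(xp, yp))
    assert abs(grad.x - ddx) < tol and abs(grad.y - ddy) < tol
    return grad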
#-------------------------------------------------------------------------------
# Create a random number generator.
#-------------------------------------------------------------------------------
import random
rangen = random.Random()
rangen.seed(seed)
#-------------------------------------------------------------------------------
# Material properties.
#-------------------------------------------------------------------------------
eos = GammaLawGasMKS(gamma, mu)
#-------------------------------------------------------------------------------
# Interpolation kernels.
#-------------------------------------------------------------------------------
if KernelConstructor==NBSplineKernel:
Wbase = NBSplineKernel(order)
else:
Wbase = KernelConstructor()
WT = TableKernel(Wbase, 1000)
output("WT")
#-------------------------------------------------------------------------------
# Make the NodeList.
#-------------------------------------------------------------------------------
nodes = makeFluidNodeList("nodes", eos,
hmin = hmin,
hmax = hmax,
hminratio = hminratio,
nPerh = nPerh,
kernelExtent = WT.kernelExtent)
output("nodes")
output("nodes.hmin")
output("nodes.hmax")
output("nodes.nodesPerSmoothingScale")
#-------------------------------------------------------------------------------
# Set the node properties.
#-------------------------------------------------------------------------------
generator = GenerateNodeDistribution2d(nx, ny, rhofunc, "lattice",
xmin = (x0, y0),
xmax = (x1, y1),
nNodePerh = nPerh,
SPH = True)
if mpi.procs > 1:
from VoronoiDistributeNodes import distributeNodes2d
#from PeanoHilbertDistributeNodes import distributeNodes2d
else:
from DistributeNodes import distributeNodes2d
distributeNodes2d((nodes, generator))
output("mpi.reduce(nodes.numInternalNodes, mpi.MIN)")
output("mpi.reduce(nodes.numInternalNodes, mpi.MAX)")
output("mpi.reduce(nodes.numInternalNodes, mpi.SUM)")
# Randomly jitter the node positions.
dx = (x1 - x0)/nx
dy = (y1 - y0)/ny
pos = nodes.positions()
for i in range(nodes.numInternalNodes):
pos[i].x += ranfrac * dx * rangen.uniform(-1.0, 1.0)
pos[i].y += ranfrac * dy * rangen.uniform(-1.0, 1.0)
# Initialize the mass and densities.
m = nodes.mass()
rho = nodes.massDensity()
for i in range(nodes.numNodes):
rho[i] = rhofunc(pos[i])
m[i] = rho[i]*dx*dy
#-------------------------------------------------------------------------------
# Construct a DataBase to hold our node list
#-------------------------------------------------------------------------------
db = DataBase()
output("db")
output("db.appendNodeList(nodes)")
output("db.numNodeLists")
output("db.numFluidNodeLists")
#-------------------------------------------------------------------------------
# Create boundary conditions.
#-------------------------------------------------------------------------------
xPlane0 = Plane(Vector(x0, y0), Vector( 1.0, 0.0))
xPlane1 = Plane(Vector(x1, y0), Vector(-1.0, 0.0))
yPlane0 = Plane(Vector(x0, y0), Vector( 0.0, 1.0))
yPlane1 = Plane(Vector(x1, y1), Vector( 0.0, -1.0))
xbc0 = ReflectingBoundary(xPlane0)
xbc1 = ReflectingBoundary(xPlane1)
ybc0 = ReflectingBoundary(yPlane0)
ybc1 = ReflectingBoundary(yPlane1)
boundaries = [] # [xbc0, xbc1, ybc0, ybc1]
#-------------------------------------------------------------------------------
# Call the centroidal relaxer.
#-------------------------------------------------------------------------------
# Report the initial mass matching.
print("Initial mass (min, max, avg, std dev) : ", fieldStatistics(m))
bcpoints = vector_of_Vector()
for p in [Vector(x0, y0), Vector(x1, y0), Vector(x1, y1), Vector(x0, y1)]:
bcpoints.append(p)
boundary = Polygon(bcpoints)
vol, surfacePoint = centroidalRelaxNodes(nodeListsAndBounds = [(nodes, boundary)],
W = WT,
rho = rhofunc,
gradrho = METHOD_NAME,
maxIterations = iterations,
boundaries = boundaries,
fracTol = tol,
tessellationFileName = baseName)
# Report the final mass matching.
print("Final mass (min, max, avg, std dev) : ", fieldStatistics(m))
#-------------------------------------------------------------------------------
# Plot the final state.
#-------------------------------------------------------------------------------
if graphics:
from SpheralGnuPlotUtilities import *
rPlot = plotNodePositions2d(db, colorNodeLists=0, colorDomains=1)
# rho
rhoPlot = plotFieldList(db.fluidMassDensity,
winTitle = "Density",
plotStyle = "points",
colorNodeLists = False,
plotGhosts = False)
# mass
massPlot = plotFieldList(db.fluidMass,
winTitle = "Mass",
plotStyle = "points",
colorNodeLists = False,
plotGhosts = False)
|
2,324 |
update settings
|
# vim: ft=python fileencoding=utf-8 sw=4 et sts=4
"""Main function and startup utilities for vimiv.
Module Attributes:
    _tmpdir: TemporaryDirectory when running with ``--temp-basedir``. The
        object must exist until vimiv exits.
    _tmppath: NamedTemporaryFile holding a binary image read from stdin. The
        object must exist until vimiv exits.
"""
import argparse
import os
import sys
import tempfile
from typing import cast, List
from vimiv.qt.core import QSize, QCoreApplication
from vimiv.qt.widgets import QApplication
from vimiv import app, api, parser, imutils, plugins
from vimiv.commands import runners, search, wildcards
from vimiv.config import configfile, keyfile, styles
from vimiv.gui import mainwindow
from vimiv.utils import xdg, crash_handler, log, trash_manager, customtypes, migration
# Must be imported to create the commands using the decorators
from vimiv.commands import ( # pylint: disable=unused-import
misccommands,
delete_command,
help_command,
)
from vimiv.config import configcommands # pylint: disable=unused-import
_tmpdir = None
_tmppath = None
_logger = log.module_logger(__name__)
def main() -> int:
"""Run startup and the Qt main loop."""
args = setup_pre_app(sys.argv[1:])
qt_args = parser.get_qt_args(args)
qapp = app.Application(*qt_args)
crash_handler.CrashHandler(qapp)
setup_post_app(args)
_logger.debug("Startup completed, starting Qt main loop")
returncode = qapp.exec()
plugins.cleanup()
_logger.debug("Exiting with status %d", returncode)
return returncode
def setup_pre_app(argv: List[str]) -> argparse.Namespace:
"""Early setup that is done before the QApplication is created.
Includes parsing the command line and setting up logging as well as initializing the
components that do not require an application.
Args:
argv: sys.argv[1:] from the executable or argv passed by test suite.
"""
args = parser.parse_args(argv)
if args.version:
import vimiv.version
print(vimiv.version.info(), vimiv.version.paths(), sep="\n\n")
sys.exit(customtypes.Exit.success)
migration.run()
init_directories(args)
log.setup_logging(args.log_level, *args.debug)
_logger.debug("Start: vimiv %s", " ".join(argv))
METHOD_NAME(args)
trash_manager.init()
return args
def setup_post_app(args: argparse.Namespace) -> None:
"""Setup performed after creating the QApplication."""
api.working_directory.init()
imutils.init()
init_ui(args)
# Must be done after UI so the search signals are processed after the widgets have
# been updated
search.search.connect_signals()
plugins.load()
init_paths(args)
if args.command:
run_startup_commands(*args.command)
if args.output:
def print_output() -> None:
print(wildcards.expand_internal(args.output, api.modes.current()))
# We are sure we have an application here
qapp = cast(QApplication, QCoreApplication.instance())
qapp.aboutToQuit.connect(print_output)
def init_directories(args: argparse.Namespace) -> None:
"""Create vimiv cache, config and data directories.
The directories are either the directories defined in the freedesktop
standard or located in a temporary base directory.
Args:
args: Arguments returned from parser.parse_args().
"""
if args.temp_basedir:
global _tmpdir
# We want the temporary directory to stick around until the end
# pylint: disable=consider-using-with
_tmpdir = tempfile.TemporaryDirectory(prefix="vimiv-tempdir-")
args.basedir = _tmpdir.name
if args.basedir is not None:
xdg.basedir = args.basedir
xdg.makedirs(xdg.vimiv_cache_dir(), xdg.vimiv_config_dir(), xdg.vimiv_data_dir())
def init_paths(args: argparse.Namespace) -> None:
"""Open paths given from commandline or fallback to library if set."""
_logger.debug("Opening paths")
# Path names passed via stdin
if args.stdinput and not sys.stdin.isatty():
print("stdin")
paths = [os.path.realpath(line.strip()) for line in sys.stdin]
# Binary image passed via stdin
elif args.binary_stdinput and not sys.stdin.isatty():
print("binary stdin")
global _tmppath
# We want the temporary image to stick around until the end
# pylint: disable=consider-using-with
_tmppath = tempfile.NamedTemporaryFile(prefix="vimiv-stdin-")
with open(_tmppath.name, "wb") as f:
f.write(sys.stdin.buffer.read())
paths = [_tmppath.name]
# Default
else:
print("default")
paths = args.paths
    _logger.debug("Initial paths: %s", paths)
try:
api.open_paths(paths)
except api.commands.CommandError:
_logger.debug("init_paths: No valid paths retrieved")
if api.settings.startup_library.value:
api.open_paths([os.getcwd()])
api.status.update("startup paths initialized")
def init_ui(args: argparse.Namespace) -> None:
"""Initialize the Qt UI."""
_logger.debug("Initializing UI")
mw = mainwindow.MainWindow()
if args.fullscreen:
mw.fullscreen()
# Center on screen and apply size
screen_geometry = QApplication.primaryScreen().geometry()
geometry = (
args.geometry
if args.geometry
else QSize(screen_geometry.width() // 2, screen_geometry.height() // 2)
)
x = screen_geometry.x() + (screen_geometry.width() - geometry.width()) // 2
y = screen_geometry.y() + (screen_geometry.height() - geometry.height()) // 2
mw.setGeometry(x, y, geometry.width(), geometry.height())
mw.show()
migration.run_welcome_popup(parent=mw)
def METHOD_NAME(args: argparse.Namespace) -> None:
"""Update default settings with command line arguments and configfiles.
Args:
args: Arguments returned from parser.parse_args().
"""
configfile.parse(args.config)
for option, value in args.cmd_settings:
try:
setting = api.settings.get(option)
setting.value = value
except KeyError:
log.error("Unknown setting %s", option)
except ValueError as e:
log.error(str(e))
keyfile.parse(args.keyfile)
styles.parse()
def run_startup_commands(*commands: str) -> None:
"""Run commands given via --command at startup.
Args:
commands: All command strings given via individual --command arguments.
"""
total = len(commands)
for i, command in enumerate(commands, start=1):
_logger.debug("Startup commands: running %d/%d '%s'", i, total, command)
if "quit" in command: # This does not work without a running app
log.warning("Quitting forcefully as the app does not exist")
app.Application.preexit(customtypes.Exit.success)
sys.exit(customtypes.Exit.success)
else:
runners.run(command, mode=api.modes.current())
|
2,325 |
test call kwargs single
|
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
Unit tests for the :func:`iris.common.lenient._lenient_client`.
"""
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests # isort:skip
from inspect import getmodule
from unittest.mock import sentinel
from iris.common.lenient import _LENIENT, _lenient_client
class Test(tests.IrisTest):
def setUp(self):
module_name = getmodule(self).__name__
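        # Qualified-name templates for the client/service functions defined
        # locally inside each test method; "{}" is filled with the test name.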
self.client = f"{module_name}" + ".Test.{}.<locals>.myclient"
self.service = f"{module_name}" + ".Test.{}.<locals>.myservice"
self.active = "active"
self.args_in = sentinel.arg1, sentinel.arg2
self.kwargs_in = dict(kwarg1=sentinel.kwarg1, kwarg2=sentinel.kwarg2)
def test_args_too_many(self):
emsg = "Invalid lenient client arguments, expecting 1"
with self.assertRaisesRegex(AssertionError, emsg):
_lenient_client(None, None)
def test_args_not_callable(self):
emsg = "Invalid lenient client argument, expecting a callable"
with self.assertRaisesRegex(AssertionError, emsg):
_lenient_client(None)
def test_args_and_kwargs(self):
def func():
pass
emsg = (
"Invalid lenient client, got both arguments and keyword arguments"
)
with self.assertRaisesRegex(AssertionError, emsg):
_lenient_client(func, services=func)
def test_call_naked(self):
@_lenient_client
def myclient():
return _LENIENT.__dict__.copy()
result = myclient()
self.assertIn(self.active, result)
qualname_client = self.client.format("test_call_naked")
self.assertEqual(result[self.active], qualname_client)
self.assertNotIn(qualname_client, result)
def test_call_naked_alternative(self):
def myclient():
return _LENIENT.__dict__.copy()
result = _lenient_client(myclient)()
self.assertIn(self.active, result)
qualname_client = self.client.format("test_call_naked_alternative")
self.assertEqual(result[self.active], qualname_client)
self.assertNotIn(qualname_client, result)
def test_call_naked_client_args_kwargs(self):
@_lenient_client
def myclient(*args, **kwargs):
return args, kwargs
args_out, kwargs_out = myclient(*self.args_in, **self.kwargs_in)
self.assertEqual(args_out, self.args_in)
self.assertEqual(kwargs_out, self.kwargs_in)
def test_call_naked_doc(self):
@_lenient_client
def myclient():
"""myclient doc-string"""
self.assertEqual(myclient.__doc__, "myclient doc-string")
def test_call_no_kwargs(self):
@_lenient_client()
def myclient():
return _LENIENT.__dict__.copy()
result = myclient()
self.assertIn(self.active, result)
qualname_client = self.client.format("test_call_no_kwargs")
self.assertEqual(result[self.active], qualname_client)
self.assertNotIn(qualname_client, result)
def test_call_no_kwargs_alternative(self):
def myclient():
return _LENIENT.__dict__.copy()
result = (_lenient_client())(myclient)()
self.assertIn(self.active, result)
qualname_client = self.client.format("test_call_no_kwargs_alternative")
self.assertEqual(result[self.active], qualname_client)
self.assertNotIn(qualname_client, result)
def test_call_kwargs_none(self):
@_lenient_client(services=None)
def myclient():
return _LENIENT.__dict__.copy()
result = myclient()
self.assertIn(self.active, result)
qualname_client = self.client.format("test_call_kwargs_none")
self.assertEqual(result[self.active], qualname_client)
self.assertNotIn(qualname_client, result)
def METHOD_NAME(self):
service = sentinel.service
@_lenient_client(services=service)
def myclient():
return _LENIENT.__dict__.copy()
result = myclient()
self.assertIn(self.active, result)
qualname_client = self.client.format("test_call_kwargs_single")
self.assertEqual(result[self.active], qualname_client)
self.assertIn(qualname_client, result)
self.assertEqual(result[qualname_client], (service,))
def test_call_kwargs_single_callable(self):
def myservice():
pass
@_lenient_client(services=myservice)
def myclient():
return _LENIENT.__dict__.copy()
test_name = "test_call_kwargs_single_callable"
result = myclient()
self.assertIn(self.active, result)
qualname_client = self.client.format(test_name)
self.assertEqual(result[self.active], qualname_client)
self.assertIn(qualname_client, result)
qualname_services = (self.service.format(test_name),)
self.assertEqual(result[qualname_client], qualname_services)
def test_call_kwargs_iterable(self):
services = (sentinel.service1, sentinel.service2)
@_lenient_client(services=services)
def myclient():
return _LENIENT.__dict__.copy()
result = myclient()
self.assertIn(self.active, result)
qualname_client = self.client.format("test_call_kwargs_iterable")
self.assertEqual(result[self.active], qualname_client)
self.assertIn(qualname_client, result)
self.assertEqual(set(result[qualname_client]), set(services))
def test_call_client_args_kwargs(self):
@_lenient_client()
def myclient(*args, **kwargs):
return args, kwargs
args_out, kwargs_out = myclient(*self.args_in, **self.kwargs_in)
self.assertEqual(args_out, self.args_in)
self.assertEqual(kwargs_out, self.kwargs_in)
def test_call_doc(self):
@_lenient_client()
def myclient():
"""myclient doc-string"""
self.assertEqual(myclient.__doc__, "myclient doc-string")
if __name__ == "__main__":
tests.main()
|
2,326 |
directional index
|
# :copyright (c) URBANopt, Alliance for Sustainable Energy, LLC, and other contributors.
# See also https://github.com/urbanopt/geojson-modelica-translator/blob/develop/LICENSE.md
from collections import defaultdict
class CouplingGraph:
"""Manages coupling relationships"""
def __init__(self, couplings):
if len(couplings) == 0:
raise Exception('At least one coupling must be provided')
self._couplings = couplings
self._models_by_id = {}
for coupling in self._couplings:
a, b = coupling._model_a, coupling._model_b
self._models_by_id[a.id] = a
self._models_by_id[b.id] = b
self._couplings_by_model_id = defaultdict(list)
for coupling in self._couplings:
a, b = coupling._model_a, coupling._model_b
self._couplings_by_model_id[a.id].append(coupling)
self._couplings_by_model_id[b.id].append(coupling)
# _grouped_couplings_by_model_id stores couplings a model is involved with
# grouped by the type of the _other_ model involved, e.g.
# {
# ...
# 'my_network_123': {
# 'ets_couplings': [...],
# 'system_couplings': [...]
# },
# ...
# }
self._grouped_couplings_by_model_id = {}
for model_id, couplings in self._couplings_by_model_id.items():
grouped_couplings = defaultdict(list)
for coupling in couplings:
other_model = coupling.get_other_model(self._models_by_id[model_id])
coupling_type = f'{other_model.simple_gmt_type}_couplings'
grouped_couplings[coupling_type].append(coupling)
self._grouped_couplings_by_model_id[model_id] = grouped_couplings
@property
def couplings(self):
return [coupling for coupling in self._couplings]
@property
def models(self):
return [model for _, model in self._models_by_id.items()]
def couplings_by_type(self, model_id):
"""Returns the model's associated couplings keyed by the types of the
_other_ model involved
For example if given model is ets, and its coupled to a load and network,
the result would be:
{
'load_couplings': [<load coupling>],
'network_couplings': [<network coupling>],
}
:param model_id: str
:return: dict
"""
model = self._models_by_id[model_id]
grouped_couplings = self._grouped_couplings_by_model_id[model.id]
result = {}
for type_, couplings in grouped_couplings.items():
result[type_] = [coupling.to_dict() for coupling in couplings]
return result
def METHOD_NAME(self, model_a_id, model_b_id):
"""Returns the index of model_b within model_a's adjacency list for
model_b's type.
For example, if our graph looks like this, and model_b is an ETS
```
{
...
model_a: {
ets_couplings: [
{ ets: model_b, ... }, { ets: model_c, ...}
],
...
},
...
}
```
Then this method would return 0, because it's at index 0
:param model_a_id: str, id of model_a
:param model_b_id: str, id of model_b
:return: int
"""
if model_a_id not in self._models_by_id:
raise Exception('Model A id was not found')
if model_b_id not in self._models_by_id:
raise Exception('Model B id was not found')
model_a, model_b = self._models_by_id[model_a_id], self._models_by_id[model_b_id]
grouped_couplings = self._grouped_couplings_by_model_id[model_a.id]
coupling_type = f'{model_b.simple_gmt_type}_couplings'
try:
couplings = grouped_couplings[coupling_type]
other_models = [coupling.get_other_model(model_a) for coupling in couplings]
other_model_ids = [m.id for m in other_models]
return other_model_ids.index(model_b.id)
except KeyError:
raise Exception(f'model_a has no coupling with model_b\'s type ({model_b.simple_gmt_type})')
except ValueError:
raise Exception('model_a has no coupling with model_b')
def get_coupled_load(self, ets_id):
"""Returns the load coupled to the provided ets
:param ets_id: str
:return: dict
"""
if ets_id not in self._grouped_couplings_by_model_id:
raise Exception(f'No ETS with id {ets_id}')
try:
load_couplings = self._grouped_couplings_by_model_id[ets_id]['load_couplings']
return load_couplings[0].to_dict()['load']
except (KeyError, IndexError):
raise Exception('ETS is not coupled to a load')
def get_coupling(self, coupling_id):
for coupling in self._couplings:
if coupling.id == coupling_id:
return coupling
raise Exception(f'No coupling found with id "{coupling_id}"')
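# --- Added hedged usage sketch (illustration only). The stand-in classes below
# --- only mimic the attributes CouplingGraph touches (id, simple_gmt_type,
# --- _model_a/_model_b, get_other_model, to_dict); they are assumptions, not
# --- part of the geojson-modelica-translator API.
if __name__ == '__main__':
    class _Model:
        def __init__(self, id_, gmt_type):
            self.id = id_
            self.simple_gmt_type = gmt_type

    class _Coupling:
        def __init__(self, id_, model_a, model_b):
            self.id = id_
            self._model_a = model_a
            self._model_b = model_b

        def get_other_model(self, model):
            return self._model_b if model is self._model_a else self._model_a

        def to_dict(self):
            return {self._model_a.simple_gmt_type: self._model_a,
                    self._model_b.simple_gmt_type: self._model_b}

    load = _Model('load_1', 'load')
    ets = _Model('ets_1', 'ets')
    graph = CouplingGraph([_Coupling('coupling_1', load, ets)])
    print(graph.couplings_by_type(ets.id))     # {'load_couplings': [{...}]}
    print(graph.METHOD_NAME(load.id, ets.id))  # 0: first (only) ets coupling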
|
2,327 |
test scaleway remove absent nic
|
# Copyright (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from ansible_collections.community.general.plugins.modules import scaleway_compute_private_network
from ansible_collections.community.general.plugins.module_utils.scaleway import Scaleway, Response
from ansible_collections.community.general.tests.unit.plugins.modules.utils import set_module_args
from ansible_collections.community.general.tests.unit.compat.mock import patch
def response_without_nics():
info = {"status": 200,
"body": '{ "private_nics": []}'
}
return Response(None, info)
def response_with_nics():
info = {"status": 200,
"body": ('{ "private_nics": [{'
'"id": "c123b4cd-ef5g-678h-90i1-jk2345678l90",'
'"private_network_id": "b589b4cd-ef5g-678h-90i1-jk2345678l90",'
'"server_id": "c004b4cd-ef5g-678h-90i1-jk2345678l90",'
'"mac_address": "02:00:00:00:12:23",'
'"state": "available",'
'"creation_date": "2022-03-30T06:25:28.155973+00:00",'
'"modification_date": "2022-03-30T06:25:28.155973+00:00",'
'"zone": "fr-par-1"'
'}]}'
)
}
return Response(None, info)
def response_when_add_nics():
info = {"status": 200,
"body": ('{ "private_nics": {'
'"id": "c123b4cd-ef5g-678h-90i1-jk2345678l90",'
'"private_network_id": "b589b4cd-ef5g-678h-90i1-jk2345678l90",'
'"server_id": "c004b4cd-ef5g-678h-90i1-jk2345678l90",'
'"mac_address": "02:00:00:00:12:23",'
'"state": "available",'
'"creation_date": "2022-03-30T06:25:28.155973+00:00",'
'"modification_date": "2022-03-30T06:25:28.155973+00:00",'
'"zone": "fr-par-1"'
'}}'
)
}
return Response(None, info)
def response_remove_nics():
info = {"status": 200}
return Response(None, info)
def test_scaleway_private_network_without_arguments(capfd):
set_module_args({})
with pytest.raises(SystemExit) as results:
scaleway_compute_private_network.main()
out, err = capfd.readouterr()
assert not err
assert json.loads(out)['failed']
def test_scaleway_add_nic(capfd):
os.environ['SCW_API_TOKEN'] = 'notrealtoken'
pnid = 'b589b4cd-ef5g-678h-90i1-jk2345678l90'
cid = 'c004b4cd-ef5g-678h-90i1-jk2345678l90'
url = 'servers/' + cid + '/private_nics'
set_module_args({"project": "a123b4cd-ef5g-678h-90i1-jk2345678l90",
"state": "present",
"region": "par1",
"compute_id": cid,
"private_network_id": pnid
})
with patch.object(Scaleway, 'get') as mock_scw_get:
mock_scw_get.return_value = response_without_nics()
with patch.object(Scaleway, 'post') as mock_scw_post:
mock_scw_post.return_value = response_when_add_nics()
with pytest.raises(SystemExit) as results:
scaleway_compute_private_network.main()
mock_scw_post.assert_any_call(path=url, data={"private_network_id": pnid})
mock_scw_get.assert_any_call(url)
out, err = capfd.readouterr()
del os.environ['SCW_API_TOKEN']
assert not err
assert json.loads(out)['changed']
def test_scaleway_add_existing_nic(capfd):
os.environ['SCW_API_TOKEN'] = 'notrealtoken'
pnid = 'b589b4cd-ef5g-678h-90i1-jk2345678l90'
cid = 'c004b4cd-ef5g-678h-90i1-jk2345678l90'
url = 'servers/' + cid + '/private_nics'
set_module_args({"project": "a123b4cd-ef5g-678h-90i1-jk2345678l90",
"state": "present",
"region": "par1",
"compute_id": cid,
"private_network_id": pnid
})
with patch.object(Scaleway, 'get') as mock_scw_get:
mock_scw_get.return_value = response_with_nics()
with pytest.raises(SystemExit) as results:
scaleway_compute_private_network.main()
mock_scw_get.assert_any_call(url)
out, err = capfd.readouterr()
del os.environ['SCW_API_TOKEN']
assert not err
assert not json.loads(out)['changed']
def test_scaleway_remove_existing_nic(capfd):
os.environ['SCW_API_TOKEN'] = 'notrealtoken'
pnid = 'b589b4cd-ef5g-678h-90i1-jk2345678l90'
cid = 'c004b4cd-ef5g-678h-90i1-jk2345678l90'
nicid = 'c123b4cd-ef5g-678h-90i1-jk2345678l90'
url = 'servers/' + cid + '/private_nics'
urlremove = 'servers/' + cid + '/private_nics/' + nicid
set_module_args({"project": "a123b4cd-ef5g-678h-90i1-jk2345678l90",
"state": "absent",
"region": "par1",
"compute_id": cid,
"private_network_id": pnid
})
with patch.object(Scaleway, 'get') as mock_scw_get:
mock_scw_get.return_value = response_with_nics()
with patch.object(Scaleway, 'delete') as mock_scw_delete:
mock_scw_delete.return_value = response_remove_nics()
with pytest.raises(SystemExit) as results:
scaleway_compute_private_network.main()
mock_scw_delete.assert_any_call(urlremove)
mock_scw_get.assert_any_call(url)
out, err = capfd.readouterr()
del os.environ['SCW_API_TOKEN']
assert not err
assert json.loads(out)['changed']
def METHOD_NAME(capfd):
os.environ['SCW_API_TOKEN'] = 'notrealtoken'
pnid = 'b589b4cd-ef5g-678h-90i1-jk2345678l90'
cid = 'c004b4cd-ef5g-678h-90i1-jk2345678l90'
url = 'servers/' + cid + '/private_nics'
set_module_args({"project": "a123b4cd-ef5g-678h-90i1-jk2345678l90",
"state": "absent",
"region": "par1",
"compute_id": cid,
"private_network_id": pnid
})
with patch.object(Scaleway, 'get') as mock_scw_get:
mock_scw_get.return_value = response_without_nics()
with pytest.raises(SystemExit) as results:
scaleway_compute_private_network.main()
mock_scw_get.assert_any_call(url)
out, err = capfd.readouterr()
del os.environ['SCW_API_TOKEN']
assert not err
assert not json.loads(out)['changed']
|
2,328 |
pack results2
|
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 12 17:58:51 2014
@author: david
"""
import numpy as np
try:
import copy_reg
except ImportError:
import copyreg as copy_reg
def pickleSlice(slice):
return unpickleSlice, (slice.start, slice.stop, slice.step)
def unpickleSlice(start, stop, step):
return slice(start, stop, step)
copy_reg.pickle(slice, pickleSlice, unpickleSlice)
def replNoneWith1(n):
if n is None:
return 1
else:
return n
def fmtSlicesUsed(slicesUsed):
if slicesUsed is None:
return ((-1,-1,-1),(-1,-1,-1),(-1,-1,-1))
else:
return tuple([(sl.start, sl.stop, replNoneWith1(sl.step)) for sl in slicesUsed] )
def _tuplify(var):
try:
return tuple(var)
except TypeError:
return var
def pack_results(dtype, tIndex, fitResults, fitError=None, startParams=None, slicesUsed=None, resultCode=-1, **kwargs):
""" Pack fit results into a structured array of the given dtype
Collects logic from fit factories to a central place which hopefully makes it easier to
maintain.
Parameters
----------
dtype : np.dtype
the numpy dtype of the structured array we want to pack into
tIndex : int
the current frame number
fitResults : np.ndarray
the fit parameters in the order they are defined in the dtype
fitError : np.ndarray
the fit errors in the order they are defined in the dtype
startParams : np.ndarray, optional
the start parameters in the order they are defined in the dtype
slicesUsed : tuple, optional
a 3-tuple of slice objects (xslice, yslice, zslice) that define the ROI used for this molecule
resultCode : int, optional
the result code as returned by the fitting routine
**kwargs : dict, optional
any additional information which gets stored in the structured array, either a scalar or a numpy array
Returns
-------
np.recarray
The packed results array
TODOS:
- Support length mismatch on data
FIXME: This currently uses tuples which is really gross for a number of reasons (e.g. moves what should be a numpy
low level c loop into python, relies on implicitly coercing types rather than doing it explicitly). For some
reason it is currently faster than assigning to views into an array even though it really should be quite a
lot slower. If numpy history is anything to go by, it's also quite likely to break at some point in the future.
"""
dtype = np.dtype(dtype)
if fitError is None:
fitError = -5e3 + 0 * fitResults
if startParams is None:
startParams = -5e3 + 0 * fitResults
slicesUsed = fmtSlicesUsed(slicesUsed)
ns = locals()
ns.update(kwargs)
return np.array(tuple([_tuplify(ns[n]) for n in dtype.names]), dtype=dtype)
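# Added hedged usage illustration (not part of the original module): shows how a
# fit factory might call pack_results with a minimal made-up dtype. The field
# names mirror the keyword arguments; the 'A'/'x0' sub-fields are arbitrary.
# Wrapped in a function so nothing executes at import time.
def _example_pack_usage():
    example_dtype = np.dtype([('tIndex', '<i4'),
                              ('fitResults', [('A', '<f4'), ('x0', '<f4')]),
                              ('fitError', [('A', '<f4'), ('x0', '<f4')]),
                              ('resultCode', '<i4')])
    return pack_results(example_dtype, tIndex=17,
                        fitResults=np.array([1.0, 2.0], 'f4'),
                        fitError=np.array([0.1, 0.2], 'f4'),
                        resultCode=0)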
###############################################
# Below are various experimental alternatives to pack_results. They are still a work in progress, but should
# hopefully let us replace some of the tuple madness in the above one. Of the alternatives, _pack_results4, which
# pushes stuff into a pre-allocated array, is ~2 times faster than the tuple based code above, but would need quite
# a lot of additional refactoring in the calling code to make it actually work (the exceptions here are the Multifit
# and GPU fitting classes). Punting that to some point in the future for now.
def _pack_results1(dtype, flatdtype, tIndex, fitResults, fitError=None, startParams=None, slicesUsed=None, resultCode=-1, **kwargs):
dtype = np.dtype(dtype)
if fitError is None:
fitError = -5e3 + 0 * fitResults
if startParams is None:
startParams = -5e3 + 0 * fitResults
slicesUsed = np.ravel(fmtSlicesUsed(slicesUsed))
ns = locals()
ns.update(kwargs)
res = np.zeros(1, dtype=flatdtype)
for n in dtype.names:
res[n] = ns[n]
return res.view(dtype)
def _pack_results4(out, flat_out, tIndex, fitResults, fitError=None, startParams=None, slicesUsed=None, resultCode=-1,
**kwargs):
if fitError is None:
fitError = -5e3 + 0 * fitResults
if startParams is None:
startParams = -5e3 + 0 * fitResults
slicesUsed = np.ravel(fmtSlicesUsed(slicesUsed))
ns = locals()
ns.update(kwargs)
for n in out.dtype.names:
flat_out[n] = ns[n]
return out
def _pack_results3(dtype, flatdtype, tIndex, fitResults, fitError=None, startParams=None, slicesUsed=None, resultCode=-1,
**kwargs):
dtype = np.dtype(dtype)
if fitError is None:
fitError = -5e3 + 0 * fitResults
if startParams is None:
startParams = -5e3 + 0 * fitResults
slicesUsed = np.ravel(fmtSlicesUsed(slicesUsed))
ns = locals()
ns.update(kwargs)
#res = np.zeros(1, dtype=flatdtype)
#for n in dtype.names:
# d = ns[n]
return np.array(tuple([ns[n] for n in dtype.names]), flatdtype).view(dtype)
def METHOD_NAME(dtype, tIndex, fitResults, fitError=None, startParams=None, slicesUsed=None, resultCode=-1, **kwargs):
dtype = np.dtype(dtype)
if fitError is None:
fitError = -5e3 + 0 * fitResults
if startParams is None:
startParams = -5e3 + 0 * fitResults
slicesUsed = fmtSlicesUsed(slicesUsed)
ns = locals()
ns.update(kwargs)
res = np.zeros(1, dtype=dtype)
for n in dtype.names:
res[n] = _tuplify(ns[n])
return res
#generate a flat dtype from a standard nested one (incomplete)
def _gen_flat_dtype(dtype):
dtype = np.dtype(dtype)
out_dt = []
for n in dtype.names:
field_dt = dtype.fields[n][0]
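        # --- Added hedged completion sketch: the original function is marked
        # --- incomplete. Assumption (not from the source): "flat" means each
        # --- nested structured field becomes a plain subarray of its first
        # --- subfield's scalar type (one level of homogeneous nesting), so that
        # --- whole arrays can be assigned per field as in _pack_results1/3.
        if field_dt.names is None:
            # plain scalar field - keep unchanged
            out_dt.append((n, field_dt))
        else:
            base = field_dt.fields[field_dt.names[0]][0].base
            count = field_dt.itemsize // base.itemsize
            out_dt.append((n, base, (count,)))
    return np.dtype(out_dt)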
|
2,329 |
min
|
"""paddle backend implementation"""
from packaging.version import Version
import paddle
if Version(paddle.__version__) != Version("0.0.0"):
raise RuntimeError("DeepXDE requires PaddlePaddle==0.0.0(develop).")
if paddle.device.is_compiled_with_cuda():
paddle.device.set_device("gpu")
lib = paddle
def data_type_dict():
return {
"float16": paddle.float16,
"float32": paddle.float32,
"float64": paddle.float64,
"uint8": paddle.uint8,
"int8": paddle.int8,
"int16": paddle.int16,
"int32": paddle.int32,
"int64": paddle.int64,
"bool": paddle.bool,
}
def is_gpu_available():
device = paddle.device.get_device()
# "cpu"/"gpu:x"/"xpu:x"/"mlu:x"/"npu:x"
return "gpu" in device
def is_tensor(obj):
return paddle.is_tensor(obj)
def shape(input_tensor):
return input_tensor.shape
def size(input_tensor):
return int(paddle.numel(input_tensor))
def ndim(input_tensor):
return input_tensor.ndim
def transpose(tensor, axes=None):
if axes is None:
axes = tuple(range(tensor.ndim)[::-1])
return paddle.transpose(tensor, axes)
def reshape(tensor, shape):
return paddle.reshape(tensor, shape)
def Variable(initial_value, dtype=None):
if paddle.in_dynamic_mode():
return paddle.to_tensor(initial_value, dtype=dtype, stop_gradient=False)
return paddle.create_parameter(
shape=[1],
dtype=paddle.get_default_dtype() if dtype is None else dtype,
default_initializer=paddle.nn.initializer.Constant(value=initial_value),
)
def as_tensor(data, dtype=None):
if paddle.is_tensor(data):
if dtype is None or data.dtype == dtype:
return data
return data.astype(dtype)
return paddle.to_tensor(data, dtype=dtype)
def sparse_tensor(indices, values, shape):
return paddle.sparse.sparse_coo_tensor(
list(zip(*indices)), values, shape, stop_gradient=False
)
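# Added note (illustration only): `indices` above is expected as an iterable of
# per-element coordinate tuples, e.g. [(0, 1), (2, 0)] for two nonzeros of a 2-D
# tensor; zip(*indices) transposes that into the per-dimension index lists that
# paddle.sparse.sparse_coo_tensor takes.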
def from_numpy(np_array):
return paddle.to_tensor(np_array)
def to_numpy(input_tensor):
return input_tensor.detach().cpu().numpy()
def concat(values, axis):
return paddle.concat(values, axis=axis)
def stack(values, axis):
return paddle.stack(values, axis=axis)
def expand_dims(tensor, axis):
return paddle.unsqueeze(tensor, axis=axis)
def reverse(tensor, axis):
return paddle.flip(tensor, axis)
def roll(tensor, shift, axis):
return paddle.roll(tensor, shift, axis)
def lgamma(tensor):
return paddle.lgamma(tensor)
def elu(x):
return paddle.nn.functional.elu(x)
def relu(x):
return paddle.nn.functional.relu(x)
def selu(x):
return paddle.nn.functional.selu(x)
def sigmoid(x):
return paddle.nn.functional.sigmoid(x)
def silu(x):
return paddle.nn.functional.silu(x)
def sin(x):
return paddle.sin(x)
def cos(x):
return paddle.cos(x)
def exp(x):
return paddle.exp(x)
def square(x):
return paddle.square(x)
# pylint: disable=redefined-builtin
def abs(x):
return paddle.abs(x)
def minimum(x, y):
return paddle.minimum(x, y)
def tanh(x):
return paddle.tanh(x)
def pow(x, y):
return paddle.pow(x, y)
def mean(input_tensor, dim, keepdims=False):
return paddle.mean(input_tensor, axis=dim, keepdim=keepdims)
def reduce_mean(input_tensor):
return paddle.mean(input_tensor)
def sum(input_tensor, dim, keepdims=False):
return paddle.sum(input_tensor, axis=dim, keepdim=keepdims)
def reduce_sum(input_tensor):
return paddle.sum(input_tensor)
def prod(input_tensor, dim, keepdims=False):
return paddle.prod(input_tensor, axis=dim, keepdim=keepdims)
def reduce_prod(input_tensor):
return paddle.prod(input_tensor)
# pylint: disable=redefined-builtin
def METHOD_NAME(input_tensor, dim, keepdims=False):
return paddle.METHOD_NAME(input_tensor, axis=dim, keepdim=keepdims)
def reduce_min(input_tensor):
return paddle.METHOD_NAME(input_tensor)
# pylint: disable=redefined-builtin
def max(input_tensor, dim, keepdims=False):
return paddle.max(input_tensor, axis=dim, keepdim=keepdims)
def reduce_max(input_tensor):
return paddle.max(input_tensor)
def norm(x, ord=None, axis=None, keepdims=False):
if ord is None:
ord = 2
return paddle.linalg.norm(x, p=ord, axis=axis, keepdim=keepdims)
def zeros(shape, dtype):
return paddle.zeros(shape, dtype=dtype)
def zeros_like(input_tensor):
return paddle.zeros_like(input_tensor)
def matmul(x, y):
return paddle.mm(x, y)
def sparse_dense_matmul(x, y):
return paddle.sparse.matmul(x, y)
|
2,330 |
kill podman container by name
|
from pathlib import Path
from typing import Callable, Optional
from paramiko import SSHException
from scp import SCPException
import consts
from assisted_test_infra.test_infra.controllers.node_controllers import ssh
from assisted_test_infra.test_infra.controllers.node_controllers.disk import Disk
from service_client import log
class Node:
def __init__(self, name, node_controller, private_ssh_key_path: Optional[Path] = None, username="core"):
self.name = name
self.private_ssh_key_path = private_ssh_key_path
self.username = username
self.node_controller = node_controller
self.original_vcpu_count = self.get_cpu_cores()
self.original_ram_kib = self.get_ram_kib()
self._ips = []
self._macs = []
def __str__(self):
return self.name
@property
def is_active(self):
return self.node_controller.is_active(self.name)
def is_master_in_name(self):
return consts.NodeRoles.MASTER in self.name
def is_worker_in_name(self):
return consts.NodeRoles.WORKER in self.name
def _set_ips_and_macs(self):
self._ips, self._macs = self.node_controller.get_node_ips_and_macs(self.name)
# TODO maybe add ttl? need mechanism that
# will zero this value when node is stopped
@property
def ips(self):
if not self._ips:
self._set_ips_and_macs()
return self._ips
@property
def macs(self):
if not self._macs:
self._set_ips_and_macs()
return self._macs
@property
def ssh_connection(self):
if not self.ips:
raise RuntimeError(f"No available IPs for node {self.name}")
log.info("Trying to access through IP addresses: %s", ", ".join(self.ips))
for ip in self.ips:
exception = None
try:
connection = ssh.SshConnection(
ip, private_ssh_key_path=self.private_ssh_key_path, username=self.username
)
connection.connect()
return connection
except (TimeoutError, SCPException, SSHException) as e:
log.warning("Could not SSH through IP %s: %s", ip, str(e))
exception = e
if exception is not None:
raise exception
def upload_file(self, local_source_path, remote_target_path):
with self.ssh_connection as _ssh:
return _ssh.upload_file(local_source_path, remote_target_path)
def download_file(self, remote_source_path, local_target_path):
with self.ssh_connection as _ssh:
return _ssh.download_file(remote_source_path, local_target_path)
def run_command(self, bash_command, background=False):
output = ""
if not self.node_controller.is_active(self.name):
raise RuntimeError("%s is not active, can't run given command")
with self.ssh_connection as _ssh:
if background:
_ssh.background_script(bash_command)
else:
output = _ssh.script(bash_command, verbose=False)
return output
def shutdown(self):
return self.node_controller.shutdown_node(self.name)
def start(self, check_ips=True):
return self.node_controller.start_node(self.name, check_ips)
def restart(self):
self.shutdown()
self.start()
def restart_service(self, service):
log.info("Restarting service: %s on host %s", service, self.name)
self.run_command(f"sudo systemctl restart {service}.service")
def reset(self):
log.info("Resetting host %s", self.name)
self.shutdown()
self.format_disk()
self.start()
def format_disk(self, disk_index: int = 0):
self.node_controller.format_node_disk(self.name, disk_index)
def kill_installer(self):
self.METHOD_NAME("assisted-installer")
def kill_service(self, service):
log.info("Killing service %s on host %s", service, self.name)
self.run_command(f"sudo systemctl kill {service}.service || true")
def METHOD_NAME(self, container_name):
output = self.run_command(f"sudo su root -c 'podman ps | grep {container_name}'")
log.info(
f"Container details on {self.name}: provided container name: {container_name}, output: " f"\n {output}"
)
log.info(f"Killing container: {container_name}")
output = self.run_command(f"sudo su root -c 'podman kill {container_name}'")
log.info(f"Output of kill container command: {output}")
def is_service_active(self, service):
log.info("Verifying if service %s is active on host %s", service, self.name)
output = self.run_command(f"sudo systemctl is-active {service}.service || true")
return output.strip() == "active"
def set_boot_order(self, cd_first=False, cdrom_iso_path=None) -> None:
log.info("Setting boot order with cd_first=%s on %s", cd_first, self.name)
self.node_controller.set_boot_order(node_name=self.name, cd_first=cd_first, cdrom_iso_path=cdrom_iso_path)
def set_per_device_boot_order(self, key: Callable[[Disk], int]):
log.info("Setting boot order on %s", self.name)
self.node_controller.set_per_device_boot_order(node_name=self.name, key=key)
def set_boot_order_flow(self, cd_first=False, start=True):
log.info("Setting boot order , cd_first=%s, start=%s", cd_first, start)
self.shutdown()
self.set_boot_order(cd_first)
if start:
self.start()
def get_host_id(self):
return self.node_controller.get_host_id(self.name)
def get_cpu_cores(self):
return self.node_controller.get_cpu_cores(self.name)
def set_cpu_cores(self, core_count):
self.node_controller.set_cpu_cores(self.name, core_count)
def reset_cpu_cores(self):
self.set_cpu_cores(self.original_vcpu_count)
def get_ram_kib(self):
return self.node_controller.get_ram_kib(self.name)
def set_ram_kib(self, ram_kib):
self.node_controller.set_ram_kib(self.name, ram_kib)
def reset_ram_kib(self):
self.set_ram_kib(self.original_ram_kib)
def get_disks(self):
return self.node_controller.list_disks(self.name)
def attach_test_disk(self, disk_size, **kwargs):
return self.node_controller.attach_test_disk(self.name, disk_size, **kwargs)
def detach_all_test_disks(self):
self.node_controller.detach_all_test_disks(self.name)
def attach_interface(self, network_xml, target_interface=consts.TEST_TARGET_INTERFACE):
return self.node_controller.attach_interface(self.name, network_xml, target_interface)
def add_interface(self, network_name, target_interface=consts.TEST_TARGET_INTERFACE):
return self.node_controller.add_interface(self.name, network_name, target_interface)
def create_network(self, network_xml):
return self.node_controller.create_network(network_xml)
def get_network_by_name(self, network_name):
return self.node_controller.get_network_by_name(network_name)
def destroy_network(self, network):
self.node_controller.destroy_network(network)
def undefine_interface(self, mac):
self.node_controller.undefine_interface(self.name, mac)
|
2,331 |
test categories children
|
import graphene
import pytest
from .....product.models import Category
from ....tests.utils import get_graphql_content
@pytest.mark.django_db
@pytest.mark.count_queries(autouse=False)
def test_category_view(api_client, category_with_products, count_queries, channel_USD):
query = """
fragment BasicProductFields on Product {
id
name
thumbnail {
url
alt
}
thumbnail2x: thumbnail(size: 510) {
url
}
}
fragment Price on TaxedMoney {
gross {
amount
currency
}
net {
amount
currency
}
}
fragment ProductPricingField on Product {
pricing {
onSale
priceRangeUndiscounted {
start {
...Price
}
stop {
...Price
}
}
priceRange {
start {
...Price
}
stop {
...Price
}
}
}
}
query Category($id: ID!, $pageSize: Int, $channel: String) {
products (
first: $pageSize,
filter: {categories: [$id]},
channel: $channel
) {
totalCount
edges {
node {
...BasicProductFields
...ProductPricingField
category {
id
name
}
}
}
pageInfo {
endCursor
hasNextPage
hasPreviousPage
startCursor
}
}
category(id: $id) {
seoDescription
seoTitle
id
name
backgroundImage {
url
}
children(first: 10) {
edges {
node {
id
name
}
}
}
ancestors(last: 5) {
edges {
node {
id
name
}
}
}
}
attributes(filter: {inCategory: $id}, channel: $channel, first: 100) {
edges {
node {
id
name
slug
choices(first: 10) {
edges {
node {
id
name
slug
}
}
}
}
}
}
}
"""
variables = {
"pageSize": 100,
"id": graphene.Node.to_global_id("Category", category_with_products.pk),
"channel": channel_USD.slug,
}
content = get_graphql_content(api_client.post_graphql(query, variables))
assert content["data"]["category"] is not None
@pytest.mark.django_db
@pytest.mark.count_queries(autouse=False)
def METHOD_NAME(api_client, categories_with_children, count_queries):
query = """query categories {
categories(first: 30) {
edges {
node {
children(first: 30) {
edges {
node {
id
name
children(first: 30) {
edges {
node {
id
name
}
}
}
}
}
}
}
}
}
}"""
content = get_graphql_content(api_client.post_graphql(query))
assert content["data"]["categories"] is not None
@pytest.mark.django_db
@pytest.mark.count_queries(autouse=False)
def test_category_delete(
staff_api_client,
category_with_products,
permission_manage_products,
settings,
count_queries,
):
query = """
mutation($id: ID!) {
categoryDelete(id: $id) {
category {
name
}
errors {
field
message
}
}
}
"""
settings.PLUGINS = ["saleor.plugins.webhook.plugin.WebhookPlugin"]
category = category_with_products
variables = {"id": graphene.Node.to_global_id("Category", category.id)}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
errors = content["data"]["categoryDelete"]["errors"]
assert not errors
@pytest.mark.django_db
@pytest.mark.count_queries(autouse=False)
def test_categories_for_federation_query_count(
api_client,
django_assert_num_queries,
count_queries,
):
categories = Category.objects.bulk_create(
[
Category(
name="category 1", slug="category-1", lft=0, rght=1, tree_id=0, level=0
),
Category(
name="category 2", slug="category-2", lft=2, rght=3, tree_id=0, level=0
),
Category(
name="category 3", slug="category-3", lft=4, rght=5, tree_id=0, level=0
),
]
)
query = """
query GetCategoryInFederation($representations: [_Any]) {
_entities(representations: $representations) {
__typename
... on Category {
id
name
}
}
}
"""
variables = {
"representations": [
{
"__typename": "Category",
"id": graphene.Node.to_global_id("Category", categories[0].pk),
},
],
}
with django_assert_num_queries(1):
response = api_client.post_graphql(query, variables)
content = get_graphql_content(response)
assert len(content["data"]["_entities"]) == 1
variables = {
"representations": [
{
"__typename": "Category",
"id": graphene.Node.to_global_id("Category", category.pk),
}
for category in categories
],
}
with django_assert_num_queries(1):
response = api_client.post_graphql(query, variables)
content = get_graphql_content(response)
assert len(content["data"]["_entities"]) == 3
|
2,332 |
test add providers
|
import pytest
from mimesis import BaseProvider, Generic
class TestGeneric:
def test_reseed(self, generic):
generic.reseed(0xFFF)
number_1 = generic.random.uniform(0, 1000)
address_1 = generic.address.address()
generic.reseed(0xFFF)
number_2 = generic.random.uniform(0, 1000)
address_2 = generic.address.address()
assert number_1 == number_2
assert address_1 == address_2
def test_str(self, generic):
assert str(generic).startswith("Generic")
def test_base_person(self, generic):
result = generic.person.username()
assert result is not None
def test_base_text(self, generic):
result = generic.text.words()
assert result is not None
def test_base_payment(self, generic):
result = generic.payment.bitcoin_address()
assert result is not None
def test_base_address(self, generic):
result = generic.address.address()
assert result is not None
def test_base_food(self, generic):
result = generic.food.fruit()
assert result is not None
def test_base_finance(self, generic):
result = generic.finance.currency_symbol()
assert result is not None
def test_base_code(self, generic):
result = generic.code.isbn()
assert result is not None
def test_base_binary_file(self, generic):
result = generic.binaryfile.video()
assert isinstance(result, bytes)
def test_bad_argument(self, generic):
with pytest.raises(AttributeError):
_ = generic.bad_argument # noqa
def METHOD_NAME(self, generic):
class Provider1(BaseProvider):
@staticmethod
def one():
return 1
class Provider2(BaseProvider):
class Meta:
name = "custom_provider"
@staticmethod
def two():
return 2
class Provider3(BaseProvider):
@staticmethod
def three():
return 3
class Provider4:
@staticmethod
def empty():
...
class Provider5(BaseProvider):
@staticmethod
def five():
return 5
generic.add_providers(Provider1, Provider2, Provider3)
assert generic.provider1.one() == 1
assert generic.custom_provider.two() == 2
assert generic.provider3.three() == 3
generic += Provider5
assert generic.provider5.five() == 5
with pytest.raises(TypeError):
generic.add_providers(Provider4)
with pytest.raises(TypeError):
generic.add_providers(3)
class UnnamedProvider(BaseProvider):
@staticmethod
def nothing():
return None
generic.add_provider(UnnamedProvider)
assert generic.unnamedprovider.nothing() is None
def test_add_provider_generic_to_generic(self, generic):
with pytest.raises(TypeError):
generic.add_provider(Generic)
def test_add_providers_generic_to_generic(self, generic):
with pytest.raises(TypeError):
generic.add_providers(Generic)
def test_add_provider(self, generic):
class CustomProvider(BaseProvider):
def __init__(self, seed, a, b, c):
super().__init__(seed=seed)
self.a = a
self.b = b
self.c = c
class Meta:
name = "custom_provider"
generic.add_provider(CustomProvider, a="a", b="b", c="c", seed=0xFFF)
# See https://github.com/lk-geimfari/mimesis/issues/1172
assert generic.custom_provider.seed != 0xFFF
assert generic.custom_provider.seed == generic.seed
assert generic.custom_provider.a == "a"
assert generic.custom_provider.b == "b"
assert generic.custom_provider.c == "c"
def test_dir(self, generic):
providers = generic.__dir__()
for p in providers:
assert not p.startswith("_")
class TestSeededGeneric:
@pytest.fixture
def g1(self, seed):
return Generic(seed=seed)
@pytest.fixture
def g2(self, seed):
return Generic(seed=seed)
def test_generic_address(self, g1, g2):
assert g1.address.street_number() == g2.address.street_number()
assert g1.address.street_name() == g2.address.street_name()
def test_generic_finance(self, g1, g2):
assert g1.finance.company() == g2.finance.company()
def test_generic_code(self, g1, g2):
assert g1.code.locale_code() == g2.code.locale_code()
assert g1.code.issn() == g2.code.issn()
def test_generic_cryptographic(self, g1, g2):
assert g1.cryptographic.uuid() != g2.cryptographic.uuid()
assert g1.cryptographic.hash() != g2.cryptographic.hash()
def test_generic_datetime(self, g1, g2):
assert g1.datetime.week_date() == g2.datetime.week_date()
assert g1.datetime.day_of_week() == g2.datetime.day_of_week()
def test_generic_development(self, g1, g2):
sl1 = g1.development.software_license()
sl2 = g2.development.software_license()
assert sl1 == sl2
def test_generic_file(self, g1, g2):
assert g1.file.size() == g2.file.size()
assert g1.file.file_name() == g2.file.file_name()
def test_generic_food(self, g1, g2):
assert g1.food.dish() == g2.food.dish()
assert g1.food.spices() == g2.food.spices()
def test_generic_hardware(self, g1, g2):
assert g1.hardware.screen_size() == g2.hardware.screen_size()
assert g1.hardware.cpu() == g2.hardware.cpu()
def test_generic_internet(self, g1, g2):
assert g1.internet.content_type() == g2.internet.content_type()
def test_generic_numbers(self, g1, g2):
assert g1.numeric.integers() == g2.numeric.integers()
def test_generic_path(self, g1, g2):
assert g1.path.root() == g2.path.root()
assert g1.path.home() == g2.path.home()
def test_generic_payment(self, g1, g2):
assert g1.payment.cid() == g2.payment.cid()
assert g1.payment.paypal() == g2.payment.paypal()
def test_generic_person(self, g1, g2):
assert g1.person.age() == g2.person.age()
assert g1.person.name() == g2.person.name()
def test_generic_science(self, g1, g2):
assert g1.science.rna_sequence() == g2.science.rna_sequence()
def test_generic_transport(self, g1, g2):
assert g1.transport.airplane() == g2.transport.airplane()
|
2,333 |
generate
|
from conan import ConanFile
from conan.tools.apple import fix_apple_shared_install_name
from conan.tools.build import cross_building
from conan.tools.env import VirtualBuildEnv
from conan.tools.files import (
apply_conandata_patches,
copy,
export_conandata_patches,
get,
rename,
rm,
rmdir
)
from conan.tools.gnu import Autotools, AutotoolsToolchain
from conan.tools.layout import basic_layout
from conan.tools.microsoft import is_msvc, unix_path
from conan.tools.scm import Version
import os
required_conan_version = ">=1.54.0"
class LibiconvConan(ConanFile):
name = "libiconv"
description = "Convert text to and from Unicode"
license = ("LGPL-2.0-or-later", "LGPL-2.1-or-later")
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://www.gnu.org/software/libiconv/"
topics = ("iconv", "text", "encoding", "locale", "unicode", "conversion")
package_type = "library"
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
}
@property
def _is_clang_cl(self):
return self.settings.compiler == "clang" and self.settings.os == "Windows" and \
self.settings.compiler.get_safe("runtime")
@property
def _msvc_tools(self):
return ("clang-cl", "llvm-lib", "lld-link") if self._is_clang_cl else ("cl", "lib", "link")
@property
def _settings_build(self):
return getattr(self, "settings_build", self.settings)
def export_sources(self):
export_conandata_patches(self)
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
if self.options.shared:
self.options.rm_safe("fPIC")
self.settings.rm_safe("compiler.libcxx")
self.settings.rm_safe("compiler.cppstd")
if Version(self.version) >= "1.17":
self.license = "LGPL-2.1-or-later"
else:
self.license = "LGPL-2.0-or-later"
def layout(self):
basic_layout(self, src_folder="src")
def build_requirements(self):
if self._settings_build.os == "Windows":
if not self.conf.get("tools.microsoft.bash:path", check_type=str):
self.tool_requires("msys2/cci.latest")
self.win_bash = True
def source(self):
get(self, **self.conan_data["sources"][self.version], strip_root=True)
def METHOD_NAME(self):
env = VirtualBuildEnv(self)
env.METHOD_NAME()
tc = AutotoolsToolchain(self)
msvc_version = {"Visual Studio": "12", "msvc": "180"}
if is_msvc(self) and Version(self.settings.compiler.version) >= msvc_version[str(self.settings.compiler)]:
# https://github.com/conan-io/conan/issues/6514
tc.extra_cflags.append("-FS")
if cross_building(self) and is_msvc(self):
triplet_arch_windows = {"x86_64": "x86_64", "x86": "i686", "armv8": "aarch64"}
            # The GNU triplet that conan generates for msvc is not accepted here, so build host/build triplets manually (see https://github.com/conan-io/conan/issues/12546)
host_arch = triplet_arch_windows.get(str(self.settings.arch))
build_arch = triplet_arch_windows.get(str(self._settings_build.arch))
if host_arch and build_arch:
host = f"{host_arch}-w64-mingw32"
build = f"{build_arch}-w64-mingw32"
tc.configure_args.extend([
f"--host={host}",
f"--build={build}",
])
env = tc.environment()
if is_msvc(self) or self._is_clang_cl:
cc, lib, link = self._msvc_tools
build_aux_path = os.path.join(self.source_folder, "build-aux")
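            # build-aux/compile and build-aux/ar-lib are wrapper scripts shipped in the
            # source tarball; they translate Unix-style compiler/archiver command lines
            # into cl.exe/lib.exe (or clang-cl/llvm-lib) invocations so the autotools
            # build can drive MSVC.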
lt_compile = unix_path(self, os.path.join(build_aux_path, "compile"))
lt_ar = unix_path(self, os.path.join(build_aux_path, "ar-lib"))
env.define("CC", f"{lt_compile} {cc} -nologo")
env.define("CXX", f"{lt_compile} {cc} -nologo")
env.define("LD", link)
env.define("STRIP", ":")
env.define("AR", f"{lt_ar} {lib}")
env.define("RANLIB", ":")
env.define("NM", "dumpbin -symbols")
env.define("win32_target", "_WIN32_WINNT_VISTA")
tc.METHOD_NAME(env)
def build(self):
apply_conandata_patches(self)
autotools = Autotools(self)
autotools.configure()
autotools.make()
def package(self):
copy(self, "COPYING.LIB", self.source_folder, os.path.join(self.package_folder, "licenses"))
autotools = Autotools(self)
autotools.install()
rm(self, "*.la", os.path.join(self.package_folder, "lib"))
rmdir(self, os.path.join(self.package_folder, "share"))
fix_apple_shared_install_name(self)
if (is_msvc(self) or self._is_clang_cl) and self.options.shared:
for import_lib in ["iconv", "charset"]:
rename(self, os.path.join(self.package_folder, "lib", f"{import_lib}.dll.lib"),
os.path.join(self.package_folder, "lib", f"{import_lib}.lib"))
def package_info(self):
self.cpp_info.set_property("cmake_find_mode", "both")
self.cpp_info.set_property("cmake_file_name", "Iconv")
self.cpp_info.set_property("cmake_target_name", "Iconv::Iconv")
self.cpp_info.libs = ["iconv", "charset"]
# TODO: to remove in conan v2
self.cpp_info.names["cmake_find_package"] = "Iconv"
self.cpp_info.names["cmake_find_package_multi"] = "Iconv"
self.env_info.PATH.append(os.path.join(self.package_folder, "bin"))
|
2,334 |
test read submodule resource by name
|
import sys
import unittest
from . import data01
from . import zipdata01, zipdata02
from . import util
from importlib import resources, import_module
class ResourceTests:
# Subclasses are expected to set the `data` attribute.
def test_is_resource_good_path(self):
self.assertTrue(resources.is_resource(self.data, 'binary.file'))
def test_is_resource_missing(self):
self.assertFalse(resources.is_resource(self.data, 'not-a-file'))
def test_is_resource_subresource_directory(self):
# Directories are not resources.
self.assertFalse(resources.is_resource(self.data, 'subdirectory'))
def test_contents(self):
contents = set(resources.contents(self.data))
# There may be cruft in the directory listing of the data directory.
# Under Python 3 we could have a __pycache__ directory, and under
# Python 2 we could have .pyc files. These are both artifacts of the
# test suite importing these modules and writing these caches. They
# aren't germane to this test, so just filter them out.
contents.discard('__pycache__')
contents.discard('__init__.pyc')
contents.discard('__init__.pyo')
self.assertEqual(contents, {
'__init__.py',
'subdirectory',
'utf-8.file',
'binary.file',
'utf-16.file',
})
class ResourceDiskTests(ResourceTests, unittest.TestCase):
def setUp(self):
self.data = data01
class ResourceZipTests(ResourceTests, util.ZipSetup, unittest.TestCase):
pass
class ResourceLoaderTests(unittest.TestCase):
def test_resource_contents(self):
package = util.create_package(
file=data01, path=data01.__file__, contents=['A', 'B', 'C'])
self.assertEqual(
set(resources.contents(package)),
{'A', 'B', 'C'})
def test_resource_is_resource(self):
package = util.create_package(
file=data01, path=data01.__file__,
contents=['A', 'B', 'C', 'D/E', 'D/F'])
self.assertTrue(resources.is_resource(package, 'B'))
def test_resource_directory_is_not_resource(self):
package = util.create_package(
file=data01, path=data01.__file__,
contents=['A', 'B', 'C', 'D/E', 'D/F'])
self.assertFalse(resources.is_resource(package, 'D'))
def test_resource_missing_is_not_resource(self):
package = util.create_package(
file=data01, path=data01.__file__,
contents=['A', 'B', 'C', 'D/E', 'D/F'])
self.assertFalse(resources.is_resource(package, 'Z'))
class ResourceCornerCaseTests(unittest.TestCase):
def test_package_has_no_reader_fallback(self):
# Test odd ball packages which:
# 1. Do not have a ResourceReader as a loader
# 2. Are not on the file system
# 3. Are not in a zip file
module = util.create_package(
file=data01, path=data01.__file__, contents=['A', 'B', 'C'])
# Give the module a dummy loader.
module.__loader__ = object()
# Give the module a dummy origin.
module.__file__ = '/path/which/shall/not/be/named'
if sys.version_info >= (3,):
module.__spec__.loader = module.__loader__
module.__spec__.origin = module.__file__
self.assertFalse(resources.is_resource(module, 'A'))
class ResourceFromZipsTest(util.ZipSetupBase, unittest.TestCase):
ZIP_MODULE = zipdata02 # type: ignore
def test_unrelated_contents(self):
# https://gitlab.com/python-devs/importlib_resources/issues/44
#
# Here we have a zip file with two unrelated subpackages. The bug
# reports that getting the contents of a resource returns unrelated
# files.
self.assertEqual(
set(resources.contents('ziptestdata.one')),
{'__init__.py', 'resource1.txt'})
self.assertEqual(
set(resources.contents('ziptestdata.two')),
{'__init__.py', 'resource2.txt'})
class SubdirectoryResourceFromZipsTest(util.ZipSetupBase, unittest.TestCase):
ZIP_MODULE = zipdata01 # type: ignore
def test_is_submodule_resource(self):
submodule = import_module('ziptestdata.subdirectory')
self.assertTrue(
resources.is_resource(submodule, 'binary.file'))
def METHOD_NAME(self):
self.assertTrue(
resources.is_resource('ziptestdata.subdirectory', 'binary.file'))
def test_submodule_contents(self):
submodule = import_module('ziptestdata.subdirectory')
self.assertEqual(
set(resources.contents(submodule)),
{'__init__.py', 'binary.file'})
def test_submodule_contents_by_name(self):
self.assertEqual(
set(resources.contents('ziptestdata.subdirectory')),
{'__init__.py', 'binary.file'})
class NamespaceTest(unittest.TestCase):
def test_namespaces_cannot_have_resources(self):
contents = resources.contents('test.test_importlib.data03.namespace')
self.assertFalse(list(contents))
# Even though there is a file in the namespace directory, it is not
# considered a resource, since namespace packages can't have them.
self.assertFalse(resources.is_resource(
'test.test_importlib.data03.namespace',
'resource1.txt'))
# We should get an exception if we try to read it or open it.
self.assertRaises(
FileNotFoundError,
resources.open_text,
'test.test_importlib.data03.namespace', 'resource1.txt')
self.assertRaises(
FileNotFoundError,
resources.open_binary,
'test.test_importlib.data03.namespace', 'resource1.txt')
self.assertRaises(
FileNotFoundError,
resources.read_text,
'test.test_importlib.data03.namespace', 'resource1.txt')
self.assertRaises(
FileNotFoundError,
resources.read_binary,
'test.test_importlib.data03.namespace', 'resource1.txt')
if __name__ == '__main__':
unittest.main()
|
2,335 |
build start state
|
# Copyright © 2012-2023 Forschungszentrum Jülich GmbH
# SPDX-License-Identifier: LGPL-3.0-or-later
import math
from pathlib import Path
from jupedsim_visualizer.geometry import Geometry
from jupedsim_visualizer.replay_widget import ReplayWidget
from jupedsim_visualizer.trajectory import Trajectory
from jupedsim_visualizer.view_geometry_widget import ViewGeometryWidget
from PySide6.QtCore import QSettings, QSize
from PySide6.QtStateMachine import QFinalState, QState, QStateMachine
from PySide6.QtWidgets import (
QApplication,
QFileDialog,
QMainWindow,
QMessageBox,
QTabWidget,
)
import jupedsim as jps
from jupedsim.recording import Recording
from jupedsim.serialization import parse_wkt
from jupedsim.util import build_jps_geometry
class MainWindow(QMainWindow):
def __init__(self, parent=None) -> None:
QMainWindow.__init__(self, parent)
self.settings = QSettings("jupedsim", "jupedsim_visualizer")
self.setWindowTitle("jupedsim_visualizer")
self._build_central_tabs_widget()
self._build_menu_bar()
self._build_state_machine()
self.setVisible(True)
def _build_central_tabs_widget(self):
tabs = QTabWidget(self)
tabs.setMinimumSize(QSize(640, 480))
tabs.setMovable(True)
tabs.setDocumentMode(True)
tabs.setTabsClosable(True)
tabs.setTabBarAutoHide(True)
tabs.tabCloseRequested.connect(tabs.removeTab)
self.setCentralWidget(tabs)
self.tabs = tabs
def _build_menu_bar(self) -> None:
menu = self.menuBar()
open_menu = menu.addMenu("File")
open_wkt_act = open_menu.addAction("Open wkt file")
open_wkt_act.triggered.connect(self._open_wkt)
open_replay_act = open_menu.addAction("Open replay file")
open_replay_act.triggered.connect(self._open_replay)
settings_menu = menu.addMenu("Settings")
self._show_triangulation = settings_menu.addAction(
"show triangulation"
)
self._show_triangulation.setCheckable(True)
self._show_triangulation.toggled.connect(self._toggle_triangulation)
self._show_triangulation.setChecked(
bool(
self.settings.value(
"show_triangulation", type=bool, defaultValue=False
)
)
)
def _build_state_machine(self) -> None:
sm = QStateMachine(self)
sm.finished.connect(QApplication.quit)
start = self.METHOD_NAME()
sm.addState(start)
exit = self._build_exit_state()
sm.addState(exit)
# start.addTransition(self.button.clicked, exit)
sm.setInitialState(start)
sm.start()
self.state_machine = sm
def METHOD_NAME(self) -> QState:
state = QState()
return state
def _build_show_wkt_state(self) -> QState:
state = QState()
return state
def _build_exit_state(self) -> QFinalState:
state = QFinalState()
return state
def _toggle_triangulation(self, state: bool) -> None:
self.settings.setValue("show_triangulation", state)
for idx in range(self.tabs.count()):
self.tabs.widget(idx).geo.show_triangulation(state)
self.repaint()
def _open_wkt(self):
base_path_obj = self.settings.value(
"files/last_wkt_location",
type=str,
defaultValue=Path("~").expanduser(),
)
base_path = Path(str(base_path_obj))
file, _ = QFileDialog.getOpenFileName(
self, caption="Open WKT file", dir=str(base_path)
)
if not file:
return
file = Path(file)
self.settings.setValue("files/last_wkt_location", str(file.parent))
try:
wkt = parse_wkt(Path(file).read_text(encoding="UTF-8"))
navi = jps.RoutingEngine(build_jps_geometry(wkt))
xmin, ymin, xmax, ymax = wkt.bounds
info_text = f"Dimensions: {math.ceil(xmax - xmin)}m x {math.ceil(ymax - ymin)}m Triangles: {len(navi.mesh())}"
name_text = f"Geometry: {file}"
self.setUpdatesEnabled(False)
geo = Geometry(navi)
geo.show_triangulation(self._show_triangulation.isChecked())
tab = ViewGeometryWidget(
navi, geo, name_text, info_text, parent=self
)
tab_idx = self.tabs.insertTab(0, tab, file.name)
self.tabs.setCurrentIndex(tab_idx)
self.setUpdatesEnabled(True)
except Exception as e:
QMessageBox.critical(
self,
"Error importing WKT geometry",
f"Error importing WKT geometry:\n{e}",
)
return
def _open_replay(self):
base_path_obj = self.settings.value(
"files/last_replay_location",
type=str,
defaultValue=Path("~").expanduser(),
)
base_path = Path(str(base_path_obj))
file, _ = QFileDialog.getOpenFileName(
self, caption="Open recording", dir=str(base_path)
)
if not file:
return
file = Path(file)
self.settings.setValue("files/last_replay_location", str(file.parent))
try:
rec = Recording(file.as_posix())
self.setUpdatesEnabled(False)
navi = jps.RoutingEngine(build_jps_geometry(rec.geometry()))
geo = Geometry(navi)
geo.show_triangulation(self._show_triangulation.isChecked())
trajectory = Trajectory(rec)
tab = ReplayWidget(navi, rec, geo, trajectory, parent=self)
tab_idx = self.tabs.insertTab(0, tab, file.name)
self.tabs.setCurrentIndex(tab_idx)
self.setUpdatesEnabled(True)
self.update()
except Exception as e:
QMessageBox.critical(
self,
"Error importing simulation recording",
f"Error importing simulation recording:\n{e}",
)
return
|
2,336 |
path prettify
|
# -*- coding: utf-8 -*-
#
# This file is part of OpenMediaVault.
#
# @license http://www.gnu.org/licenses/gpl.html GPL Version 3
# @author Volker Theile <[email protected]>
# @copyright Copyright (c) 2009-2023 Volker Theile
#
# OpenMediaVault is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# OpenMediaVault is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OpenMediaVault. If not, see <http://www.gnu.org/licenses/>.
__all__ = [
"camelcase_to_underscore",
"truncate",
"is_json",
"is_uuid4",
"is_fs_uuid",
]
import json
import re
import uuid
def camelcase_to_underscore(value):
return (
re.sub("(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))", "_\\1", value)
.lower()
.strip("_")
)
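# Illustrative example only (not part of the original module), written in the
# same doctest style used by the docstrings further below:
#
#   >>> assert camelcase_to_underscore('FooBarBaz') == 'foo_bar_baz'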
def truncate(value, max_len):
return value[:max_len] + (value[max_len:] and "...")
def is_json(value):
"""
Finds out whether a string is JSON.
:param value: The string being evaluated.
:type value: str
:return: Returns ``True`` if the string is JSON, otherwise ``False``.
:rtype: bool
"""
if not isinstance(value, str):
return False
try:
_ = json.loads(value)
except ValueError:
return False
return True
def is_uuid4(value):
"""
Finds out whether a variable is a UUID v4.
:param value: The variable being evaluated.
:type value: str
:return: Returns ``True`` if the variable is a UUID v4,
otherwise ``False``.
:rtype: bool
"""
if not isinstance(value, str):
return False
try:
_ = uuid.UUID(value, version=4)
except ValueError:
return False
return True
def is_fs_uuid(value):
"""
Finds out whether a variable is a filesystem UUID.
Example:
- 78b669c1-9183-4ca3-a32c-80a4e2c61e2d (EXT2/3/4, JFS, XFS)
- 7A48-BA97 (FAT)
- 2ED43920D438EC29 (NTFS)
- 2015-01-13-21-48-46-00 (ISO9660)
See http://wiki.ubuntuusers.de/UUID
:param value: The variable being evaluated.
:type value: str
:return: Returns ``True`` if the variable is a filesystem UUID,
otherwise ``False``.
:rtype: bool
"""
if not isinstance(value, str):
return False
# Check if it is an UUID v4.
if is_uuid4(value):
return True
# Check if it is a NTFS, FAT or ISO9660 filesystem identifier.
return None != re.match(
r'^([a-f0-9]{4}-[a-f0-9]{4}|[a-f0-9]{16}|'
'[0-9]{4}-[0-9]{2}-[0-9]{2}-[0-9]{2}-[0-9]{2}-[0-9]{2}-[0-9]{2})$',
value,
flags=re.IGNORECASE,
)
def escape_blank(value, octal=False):
"""
Escape a string to be used in a shell environment. Blank characters
will be replaced with their hexadecimal (\x20) or octal (\040)
representation.
Example:
- /srv/dev-disk-by-label-xx yy => /srv/dev-disk-by-label-xx\x20yy
- /srv/dev-disk-by-label-xx yy => /srv/dev-disk-by-label-xx\040yy
:param value: The value that will be escaped.
:type value: str
:param octal: If ``True``, convert to octal values, otherwise
hexadecimal. Defaults to ``False``.
:type octal: bool
:return: The escaped string.
:rtype: str
"""
return value.replace(' ', '\\040' if octal else '\\x20')
def unescape_blank(value, octal=False):
"""
Unescape a string. A hexadecimal (\x20) or octal (\040) blank will
be replaced by its ASCII representation.
Example:
- /srv/dev-disk-by-label-xx\x20yy => /srv/dev-disk-by-label-xx yy
- /srv/dev-disk-by-label-xx\040yy => /srv/dev-disk-by-label-xx yy
:param value: The value that will be unescaped.
:type value: str
:param octal: If ``True``, convert octal values, otherwise
hexadecimal. Defaults to ``False``.
:type octal: bool
:return: The unescaped string.
:rtype: str
"""
return value.replace('\\040' if octal else '\\x20', ' ')
def binary_format(
value, precision=2, origin_unit='B', max_unit='YiB', return_json=False
):
"""
Convert a value into the highest possible binary unit.
:param value: The number to convert (per default this is in Bytes).
:type value: str|int
:param precision: Defaults to 2.
:param origin_unit: Defaults to ``B``.
:param max_unit: Defaults to ``YiB``.
:param return_json: Return value and unit as JSON object.
Defaults to ``False``.
:return: The converted string value including the unit or dictionary
with the keys ``value`` and ``unit``.
:rtype: str
"""
units = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB']
value = float(value)
exp = units.index(origin_unit)
max_exp = units.index(max_unit)
while value >= 1024 and exp < max_exp:
exp += 1
value = value / 1024
if not return_json:
result = '{:.{prec}f} {}'.format(value, units[exp], prec=precision)
else:
result = {'value': value, 'unit': units[exp]}
return result
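# Illustrative examples only (not part of the original module):
#
#   >>> assert binary_format(2048) == '2.00 KiB'
#   >>> assert binary_format(2048, return_json=True) == {'value': 2.0, 'unit': 'KiB'}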
def METHOD_NAME(path):
"""
Make sure the directory path ends with a slash.
>>> assert path_prettify('/foo/bar') == '/foo/bar/'
>>> assert path_prettify('/foo/bar//') == '/foo/bar/'
:param path: The path to process.
:type path: str
:return: Returns the prettified path.
:rtype: str
"""
assert isinstance(path, str)
return '{}/'.format(path.rstrip('/'))
def add_slashes(value):
"""
Prefix certain characters of a string with '\'.
These characters are:
* backslash
* single quote
* double quote
* dollar sign
* backtick
:param value: The string to be escaped.
:type value: str
:return: Returns a string with backslashes added before characters
that need to be escaped.
:rtype: str
"""
assert isinstance(value, str)
for i in ['\\', '\'', '"', '$', '`']:
value = value.replace(i, '\\{}'.format(i))
return value
def yesno(value: bool, answers='yes,no') -> str:
"""
Convert a boolean value to 'yes' or 'no'.
:return: Returns 'yes' if the value is true, otherwise 'no'.
"""
assert isinstance(value, bool)
assert isinstance(answers, str)
parts = answers.split(',')
return parts[0] if value else parts[1]
|
2,337 |
best effort removal
|
# -*- coding: utf-8 -*-
# vim:expandtab:autoindent:tabstop=4:shiftwidth=4:filetype=python:textwidth=0:
import errno
import os
import os.path
import shutil
import stat
import subprocess
import time
from . import exception
from .trace_decorator import getLog, traceLog
@traceLog()
def mkdirIfAbsent(*args):
for dirName in args:
getLog().debug("ensuring that dir exists: %s", dirName)
try:
os.makedirs(dirName)
getLog().debug("created dir: %s", dirName)
except OSError as e:
if e.errno != errno.EEXIST:
getLog().exception("Could not create dir %s. Error: %s", dirName, e)
raise exception.Error("Could not create dir %s. Error: %s" % (dirName, e))
@traceLog()
def touch(fileName):
getLog().debug("touching file: %s", fileName)
open(fileName, 'a').close()
@traceLog()
def rmtree(path, selinux=False, exclude=()):
"""Version of shutil.rmtree that ignores no-such-file-or-directory errors,
tries harder if it finds immutable files and supports excluding paths"""
if os.path.islink(path):
raise OSError("Cannot call rmtree on a symbolic link: %s" % path)
try_again = True
retries = 10
failed_to_handle = False
failed_filename = None
if path in exclude:
return
while try_again:
try_again = False
try:
names = os.listdir(path)
for name in names:
fullname = os.path.join(path, name)
if fullname not in exclude:
try:
mode = os.lstat(fullname).st_mode
except OSError:
mode = 0
if stat.S_ISDIR(mode):
try:
rmtree(fullname, selinux=selinux, exclude=exclude)
except OSError as e:
if e.errno in (errno.EPERM, errno.EACCES, errno.EBUSY):
# we already tried handling this on lower level and failed,
# there's no point in trying again now
failed_to_handle = True
raise
else:
os.remove(fullname)
os.rmdir(path)
except OSError as e:
if failed_to_handle:
raise
if e.errno == errno.ENOENT: # no such file or directory
pass
elif e.errno == errno.ENOTEMPTY: # there's something left
if exclude: # but it is excluded
pass
else: # data was likely added in the meantime (e.g. during Ctrl+C)
try_again = True
retries -= 1
if retries <= 0:
raise
time.sleep(2)
elif selinux and (e.errno == errno.EPERM or e.errno == errno.EACCES):
try_again = True
if failed_filename == e.filename:
raise
failed_filename = e.filename
os.system("chattr -R -i %s" % path)
elif e.errno == errno.EBUSY:
retries -= 1
if retries <= 0:
raise
try_again = True
getLog().debug("retrying failed tree remove after sleeping a bit")
time.sleep(2)
else:
raise
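# Illustrative usage sketch (not part of the original module); the paths are
# hypothetical. `selinux=True` enables the chattr-based retry for immutable
# files and `exclude` keeps the listed paths in place:
#
#   rmtree("/var/lib/mock/root", selinux=True,
#          exclude=("/var/lib/mock/root/proc", "/var/lib/mock/root/sys"))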
def is_in_dir(path, directory):
"""Tests whether `path` is inside `directory`."""
# use realpath to expand symlinks
path = os.path.realpath(path)
directory = os.path.realpath(directory)
return os.path.commonprefix([path, directory]) == directory
def get_fs_type(path):
cmd = ['/bin/stat', '-f', '-L', '-c', '%T', path]
p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE,
universal_newlines=True)
p.wait()
with p.stdout as f:
return f.readline().strip()
def find_non_nfs_dir():
dirs = ('/dev/shm', '/run', '/tmp', '/usr/tmp', '/')
for d in dirs:
if not get_fs_type(d).startswith('nfs'):
return d
raise exception.Error('Cannot find non-NFS directory in: %s' % dirs)
def unlink_if_exists(path):
"""
Unlink, ignore FileNotFoundError, but keep raising other exceptions.
"""
try:
os.unlink(path)
except FileNotFoundError:
pass
def METHOD_NAME(path, use_rmtree=True):
try:
os.unlink(path)
except OSError:
pass
if not use_rmtree:
return
try:
shutil.rmtree(path)
except OSError:
pass
def update_tree(dest, src):
"""
Copy files from SRC directory into DEST, recursively. The DEST directory
is created, including subdirectories (if not existent). The files in DEST
are created or updated (shutil.copy2). If a file is about to replace a
directory or vice versa, it is done without asking. Files that are in DEST
and not in SRC are kept untouched.
"""
getLog().debug("Updating files in %s with files from %s", dest, src)
mkdirIfAbsent(dest)
for dirpath, dirnames, filenames in os.walk(src):
raw_subpath = os.path.relpath(dirpath, src)
subpath = os.path.normpath(raw_subpath)
destpath = os.path.join(dest, subpath)
for filename in filenames:
file_from = os.path.join(dirpath, filename)
file_to = os.path.join(destpath, filename)
METHOD_NAME(file_to)
shutil.copy2(file_from, file_to)
for subdir in dirnames:
dest_subdir = os.path.join(destpath, subdir)
METHOD_NAME(dest_subdir, use_rmtree=False)
mkdirIfAbsent(dest_subdir)
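# Illustrative usage sketch (not part of the original module); the paths are
# hypothetical. Copies the staged tree over the destination, creating missing
# directories and replacing files in place while leaving extra files in DEST
# untouched:
#
#   update_tree("/var/lib/mock/fedora-root/etc", "/etc/mock/overlay/etc")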
|
2,338 |
read stdin lines
|
from pathlib import Path
from sys import stdin
from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
PathLike = Union[str, Path]
RawDoc = Union[str, bytes]
try:
from ruamel.yaml import YAML
_YAML_C = YAML(typ="safe", pure=False)
except ImportError:
_YAML_C = None
def _parse_yaml_yaml(s: str) -> Dict[str, Any]:
# pylint: disable=import-outside-toplevel
import yaml
return yaml.load(s, Loader=getattr(yaml, "CSafeLoader", yaml.SafeLoader))
def _parse_yaml_ruamel(s: str) -> Dict[str, Any]:
return _YAML_C.load(s)
parse_yaml = _parse_yaml_yaml if _YAML_C is None else _parse_yaml_ruamel
def _guess_is_file(s: str):
try:
return Path(s).exists()
except IOError:
return False
def parse_yaml_file_or_inline(s: str) -> Dict[str, Any]:
"""
Accept on input either a path to yaml file or yaml text, return parsed yaml document.
"""
if _guess_is_file(s):
txt = slurp(s, binary=False)
assert isinstance(txt, str)
else:
txt = s
result = parse_yaml(txt)
if isinstance(result, str):
raise IOError(f"No such file: {s}")
return result
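# Illustrative example only (not part of the original module): inline YAML is
# parsed directly, while an existing file path would be read from disk first.
#
#   >>> assert parse_yaml_file_or_inline("a: 1\nb: [2, 3]") == {"a": 1, "b": [2, 3]}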
def METHOD_NAME(skip_empty: bool = False) -> Iterator[str]:
"""Read lines from stdin.
Returns an iterator of lines with surrounding whitespace trimmed.
skip_empty - when True, whitespace-only lines will be omitted
"""
pred = {True: lambda s: len(s) > 0, False: lambda s: True}[skip_empty]
for line in stdin:
line = line.strip()
if pred(line):
yield line
def slurp(fname: PathLike, binary: bool = False) -> RawDoc:
"""fname -> str|bytes.
binary=True -- read bytes not text
"""
mode = "rb" if binary else "rt"
with open(fname, mode) as f:
return f.read()
def slurp_lines(fname: str, *args, **kwargs) -> List[str]:
"""file path -> [lines]"""
if len(args) > 0 or len(kwargs) > 0:
fname = fname.format(*args, **kwargs)
with open(fname, "rt") as f:
return [s.rstrip() for s in f.readlines()]
def read_int(path: PathLike, default=None, base=10) -> Optional[int]:
"""
Read single integer from a text file.
Useful for things like parsing content of /sys/ or /proc.
"""
try:
return int(slurp(path), base)
except (FileNotFoundError, ValueError):
return default
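# Illustrative example only (not part of the original module); the sysfs path
# is hypothetical and the default is returned if it does not exist:
#
#   >>> mtu = read_int("/sys/class/net/eth0/mtu", default=1500)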
def parse_mtl(txt: str) -> Dict[str, Any]:
def parse_value(s):
if len(s) == 0:
return s
if s[0] == '"':
return s.strip('"')
for parser in [int, float]:
try:
return parser(s)
except ValueError:
pass
return s
def tokenize(lines):
if isinstance(lines, str):
lines = lines.splitlines()
for lineno, s in enumerate(lines):
if len(s) == 0:
continue
i = s.find("=")
if i < 0:
if s.strip() == "END":
break
raise ValueError("Can not parse:[%d]: %s" % (lineno, s))
k = s[:i].strip()
v = s[i + 1 :].strip()
yield (k, v)
tree: Dict[str, Any] = {}
node, name = tree, None
nodes = []
for k, v in tokenize(txt):
if k == "GROUP":
nodes.append((node, name))
parent, node, name = node, {}, v
if name in parent:
raise ValueError("Repeated key: %s" % name)
parent[name] = node
elif k == "END_GROUP":
if len(nodes) == 0:
raise ValueError("Bad END_GROUP: too many")
if name != v:
raise ValueError("Bad END_GROUP: bad name")
node, name = nodes.pop()
else:
if k in node:
raise ValueError("Repeated key: %s" % k)
node[k] = parse_value(v)
return tree
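# Illustrative example only (not part of the original module), showing the
# GROUP/END_GROUP structure this parser accepts:
#
#   >>> txt = 'GROUP = L1\n  KEY = "value"\n  NUM = 3\nEND_GROUP = L1\nEND'
#   >>> assert parse_mtl(txt) == {'L1': {'KEY': 'value', 'NUM': 3}}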
def split_and_check(
s: str, separator: str, n: Union[int, Tuple[int, ...]]
) -> Tuple[str, ...]:
"""Turn string into tuple, checking that there are exactly as many parts as expected.
:param s: String to parse
:param separator: Separator character
:param n: Expected number of parts, can be a single integer value or several,
example `(2, 3)` accepts 2 or 3 parts.
"""
if isinstance(n, int):
n = (n,)
parts = s.split(separator)
if len(parts) not in n:
raise ValueError('Failed to parse "{}"'.format(s))
return tuple(parts)
def parse_range_int(s: str, separator: str = ":") -> Tuple[int, int]:
"""Parse str(<int>:<int>) -> (int, int)"""
try:
_in, _out = (int(x) for x in split_and_check(s, separator, 2))
except ValueError:
raise ValueError(
'Expect <int>{}<int> syntax, got "{}"'.format(separator, s)
) from None
return (_in, _out)
def parse_range2d_int(s: str) -> Tuple[Tuple[int, int], Tuple[int, int]]:
"""Parse string like "0:3,4:5" -> ((0,3), (4,5))"""
try:
a, b = (parse_range_int(p, ":") for p in split_and_check(s, ",", 2))
except ValueError:
raise ValueError(
'Expect <int>:<int>,<int>:<int> syntax, got "{}"'.format(s)
) from None
return a, b
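# Illustrative examples only (not part of the original module):
#
#   >>> assert parse_range_int("2:7") == (2, 7)
#   >>> assert parse_range2d_int("0:3,4:5") == ((0, 3), (4, 5))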
# pylint: disable=import-outside-toplevel,inconsistent-return-statements
def click_range2d(ctx, param, value):
"""
@click.option('--range', callback=click_range2d)
"""
import click
if value is not None:
try:
return parse_range2d_int(value)
except ValueError as e:
raise click.ClickException(str(e)) from None
def parse_slice(s: str) -> slice:
"""
Parse slice syntax in the form start:stop[:step]
Examples "::4", "2:5", "2::10", "3:100:5"
"""
def parse(part: str) -> Optional[int]:
if part == "":
return None
return int(part)
try:
parts = [parse(p) for p in split_and_check(s, ":", (2, 3))]
except ValueError:
raise ValueError(f'Expect <start>:<stop>[:<step>] syntax, got "{s}"') from None
return slice(*parts)
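# Illustrative examples only (not part of the original module):
#
#   >>> assert parse_slice("2:10:2") == slice(2, 10, 2)
#   >>> assert parse_slice("::4") == slice(None, None, 4)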
# pylint: disable=import-outside-toplevel,inconsistent-return-statements
def click_slice(ctx, param, value):
"""
@click.option('--slice', callback=click_slice)
Examples "::4", "2:5", "2::10", "3:100:5"
"""
import click
if value is not None:
try:
return parse_slice(value)
except ValueError as e:
raise click.ClickException(str(e)) from None
|
2,339 |
test encrypt
|
import sys
from pathlib import Path
import aiofiles
import pytest
import unpaddedbase64
from Crypto import Random # nosec
from nio import EncryptionError
from nio.crypto import async_encrypt_attachment, decrypt_attachment
FILEPATH = "tests/data/test_bytes"
@pytest.mark.skipif(
sys.version_info < (3, 5),
reason="Python 3 specific asyncio tests",
)
@pytest.mark.asyncio
class TestClass:
async def _get_data_cypher_keys(self, data=b"Test bytes"):
*chunks, keys = [i async for i in async_encrypt_attachment(data)]
return (data, b"".join(chunks), keys)
async def METHOD_NAME(self, data=b"Test bytes", large=False):
_, ciphertext, keys = await self._get_data_cypher_keys(data)
plaintext = decrypt_attachment(
ciphertext,
keys["key"]["k"],
keys["hashes"]["sha256"],
keys["iv"],
)
assert plaintext == b"Test bytes" * (16384 if large else 1)
async def test_encrypt_large_bytes(self):
# Makes sure our bytes chunking in async_generator_from_data
# is working correctly
await self.METHOD_NAME(b"Test bytes" * 16384, large=True)
async def test_encrypt_str(self):
await self.METHOD_NAME(FILEPATH)
async def test_encrypt_path_object(self):
await self.METHOD_NAME(Path(FILEPATH))
async def test_encrypt_iterable(self):
await self.METHOD_NAME([b"Test ", b"bytes"])
async def test_encrypt_async_iterable(self):
async def async_gen():
yield b"Test "
yield b"bytes"
await self.METHOD_NAME(async_gen())
async def test_encrypt_file_object(self):
await self.METHOD_NAME(open(FILEPATH, "rb"))
async def test_encrypt_async_file_object(self):
await self.METHOD_NAME(await aiofiles.open(FILEPATH, "rb"))
async def test_encrypt_bad_argument_type(self):
with pytest.raises(TypeError):
await self.METHOD_NAME(123)
async def test_hash_verification(self):
data, ciphertext, keys = await self._get_data_cypher_keys()
with pytest.raises(EncryptionError):
decrypt_attachment(
ciphertext,
keys["key"]["k"],
"Fake hash",
keys["iv"],
)
async def test_invalid_key(self):
data, ciphertext, keys = await self._get_data_cypher_keys()
with pytest.raises(EncryptionError):
decrypt_attachment(
ciphertext,
"Fake key",
keys["hashes"]["sha256"],
keys["iv"],
)
async def test_invalid_iv(self):
data, ciphertext, keys = await self._get_data_cypher_keys()
with pytest.raises(EncryptionError):
decrypt_attachment(
ciphertext,
keys["key"]["k"],
keys["hashes"]["sha256"],
"Fake iv",
)
async def test_short_key(self):
data, ciphertext, keys = await self._get_data_cypher_keys()
with pytest.raises(EncryptionError):
decrypt_attachment(
ciphertext,
unpaddedbase64.encode_base64(b"Fake key", urlsafe=True),
keys["hashes"]["sha256"],
keys["iv"],
)
async def test_short_iv(self):
data, ciphertext, keys = await self._get_data_cypher_keys()
plaintext = decrypt_attachment(
ciphertext,
keys["key"]["k"],
keys["hashes"]["sha256"],
unpaddedbase64.encode_base64(b"F" + b"\x00" * 8),
)
assert plaintext != data
async def test_fake_key(self):
data, ciphertext, keys = await self._get_data_cypher_keys()
fake_key = Random.new().read(32)
plaintext = decrypt_attachment(
ciphertext,
unpaddedbase64.encode_base64(fake_key, urlsafe=True),
keys["hashes"]["sha256"],
keys["iv"],
)
assert plaintext != data
|
2,340 |
xml decl handler
|
from _typeshed import Incomplete, ReadableBuffer, SupportsRead
from typing import Any, NoReturn
from xml.dom.minidom import Document, DOMImplementation, Node, TypeInfo
from xml.dom.xmlbuilder import DOMBuilderFilter, Options
TEXT_NODE = Node.TEXT_NODE
CDATA_SECTION_NODE = Node.CDATA_SECTION_NODE
DOCUMENT_NODE = Node.DOCUMENT_NODE
FILTER_ACCEPT = DOMBuilderFilter.FILTER_ACCEPT
FILTER_REJECT = DOMBuilderFilter.FILTER_REJECT
FILTER_SKIP = DOMBuilderFilter.FILTER_SKIP
FILTER_INTERRUPT = DOMBuilderFilter.FILTER_INTERRUPT
theDOMImplementation: DOMImplementation | None
class ElementInfo:
tagName: Incomplete
def __init__(self, tagName, model: Incomplete | None = None) -> None: ...
def getAttributeType(self, aname) -> TypeInfo: ...
def getAttributeTypeNS(self, namespaceURI, localName) -> TypeInfo: ...
def isElementContent(self) -> bool: ...
def isEmpty(self) -> bool: ...
def isId(self, aname) -> bool: ...
def isIdNS(self, euri, ename, auri, aname) -> bool: ...
class ExpatBuilder:
document: Document # Created in self.reset()
curNode: Incomplete # Created in self.reset()
def __init__(self, options: Options | None = None) -> None: ...
def createParser(self): ...
def getParser(self): ...
def reset(self) -> None: ...
def install(self, parser) -> None: ...
def parseFile(self, file: SupportsRead[ReadableBuffer | str]) -> Document: ...
def parseString(self, string: str | ReadableBuffer) -> Document: ...
def start_doctype_decl_handler(self, doctypeName, systemId, publicId, has_internal_subset) -> None: ...
def end_doctype_decl_handler(self) -> None: ...
def pi_handler(self, target, data) -> None: ...
def character_data_handler_cdata(self, data) -> None: ...
def character_data_handler(self, data) -> None: ...
def start_cdata_section_handler(self) -> None: ...
def end_cdata_section_handler(self) -> None: ...
def entity_decl_handler(self, entityName, is_parameter_entity, value, base, systemId, publicId, notationName) -> None: ...
def notation_decl_handler(self, notationName, base, systemId, publicId) -> None: ...
def comment_handler(self, data) -> None: ...
def external_entity_ref_handler(self, context, base, systemId, publicId) -> int: ...
def first_element_handler(self, name, attributes) -> None: ...
def start_element_handler(self, name, attributes) -> None: ...
def end_element_handler(self, name) -> None: ...
def element_decl_handler(self, name, model) -> None: ...
def attlist_decl_handler(self, elem, name, type, default, required) -> None: ...
def METHOD_NAME(self, version, encoding, standalone) -> None: ...
class FilterVisibilityController:
filter: DOMBuilderFilter
def __init__(self, filter: DOMBuilderFilter) -> None: ...
def startContainer(self, node: Node) -> int: ...
def acceptNode(self, node: Node) -> int: ...
class FilterCrutch:
def __init__(self, builder) -> None: ...
class Rejecter(FilterCrutch):
def start_element_handler(self, *args: Any) -> None: ...
def end_element_handler(self, *args: Any) -> None: ...
class Skipper(FilterCrutch):
def start_element_handler(self, *args: Any) -> None: ...
def end_element_handler(self, *args: Any) -> None: ...
class FragmentBuilder(ExpatBuilder):
fragment: Incomplete | None
originalDocument: Incomplete
context: Incomplete
def __init__(self, context, options: Options | None = None) -> None: ...
class Namespaces:
def createParser(self): ...
def install(self, parser) -> None: ...
def start_namespace_decl_handler(self, prefix, uri) -> None: ...
def start_element_handler(self, name, attributes) -> None: ...
def end_element_handler(self, name) -> None: ...
class ExpatBuilderNS(Namespaces, ExpatBuilder): ...
class FragmentBuilderNS(Namespaces, FragmentBuilder): ...
class ParseEscape(Exception): ...
class InternalSubsetExtractor(ExpatBuilder):
subset: Any | None
def getSubset(self) -> Any | None: ...
def parseFile(self, file: SupportsRead[ReadableBuffer | str]) -> None: ... # type: ignore[override]
def parseString(self, string: str | ReadableBuffer) -> None: ... # type: ignore[override]
def start_doctype_decl_handler(self, name, publicId, systemId, has_internal_subset) -> None: ... # type: ignore[override]
def end_doctype_decl_handler(self) -> NoReturn: ...
def start_element_handler(self, name, attrs) -> NoReturn: ...
def parse(file: str | SupportsRead[ReadableBuffer | str], namespaces: bool = True): ...
def parseString(string: str | ReadableBuffer, namespaces: bool = True): ...
def parseFragment(file, context, namespaces: bool = True): ...
def parseFragmentString(string: str, context, namespaces: bool = True): ...
def makeBuilder(options: Options) -> ExpatBuilderNS | ExpatBuilder: ...
|
2,341 |
test s3 data auto flush one table
|
#
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
from typing import Any, Union
from unittest.mock import ANY, MagicMock, call, patch
from destination_firebolt.writer import FireboltS3Writer, FireboltSQLWriter
from pytest import fixture, mark
@fixture
def connection() -> MagicMock:
return MagicMock()
@fixture
def sql_writer(connection: MagicMock) -> FireboltSQLWriter:
return FireboltSQLWriter(connection)
@fixture
@patch("destination_firebolt.writer.time", MagicMock(return_value=111))
@patch("destination_firebolt.writer.uuid4", MagicMock(return_value="dummy-uuid"))
def s3_writer(connection: MagicMock) -> FireboltS3Writer:
# Make sure S3FileSystem mock is reset each time
with patch("destination_firebolt.writer.fs.S3FileSystem", MagicMock()):
return FireboltS3Writer(connection, "dummy_bucket", "access_key", "secret_key", "us-east-1")
def test_sql_default(sql_writer: FireboltSQLWriter) -> None:
assert len(sql_writer._buffer) == 0
assert sql_writer.flush_interval == 1000
@mark.parametrize("writer", ["sql_writer", "s3_writer"])
def test_sql_create(connection: MagicMock, writer: Union[FireboltSQLWriter, FireboltS3Writer], request: Any) -> None:
writer = request.getfixturevalue(writer)
expected_query = """
CREATE FACT TABLE IF NOT EXISTS _airbyte_raw_dummy (
_airbyte_ab_id TEXT,
_airbyte_emitted_at TIMESTAMP,
_airbyte_data TEXT
)
PRIMARY INDEX _airbyte_ab_id
"""
writer.create_raw_table("dummy")
connection.cursor.return_value.execute.assert_called_once_with(expected_query)
def test_data_buffering(sql_writer: FireboltSQLWriter) -> None:
sql_writer.queue_write_data("dummy", "id1", 20200101, '{"key": "value"}')
assert sql_writer._buffer["dummy"][0] == ("id1", 20200101, '{"key": "value"}')
assert len(sql_writer._buffer["dummy"]) == 1
assert len(sql_writer._buffer.keys()) == 1
sql_writer.queue_write_data("dummy", "id2", 20200102, '{"key2": "value2"}')
assert sql_writer._buffer["dummy"][1] == ("id2", 20200102, '{"key2": "value2"}')
assert len(sql_writer._buffer["dummy"]) == 2
assert len(sql_writer._buffer.keys()) == 1
sql_writer.queue_write_data("dummy2", "id3", 20200103, '{"key3": "value3"}')
assert sql_writer._buffer["dummy2"][0] == ("id3", 20200103, '{"key3": "value3"}')
assert len(sql_writer._buffer["dummy"]) == 2
assert len(sql_writer._buffer["dummy2"]) == 1
assert len(sql_writer._buffer.keys()) == 2
def test_data_auto_flush_one_table(connection: MagicMock, sql_writer: FireboltSQLWriter) -> None:
sql_writer.flush_interval = 2
sql_writer.queue_write_data("dummy", "id1", 20200101, '{"key": "value"}')
connection.cursor.return_value.executemany.assert_not_called()
assert sql_writer._values == 1
sql_writer.queue_write_data("dummy", "id1", 20200101, '{"key": "value"}')
connection.cursor.return_value.executemany.assert_called_once()
assert len(sql_writer._buffer.keys()) == 0
assert sql_writer._values == 0
sql_writer.queue_write_data("dummy", "id1", 20200101, '{"key": "value"}')
assert len(sql_writer._buffer.keys()) == 1
def test_data_auto_flush_multi_tables(connection: MagicMock, sql_writer: FireboltSQLWriter) -> None:
sql_writer.flush_interval = 2
sql_writer.queue_write_data("dummy", "id1", 20200101, '{"key": "value"}')
connection.cursor.return_value.executemany.assert_not_called()
assert sql_writer._values == 1
sql_writer.queue_write_data("dummy2", "id1", 20200101, '{"key": "value"}')
assert len(connection.cursor.return_value.executemany.mock_calls) == 2
assert len(sql_writer._buffer.keys()) == 0
assert sql_writer._values == 0
def test_s3_default(s3_writer: FireboltS3Writer) -> None:
assert s3_writer.flush_interval == 100000
assert s3_writer._values == 0
assert len(s3_writer._buffer.keys()) == 0
def test_s3_delete_tables(connection: MagicMock, s3_writer: FireboltS3Writer) -> None:
expected_sql = "DROP TABLE IF EXISTS _airbyte_raw_dummy"
s3_writer.delete_table("dummy")
connection.cursor.return_value.execute.assert_called_once_with(expected_sql)
@patch("pyarrow.parquet.write_to_dataset")
def METHOD_NAME(mock_write: MagicMock, s3_writer: FireboltS3Writer) -> None:
s3_writer.flush_interval = 2
s3_writer.queue_write_data("dummy", "id1", 20200101, '{"key": "value"}')
mock_write.assert_not_called()
assert s3_writer._values == 1
s3_writer.queue_write_data("dummy", "id1", 20200101, '{"key": "value"}')
mock_write.assert_called_once_with(table=ANY, root_path="dummy_bucket/airbyte_output/111_dummy-uuid/dummy", filesystem=s3_writer.fs)
assert len(s3_writer._buffer.keys()) == 0
assert s3_writer._values == 0
assert s3_writer._updated_tables == set(["dummy"])
mock_write.reset_mock()
s3_writer.queue_write_data("dummy", "id1", 20200101, '{"key": "value"}')
mock_write.assert_not_called()
assert len(s3_writer._buffer.keys()) == 1
assert s3_writer._updated_tables == set(["dummy"])
@patch("pyarrow.parquet.write_to_dataset")
def test_s3_data_auto_flush_multi_tables(mock_write: MagicMock, s3_writer: FireboltS3Writer) -> None:
s3_writer.flush_interval = 2
s3_writer.queue_write_data("dummy", "id1", 20200101, '{"key": "value"}')
mock_write.assert_not_called()
assert s3_writer._values == 1
s3_writer.queue_write_data("dummy2", "id1", 20200101, '{"key": "value"}')
assert mock_write.mock_calls == [
call(table=ANY, root_path="dummy_bucket/airbyte_output/111_dummy-uuid/dummy", filesystem=s3_writer.fs),
call(table=ANY, root_path="dummy_bucket/airbyte_output/111_dummy-uuid/dummy2", filesystem=s3_writer.fs),
]
assert len(s3_writer._buffer.keys()) == 0
assert s3_writer._values == 0
assert s3_writer._updated_tables == set(["dummy", "dummy2"])
def test_s3_final_flush(connection: MagicMock, s3_writer: FireboltS3Writer) -> None:
s3_writer._updated_tables = set(["dummy", "dummy2"])
s3_writer.flush()
assert len(connection.cursor.return_value.execute.mock_calls) == 8
expected_url1 = "s3://dummy_bucket/airbyte_output/111_dummy-uuid/dummy"
expected_url2 = "s3://dummy_bucket/airbyte_output/111_dummy-uuid/dummy2"
connection.cursor.return_value.execute.assert_any_call(ANY, parameters=(expected_url1, "access_key", "secret_key"))
connection.cursor.return_value.execute.assert_any_call(ANY, parameters=(expected_url2, "access_key", "secret_key"))
expected_query1 = "INSERT INTO _airbyte_raw_dummy SELECT * FROM ex_airbyte_raw_dummy"
expected_query2 = "INSERT INTO _airbyte_raw_dummy2 SELECT * FROM ex_airbyte_raw_dummy2"
connection.cursor.return_value.execute.assert_any_call(expected_query1)
connection.cursor.return_value.execute.assert_any_call(expected_query2)
def test_s3_cleanup(connection: MagicMock, s3_writer: FireboltS3Writer) -> None:
expected_sql = "DROP TABLE IF EXISTS ex_airbyte_raw_my_table"
bucket_path = "dummy_bucket/airbyte_output/111_dummy-uuid/my_table"
s3_writer.cleanup("my_table")
connection.cursor.return_value.execute.assert_called_once_with(expected_sql)
s3_writer.fs.delete_dir_contents.assert_called_once_with(bucket_path)
|
2,342 |
test controlchars
|
# Copyright © Michal Čihař <[email protected]>
#
# SPDX-License-Identifier: GPL-3.0-or-later
"""Tests for automatix fixups."""
from django.test import TestCase
from weblate.checks.tests.test_checks import MockUnit
from weblate.trans.autofixes import fix_target
from weblate.trans.autofixes.chars import (
DevanagariDanda,
RemoveControlChars,
RemoveZeroSpace,
ReplaceTrailingDotsWithEllipsis,
)
from weblate.trans.autofixes.custom import DoubleApostrophes
from weblate.trans.autofixes.html import BleachHTML
from weblate.trans.autofixes.whitespace import SameBookendingWhitespace
class AutoFixTest(TestCase):
def test_ellipsis(self):
unit = MockUnit(source="Foo…")
fix = ReplaceTrailingDotsWithEllipsis()
self.assertEqual(fix.fix_target(["Bar..."], unit), (["Bar…"], True))
self.assertEqual(fix.fix_target(["Bar... "], unit), (["Bar... "], False))
def test_no_ellipsis(self):
unit = MockUnit(source="Foo...")
fix = ReplaceTrailingDotsWithEllipsis()
self.assertEqual(fix.fix_target(["Bar..."], unit), (["Bar..."], False))
self.assertEqual(fix.fix_target(["Bar…"], unit), (["Bar…"], False))
def test_whitespace(self):
unit = MockUnit(source="Foo\n")
fix = SameBookendingWhitespace()
self.assertEqual(fix.fix_target(["Bar"], unit), (["Bar\n"], True))
self.assertEqual(fix.fix_target(["Bar\n"], unit), (["Bar\n"], False))
unit = MockUnit(source=" ")
self.assertEqual(fix.fix_target([" "], unit), ([" "], False))
def test_no_whitespace(self):
unit = MockUnit(source="Foo")
fix = SameBookendingWhitespace()
self.assertEqual(fix.fix_target(["Bar"], unit), (["Bar"], False))
self.assertEqual(fix.fix_target(["Bar\n"], unit), (["Bar"], True))
def test_whitespace_flags(self):
fix = SameBookendingWhitespace()
unit = MockUnit(source="str", flags="ignore-begin-space")
self.assertEqual(fix.fix_target([" str"], unit), ([" str"], False))
unit = MockUnit(source="str", flags="ignore-end-space")
self.assertEqual(fix.fix_target([" str "], unit), (["str "], True))
def test_html(self):
fix = BleachHTML()
unit = MockUnit(source='<a href="script:foo()">link</a>', flags="safe-html")
self.assertEqual(
fix.fix_target(['<a href="script:foo()">link</a>'], unit),
(["<a>link</a>"], True),
)
self.assertEqual(
fix.fix_target(['<a href="#" onclick="foo()">link</a>'], unit),
(['<a href="#">link</a>'], True),
)
self.assertEqual(
fix.fix_target(["<https://weblate.org>"], unit),
([""], True),
)
def test_html_markdown(self):
fix = BleachHTML()
unit = MockUnit(
source='<a href="script:foo()">link</a>', flags="safe-html,md-text"
)
self.assertEqual(
fix.fix_target(
['<a href="script:foo()">link</a><https://weblate.org>'], unit
),
(["<a>link</a><https://weblate.org>"], True),
)
self.assertEqual(
fix.fix_target(["<https://weblate.org>"], unit),
(["<https://weblate.org>"], False),
)
def test_zerospace(self):
unit = MockUnit(source="Foo\u200b")
fix = RemoveZeroSpace()
self.assertEqual(fix.fix_target(["Bar"], unit), (["Bar"], False))
self.assertEqual(fix.fix_target(["Bar\u200b"], unit), (["Bar\u200b"], False))
def test_no_zerospace(self):
unit = MockUnit(source="Foo")
fix = RemoveZeroSpace()
self.assertEqual(fix.fix_target(["Bar"], unit), (["Bar"], False))
self.assertEqual(fix.fix_target(["Bar\u200b"], unit), (["Bar"], True))
def METHOD_NAME(self):
unit = MockUnit(source="Foo\x1b")
fix = RemoveControlChars()
self.assertEqual(fix.fix_target(["Bar"], unit), (["Bar"], False))
self.assertEqual(fix.fix_target(["Bar\x1b"], unit), (["Bar"], True))
self.assertEqual(fix.fix_target(["Bar\n"], unit), (["Bar\n"], False))
def test_no_controlchars(self):
unit = MockUnit(source="Foo")
fix = RemoveControlChars()
self.assertEqual(fix.fix_target(["Bar"], unit), (["Bar"], False))
self.assertEqual(fix.fix_target(["Bar\x1b"], unit), (["Bar"], True))
self.assertEqual(fix.fix_target(["Bar\n"], unit), (["Bar\n"], False))
def test_fix_target(self):
unit = MockUnit(source="Foo…")
fixed, fixups = fix_target(["Bar..."], unit)
self.assertEqual(fixed, ["Bar…"])
self.assertEqual(len(fixups), 1)
self.assertEqual(str(fixups[0]), "Trailing ellipsis")
def test_apostrophes(self):
unit = MockUnit(source="Foo")
fix = DoubleApostrophes()
# No flags
self.assertEqual(fix.fix_target(["Bar"], unit), (["Bar"], False))
# No format string, but forced
unit.flags = "java-format"
self.assertEqual(fix.fix_target(["Bar"], unit), (["Bar"], False))
# No format string
unit.flags = "auto-java-messageformat"
self.assertEqual(fix.fix_target(["Bar"], unit), (["Bar"], False))
unit.source = "test {0}"
unit.sources = [unit.source]
# Nothing to fix
self.assertEqual(fix.fix_target(["r {0}"], unit), (["r {0}"], False))
# Correct string
self.assertEqual(fix.fix_target(["''r'' {0}"], unit), (["''r'' {0}"], False))
# String with quoted format string
self.assertEqual(
fix.fix_target(["''r'' '{0}'"], unit), (["''r'' '{0}'"], False)
)
# Fixes
self.assertEqual(fix.fix_target(["'r''' {0}"], unit), (["''r'' {0}"], True))
# Fixes keeping double ones
self.assertEqual(
fix.fix_target(["'''''''r'''' {0}"], unit), (["''''r'''' {0}"], True)
)
# Quoted format
self.assertEqual(fix.fix_target(["'r''' {0}"], unit), (["''r'' {0}"], True))
unit.source = "foo"
unit.sources = [unit.source]
unit.flags = "java-format"
self.assertEqual(fix.fix_target(["bar'"], unit), (["bar''"], True))
def test_devanagaridanda(self):
non_unit = MockUnit(source="Foo", code="bn")
bn_unit = MockUnit(source="Foo.", code="bn")
cs_unit = MockUnit(source="Foo.", code="cs")
fix = DevanagariDanda()
self.assertEqual(fix.fix_target(["Bar."], non_unit), (["Bar."], False))
self.assertEqual(fix.fix_target(["Bar."], bn_unit), (["Bar।"], True))
self.assertEqual(fix.fix_target(["Bar|"], bn_unit), (["Bar।"], True))
self.assertEqual(fix.fix_target(["Bar।"], bn_unit), (["Bar।"], False))
self.assertEqual(fix.fix_target(["Bar."], cs_unit), (["Bar."], False))
|
2,343 |
exists
|
import logging
import slugify
from flask_mongoengine import Document
from mongoengine.fields import StringField
from mongoengine.signals import pre_save, post_delete
from .queryset import UDataQuerySet
from udata.utils import is_uuid
log = logging.getLogger(__name__)
class SlugField(StringField):
'''
A field that produces a slug from the inputs and auto-
increments the slug if the value already exists.
'''
# Do not remove, this is required to trigger field population
_auto_gen = True
def __init__(self, populate_from=None, update=False, lower_case=True,
separator='-', follow=False, **kwargs):
kwargs.setdefault('unique', True)
self.populate_from = populate_from
self.update = update
self.lower_case = lower_case
self.separator = separator
self.follow = follow
self.instance = None
super(SlugField, self).__init__(**kwargs)
def __get__(self, instance, owner):
# mongoengine calls this after document initialization
# We register signals handlers here to have a owner reference
if not hasattr(self, 'owner'):
self.owner = owner
pre_save.connect(self.populate_on_pre_save, sender=owner)
if self.follow:
post_delete.connect(self.cleanup_on_delete, sender=owner)
return super(SlugField, self).__get__(instance, owner)
def __deepcopy__(self, memo):
# Fixes no_dereference by avoiding deep copying instance attribute
copied = self.__class__()
copied.__dict__.update(self.__dict__)
return copied
# Do not remove, this is required when field population is triggered
def generate(self):
pass
def slugify(self, value):
'''
Apply slugification according to specified field rules
'''
if value is None:
return
return slugify.slugify(value, max_length=self.max_length,
separator=self.separator,
to_lower=self.lower_case)
def latest(self, value):
'''
Get the latest object for a given old slug
'''
namespace = self.owner_document.__name__
follow = SlugFollow.objects(namespace=namespace, old_slug=value).first()
if follow:
return self.owner_document.objects(slug=follow.new_slug).first()
return None
def cleanup_on_delete(self, sender, document, **kwargs):
'''
Clean up slug redirections on object deletion
'''
if not self.follow or sender is not self.owner_document:
return
slug = getattr(document, self.db_field)
namespace = self.owner_document.__name__
SlugFollow.objects(namespace=namespace, new_slug=slug).delete()
def populate_on_pre_save(self, sender, document, **kwargs):
field = document._fields.get(self.name)
if field:
populate_slug(document, field)
class SlugFollow(Document):
'''
Keeps track of slug changes for a given namespace/class.
Fields are:
* namespace - A namespace under which this slug falls
(e.g. match, team, user etc)
* old_slug - Before change slug.
* new_slug - After change slug
'''
namespace = StringField(required=True)
old_slug = StringField(required=True)
new_slug = StringField(required=True)
meta = {
'indexes': [
('namespace', 'old_slug'),
],
'queryset_class': UDataQuerySet,
}
def populate_slug(instance, field):
'''
Populate a slug field if needed.
'''
value = getattr(instance, field.db_field)
try:
previous = instance.__class__.objects.get(id=instance.id)
except Exception:
previous = None
# Field value has changed
changed = field.db_field in instance._get_changed_fields()
# Field initial value has been manually set
manual = not previous and value or changed
if not manual and field.populate_from:
# value to slugify is extracted from populate_from parameter
value = getattr(instance, field.populate_from)
if previous and value == getattr(previous, field.populate_from):
return value
if previous and getattr(previous, field.db_field) == value:
# value is unchanged from DB
return value
if previous and not changed and not field.update:
# Field is not manually set and slug should not update on change
return value
slug = field.slugify(value)
# This can happen when serializing an object which does not contain
# the properties used to generate the slug. Typically, when such
# an object is passed to one of the Celery workers (see issue #20).
if slug is None:
return
old_slug = getattr(previous, field.db_field, None)
if slug == old_slug:
return slug
# Ensure uniqueness
if field.unique:
base_slug = slug
index = 1
qs = instance.__class__.objects
if previous:
qs = qs(id__ne=previous.id)
def METHOD_NAME(s):
return qs(**{field.db_field: s}).clear_cls_query().limit(1).count(True) > 0
while METHOD_NAME(slug):
# keep space for index suffix, trim slug if needed
slug_overflow = len('{0}-{1}'.format(base_slug, index)) - field.max_length
if slug_overflow >= 1:
base_slug = base_slug[:-slug_overflow]
slug = '{0}-{1}'.format(base_slug, index)
index += 1
if is_uuid(slug):
slug = '{0}-uuid'.format(slug)
# Track old slugs for this class
if field.follow and old_slug != slug:
ns = instance.__class__.__name__
# Destroy redirections from this new slug
SlugFollow.objects(namespace=ns, old_slug=slug).delete()
if old_slug:
# Create a redirect for previous slug
slug_follower, created = SlugFollow.objects.get_or_create(
namespace=ns,
old_slug=old_slug,
auto_save=False,
)
slug_follower.new_slug = slug
slug_follower.save()
# Maintain previous redirects
SlugFollow.objects(namespace=ns, new_slug=old_slug).update(new_slug=slug)
setattr(instance, field.db_field, slug)
return slug
|
2,344 |
test full random misc balances
|
from random import Random
from eth2spec.test.context import (
with_altair_and_later,
spec_test,
spec_state_test,
with_custom_state,
single_phase,
low_balances, misc_balances,
)
from eth2spec.test.helpers.inactivity_scores import randomize_inactivity_scores
from eth2spec.test.helpers.rewards import leaking
import eth2spec.test.helpers.rewards as rewards_helpers
@with_altair_and_later
@spec_state_test
def test_random_inactivity_scores_0(spec, state):
randomize_inactivity_scores(spec, state, rng=Random(9999))
yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(9999))
@with_altair_and_later
@spec_state_test
def test_random_inactivity_scores_1(spec, state):
randomize_inactivity_scores(spec, state, rng=Random(10000))
yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(10000))
@with_altair_and_later
@spec_state_test
def test_half_zero_half_random_inactivity_scores(spec, state):
randomize_inactivity_scores(spec, state, rng=Random(10101))
half_val_point = len(state.validators) // 2
state.inactivity_scores = [0] * half_val_point + state.inactivity_scores[half_val_point:]
yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(10101))
@with_altair_and_later
@spec_state_test
def test_random_high_inactivity_scores(spec, state):
randomize_inactivity_scores(spec, state, minimum=500000, maximum=5000000, rng=Random(9998))
yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(9998))
@with_altair_and_later
@with_custom_state(balances_fn=low_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE)
@spec_test
@single_phase
def test_random_inactivity_scores_low_balances_0(spec, state):
randomize_inactivity_scores(spec, state, rng=Random(11111))
yield from rewards_helpers.run_test_full_random(spec, state)
@with_altair_and_later
@with_custom_state(balances_fn=low_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE)
@spec_test
@single_phase
def test_random_inactivity_scores_low_balances_1(spec, state):
randomize_inactivity_scores(spec, state, rng=Random(22222))
yield from rewards_helpers.run_test_full_random(spec, state)
@with_altair_and_later
@with_custom_state(balances_fn=misc_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE)
@spec_test
@single_phase
def METHOD_NAME(spec, state):
randomize_inactivity_scores(spec, state, rng=Random(33333))
yield from rewards_helpers.run_test_full_random(spec, state)
#
# Leaking variants
#
@with_altair_and_later
@spec_state_test
@leaking()
def test_random_inactivity_scores_leaking_0(spec, state):
randomize_inactivity_scores(spec, state, rng=Random(9999))
yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(9999))
@with_altair_and_later
@spec_state_test
@leaking()
def test_random_inactivity_scores_leaking_1(spec, state):
randomize_inactivity_scores(spec, state, rng=Random(10000))
yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(10000))
@with_altair_and_later
@spec_state_test
@leaking()
def test_half_zero_half_random_inactivity_scores_leaking(spec, state):
randomize_inactivity_scores(spec, state, rng=Random(10101))
half_val_point = len(state.validators) // 2
state.inactivity_scores = [0] * half_val_point + state.inactivity_scores[half_val_point:]
yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(10101))
@with_altair_and_later
@spec_state_test
@leaking()
def test_random_high_inactivity_scores_leaking(spec, state):
randomize_inactivity_scores(spec, state, minimum=500000, maximum=5000000, rng=Random(9998))
yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(9998))
@with_altair_and_later
@spec_state_test
@leaking(epochs=8)
def test_random_high_inactivity_scores_leaking_8_epochs(spec, state):
randomize_inactivity_scores(spec, state, minimum=500000, maximum=5000000, rng=Random(9998))
yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(9998))
|
2,345 |
create init cells
|
"""Cell factory for Otter Assign"""
import nbformat
from .assignment import Assignment
from .feature_toggle import FeatureToggle
from .utils import lock
class CellFactory:
"""
A factory for cells that make use of Otter's client package (e.g. init cell, check cell).
All (non-static cell-generating) methods in this factory should return a
``list[nbformat.NotebookNode]``.
Args:
assignment (``otter.assign.assignment.Assignment``): the assignment config
"""
assignment: Assignment
"""the assignment config"""
def __init__(self, assignment):
self.assignment = assignment
def check_feature_toggle(self, feature_toggle: FeatureToggle):
"""
Check whether the specified feature is enabled for this assignment.
Args:
feature_toggle (``otter.assign.feature_toggle.FeatureToggle``): the feature
Returns:
``bool``: whether the feature is enabled
"""
return feature_toggle.value.is_enabled(self.assignment)
def METHOD_NAME(self):
"""
Generate a cell to initialize Otter in the notebook.
Returns:
``list[nbformat.NotebookNode]``: the init cell
"""
if self.assignment.runs_on == "colab":
args = "colab=True"
elif self.assignment.runs_on == "jupyterlite":
args = "jupyterlite=True"
else:
args = f"\"{self.assignment.master.name}\""
if self.assignment.tests.url_prefix:
args += f", tests_url_prefix=\"{self.assignment.tests.url_prefix}\""
contents = f'# Initialize Otter\nimport otter\ngrader = otter.Notebook({args})'
cell = nbformat.v4.new_code_cell(contents)
lock(cell)
return [cell]
def create_check_cells(self, question):
"""
Create a cell calling ``otter.Notebook.check`` for the specified question.
Args:
question (``otter.assign.question_config.QuestionConfig``): the question config
Returns:
``list[nbformat.NotebookNode]``: the check cell
"""
cell = nbformat.v4.new_code_cell()
cell.source = ['grader.check("{}")'.format(question.name)]
lock(cell)
return [cell]
def create_check_all_cells(self):
"""
Generate a check-all cell and a Markdown cell with instructions to run all tests in the
notebook.
Returns:
``list[nbformat.NotebookNode]``: the check-all cells
"""
instructions = nbformat.v4.new_markdown_cell()
instructions.source = "---\n\nTo double-check your work, the cell below will rerun all " \
"of the autograder tests."
check_all = nbformat.v4.new_code_cell("grader.check_all()")
lock(instructions)
lock(check_all)
return [instructions, check_all]
def create_export_cells(self):
"""
Generate export cells that instruct the student to run a code cell calling
``otter.Notebook.export`` to generate and download their submission.
Returns:
``list[nbformat.NotebookNode]``: the export cells
"""
if not self.assignment.export_cell:
return []
instructions = nbformat.v4.new_markdown_cell()
instructions.source = "## Submission\n\nMake sure you have run all cells in your " \
"notebook in order before running the cell below, so that all images/graphs appear " \
"in the output. The cell below will generate a zip file for you to submit."
# only include save text if force_save is false
if not self.assignment.export_cell.force_save:
instructions.source += " **Please save before exporting!**"
if self.assignment.export_cell.instructions:
instructions.source += '\n\n' + self.assignment.export_cell.instructions
export = nbformat.v4.new_code_cell()
source_lines = []
# only include save text if force_save is false
if not self.assignment.export_cell.force_save:
source_lines.append(
"# Save your notebook first, then run this cell to export your submission.")
args = []
if not self.assignment.export_cell.filtering:
args += ["filtering=False"]
if not self.assignment.export_cell.pdf:
args += ["pdf=False"]
if self.assignment.export_cell.force_save:
args += ["force_save=True"]
if self.assignment.export_cell.run_tests:
args += ["run_tests=True"]
if len(self.assignment.export_cell.files) != 0:
args += [f"files={self.assignment.export_cell.files}"]
source_lines.append(f"grader.export({', '.join(args)})")
export.source = "\n".join(source_lines)
lock(instructions)
lock(export)
cells = [instructions, export]
if self.check_feature_toggle(FeatureToggle.EMPTY_MD_BOUNDARY_CELLS):
cells.append(nbformat.v4.new_markdown_cell(" ")) # add buffer cell
return cells
@staticmethod
def create_markdown_response_cell():
"""
Generate a Markdown response cell with the following contents:
.. code-block:: markdown
_Type your answer here, replacing this text._
Note that, unlike the other methods, this method returns a single cell rather than a list of
cells (since it is not used in the same context).
Returns:
``nbformat.NotebookNode``: the response cell
"""
return nbformat.v4.new_markdown_cell("_Type your answer here, replacing this text._")
|
2,346 |
process end
|
from udapi.core.block import Block
from collections import Counter
class Stats(Block):
"""Block corefud.Stats prints various coreference-related statistics."""
def __init__(self, m_len_max=5, c_len_max=5, report_mentions=True, report_entities=True,
report_details=True, selected_upos='NOUN PRON PROPN DET ADJ VERB ADV NUM',
exclude_singletons=False, exclude_nonsingletons=False, style='human', **kwargs):
super().__init__(**kwargs)
self.m_len_max = m_len_max
self.c_len_max = c_len_max
self.report_mentions = report_mentions
self.report_entities = report_entities
self.report_details = report_details
self.exclude_singletons = exclude_singletons
self.exclude_nonsingletons = exclude_nonsingletons
self.style = style
if style not in 'tex human'.split():
raise ValueError(f'Unknown style {style}')
self.counter = Counter()
self.mentions = 0
self.entities = 0
self.total_nodes = 0
self.longest_mention = 0
self.longest_entity = 0
self.m_words = 0
self.selected_upos = None if selected_upos == 'all' else selected_upos.split()
def process_document(self, doc):
self.total_nodes += len(list(doc.nodes))
for entity in doc.coref_entities:
len_mentions = len(entity.mentions)
if len_mentions == 1 and self.exclude_singletons:
continue
elif len_mentions > 1 and self.exclude_nonsingletons:
continue
self.longest_entity = max(len_mentions, self.longest_entity)
self.counter['c_total_len'] += len_mentions
self.counter[f"c_len_{min(len_mentions, self.c_len_max)}"] += 1
self.entities += 1
if not self.report_mentions and not self.report_details:
continue
for mention in entity.mentions:
self.mentions += 1
all_words = len(mention.words)
non_empty = len([w for w in mention.words if not w.is_empty()])
self.m_words += all_words
self.longest_mention = max(non_empty, self.longest_mention)
self.counter['m_total_len'] += non_empty
self.counter[f"m_len_{min(non_empty, self.m_len_max)}"] += 1
if self.report_details:
upos = 'other'
if not self.selected_upos or mention.head.upos in self.selected_upos:
upos = mention.head.upos
self.counter['m_head_upos_' + upos] += 1
self.counter['m_with_empty'] += 1 if all_words > non_empty else 0
self.counter['m_with_gaps'] += 1 if ',' in mention.span else 0
heads, mwords = 0, set(mention.words)
for w in mention.words:
if w.parent:
heads += 0 if w.parent in mwords else 1
else:
heads += 0 if any(d['parent'] in mwords for d in w.deps) else 1
self.counter['m_nontreelet'] += 1 if heads > 1 else 0
def METHOD_NAME(self):
mentions_nonzero = 1 if self.mentions == 0 else self.mentions
entities_nonzero = 1 if self.entities == 0 else self.entities
total_nodes_nonzero = 1 if self.total_nodes == 0 else self.total_nodes
columns = []
if self.report_entities:
columns += [('entities', f"{self.entities:7,}"),
('entities_per1k', f"{1000 * self.entities / total_nodes_nonzero:6.0f}"),
('longest_entity', f"{self.longest_entity:6}"),
('avg_entity', f"{self.counter['c_total_len'] / entities_nonzero:5.1f}")]
for i in range(1, self.c_len_max + 1):
percent = 100 * self.counter[f"c_len_{i}"] / entities_nonzero
columns.append((f"c_len_{i}{'' if i < self.c_len_max else '+'}", f"{percent:5.1f}"))
if self.report_mentions:
columns += [('mentions', f"{self.mentions:7,}"),
('mentions_per1k', f"{1000 * self.mentions / total_nodes_nonzero:6.0f}"),
('longest_mention', f"{self.longest_mention:6}"),
('avg_mention', f"{self.counter['m_total_len'] / mentions_nonzero:5.1f}")]
for i in range(0, self.m_len_max + 1):
percent = 100 * self.counter[f"m_len_{i}"] / mentions_nonzero
columns.append((f"m_len_{i}{'' if i < self.m_len_max else '+'}", f"{percent:5.1f}"))
if self.report_details:
columns += [('with_empty', f"{100 * self.counter['m_with_empty'] / mentions_nonzero:5.1f}"),
('with_gaps', f"{100 * self.counter['m_with_gaps'] / mentions_nonzero:5.1f}"),
('nontreelet', f"{100 * self.counter['m_nontreelet'] / mentions_nonzero:5.1f}"),]
if self.selected_upos:
upos_list = self.selected_upos + ['other']
else:
upos_list = [x[12:] for x in self.counter if x.startswith('m_head_upos_')]
for upos in upos_list:
columns.append(('head_upos=' + upos, f"{100 * self.counter['m_head_upos_' + upos] / mentions_nonzero:5.1f}"))
if self.style == 'tex':
print(" & ".join(c[1] for c in columns))
elif self.style == 'human':
for c in columns:
print(f"{c[0]:>15} = {c[1].strip():>10}")
|
2,347 |
save
|
import csv
import gzip
import json
import logging
import os
import traceback
from io import StringIO
from typing import ClassVar, List, Optional
import pendulum
from calitp_data_infra.storage import (
GTFSDownloadConfig,
PartitionedGCSArtifact,
ProcessingOutcome,
get_fs,
)
from utils import GTFSScheduleFeedFileHourly, get_schedule_files_in_hour
from airflow.models import BaseOperator
SCHEDULE_PARSED_BUCKET = os.environ["CALITP_BUCKET__GTFS_SCHEDULE_PARSED_HOURLY"]
SCHEDULE_UNZIPPED_BUCKET = os.environ["CALITP_BUCKET__GTFS_SCHEDULE_UNZIPPED_HOURLY"]
GTFS_PARSE_ERROR_THRESHOLD = 0.95
class GTFSScheduleFeedJSONL(PartitionedGCSArtifact):
bucket: ClassVar[str] = SCHEDULE_PARSED_BUCKET
partition_names: ClassVar[List[str]] = GTFSScheduleFeedFileHourly.partition_names
ts: pendulum.DateTime
extract_config: GTFSDownloadConfig
gtfs_filename: str
csv_dialect: Optional[
str
] # dialect would be a better name but we have an old field with that name...
num_lines: Optional[int]
# if you try to set table directly, you get an error because it "shadows a BaseModel attribute"
# so set as a property instead
@property
def table(self) -> str:
return self.gtfs_filename
@property
def dt(self) -> pendulum.Date:
return self.ts.date()
@property
def base64_url(self) -> str:
return self.extract_config.base64_encoded_url
class GTFSScheduleParseOutcome(ProcessingOutcome):
feed_file: GTFSScheduleFeedFileHourly
fields: Optional[List[str]]
parsed_file: Optional[GTFSScheduleFeedJSONL]
class ScheduleParseResult(PartitionedGCSArtifact):
bucket: ClassVar[str] = SCHEDULE_PARSED_BUCKET
partition_names: ClassVar[List[str]] = ["dt", "ts"]
ts: pendulum.DateTime
outcomes: List[GTFSScheduleParseOutcome]
@property
def dt(self):
return self.ts.date()
@property
def successes(self) -> List[GTFSScheduleParseOutcome]:
return [outcome for outcome in self.outcomes if outcome.success]
@property
def table(self) -> str:
return f"{self.outcomes[0].feed_file.table}_parsing_results"
@property
def failures(self) -> List[GTFSScheduleParseOutcome]:
return [outcome for outcome in self.outcomes if not outcome.success]
def METHOD_NAME(self, fs):
self.save_content(
fs=fs,
content="\n".join(o.json() for o in self.outcomes).encode(),
exclude={"outcomes"},
)
def parse_csv_str(contents: str):
lines = []
reader = csv.DictReader(StringIO(contents), restkey="calitp_unknown_fields")
field_names = reader.fieldnames
if len(field_names) == 1:
# we probably have a tab-delimited file; check that, but proceed with default
tab_reader = csv.DictReader(
StringIO(contents), dialect="excel-tab", restkey="calitp_unknown_fields"
)
if len(tab_reader.fieldnames) > 1:
reader = tab_reader
field_names = tab_reader.fieldnames
for line_number, row in enumerate(reader, start=1):
row["_line_number"] = line_number
lines.append(row)
return lines, field_names, reader.dialect
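# Hedged illustration of parse_csv_str (values made up): a comma-delimited
# header is parsed with the default "excel" dialect, and the function falls
# back to the "excel-tab" dialect when the comma parse yields a single field.
#
#     rows, fields, dialect = parse_csv_str("stop_id,stop_name\n1,Main St\n")
#     # rows    == [{"stop_id": "1", "stop_name": "Main St", "_line_number": 1}]
#     # fields  == ["stop_id", "stop_name"]
#     # dialect == "excel"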
def parse_individual_file(
fs,
input_file: GTFSScheduleFeedFileHourly,
gtfs_filename: str,
) -> GTFSScheduleParseOutcome:
logging.info(f"Processing {input_file.path}")
field_names = None
try:
with fs.open(input_file.path, newline="", mode="r", encoding="utf-8-sig") as f:
contents = f.read()
lines, field_names, dialect = parse_csv_str(contents)
num_lines = len(lines)
jsonl_content = gzip.compress(
"\n".join(json.dumps(line) for line in lines).encode()
)
del lines
jsonl_file = GTFSScheduleFeedJSONL(
ts=input_file.ts,
extract_config=input_file.extract_config,
filename=gtfs_filename + ".jsonl.gz",
gtfs_filename=gtfs_filename,
csv_dialect=dialect,
num_lines=num_lines,
)
jsonl_file.save_content(content=jsonl_content, fs=fs)
del jsonl_content
except Exception as e:
logging.warn(f"Failed to process {input_file.path}: {traceback.format_exc()}")
return GTFSScheduleParseOutcome(
success=False,
exception=e,
feed_file=input_file,
fields=field_names,
)
logging.info(f"Parsed {input_file.path}")
return GTFSScheduleParseOutcome(
success=True,
feed_file=input_file,
fields=field_names,
parsed_file=jsonl_file,
)
def parse_files(period: pendulum.Period, input_table_name: str, gtfs_filename: str):
fs = get_fs()
extract_map = get_schedule_files_in_hour(
cls=GTFSScheduleFeedFileHourly,
bucket=SCHEDULE_UNZIPPED_BUCKET,
table=input_table_name,
period=period,
)
if not extract_map:
logging.warn(f"No files found for {input_table_name} for {period}")
return
for ts, files in extract_map.items():
logging.info(f"Processing {len(files)} {input_table_name} records for {ts}")
outcomes = []
for file in files:
outcome = parse_individual_file(fs, file, gtfs_filename)
outcomes.append(outcome)
assert (
len({outcome.feed_file.table for outcome in outcomes}) == 1
), "somehow you're processing multiple input tables"
result = ScheduleParseResult(filename="results.jsonl", ts=ts, outcomes=outcomes)
result.METHOD_NAME(fs)
assert len(files) == len(
result.outcomes
), f"ended up with {len(outcomes)} outcomes from {len(files)} files"
success_rate = len(result.successes) / len(files)
if success_rate < GTFS_PARSE_ERROR_THRESHOLD:
raise RuntimeError(
f"Success rate: {success_rate:.3f} was below error threshold: {GTFS_PARSE_ERROR_THRESHOLD}"
)
class GtfsGcsToJsonlOperatorHourly(BaseOperator):
def __init__(self, input_table_name, gtfs_filename=None, *args, **kwargs):
self.input_table_name = input_table_name
self.gtfs_filename = (
gtfs_filename if gtfs_filename else input_table_name.replace(".txt", "")
)
super().__init__(*args, **kwargs)
def execute(self, context):
period = (
context["data_interval_end"].subtract(microseconds=1)
- context["data_interval_start"]
)
print(f"Processing {period=}")
parse_files(
period,
self.input_table_name,
self.gtfs_filename,
)
if __name__ == "__main__":
with open("/Users/laurie/Downloads/bad_routes.txt", newline="") as f:
content = "\n".join(json.dumps(o) for o in csv.DictReader(f)).encode()
print(content)
|
2,348 |
test undefined occupancy
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
import pickle
import pytest
import numpy as np
import MDAnalysis as mda
from MDAnalysis import NoDataError
from MDAnalysisTests.datafiles import (
PSF, DCD,
XYZ_mini,
)
from numpy.testing import assert_almost_equal
class TestAtom(object):
# Legacy tests from before 363
"""Tests of Atom."""
"""Set up the standard AdK system in implicit solvent."""
@staticmethod
@pytest.fixture()
def universe():
return mda.Universe(PSF, DCD)
@staticmethod
@pytest.fixture()
def atom(universe):
# Leu67:CG
return universe.atoms[1000]
def test_attributes_names(self, atom):
a = atom
assert a.name == 'CG'
assert a.resname == 'LEU'
def test_setting_attribute_name(self, atom):
atom.name = 'AA'
assert atom.name == 'AA'
def test_setting_attribute_type(self, atom):
atom.type = 'Z'
assert atom.type == 'Z'
def test_setting_attribute_mass(self, atom):
atom.mass = 13
assert atom.mass == 13
def test_setting_attributes_charge(self, atom):
atom.charge = 6
assert atom.charge == 6
def test_attributes_positions(self, atom):
known_pos = np.array([3.94543672, -12.4060812, -7.26820087], dtype=np.float32)
a = atom
# new position property (mutable)
assert_almost_equal(a.position, known_pos)
pos = a.position + 3.14
a.position = pos
assert_almost_equal(a.position, pos)
def test_atom_selection(self, universe, atom):
asel = universe.select_atoms('atom 4AKE 67 CG').atoms[0]
assert atom == asel
def test_hierarchy(self, universe, atom):
u = universe
a = atom
assert a.segment == u.select_atoms('segid 4AKE').segments[0]
assert a.residue == u.residues[66]
def test_bad_add(self, atom):
with pytest.raises(TypeError):
atom + 1
def test_add_AG(self, universe, atom):
ag = universe.atoms[:2]
ag2 = atom + ag
for at in [atom, ag[0], ag[1]]:
assert at in ag2
def test_no_velo(self, atom):
with pytest.raises(NoDataError):
atom.velocity
def test_bonded_atoms(self, universe):
at = universe.atoms[0]
ref = [b.partner(at) for b in at.bonds]
assert ref == list(at.bonded_atoms)
def METHOD_NAME(self, universe):
with pytest.raises(AttributeError):
universe.atoms[0].occupancy
@pytest.mark.parametrize("ix", (1, -1))
def test_atom_pickle(self, universe, ix):
atm_out = universe.atoms[ix]
atm_in = pickle.loads(pickle.dumps(atm_out))
assert atm_in == atm_out
class TestAtomNoForceNoVel(object):
@staticmethod
@pytest.fixture()
def a():
u = mda.Universe(XYZ_mini)
return u.atoms[0]
def test_velocity_fail(self, a):
with pytest.raises(NoDataError):
getattr(a, 'velocity')
def test_force_fail(self, a):
with pytest.raises(NoDataError):
getattr(a, 'force')
def test_velocity_set_fail(self, a):
with pytest.raises(NoDataError):
setattr(a, 'velocity', [1.0, 1.0, 1.0])
def test_force_set_fail(self, a):
with pytest.raises(NoDataError):
setattr(a, 'force', [1.0, 1.0, 1.0])
|
2,349 |
update ckpt
|
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils function for program.py."""
import os
import lingvo.compat as tf
def SummaryToCsv(summaries):
"""Convert summary (Dict[str, tf.Summary]) to csv format."""
res = ''
for k, s in summaries.items():
res += f'{k},{s.value[0].simple_value}\n'
return res
def CsvToSummary(csv):
"""Convert csv format to summary (Dict[str, tf.Summary])."""
summaries = {}
for l in csv.split('\n'):
row = l.split(',')
if len(row) != 2:
tf.logging.warn(f'Failed to parse csv line: {l}, will ignore it.')
continue
s = tf.Summary()
v = s.value.add()
v.tag, v.simple_value = row[0], float(row[1])
summaries.update({v.tag: s})
return summaries
class DecodeStatusCache:
"""Maintain status file to keep decoding datasets status.
Status file should have following format:
- 1st line is checkpoint key, e.g. ckpt-123
- the rest lines are dataset names that has been decoded.
Here's an example:
ckpt-123
Dev
Test
"""
def __init__(self, program_dir):
self.ckpt_key = ''
self.decoded_datasets = []
self.status_file = os.path.join(program_dir, 'decoded_datasets.txt')
# TODO(xingwu): Consider adding a TTL.
self.cache_dir = os.path.join(program_dir, 'cache')
tf.io.gfile.makedirs(self.cache_dir)
if tf.io.gfile.exists(self.status_file):
with tf.io.gfile.GFile(self.status_file, 'r') as f:
content = list(l.strip() for l in f.readlines())
if content:
self.ckpt_key = content[0]
if len(content) > 1:
self.decoded_datasets = content[1:]
def METHOD_NAME(self, ckpt_key):
"""Update checkpoint key in the status."""
if ckpt_key != self.ckpt_key:
self.ckpt_key = ckpt_key
self.decoded_datasets = []
with tf.io.gfile.GFile(self.status_file, 'w') as f:
f.write(self.ckpt_key)
def UpdateDataset(self, dataset_name, summaries):
"""Update decoded dataset in the status."""
cache_file = os.path.join(self.cache_dir, f'{dataset_name}.csv')
with tf.io.gfile.GFile(cache_file, 'w') as f:
f.write(SummaryToCsv(summaries))
with tf.io.gfile.GFile(self.status_file, 'w+') as f:
f.write(f.read().strip() + '\n' + dataset_name)
def TryLoadCache(self, ckpt_key, dataset_name):
"""Try load summary cache for ckpt_key, dataset_name.
Args:
ckpt_key: str, checkpoint key, e.g. ckpt-123
dataset_name: str, the dataset name, e.g. Test
Returns:
summaries if load successful, otherwise, return None
"""
if ckpt_key == self.ckpt_key and dataset_name in self.decoded_datasets:
cache_file = os.path.join(self.cache_dir, f'{dataset_name}.csv')
if not tf.io.gfile.exists(cache_file):
tf.logging.warn(f'cached summary {cache_file} is gone!')
return None
with tf.io.gfile.GFile(cache_file, 'r') as f:
summaries = CsvToSummary(f.read())
with tf.io.gfile.GFile(self.status_file, 'w+') as f:
f.write(f.read().strip() + '\n' + dataset_name)
return summaries
return None
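# Hedged sketch of the intended DecodeStatusCache lifecycle:
#
#     cache = DecodeStatusCache(program_dir)
#     cache.METHOD_NAME('ckpt-123')                       # reset status for a new checkpoint
#     summaries = cache.TryLoadCache('ckpt-123', 'Test')  # None on a cache miss
#     if summaries is None:
#         summaries = RunDecode()                         # placeholder for the real decode step
#         cache.UpdateDataset('Test', summaries)          # persist csv + mark dataset as done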
class TriggerScheduler:
"""A trigger scheduler with offset, and interval.
Maintains a counter, incremented each time Trigger() is called. ShouldRun() only
returns True when (counter - offset) % interval == 0.
"""
def __init__(self, offset, interval):
self.offset = offset
self.interval = interval
self.counter = -offset
def Trigger(self):
self.counter += 1
if self.counter >= self.interval:
self.counter = 0
def ShouldRun(self):
return self.counter == 0
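# Hedged illustration of TriggerScheduler: with offset=1 and interval=3 the
# counter starts at -1, so ShouldRun() is True after the 1st, 4th, 7th, ...
# calls to Trigger().
#
#     sched = TriggerScheduler(offset=1, interval=3)
#     fired = []
#     for _ in range(6):
#         sched.Trigger()
#         fired.append(sched.ShouldRun())
#     # fired == [True, False, False, True, False, False]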
|
2,350 |
connectiondatahelper
|
from selenium import webdriver
from bs4 import BeautifulSoup
import json
import time
from selenium.webdriver.common.by import By
import csv
browser = webdriver.Chrome()
pageurl = "https://www.linkedin.com/uas/login?session_redirect=https%3A%2F%2Fwww%2Elinkedin%2Ecom%2Fsearch%2Fresults%2Fpeople%2F%3Fnetwork%3D%255B%2522F%2522%255D%26origin%3DMEMBER_PROFILE_CANNED_SEARCH&fromSignIn=true&trk=cold_join_sign_in"
paginationurl = "https://www.linkedin.com/search/results/people/?network=%5B%22F%22%5D&origin=MEMBER_PROFILE_CANNED_SEARCH&page="
# Function to read username and password from file
def read_creds(filename):
"""This function reads username and password from creddentials.json
Arguments:
filename: name of the file which stores the credentials
:return: returns the credentials
"""
with open(filename) as f:
credentials = json.load(f)
return credentials
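# The credentials file is expected to look roughly like this (key names are
# inferred from the login code below; values are placeholders):
#
#     {
#         "username": "[email protected]",
#         "password": "your-password"
#     }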
# function to find skills
def find_skills(url):
"""This function find the skills from the linkedin profile of the connection
Arguments:
url: url of the profile
:return: returns the list containing the skills
"""
browser.get(url)
time.sleep(3)
skill_set = []
last_height = browser.execute_script("return document.body.scrollHeight")
while True:
browser.execute_script("window.scrollTo(0, window.scrollY + 400);")
time.sleep(2)
new_height = browser.execute_script("return window.scrollY + 400")
if new_height == last_height:
break
last_height = new_height
time.sleep(2)
Skills = browser.find_elements_by_class_name("pv-skill-category-entity")
if len(Skills) == 0:
skill_set.append("Any Skill is not mentioned in the Linkedin Profile")
else:
show_more_button = browser.find_elements_by_class_name(
"pv-skills-section__additional-skills"
)
if len(show_more_button) > 0:
browser.execute_script("arguments[0].click();", show_more_button[0])
pagesource = browser.page_source
content = BeautifulSoup(pagesource, "html.parser")
Skills = content.find_all("li", class_="pv-skill-category-entity")
for skill in Skills:
skill_text = skill.find(
"p", class_="pv-skill-category-entity__name"
).text.strip()
skill_set.append(skill_text.split("\n")[0])
return skill_set
# Function to login
def login_and_npage():
"""This function login to the linkedin page return the number of pages of connection
:return: number of pages
"""
creds = read_creds("credentials.json")
# open login page
browser.get(pageurl)
elementID = browser.find_element_by_id("username")
elementID.send_keys(creds["username"])
elementID = browser.find_element_by_id("password")
elementID.send_keys(creds["password"])
elementID.submit()
url = browser.current_url
browser.get(url)
connectionclass = "mn-connection-card__details"
numberofpages = 0
time.sleep(2)
last_height = browser.execute_script("return document.body.scrollHeight")
while True:
# Scroll down to bottom
browser.execute_script("window.scrollTo(0, document.body.scrollHeight);")
elements = browser.find_elements_by_class_name(
"artdeco-pagination__indicator--number"
)
if len(elements) > 0:
numberofpages = int(elements[-1].text)
return numberofpages
# Wait to load page
time.sleep(2)
# Calculate new scroll height and compare with last scroll height
new_height = browser.execute_script("return document.body.scrollHeight")
if new_height == last_height:
break
last_height = new_height
return 0
# function to find connection data containing name and profileurl only
def METHOD_NAME(numberofpages):
"""This function returns the data of connections containg name and profile url
Arguments:
numberofpages:number of pages of connections
:return: connectiondata
"""
connectiondata = []
for j in range(1, numberofpages + 1):
browser.get(paginationurl + str(j))
time.sleep(3)
pagesource = browser.page_source
soup = BeautifulSoup(pagesource, "html.parser")
connections = soup.find_all("div", class_="entity-result__content")
for connection in connections:
titlecontainer = connection.find(class_="entity-result__title-text")
nameelement = titlecontainer.find("a", class_="app-aware-link")
nametext = nameelement.find("span", {"dir": "ltr"}).text.split("View")[0]
url = nameelement["href"]
connectionobj = {"name": nametext, "profileurl": url}
connectiondata.append(connectionobj)
return connectiondata
# Function for finding the required data for each connection
def finalconnectiondata(connectiondata):
"""This function returns the list containing the data of all connections
Arguments:
connectiondata: list containing the name and profileurl of the connections
:return: finalconnectiondata
"""
final_connectiondata = []
for connection in connectiondata:
skill_set = find_skills(connection["profileurl"])
browser.get(connection["profileurl"])
time.sleep(3)
pagesource = browser.page_source
soup = BeautifulSoup(pagesource, "html.parser")
experiencecontainer = soup.find(
"a", {"data-control-name": "background_details_company"}
)
if experiencecontainer is None:
l_jobtitile = "Don't Have any Job Experience"
else:
l_jobtitile = experiencecontainer.find("h3").text.strip()
last_height = browser.execute_script("return document.body.scrollHeight")
time.sleep(2)
linken_connection = {
"Name": connection["name"],
"Linkedin_URL": connection["profileurl"],
"Latest_JOB_Position": l_jobtitile,
"Skills": skill_set,
}
final_connectiondata.append(linken_connection)
return final_connectiondata
def main():
"""function of execution"""
browser.maximize_window()
numberofpages = login_and_npage()
smalldata_of_connections = METHOD_NAME(numberofpages)
final_connectiondata = finalconnectiondata(smalldata_of_connections)
browser.quit()
keys = final_connectiondata[0].keys()
with open("output.csv", "w", newline="") as output_file:
dict_writer = csv.DictWriter(output_file, keys)
dict_writer.writeheader()
dict_writer.writerows(final_connectiondata)
if __name__ == "__main__":
main()
|
2,351 |
monotone fn inverter
|
"""
Empirical CDF Functions
"""
import numpy as np
from scipy.interpolate import interp1d
def _conf_set(F, alpha=.05):
r"""
Constructs a Dvoretzky-Kiefer-Wolfowitz confidence band for the eCDF.
Parameters
----------
F : array_like
The empirical distributions
alpha : float
Set alpha for a (1 - alpha) % confidence band.
Notes
-----
Based on the DKW inequality.
.. math:: P \left( \sup_x \left| F(x) - \hat{F}_n(x) \right| >
\epsilon \right) \leq 2e^{-2n\epsilon^2}
References
----------
Wasserman, L. 2006. `All of Nonparametric Statistics`. Springer.
"""
nobs = len(F)
epsilon = np.sqrt(np.log(2./alpha) / (2 * nobs))
lower = np.clip(F - epsilon, 0, 1)
upper = np.clip(F + epsilon, 0, 1)
return lower, upper
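# Hedged illustration: with n = 100 observations and alpha = 0.05,
# epsilon = sqrt(log(2 / 0.05) / (2 * 100)) ~= 0.136, so the band is the ECDF
# shifted up and down by ~0.136 and clipped to [0, 1]:
#
#     F = ECDF(data)(grid)            # ECDF is defined later in this module
#     lower, upper = _conf_set(F)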
class StepFunction:
"""
A basic step function.
Values at the ends are handled in the simplest way possible:
everything to the left of x[0] is set to ival; everything
to the right of x[-1] is set to y[-1].
Parameters
----------
x : array_like
y : array_like
ival : float
ival is the value given to points to the left of x[0]. Default
is 0.
sorted : bool
Default is False.
side : {'left', 'right'}, optional
Default is 'left'. Defines the shape of the intervals constituting the
steps. 'right' correspond to [a, b) intervals and 'left' to (a, b].
Examples
--------
>>> import numpy as np
>>> from statsmodels.distributions.empirical_distribution import (
...     StepFunction)
>>>
>>> x = np.arange(20)
>>> y = np.arange(20)
>>> f = StepFunction(x, y)
>>>
>>> print(f(3.2))
3.0
>>> print(f([[3.2,4.5],[24,-3.1]]))
[[ 3. 4.]
[ 19. 0.]]
>>> f2 = StepFunction(x, y, side='right')
>>>
>>> print(f(3.0))
2.0
>>> print(f2(3.0))
3.0
"""
def __init__(self, x, y, ival=0., sorted=False, side='left'): # noqa
if side.lower() not in ['right', 'left']:
msg = "side can take the values 'right' or 'left'"
raise ValueError(msg)
self.side = side
_x = np.asarray(x)
_y = np.asarray(y)
if _x.shape != _y.shape:
msg = "x and y do not have the same shape"
raise ValueError(msg)
if len(_x.shape) != 1:
msg = 'x and y must be 1-dimensional'
raise ValueError(msg)
self.x = np.r_[-np.inf, _x]
self.y = np.r_[ival, _y]
if not sorted:
asort = np.argsort(self.x)
self.x = np.take(self.x, asort, 0)
self.y = np.take(self.y, asort, 0)
self.n = self.x.shape[0]
def __call__(self, time):
tind = np.searchsorted(self.x, time, self.side) - 1
return self.y[tind]
class ECDF(StepFunction):
"""
Return the Empirical CDF of an array as a step function.
Parameters
----------
x : array_like
Observations
side : {'left', 'right'}, optional
Default is 'right'. Defines the shape of the intervals constituting the
steps. 'right' correspond to [a, b) intervals and 'left' to (a, b].
Returns
-------
Empirical CDF as a step function.
Examples
--------
>>> import numpy as np
>>> from statsmodels.distributions.empirical_distribution import ECDF
>>>
>>> ecdf = ECDF([3, 3, 1, 4])
>>>
>>> ecdf([3, 55, 0.5, 1.5])
array([ 0.75, 1. , 0. , 0.25])
"""
def __init__(self, x, side='right'):
x = np.array(x, copy=True)
x.sort()
nobs = len(x)
y = np.linspace(1./nobs, 1, nobs)
super(ECDF, self).__init__(x, y, side=side, sorted=True)
# TODO: make `step` an arg and have a linear interpolation option?
# This is the path with `step` is True
# If `step` is False, a previous version of the code read
# `return interp1d(x,y,drop_errors=False,fill_values=ival)`
# which would have raised a NameError if hit, so would need to be
# fixed. See GH#5701.
class ECDFDiscrete(StepFunction):
"""
Return the Empirical Weighted CDF of an array as a step function.
Parameters
----------
x : array_like
Data values. If freq_weights is None, then x is treated as observations
and the ecdf is computed from the frequency counts of unique values
using numpy.unique.
If freq_weights is not None, then x will be taken as the support of the
mass point distribution with freq_weights as counts for x values.
The x values can be arbitrary sortable values and need not be integers.
freq_weights : array_like
Weights of the observations. sum(freq_weights) is interpreted as nobs
for confint.
If freq_weights is None, then the frequency counts for unique values
will be computed from the data x.
side : {'left', 'right'}, optional
Default is 'right'. Defines the shape of the intervals constituting the
steps. 'right' correspond to [a, b) intervals and 'left' to (a, b].
Returns
-------
Weighted ECDF as a step function.
Examples
--------
>>> import numpy as np
>>> from statsmodels.distributions.empirical_distribution import (
...     ECDFDiscrete)
>>>
>>> ewcdf = ECDFDiscrete([3, 3, 1, 4])
>>> ewcdf([3, 55, 0.5, 1.5])
array([0.75, 1. , 0. , 0.25])
>>>
>>> ewcdf = ECDFDiscrete([3, 1, 4], [1.25, 2.5, 5])
>>>
>>> ewcdf([3, 55, 0.5, 1.5])
array([0.42857143, 1., 0. , 0.28571429])
>>> print('e1 and e2 are equivalent ways of defining the same ECDF')
e1 and e2 are equivalent ways of defining the same ECDF
>>> e1 = ECDFDiscrete([3.5, 3.5, 1.5, 1, 4])
>>> e2 = ECDFDiscrete([3.5, 1.5, 1, 4], freq_weights=[2, 1, 1, 1])
>>> print(e1.x, e2.x)
[-inf 1. 1.5 3.5 4. ] [-inf 1. 1.5 3.5 4. ]
>>> print(e1.y, e2.y)
[0. 0.2 0.4 0.8 1. ] [0. 0.2 0.4 0.8 1. ]
"""
def __init__(self, x, freq_weights=None, side='right'):
if freq_weights is None:
x, freq_weights = np.unique(x, return_counts=True)
else:
x = np.asarray(x)
assert len(freq_weights) == len(x)
w = np.asarray(freq_weights)
sw = np.sum(w)
assert sw > 0
ax = x.argsort()
x = x[ax]
y = np.cumsum(w[ax])
y = y / sw
super(ECDFDiscrete, self).__init__(x, y, side=side, sorted=True)
def METHOD_NAME(fn, x, vectorized=True, **keywords):
"""
Given a monotone function fn (no checking is done to verify monotonicity)
and a set of x values, return a linearly interpolated approximation
to its inverse from its values on x.
"""
x = np.asarray(x)
if vectorized:
y = fn(x, **keywords)
else:
y = []
for _x in x:
y.append(fn(_x, **keywords))
y = np.array(y)
a = np.argsort(y)
return interp1d(y[a], x[a])
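# Hedged illustration of the inverter above: tabulate a monotone function on a
# grid and evaluate the interpolated inverse inside the observed y-range.
#
#     grid = np.linspace(0.1, 5.0, 200)
#     inv_exp = METHOD_NAME(np.exp, grid)   # approximate natural log
#     inv_exp(np.exp(2.0))                  # ~2.0, up to linear-interpolation error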
|
2,352 |
compute center
|
import os, os.path
import socket
import lbann
import lbann.launcher
from lbann.util import make_iterable, nvprof_command
# ==============================================
# Detect the current compute center
# ==============================================
def is_lc_center():
"""Current system is operated by Livermore Computing at Lawrence
Livermore National Laboratory.
Checks whether the domain name ends with ".llnl.gov".
"""
domain = socket.getfqdn().split('.')
return (len(domain) > 2 and domain[-2] == 'llnl' and domain[-1] == 'gov')
def is_nersc_center():
"""Current system is operated by the National Energy Research
Scientific Computing Center at Lawrence Berkeley National
Laboratory.
Checks whether the environment variable NERSC_HOST is set.
"""
return bool(os.getenv('NERSC_HOST'))
def is_olcf_center():
"""Current system is operated by the Oak Ridge Leadership
Computing Facility at Oak Ridge National Laboratory.
Checks whether the domain name ends with ".ornl.gov".
(The check on the OLCF_MODULEPATH_ROOT environment variable is currently disabled.)
"""
domain = socket.getfqdn().split('.')
return (len(domain) > 2 and domain[-2] == 'ornl' and domain[-1] == 'gov')
# return bool(os.getenv('OLCF_MODULEPATH_ROOT'))
def is_riken_center():
"""Current system is operated by RIKEN.
Checks if the system is using a Fujitsu compiler
"""
return bool(os.getenv('FJSVXTCLANGA'))
# Detect compute center and choose launcher
_center = 'unknown'
launcher = lbann.launcher
if is_lc_center():
_center = 'lc'
import lbann.contrib.lc.systems
if lbann.contrib.lc.systems.is_lc_system():
import lbann.contrib.lc.launcher
launcher = lbann.contrib.lc.launcher
elif is_nersc_center():
_center = 'nersc'
import lbann.contrib.nersc.systems
if lbann.contrib.nersc.systems.is_nersc_system():
import lbann.contrib.nersc.launcher
launcher = lbann.contrib.nersc.launcher
elif is_olcf_center():
_center = 'olcf'
import lbann.contrib.olcf.systems
if lbann.contrib.olcf.systems.is_olcf_system():
import lbann.contrib.olcf.launcher
launcher = lbann.contrib.olcf.launcher
elif is_riken_center():
_center = 'riken'
import lbann.contrib.riken.systems
if lbann.contrib.riken.systems.is_riken_system():
import lbann.contrib.riken.launcher
launcher = lbann.contrib.riken.launcher
def METHOD_NAME():
"""Name of organization that operates current system."""
return _center
# ==============================================
# Launcher functions
# ==============================================
def run(
trainer,
model,
data_reader,
optimizer,
lbann_exe=lbann.lbann_exe(),
lbann_args=[],
procs_per_trainer=None,
overwrite_script=False,
setup_only=False,
batch_job=False,
proto_file_name=None,
nvprof=False,
nvprof_output_name=None,
binary_protobuf=False,
*args,
**kwargs,
):
"""Run LBANN with system-specific optimizations.
This is intended to match the behavior of `lbann.run`, with
defaults and optimizations for the current system. See that
function for a full list of options.
"""
# Create batch script generator
script = make_batch_script(*args, **kwargs)
# Batch script prints start time
script.add_command('echo "Started at $(date)"')
# Set default file name and extension
if proto_file_name is None:
proto_file_name = ('experiment.protobin'
if binary_protobuf else 'experiment.prototext')
# Batch script invokes LBANN
lbann_command = [lbann_exe]
if nvprof:
lbann_command = nvprof_command(
work_dir=script.work_dir,
output_name=nvprof_output_name) + lbann_command
lbann_command.extend(make_iterable(lbann_args))
proto_file = os.path.join(script.work_dir, proto_file_name)
lbann.proto.save_prototext(proto_file,
binary=binary_protobuf,
trainer=trainer,
model=model,
data_reader=data_reader,
optimizer=optimizer)
lbann_command.append('--prototext={}'.format(proto_file))
if procs_per_trainer is not None:
lbann_command.append(f'--procs_per_trainer={procs_per_trainer}')
script.add_parallel_command(lbann_command)
script.add_command('status=$?')
# Batch script prints finish time and returns status
script.add_command('echo "Finished at $(date)"')
script.add_command('exit ${status}')
# Write, run, or submit batch script
status = 0
if setup_only:
script.write(overwrite=overwrite_script)
elif batch_job:
status = script.submit(overwrite=overwrite_script)
else:
status = script.run(overwrite=overwrite_script)
return status
def make_batch_script(*args, **kwargs):
"""Construct batch script manager with system-specific optimizations.
This is intended to match the behavior of
`lbann.launcher.make_batch_script`, with defaults and
optimizations for the current system.
"""
return launcher.make_batch_script(*args, **kwargs)
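# --- Hedged usage sketch ---
# The call pattern this module is meant to support (module path assumed;
# `trainer`, `model`, `data_reader`, and `optimizer` are ordinary LBANN Python
# front-end objects constructed elsewhere):
#
#     from lbann.contrib import launcher as contrib_launcher
#     contrib_launcher.run(trainer, model, data_reader, optimizer,
#                          batch_job=True)
#
# `run` saves the experiment prototext into the generated work directory and
# then runs or submits the batch script appropriate for the detected center.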
|
2,353 |
test recursive inst
|
import cPickle
import unittest
from cStringIO import StringIO
from test.pickletester import AbstractPickleTests, AbstractPickleModuleTests
from test import test_support
class ApproxFloat(unittest.TestCase):
# FIXME for Jython: remove this class - and its use from bases in
# subsequent test classes - when we can guarantee that floats that
# are pickled by cPickle are exact in the same way they are on
# CPython
def test_float(self):
from test.pickletester import protocols
test_values = [0.0, 4.94e-324, 1e-310, 7e-308, 6.626e-34, 0.1, 0.5,
3.14, 263.44582062374053, 6.022e23, 1e30]
test_values = test_values + [-x for x in test_values]
for proto in protocols:
for value in test_values:
pickle = self.dumps(value, proto)
got = self.loads(pickle)
self.assertAlmostEqual(value, got)
class cPickleTests(ApproxFloat, AbstractPickleTests, AbstractPickleModuleTests):
def setUp(self):
self.dumps = cPickle.dumps
self.loads = cPickle.loads
error = cPickle.BadPickleGet
module = cPickle
@unittest.skipIf(test_support.is_jython, "FIXME: not working on Jython")
def test_callapi(self):
pass
@unittest.skipIf(test_support.is_jython, "FIXME: not working on Jython")
def test_dynamic_class(self):
pass
class cPicklePicklerTests(ApproxFloat, AbstractPickleTests):
def dumps(self, arg, proto=0):
f = StringIO()
p = cPickle.Pickler(f, proto)
p.dump(arg)
f.seek(0)
return f.read()
def loads(self, buf):
f = StringIO(buf)
p = cPickle.Unpickler(f)
return p.load()
error = cPickle.BadPickleGet
@unittest.skipIf(test_support.is_jython, "FIXME: not working on Jython")
def test_dynamic_class(self):
pass
class cPickleListPicklerTests(AbstractPickleTests):
def dumps(self, arg, proto=0):
p = cPickle.Pickler(proto)
p.dump(arg)
return p.getvalue()
def loads(self, *args):
f = StringIO(args[0])
p = cPickle.Unpickler(f)
return p.load()
error = cPickle.BadPickleGet
class cPickleFastPicklerTests(ApproxFloat, AbstractPickleTests):
def dumps(self, arg, proto=0):
f = StringIO()
p = cPickle.Pickler(f, proto)
p.fast = 1
p.dump(arg)
f.seek(0)
return f.read()
def loads(self, *args):
f = StringIO(args[0])
p = cPickle.Unpickler(f)
return p.load()
error = cPickle.BadPickleGet
def test_recursive_list(self):
self.assertRaises(ValueError,
AbstractPickleTests.test_recursive_list,
self)
def METHOD_NAME(self):
self.assertRaises(ValueError,
AbstractPickleTests.METHOD_NAME,
self)
def test_recursive_dict(self):
self.assertRaises(ValueError,
AbstractPickleTests.test_recursive_dict,
self)
def test_recursive_multi(self):
self.assertRaises(ValueError,
AbstractPickleTests.test_recursive_multi,
self)
def test_nonrecursive_deep(self):
# If it's not cyclic, it should pickle OK even if the nesting
# depth exceeds PY_CPICKLE_FAST_LIMIT. That happens to be
# 50 today. Jack Jansen reported stack overflow on Mac OS 9
# at 64.
a = []
for i in range(60):
a = [a]
b = self.loads(self.dumps(a))
self.assertEqual(a, b)
@unittest.skipIf(test_support.is_jython, "FIXME: not working on Jython")
def test_dynamic_class(self):
pass
def test_main():
tests = [
cPickleTests,
cPicklePicklerTests,
cPickleListPicklerTests,
cPickleFastPicklerTests
]
if test_support.is_jython:
# FIXME Jython currently doesn't support list based picklers
tests.remove(cPickleListPicklerTests)
# FIXME these cause NullPointerException on Jython
del cPickleFastPicklerTests.test_recursive_list
del cPickleFastPicklerTests.METHOD_NAME
del cPickleFastPicklerTests.test_recursive_dict
del cPickleFastPicklerTests.test_recursive_multi
test_support.run_unittest(*tests)
if __name__ == "__main__":
test_main()
|
2,354 |
id
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetSecretRotationResult',
'AwaitableGetSecretRotationResult',
'get_secret_rotation',
'get_secret_rotation_output',
]
@pulumi.output_type
class GetSecretRotationResult:
"""
A collection of values returned by getSecretRotation.
"""
def __init__(__self__, METHOD_NAME=None, rotation_enabled=None, rotation_lambda_arn=None, rotation_rules=None, secret_id=None):
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", METHOD_NAME)
if rotation_enabled and not isinstance(rotation_enabled, bool):
raise TypeError("Expected argument 'rotation_enabled' to be a bool")
pulumi.set(__self__, "rotation_enabled", rotation_enabled)
if rotation_lambda_arn and not isinstance(rotation_lambda_arn, str):
raise TypeError("Expected argument 'rotation_lambda_arn' to be a str")
pulumi.set(__self__, "rotation_lambda_arn", rotation_lambda_arn)
if rotation_rules and not isinstance(rotation_rules, list):
raise TypeError("Expected argument 'rotation_rules' to be a list")
pulumi.set(__self__, "rotation_rules", rotation_rules)
if secret_id and not isinstance(secret_id, str):
raise TypeError("Expected argument 'secret_id' to be a str")
pulumi.set(__self__, "secret_id", secret_id)
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="rotationEnabled")
def rotation_enabled(self) -> bool:
"""
Whether rotation is enabled for this secret.
"""
return pulumi.get(self, "rotation_enabled")
@property
@pulumi.getter(name="rotationLambdaArn")
def rotation_lambda_arn(self) -> str:
"""
ARN of the Lambda function used to rotate the secret.
"""
return pulumi.get(self, "rotation_lambda_arn")
@property
@pulumi.getter(name="rotationRules")
def rotation_rules(self) -> Sequence['outputs.GetSecretRotationRotationRuleResult']:
"""
Rotation rules configured for the secret.
"""
return pulumi.get(self, "rotation_rules")
@property
@pulumi.getter(name="secretId")
def secret_id(self) -> str:
return pulumi.get(self, "secret_id")
class AwaitableGetSecretRotationResult(GetSecretRotationResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetSecretRotationResult(
METHOD_NAME=self.METHOD_NAME,
rotation_enabled=self.rotation_enabled,
rotation_lambda_arn=self.rotation_lambda_arn,
rotation_rules=self.rotation_rules,
secret_id=self.secret_id)
def get_secret_rotation(secret_id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSecretRotationResult:
"""
Retrieve information about a Secrets Manager secret rotation. To retrieve secret metadata, see the `secretsmanager.Secret` data source. To retrieve a secret value, see the `secretsmanager.SecretVersion` data source.
## Example Usage
### Retrieve Secret Rotation Configuration
```python
import pulumi
import pulumi_aws as aws
example = aws.secretsmanager.get_secret_rotation(secret_id=data["aws_secretsmanager_secret"]["example"]["id"])
```
:param str secret_id: Specifies the secret containing the version that you want to retrieve. You can specify either the ARN or the friendly name of the secret.
"""
__args__ = dict()
__args__['secretId'] = secret_id
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('aws:secretsmanager/getSecretRotation:getSecretRotation', __args__, opts=opts, typ=GetSecretRotationResult).value
return AwaitableGetSecretRotationResult(
METHOD_NAME=pulumi.get(__ret__, 'id'),
rotation_enabled=pulumi.get(__ret__, 'rotation_enabled'),
rotation_lambda_arn=pulumi.get(__ret__, 'rotation_lambda_arn'),
rotation_rules=pulumi.get(__ret__, 'rotation_rules'),
secret_id=pulumi.get(__ret__, 'secret_id'))
@_utilities.lift_output_func(get_secret_rotation)
def get_secret_rotation_output(secret_id: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetSecretRotationResult]:
"""
Retrieve information about a Secrets Manager secret rotation. To retrieve secret metadata, see the `secretsmanager.Secret` data source. To retrieve a secret value, see the `secretsmanager.SecretVersion` data source.
## Example Usage
### Retrieve Secret Rotation Configuration
```python
import pulumi
import pulumi_aws as aws
example = aws.secretsmanager.get_secret_rotation(secret_id=data["aws_secretsmanager_secret"]["example"]["id"])
```
:param str secret_id: Specifies the secret containing the version that you want to retrieve. You can specify either the ARN or the friendly name of the secret.
"""
...
|
2,355 |
test check events warnings null duration
|
"""
Test the design_matrix utilities.
Note that the tests just look whether the data produced has correct dimension,
not whether it is exact.
"""
import os
import numpy as np
import pandas as pd
import pytest
from numpy.testing import assert_array_equal
from nilearn._utils.data_gen import basic_paradigm
from nilearn.glm.first_level import check_events
from ._utils import (
block_paradigm,
design_with_nan_durations,
design_with_nan_onsets,
design_with_null_durations,
duplicate_events_paradigm,
modulated_block_paradigm,
modulated_event_paradigm,
)
def test_check_events():
events = basic_paradigm()
trial_type, _, _, modulation = check_events(events)
# Check that given trial type is right
assert_array_equal(
trial_type, ["c0", "c0", "c0", "c1", "c1", "c1", "c2", "c2", "c2"]
)
# Check that missing modulation yields an array of ones
assert_array_equal(modulation, np.ones(len(events)))
# Modulation is provided
events["modulation"] = np.ones(len(events))
_, _, _, mod = check_events(events)
assert_array_equal(mod, events["modulation"])
def test_check_events_errors():
"""Test the function which tests that the events
data describes a valid experimental paradigm.
"""
events = basic_paradigm()
# Error checking
# Wrong type
with pytest.raises(
TypeError, match="Events should be a Pandas DataFrame."
):
check_events([])
# Missing onset
missing_onset = events.drop(columns=["onset"])
with pytest.raises(
ValueError, match="The provided events data has no onset column."
):
check_events(missing_onset)
# Missing duration
missing_duration = events.drop(columns=["duration"])
with pytest.raises(
ValueError, match="The provided events data has no duration column."
):
check_events(missing_duration)
# Duration wrong type
wrong_duration = events.copy()
wrong_duration["duration"] = "foo"
with pytest.raises(ValueError, match="Could not cast duration to float"):
check_events(wrong_duration)
def test_check_events_warnings():
"""Test the function which tests that the events
data describes a valid experimental paradigm.
"""
events = basic_paradigm()
# Warning checking
# Missing trial type
events = events.drop(columns=["trial_type"])
with pytest.warns(UserWarning, match="'trial_type' column not found"):
trial_type, onset, duration, modulation = check_events(events)
# Check that missing trial type yields a 'dummy' array
assert len(np.unique(trial_type)) == 1
assert trial_type[0] == "dummy"
# An unexpected field is provided
events["foo"] = np.zeros(len(events))
with pytest.warns(
UserWarning,
match=(
"The following unexpected columns "
"in events data will be ignored: foo"
),
):
trial_type2, onset2, duration2, modulation2 = check_events(events)
assert_array_equal(trial_type, trial_type2)
assert_array_equal(onset, onset2)
assert_array_equal(duration, duration2)
assert_array_equal(modulation, modulation2)
def test_duplicate_events():
"""Test the function check_events when the paradigm contains
duplicate events.
"""
events = duplicate_events_paradigm()
# Check that a warning is given to the user
with pytest.warns(UserWarning, match="Duplicated events were detected."):
trial_type, onset, duration, modulation = check_events(events)
assert_array_equal(trial_type, ["c0", "c0", "c0", "c1", "c1"])
assert_array_equal(onset, [10, 30, 70, 10, 30])
assert_array_equal(duration, [1.0, 1.0, 1.0, 1.0, 1.0])
# Modulation was updated
assert_array_equal(modulation, [1, 1, 2, 1, 1])
def write_events(events, tmpdir):
"""Function to write events of an experimental paradigm
to a file and return its path.
"""
tsvfile = os.path.join(tmpdir, "events.tsv")
events.to_csv(tsvfile, sep="\t")
return tsvfile
@pytest.mark.parametrize(
"events",
[
block_paradigm(),
modulated_event_paradigm(),
modulated_block_paradigm(),
basic_paradigm(),
],
)
def test_read_events(events, tmp_path):
"""Test that a events for an experimental paradigm are correctly read."""
csvfile = write_events(events, tmp_path)
read_paradigm = pd.read_table(csvfile)
assert (read_paradigm["onset"] == events["onset"]).all()
def METHOD_NAME():
"""Test that events with null duration throw a warning."""
with pytest.warns(
UserWarning,
match="The following conditions contain events with null duration",
):
check_events(design_with_null_durations())
@pytest.mark.parametrize(
"design",
[
design_with_nan_durations,
design_with_nan_onsets,
],
)
def test_check_events_nan_designs(design):
"""Test that events with nan values."""
with pytest.raises(
ValueError, match=("The following column must not contain nan values:")
):
check_events(design())
|
2,356 |
decode
|
#!/usr/bin/env python3
import argparse
import os
import re
import shlex
import sys
from subprocess import run, SubprocessError, DEVNULL, PIPE
from tempfile import NamedTemporaryFile
DESC = """
A `csmith` fuzzing driver for `bindgen`.
Generates random C source files with `csmith` and then passes them to `bindgen`
(via `predicate.py`). If `bindgen` can't emit bindings, `rustc` can't compile
those bindings, or the compiled bindings' layout tests fail, then the driver has
found a bug, and will report the problematic test case to you.
"""
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=DESC.strip())
parser.add_argument(
"--keep-going",
action="store_true",
help="Do not stop after finding a test case that exhibits a bug in `bindgen`. Instead, keep going.")
CSMITH_ARGS="\
--no-checksum \
--nomain \
--max-block-size 1 \
--max-block-depth 1"
parser.add_argument(
"--csmith-args",
type=str,
default=CSMITH_ARGS,
help="Pass this argument string to `csmith`. By default, very small functions are generated.")
BINDGEN_ARGS = "--with-derive-partialeq \
--with-derive-eq \
--with-derive-partialord \
--with-derive-ord \
--with-derive-hash \
--with-derive-default"
parser.add_argument(
"--bindgen-args",
type=str,
default=BINDGEN_ARGS,
help="Pass this argument string to `bindgen`. By default, all traits are derived.")
parser.add_argument(
"--no-creduce",
action="store_false",
dest="creduce",
help="Do not run `creduce` on any buggy test case(s) discovered.")
################################################################################
def cat(path, title=None):
if not title:
title = path
print("-------------------- {} --------------------".format(title))
print()
print()
run(["cat", path])
def METHOD_NAME(f):
return f.METHOD_NAME(encoding="utf-8", errors="ignore")
def run_logged(cmd):
result = run(cmd, stdin=DEVNULL, stdout=PIPE, stderr=PIPE)
result.stdout = METHOD_NAME(result.stdout)
result.stderr = METHOD_NAME(result.stderr)
if result.returncode != 0:
print()
print()
print("Error: {} exited with code {}".format(cmd, result.returncode))
print()
print()
for line in result.stdout.splitlines():
sys.stdout.write("+")
sys.stdout.write(line)
sys.stdout.write("\n")
for line in result.stderr.splitlines():
sys.stderr.write("+")
sys.stderr.write(line)
sys.stderr.write("\n")
return result
def main():
os.environ["RUST_BACKTRACE"] = "full"
args = parser.parse_args()
bindgen_args = args.bindgen_args
if bindgen_args.find(" -- ") == -1:
bindgen_args = bindgen_args + " -- "
bindgen_args = bindgen_args + " -I{}".format(os.path.abspath(os.path.dirname(sys.argv[0])))
args.bindgen_args = bindgen_args
print()
print()
print("Fuzzing `bindgen` with C-Smith...")
print()
print()
iterations = 0
while True:
print("\rIteration: {}".format(iterations), end="", flush=True)
iterations += 1
input = NamedTemporaryFile(delete=False, prefix="input-", suffix=".h")
input.close()
result = run_logged(["csmith", "-o", input.name] + shlex.split(args.csmith_args))
if result.returncode != 0:
exit(1)
predicate_command = [
"./predicate.py",
"--bindgen-args",
args.bindgen_args,
input.name
]
result = run_logged(predicate_command)
if result.returncode != 0:
print()
print()
cat(input.name, title="Failing test case: {}".format(input.name))
print()
print()
if args.creduce:
creduce(args, input.name, result)
print_issue_template(args, input.name, predicate_command, result)
if args.keep_going:
continue
exit(1)
os.remove(input.name)
RUSTC_ERROR_REGEX = re.compile(r".*(error\[.*].*)")
LAYOUT_TEST_FAILURE = re.compile(r".*(test bindgen_test_layout_.* \.\.\. FAILED)")
def creduce(args, failing_test_case, result):
print()
print()
print("Reducing failing test case with `creduce`...")
match = re.search(RUSTC_ERROR_REGEX, result.stderr)
if match:
error_msg = match.group(1)
print("...searching for \"{}\".".format(error_msg))
return creduce_with_predicate_flags(
args,
failing_test_case,
"--bindgen-args '{}' --expect-compile-fail --rustc-grep '{}'".format(
args.bindgen_args,
re.escape(error_msg)
)
)
match = re.search(LAYOUT_TEST_FAILURE, result.stdout)
if match:
layout_failure = match.group(1)
struct_name = layout_failure[len("test bindgen_test_layout_"):layout_failure.rindex(" ... FAILED")]
print("...searching for \"{}\".".format(layout_failure))
return creduce_with_predicate_flags(
args,
failing_test_case,
"--bindgen-args '{}' --expect-layout-tests-fail --bindings-grep '{}' --layout-tests-grep '{}'".format(
args.bindgen_args,
re.escape(struct_name),
re.escape(layout_failure)
)
)
print("...nevermind, don't know how to `creduce` this bug. Skipping.")
def creduce_with_predicate_flags(args, failing_test_case, predicate_flags):
predicate = """
#!/usr/bin/env bash
set -eu
{} {} {}
""".format(
os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "predicate.py")),
predicate_flags,
os.path.basename(failing_test_case)
)
print("...and reducing with this script:")
print()
print()
print(predicate)
print()
print()
predicate_path = failing_test_case + ".predicate.sh"
with open(predicate_path, "w") as p:
p.write(predicate)
os.chmod(predicate_path, 0o755)
creduce_command = ["creduce", "--n", str(os.cpu_count()), predicate_path, failing_test_case]
print("Running:", creduce_command)
result = run(creduce_command)
if result.returncode == 0:
print()
print()
print("`creduce` reduced the failing test case to:")
print()
print()
cat(failing_test_case)
print()
print()
else:
print()
print()
print("`creduce` failed!")
if not args.keep_going:
sys.exit(1)
def print_issue_template(args, failing_test_case, predicate_command, result):
test_case_contents = None
with open(failing_test_case, "r") as f:
test_case_contents = f.read()
print("""
! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! !
! File this issue at https://github.com/rust-lang/rust-bindgen/issues/new !
! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! !
--------------- 8< --------------- 8< --------------- 8< ---------------
This bug was found with `csmith` and `driver.py`.
### Input Header
```c
{}
```
### `bindgen` Invocation
```
$ {}
```
### Actual Results
<details>
```
{}
```
</details>
### Expected Results
`bindgen` emits bindings OK, then `rustc` compiles those bindings OK, then the
compiled bindings' layout tests pass OK.
--------------- 8< --------------- 8< --------------- 8< ---------------
<3 <3 <3 Thank you! <3 <3 <3
""".format(
test_case_contents,
" ".join(map(lambda s: "'{}'".format(s), predicate_command)),
result.stdout + result.stderr
))
if __name__ == "__main__":
try:
os.chdir(os.path.abspath(os.path.dirname(sys.argv[0])))
main()
except KeyboardInterrupt:
exit()
|
2,357 |
split
|
from typing import *
from typing_extensions import Literal
import pandas as pd
pi: float
def array(x: Union[Sequence[Any],
Iterable[Any],
ndarray], **kwargs) -> ndarray: ...
class int32: ...
class float32: ...
class ndarray(List):
shape: Sequence[int]
T: 'ndarray'
ndim: int
def __setitem__(self, idx, value) -> None: ...
def __len__(self) -> int: ...
def __iter__(self) -> Iterator: ...
def __invert__(self) -> 'ndarray': ...
    def __eq__(self, other: Any) -> ndarray[bool]: ... # type: ignore # already defined in typeshed - does not like being overridden
    def __ne__(self, other: Any) -> ndarray[bool]: ... # type: ignore # already defined in typeshed - does not like being overridden
    def __lt__(self, other: Any) -> ndarray[bool]: ... # type: ignore # already defined in typeshed - does not like being overridden
    def __le__(self, other: Any) -> ndarray[bool]: ... # type: ignore # already defined in typeshed - does not like being overridden
    def __gt__(self, other: Any) -> ndarray[bool]: ... # type: ignore # already defined in typeshed - does not like being overridden
    def __ge__(self, other: Any) -> ndarray[bool]: ... # type: ignore # already defined in typeshed - does not like being overridden
    def __div__(self, other: Any) -> ndarray[bool]: ... # type: ignore # already defined in typeshed - does not like being overridden
    def __truediv__(self, other: Any) -> ndarray[bool]: ... # type: ignore # already defined in typeshed - does not like being overridden
    def __mul__(self, other: Any) -> ndarray[bool]: ... # type: ignore # already defined in typeshed - does not like being overridden
    def __imul__(self, other: Any) -> ndarray[bool]: ... # type: ignore # already defined in typeshed - does not like being overridden
    def __sub__(self, other: Any) -> ndarray[bool]: ... # type: ignore # already defined in typeshed - does not like being overridden
    def __rsub__(self, other: Any) -> ndarray[bool]: ... # type: ignore # already defined in typeshed - does not like being overridden
    def __add__(self, other: Any) -> ndarray[bool]: ... # type: ignore # already defined in typeshed - does not like being overridden
    def __pow__(self, other: Any) -> ndarray[bool]: ... # type: ignore # already defined in typeshed - does not like being overridden
    def __or__(self, other: Any) -> ndarray[bool]: ... # type: ignore # already defined in typeshed - does not like being overridden
    def __xor__(self, other: Any) -> ndarray[bool]: ... # type: ignore # already defined in typeshed - does not like being overridden
    def __and__(self, other: Any) -> ndarray[bool]: ... # type: ignore # already defined in typeshed - does not like being overridden
# can't really overload this as depends on dimensions of array
    def __getitem__(self, # type: ignore # already defined in typeshed - does not like being overridden
key: Union[int, slice,
Sequence[Union[int, bool]],
Iterable[Union[int, bool]],
Tuple[Union[slice, int,
Iterable[Union[int, bool]]],
Union[slice, int,
Iterable[Union[int, bool]]]],
]) -> Union['ndarray', str, int, bool]: ...
def tolist(self) -> List: ...
@overload
def sum(self, axis: Literal[None] = ..., **kwargs) -> float: ...
@overload
def sum(self, axis: int, **kwargs) -> ndarray: ...
@overload
def min(self, axis: Literal[None] = ..., **kwargs) -> float: ...
@overload
def min(self, axis: int, **kwargs) -> ndarray: ...
@overload
def max(self, axis: Literal[None] = ..., **kwargs) -> float: ...
@overload
def max(self, axis: int, **kwargs) -> ndarray: ...
@overload
def mean(self, axis: Literal[None] = ..., **kwargs) -> float: ...
@overload
def mean(self, axis: int, **kwargs) -> ndarray: ...
@overload
def any(self, axis: Literal[None] = ..., **kwargs) -> float: ...
@overload
def any(self, axis: int, **kwargs) -> ndarray: ...
@overload
def astype(self, ty: Literal[str], **kwargs) -> 'ndarray[str]': ...
@overload
def astype(self, ty: Literal[int], **kwargs) -> 'ndarray[int]': ...
@overload
def astype(self, ty: Literal[float], **kwargs) -> 'ndarray[float]': ...
@overload
def astype(self, ty: Literal[bool], **kwargs) -> 'ndarray[bool]': ...
def reshape(self, shape: Tuple, **kwargs) -> 'ndarray': ...
def linspace(*args, **kwargs) -> ndarray: ...
def insert(*args, **kwargs) -> ndarray: ...
def diff(*args, **kwargs) -> ndarray: ...
def cumsum(*args, **kwargs) -> ndarray: ...
def sum(*args, **kwargs) -> ndarray: ...
@overload
def nansum(*args, axis: Literal[None], **kwargs) -> float: ...
@overload
def nansum(*args, axis: int, **kwargs) -> ndarray: ...
def min(*args, **kwargs) -> ndarray: ...
def max(*args, **kwargs) -> ndarray: ...
def mean(*args, **kwargs) -> ndarray: ...
def exp(*args, **kwargs) -> ndarray: ...
def sqrt(*args, **kwargs) -> ndarray: ...
def vstack(*args, **kwargs) -> ndarray: ...
def zeros(*args, **kwargs) -> ndarray: ...
def round(*args, **kwargs) -> ndarray: ...
def unique(*args, **kwargs) -> ndarray: ...
def any(*args, **kwargs) -> ndarray: ...
def append(*args, **kwargs) -> ndarray: ...
def empty(*args, **kwargs) -> ndarray: ...
def isnan(*args, **kwargs) -> ndarray: ...
def arange(*args, **kwargs) -> ndarray: ...
def argmin(*args, **kwargs) -> ndarray: ...
def remainder(*args, **kwargs) -> ndarray: ...
def METHOD_NAME(*args, **kwargs) -> ndarray: ...
def sin(*args, **kwargs) -> ndarray: ...
def cos(*args, **kwargs) -> ndarray: ...
def ones(*args, **kwargs) -> ndarray: ...
def outer(*args, **kwargs) -> ndarray: ...
def size(*args, **kwargs) -> ndarray: ...
def where(*args, **kwargs) -> ndarray: ...
|
2,358 |
test instance of operation
|
from __future__ import annotations
from typing import Optional
import pytest
import ibis
import ibis.expr.datashape as ds
import ibis.expr.datatypes as dt
import ibis.expr.operations as ops
import ibis.expr.rules as rlz
import ibis.expr.types as ir
from ibis.common.annotations import ValidationError
from ibis.common.patterns import EqualTo
t = ibis.table([("a", "int64")], name="t")
true = ir.literal(True)
false = ir.literal(False)
two = ir.literal(2)
three = ir.literal(3)
class Expr:
def __init__(self, op):
self.op = op
class Base(ops.Node):
def to_expr(self):
return Expr(self)
class Name(Base):
name: str
class NamedValue(Base):
value: int
name: Name
class Values(Base):
lst: tuple[ops.Node, ...]
one = NamedValue(value=1, name=Name("one"))
two = NamedValue(value=2, name=Name("two"))
three = NamedValue(value=3, name=Name("three"))
values = Values((one, two, three))
def test_node_base():
assert hasattr(one, "__slots__")
assert not hasattr(one, "__dict__")
assert one.__args__ == (1, Name("one"))
assert values.__args__ == ((one, two, three),)
calls = []
returns = {
Name("one"): "Name_one",
Name("two"): "Name_two",
Name("three"): "Name_three",
NamedValue(1, Name("one")): "NamedValue_1_one",
NamedValue(2, Name("two")): "NamedValue_2_two",
NamedValue(3, Name("three")): "NamedValue_3_three",
values: "final",
}
def record(node, _, *args, **kwargs):
calls.append((node, args, kwargs))
return returns[node]
results = values.map(record)
assert results == returns
assert calls == [
(Name("one"), (), {"name": "one"}),
(Name("two"), (), {"name": "two"}),
(Name("three"), (), {"name": "three"}),
(one, (), {"value": 1, "name": "Name_one"}),
(two, (), {"value": 2, "name": "Name_two"}),
(three, (), {"value": 3, "name": "Name_three"}),
(
values,
(),
{"lst": ("NamedValue_1_one", "NamedValue_2_two", "NamedValue_3_three")},
),
]
def test_node_substitution():
class Aliased(Base):
arg: ops.Node
name: str
ketto = Aliased(one, "ketto")
first_rule = EqualTo(Name("one")) >> Name("zero")
second_rule = EqualTo(two) >> ketto
new_values = values.replace(first_rule | second_rule)
expected = Values((NamedValue(value=1, name=Name("zero")), ketto, three))
assert expected == new_values
def test_value_annotations():
class Op1(ops.Value):
arg: ops.Value
dtype = dt.int64
shape = ds.scalar
class Op2(ops.Value):
arg: ops.Value[dt.Any, ds.Any]
dtype = dt.int64
shape = ds.scalar
class Op3(ops.Value):
arg: ops.Value[dt.Integer, ds.Any]
dtype = dt.int64
shape = ds.scalar
class Op4(ops.Value):
arg: ops.Value[dt.Integer, ds.Scalar]
dtype = dt.int64
shape = ds.scalar
assert Op1(1).arg.dtype == dt.int8
assert Op2(1).arg.dtype == dt.int8
assert Op3(1).arg.dtype == dt.int8
assert Op4(1).arg.dtype == dt.int8
def test_operation_definition():
class Logarithm(ir.Expr):
pass
class Log(ops.Node):
arg: ops.Value[dt.Float64, ds.Any]
base: Optional[ops.Value[dt.Float64, ds.Any]] = None
def to_expr(self):
return Logarithm(self)
assert Log(1, base=2).arg == ops.Literal(1, dtype=dt.float64)
assert Log(1, base=2).base == ops.Literal(2, dtype=dt.float64)
assert Log(arg=10).arg == ops.Literal(10, dtype=dt.float64)
assert Log(arg=10).base is None
assert isinstance(Log(arg=100).to_expr(), Logarithm)
def METHOD_NAME():
class MyOperation(ops.Node):
arg: ir.IntegerValue
def to_expr(self):
return ir.IntegerScalar(self)
MyOperation(ir.literal(5))
with pytest.raises(ValidationError):
MyOperation(ir.literal("string"))
def test_array_input():
class MyOp(ops.Value):
value: ops.Value[dt.Array[dt.Float64], ds.Any]
dtype = rlz.dtype_like("value")
shape = rlz.shape_like("value")
raw_value = [1.0, 2.0, 3.0]
op = MyOp(raw_value)
expected = ibis.literal(raw_value)
assert op.value == expected.op()
def test_custom_table_expr():
class MyTable(ir.Table):
pass
class SpecialTable(ops.UnboundTable):
def to_expr(self):
return MyTable(self)
node = SpecialTable(name="foo", schema=ibis.schema([("a", "int64")]))
expr = node.to_expr()
assert isinstance(expr, MyTable)
def test_too_many_or_too_few_args_not_allowed():
class DummyOp(ops.Value):
arg: ops.Value
with pytest.raises(TypeError):
DummyOp(1, 2)
with pytest.raises(TypeError):
DummyOp()
def test_getitem_on_column_is_error():
t = ibis.table(dict(a="int"))
with pytest.raises(TypeError, match="#ibis-for-pandas-users"):
t.a[0]
with pytest.raises(TypeError, match="#ibis-for-pandas-users"):
t.a[:1]
def test_operation_class_aliases():
assert ops.ValueOp is ops.Value
assert ops.UnaryOp is ops.Unary
assert ops.BinaryOp is ops.Binary
assert ops.WindowOp is ops.Window
assert ops.AnalyticOp is ops.Analytic
def test_expression_class_aliases():
assert ir.TableExpr is ir.Table
assert ir.ValueExpr is ir.Value
assert ir.ScalarExpr is ir.Scalar
assert ir.ColumnExpr is ir.Column
assert ir.AnyValue is ir.Value
assert ir.AnyScalar is ir.Scalar
assert ir.AnyColumn is ir.Column
|
2,359 |
checkpoint
|
# Copyright 2022 The DLRover Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import time
from dlrover.python.common.log import default_logger as logger
from dlrover.python.master.shard.base_dataset_manager import (
DatasetManger,
DatasetShardCheckpoint,
DoingTask,
Task,
)
from dlrover.python.master.shard.dataset_splitter import DatasetSplitter, Shard
_MAX_TASK_RETRIES = 3
class BatchDatasetManager(DatasetManger):
    """BatchDatasetManager creates tasks with shards in a static dataset.
Attributes:
task_type: the type of computation task like "training",
"evaluation" and "prediction".
batch_size: the size of a batch.
        dataset_splitter: DatasetSplitter instance to split the dataset
into shards.
"""
def __init__(
self,
task_type,
batch_size,
dataset_splitter: DatasetSplitter,
):
super(BatchDatasetManager, self).__init__(
task_type, batch_size, dataset_splitter
)
self._max_task_completed_time = 0
self._task_id = 0
self._completed_step = 0
def get_task(self, node_type, node_id) -> Task:
"""Return next Task"""
if not self.todo and not self._dataset_splitter.epoch_finished():
# Start a new epoch
# num_epochs <= 0 indicates that the master will create data
# shards infinitely. So, the worker can use the dataset like
# `dataset.repeat()`.
shards = self._dataset_splitter.create_shards()
self._create_todo_tasks(shards)
if not self.todo:
# No more tasks
return Task.create_invalid_task()
task: Task = self.todo.pop(0)
self.doing[task.task_id] = DoingTask(
task, node_type, node_id, int(time.time())
)
logger.info(
"Assign task %s of dataset %s to %s %s",
task.task_id,
self._dataset_splitter.dataset_name,
node_type,
node_id,
)
return task
def get_epoch(self):
return self._dataset_splitter.get_epoch()
def completed(self):
return (
self._dataset_splitter.epoch_finished()
and not self.todo
and not self.doing
)
def _create_todo_tasks(self, shards):
tasks = []
for shard in shards:
task = Task(self._task_id, self._task_type, shard)
tasks.append(task)
self._task_id += 1
logger.info(
"todo.extend: %d tasks created for dataset = %s.",
len(tasks),
self._dataset_splitter.dataset_name,
)
self.todo.extend(tasks)
def report_task_status(self, task_id, success):
doing_task = self.doing.pop(task_id, None)
if not doing_task:
logger.warning(
"Unknown task_id: %d of dataset %s"
% (task_id, self._dataset_splitter.dataset_name)
)
success = False
elif not success:
logger.warning(
"Task %d of %s failed "
% (task_id, self._dataset_splitter.dataset_name)
)
self.recover_task(doing_task.task)
else:
self._update_completed_step(doing_task.task)
logger.info(
"Task:%d completed, %d doing tasks and %d todo "
"tasks of dataset %s",
task_id,
len(self.doing),
len(self.todo),
self._dataset_splitter.dataset_name,
)
task_completed_time = time.time() - doing_task.start_time
if task_completed_time > self._max_task_completed_time:
self._max_task_completed_time = task_completed_time
return success, doing_task
def _update_completed_step(self, task: Task):
record_count = task.shard.end - task.shard.start
batch_count = math.ceil(record_count / self._batch_size)
self._completed_step += batch_count
self._latest_task_end_time = int(time.time())
def get_completed_step(self):
return self._completed_step
def recover_task(self, task):
if not self._check_exceed_max_task_retries(task):
self.todo.append(task)
def _check_exceed_max_task_retries(self, task: Task):
task.retry_count += 1
if task.retry_count > _MAX_TASK_RETRIES:
            logger.error(
                "Task %s failed with %d retries "
                % (task.shard.name, _MAX_TASK_RETRIES)
            )
return True
return False
def get_doing_tasks(self):
return self.doing
def METHOD_NAME(self):
todo_shards = []
for task in self.todo:
shard = [task.shard.start, task.shard.end]
if task.shard.record_indices:
shard.append(task.shard.record_indices)
todo_shards.append(shard)
doing_shards = []
for task_id in self.doing:
task = self.doing[task_id].task
shard = [task.shard.start, task.shard.end]
if task.shard.record_indices:
shard.append(task.shard.record_indices)
doing_shards.append(shard)
return DatasetShardCheckpoint(
dataset_name=self._dataset_splitter.dataset_name,
todo=todo_shards,
doing=doing_shards,
epoch=self._dataset_splitter.epoch,
)
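    # Added illustrative note (the values below are hypothetical): shards are
    # serialized as plain [start, end] or [start, end, record_indices] lists,
    # so a checkpoint may carry todo=[[0, 1000], [1000, 2000]], doing=[] and
    # epoch=1; restore_checkpoint() below turns such lists back into Task
    # objects.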
def restore_checkpoint(self, METHOD_NAME: DatasetShardCheckpoint):
"""Restore the task manager from a checkpoint"""
self._dataset_splitter.epoch = METHOD_NAME.epoch
self.todo = []
self.doing = {}
for shard_indices in METHOD_NAME.doing + METHOD_NAME.todo:
record_indices = None
if len(shard_indices) > 2:
record_indices = shard_indices[2]
shard = Shard(
name=self._dataset_splitter.dataset_name,
start=shard_indices[0],
end=shard_indices[1],
record_indices=record_indices,
)
self.todo.append(
Task(
self._task_id,
self._task_type,
shard,
)
)
self._task_id += 1
logger.info("Restore %s todo tasks", len(self.todo))
|
2,360 |
is packed layout
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument
"""Dense operator declaration and schedule registration for VTA."""
import numpy as np
import tvm
from tvm import te
from tvm import autotvm
from tvm import topi
from ..environment import get_env
def METHOD_NAME(layout):
"""Check if layout is packed layout"""
if layout == "NCHW":
return False
if "n" in layout and "c" in layout:
return True
return False
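# Added illustrative note (the layout strings are examples, not taken from this
# file): the plain "NCHW" layout is reported as not packed, while a blocked
# layout such as "NCHW1n16c" contains both an "n" and a "c" factor and is
# treated as packed.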
@autotvm.register_topi_compute("dense_packed.vta")
def dense_packed(cfg, data, weight, bias=None, out_dtype=None):
"""Dense function declaration."""
# Make sure that the dense operator is packed
if len(data.shape) != 4 or len(weight.shape) != 4:
raise topi.InvalidShapeError()
# Derive shapes
ishape = topi.utils.get_const_tuple(data.shape)
wshape = topi.utils.get_const_tuple(weight.shape)
oshape = (data.shape[0], weight.shape[0], data.shape[2], weight.shape[2])
# Reduction axes (input channel)
assert ishape[1] == wshape[1]
assert ishape[3] == wshape[3]
k_o = te.reduce_axis((0, ishape[1]), name="k_o")
k_i = te.reduce_axis((0, ishape[3]), name="k_i")
res = te.compute(
oshape,
lambda b_o, c_o, b_i, c_i: te.sum(
data[b_o, k_o, b_i, k_i].astype(out_dtype)
* weight[c_o, k_o, c_i, k_i].astype(out_dtype),
axis=[k_o, k_i],
),
name="res",
tag="dense_pack",
)
cfg.add_flop(2 * np.prod(topi.utils.get_const_tuple(oshape)) * ishape[1] * ishape[3])
return res
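# Added illustrative note (the blocking factors are assumed, not defined in
# this file): with BATCH=1, BLOCK_IN=16 and BLOCK_OUT=16, a dense of a
# (16, 256) input against a (512, 256) weight is packed as data (16, 16, 1, 16)
# and weight (32, 16, 16, 16), producing res with oshape (16, 32, 1, 16).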
@autotvm.register_topi_schedule("dense_packed.vta")
def schedule_dense_packed(cfg, outs):
"""Packed dense schedule."""
assert len(outs) == 1
output = outs[0]
const_ops = []
ewise_inputs = []
ewise_ops = []
dense_res = []
assert "int" in output.op.input_tensors[0].dtype
def _traverse(op):
if topi.tag.is_broadcast(op.tag):
if not op.same_as(output.op):
if not op.axis:
const_ops.append(op)
else:
ewise_ops.append(op)
for tensor in op.input_tensors:
if isinstance(tensor.op, tvm.te.PlaceholderOp):
ewise_inputs.append((op, tensor))
else:
_traverse(tensor.op)
else:
assert op.tag == "dense_pack"
dense_res.append(op)
_traverse(output.op)
assert len(dense_res) == 1
dense_stage = dense_res[0].output(0)
s = te.create_schedule(output.op)
##### space definition begin #####
b, c_o, _, _ = s[dense_stage].op.axis
c_i, _ = s[dense_stage].op.reduce_axis
cfg.define_split("tile_b", b, num_outputs=2)
cfg.define_split("tile_ci", c_i, num_outputs=2)
cfg.define_split("tile_co", c_o, num_outputs=2)
cfg.define_knob("oc_nthread", [1, 2])
###### space definition end ######
data, weight = dense_stage.op.input_tensors
env = get_env()
cdata = s.cache_read(data, env.inp_scope, [dense_stage])
cweight = s.cache_read(weight, env.wgt_scope, [dense_stage])
s[dense_stage].set_scope(env.acc_scope)
# cache read input
cache_read_ewise = []
for consumer, tensor in ewise_inputs:
cache_read_ewise.append(s.cache_read(tensor, env.acc_scope, [consumer]))
# set ewise scope
for op in ewise_ops:
s[op].set_scope(env.acc_scope)
s[op].pragma(s[op].op.axis[0], env.alu)
for op in const_ops:
s[op].compute_inline()
# apply tiling for SRAM reuse
x_b, x_c, _, _ = s[output].op.axis
x_bo, x_bi = cfg["tile_b"].apply(s, output, x_b)
x_co, x_ci = cfg["tile_co"].apply(s, output, x_c)
s[output].reorder(x_bo, x_co, x_bi, x_ci)
store_pt = x_co
# set all compute scopes
s[dense_stage].compute_at(s[output], store_pt)
for op in ewise_ops:
s[op].compute_at(s[output], store_pt)
for tensor in cache_read_ewise:
s[tensor].compute_at(s[output], store_pt)
s[tensor].pragma(s[tensor].op.axis[0], env.dma_copy)
# virtual threading along output channel axes
if cfg["oc_nthread"].val > 1:
_, v_t = s[output].split(x_co, factor=cfg["oc_nthread"].val)
s[output].reorder(v_t, x_bo)
s[output].bind(v_t, te.thread_axis("cthread"))
x_bo, x_co, x_bi, _ = s[dense_stage].op.axis
k_o, _ = s[dense_stage].op.reduce_axis
s[dense_stage].reorder(x_bo, k_o, x_co)
k_o, _ = cfg["tile_ci"].apply(s, dense_stage, k_o)
s[cdata].compute_at(s[dense_stage], k_o)
s[cweight].compute_at(s[dense_stage], k_o)
# Use VTA instructions
s[cdata].pragma(s[cdata].op.axis[0], env.dma_copy)
s[cweight].pragma(s[cweight].op.axis[0], env.dma_copy)
s[dense_stage].tensorize(x_bi, env.gemm)
s[output].pragma(x_ci, env.dma_copy)
return s
|
2,361 |
save cognition data
|
import os
from fastapi import UploadFile, File, Security, APIRouter
from starlette.responses import FileResponse
from kairon.api.models import Response, TextData, CognitiveDataRequest
from kairon.events.definitions.faq_importer import FaqDataImporterEvent
from kairon.shared.auth import Authentication
from kairon.shared.constants import DESIGNER_ACCESS
from kairon.shared.data.processor import MongoProcessor
from kairon.shared.models import User
from kairon.shared.utils import Utility
router = APIRouter()
processor = MongoProcessor()
@router.post("/faq/upload", response_model=Response)
def upload_faq_files(
csv_file: UploadFile = File(...),
overwrite: bool = True,
current_user: User = Security(Authentication.get_current_user_and_bot, scopes=DESIGNER_ACCESS),
):
"""
Uploads faq csv/excel file
"""
event = FaqDataImporterEvent(
current_user.get_bot(), current_user.get_user(), overwrite=overwrite
)
event.validate(training_data_file=csv_file)
event.enqueue()
return {"message": "Upload in progress! Check logs."}
@router.get("/faq/download", response_model=Response)
async def download_faq_files(
current_user: User = Security(Authentication.get_current_user_and_bot, scopes=DESIGNER_ACCESS),
):
"""
Downloads faq into csv file
"""
qna = list(processor.flatten_qna(bot=current_user.get_bot(), fetch_all=True))
file, _ = Utility.download_csv(qna, filename="faq.csv")
response = FileResponse(
file, filename=os.path.basename(file)
)
response.headers[
"Content-Disposition"
] = "attachment; filename=" + os.path.basename(file)
return response
@router.post("/text/faq", response_model=Response)
def save_bot_text(
text: TextData,
current_user: User = Security(Authentication.get_current_user_and_bot, scopes=DESIGNER_ACCESS),
):
"""
Saves text content into the bot
"""
return {
"message": "Text saved!",
"data": {
"_id": processor.save_content(
text.data,
current_user.get_user(),
current_user.get_bot(),
)
}
}
@router.put("/text/faq/{text_id}", response_model=Response)
def update_bot_text(
text_id: str,
text: TextData,
current_user: User = Security(Authentication.get_current_user_and_bot, scopes=DESIGNER_ACCESS),
):
"""
Updates text content into the bot
"""
return {
"message": "Text updated!",
"data": {
"_id": processor.update_content(
text_id,
text.data,
current_user.get_user(),
current_user.get_bot(),
)
}
}
@router.delete("/text/faq/{text_id}", response_model=Response)
def delete_bot_text(
text_id: str,
current_user: User = Security(Authentication.get_current_user_and_bot, scopes=DESIGNER_ACCESS),
):
"""
Deletes text content of the bot
"""
processor.delete_content(text_id, current_user.get_user(), current_user.get_bot())
return {
"message": "Text deleted!"
}
@router.get("/text/faq", response_model=Response)
def get_text(
current_user: User = Security(Authentication.get_current_user_and_bot, scopes=DESIGNER_ACCESS),
):
"""
Fetches text content of the bot
"""
return {"data": list(processor.get_content(current_user.get_bot()))}
@router.post("/cognition", response_model=Response)
def METHOD_NAME(
cognition: CognitiveDataRequest,
current_user: User = Security(Authentication.get_current_user_and_bot, scopes=DESIGNER_ACCESS),
):
"""
Saves cognition content into the bot
"""
return {
"message": "Record saved!",
"data": {
"_id": processor.METHOD_NAME(
cognition.dict(),
current_user.get_user(),
current_user.get_bot(),
)
}
}
@router.put("/cognition/{cognition_id}", response_model=Response)
def update_cognition_data(
cognition_id: str,
cognition: CognitiveDataRequest,
current_user: User = Security(Authentication.get_current_user_and_bot, scopes=DESIGNER_ACCESS),
):
"""
Updates cognition content into the bot
"""
return {
"message": "Record updated!",
"data": {
"_id": processor.update_cognition_data(
cognition_id,
cognition.dict(),
current_user.get_user(),
current_user.get_bot(),
)
}
}
@router.delete("/cognition/{cognition_id}", response_model=Response)
def delete_cognition_data(
cognition_id: str,
current_user: User = Security(Authentication.get_current_user_and_bot, scopes=DESIGNER_ACCESS),
):
"""
Deletes cognition content of the bot
"""
processor.delete_cognition_data(cognition_id, current_user.get_bot())
return {
"message": "Record deleted!"
}
@router.get("/cognition", response_model=Response)
def list_cognition_data(
current_user: User = Security(Authentication.get_current_user_and_bot, scopes=DESIGNER_ACCESS),
):
"""
Fetches cognition content of the bot
"""
return {"data": list(processor.list_cognition_data(current_user.get_bot()))}
|
2,362 |
teardown module
|
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES).
#
# Copyright (c) 2018-2023 by the software owners: The Regents of the
# University of California, through Lawrence Berkeley National Laboratory,
# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon
# University, West Virginia University Research Corporation, et al.
# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md
# for full copyright and license information.
#################################################################################
"""
Test the 'relations' connecting Resource instances,
which spans the modules idaes.core.dmf.{dmf, resource, resourcedb}
"""
# stdlib
import logging
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Union
# third-party
import pytest
# local
from idaes.core.dmf import experiment, resource, DMF
from idaes.core.dmf.resource import Predicates
# for testing
from .util import init_logging
__author__ = "Dan Gunter"
init_logging()
_log = logging.getLogger(__name__)
scratch_dir: Union[str, None] = None
scratch_path: Union[Path, None] = None
def setup_module(module):
global scratch_dir, scratch_path
scratch_dir = TemporaryDirectory(prefix="idaes.core.dmf_") # easier to remove later
scratch_path = Path(scratch_dir.name)
def METHOD_NAME(module):
global scratch_dir
del scratch_dir
@pytest.mark.unit
def test_create_relation_in_resource():
a = resource.Resource()
b = resource.Resource()
resource.create_relation(a, "contains", b)
assert len(a.v["relations"]) == 1
assert len(b.v["relations"]) == 1
# bad type
with pytest.raises(TypeError):
resource.create_relation("foo", "contains", b)
@pytest.mark.unit
def test_relation_in_experiment():
tmp_dir = scratch_path / "relation_in_experiment"
dmf = DMF(path=tmp_dir, create=True)
e1 = experiment.Experiment(dmf, name="1")
a = resource.Resource(value={"name": "foo"})
e1.add(a)
assert len(a.v["relations"]) == 1
assert len(e1.v["relations"]) == 1
@pytest.mark.unit
def test_relation_with_remove():
tmp_dir = scratch_path / "relation_with_remove"
dmf = DMF(path=tmp_dir, create=True)
e1 = experiment.Experiment(dmf, name="1")
n, added = 10, []
for i in range(n):
a = resource.Resource({"name": "foo"})
e1.add(a)
added.append(a)
assert len(e1.v["relations"]) == n
# remove, then update e1
for a in added:
dmf.remove(identifier=a.id)
e1.update()
# relation to removed 'a' should be gone
n -= 1
assert (len(e1.v["relations"])) == n
@pytest.mark.unit
def test_find_related():
#
# r0
# | uses
# v
# r1
# | version
# v
# r2
# /\
# / \ derived
# v v
# r3 r4
#
tmp_dir = scratch_path / "find_related"
dmf = DMF(path=tmp_dir, create=True)
r = [resource.Resource({"name": "r{}".format(i)}) for i in range(5)]
# r3 <-- derived <-- r2 <-- version <-- r1
cr = resource.create_relation # shortcut
cr(r[2], Predicates.derived, r[3])
cr(r[1], Predicates.version, r[2])
# r4 <-- derived <-- r2
cr(r[2], Predicates.derived, r[4])
# r0 -- Uses --> r1
cr(r[0], Predicates.uses, r[1])
# add to dmf
for i in range(5):
dmf.add(r[i])
# outgoing from r0 should include 1,2,3,4
names = []
for d, rr, m in dmf.find_related(r[0], meta=["aliases"]):
names.append(m["aliases"][0])
names.sort()
assert names == ["r1", "r2", "r3", "r4"]
# incoming to r4 should include r0, r1, r2
names = []
for d, rr, m in dmf.find_related(r[4], meta=["aliases"], outgoing=False):
names.append(m["aliases"][0])
names.sort()
assert names == ["r0", "r1", "r2"]
@pytest.mark.unit
def test_circular():
#
# r0 -> derived -> r1 -> derived >- r2 -+
# ^ |
# +------------------------------------+
# uses
tmp_dir = scratch_path / "circular"
dmf = DMF(path=tmp_dir, create=True)
r = [resource.Resource({"name": "r{}".format(i)}) for i in range(3)]
resource.create_relation(r[0], Predicates.derived, r[1])
resource.create_relation(r[1], Predicates.derived, r[2])
resource.create_relation(r[2], Predicates.uses, r[0])
for rr in r:
dmf.add(rr)
# outgoing from r0
names = []
for d, rr, m in dmf.find_related(r[0], meta=["aliases"]):
names.append(m["aliases"][0])
names.sort()
assert names == ["r0", "r1", "r2"]
# incoming to r1
names = []
for d, rr, m in dmf.find_related(r[0], meta=["aliases"], outgoing=False):
names.append(m["aliases"][0])
names.sort()
assert names == ["r0", "r1", "r2"]
# reducing depth shortens output
names = []
for d, rr, m in dmf.find_related(r[0], meta=["aliases"], maxdepth=2):
names.append(m["aliases"][0])
names.sort()
assert names == ["r1", "r2"]
names = []
for d, rr, m in dmf.find_related(r[0], meta=["aliases"], maxdepth=1):
names.append(m["aliases"][0])
names.sort()
assert names == ["r1"]
|
2,363 |
get prs
|
import requests, json, datetime, os
from enum import Enum
from github import Github
ORG_NAME = 'kubevirt'
REPO_NAME = 'hyperconverged-cluster-operator'
GITHUB_BASE_API = 'https://api.github.com/repos'
class Result(Enum):
Success = 0
Overridden = 1
Failure = 2
Pending = 3
Error = 4
Aborted = 5
Invalid = 6
class OverrideBot:
def __init__(self):
self.pr_list = []
self.start_time = datetime.datetime.now()
self.finish_time = None
github_token = os.environ['HCO_BOT_TOKEN']
gh = Github(github_token)
repo_name = f'{ORG_NAME}/{REPO_NAME}'
self.repo_obj = gh.get_repo(repo_name)
def METHOD_NAME(self):
get_prs_req = requests.get(f'{GITHUB_BASE_API}/{ORG_NAME}/{REPO_NAME}/pulls')
pr_full_list = json.loads(get_prs_req.text)
for pr in pr_full_list:
if 'do-not-merge/hold' not in [label['name'] for label in pr['labels']]:
self.pr_list.append(PullRequest(pr['number'], pr['title'], pr['url'], pr['_links']['statuses']['href']))
def get_ci_tests(self):
for pr in self.pr_list:
pr.get_ci_tests()
def nominate_lanes_for_override(self):
for pr in self.pr_list:
pr.nominate_lanes_for_override()
def comment_overrides(self):
for pr in self.pr_list:
pr.comment_overrides(self.repo_obj.get_pull(pr.number))
class PullRequest:
def __init__(self, number, title, gh_url, statuses_url):
self.number = number
self.title = title
self.gh_url = gh_url
self.statuses_url = statuses_url
self.ci_tests_list = []
self.override_list = []
def get_ci_tests(self):
statuses_raw = requests.get(self.statuses_url).text
statuses = json.loads(statuses_raw)
for status in statuses:
context = status['context']
if 'ci-index' in context or 'images' in context or 'prow' not in context:
continue
splitted = context.split('/')[-1].split('-')
provider = splitted[-1]
test_name = '-'.join(splitted[:-1])
state = status['state']
overridden = status['description'] and 'Overridden' in status['description']
test_obj = self.get_test_obj(test_name)
if not test_obj:
test_obj = CiTest(test_name, [])
self.ci_tests_list.append(test_obj)
rl = RedundantLane(context, provider, state, overridden, test_obj)
if not self.lane_exists(rl.name):
test_obj.lanes_list.append(rl)
def get_test_obj(self, test_name):
for test_obj in self.ci_tests_list:
if test_name == test_obj.name:
return test_obj
return None
def lane_exists(self, name_to_check):
for test in self.ci_tests_list:
for lane in test.lanes_list:
if lane.name == name_to_check:
return True
return False
def nominate_lanes_for_override(self):
for test in self.ci_tests_list:
if test.succeeded_any:
for lane in test.lanes_list:
if lane.result in [Result.Failure, Result.Error, Result.Pending]:
self.override_list.append((lane, test.succeeded_lanes))
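    # Added note: override_list pairs each failed/pending lane with the sibling
    # lanes of the same CI test that succeeded on another provider;
    # comment_overrides() below cites those passing lanes to justify the
    # "/override <lane>" comment posted on the pull request.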
def comment_overrides(self, gh_pr):
if not self.override_list:
return
comment = ''
for override in self.override_list:
for passed in override[1]:
comment += passed.name.split('/')[-1] + ', '
comment = comment[:-2] # removing comma at the end
plural = 's' if len(override[1]) > 1 else ''
comment += f' lane{plural} succeeded.\n'
comment += f'/override {override[0].name}\n'
print (f'comment for PR #{self.number} is:\n{comment}')
gh_pr.create_issue_comment(comment)
class CiTest:
def __init__(self, name, lanes_list):
self.name = name
self.lanes_list = lanes_list
self.succeeded_any = False
self.succeeded_lanes = []
class RedundantLane:
def __init__(self, name, provider, state, overridden, ci_test):
self.name = name
self.provider = provider
self.state = state
        self.overridden = overridden
if state == 'success' and not overridden:
self.result = Result.Success
ci_test.succeeded_any = True
ci_test.succeeded_lanes.append(self)
elif state == 'success' and overridden:
self.result = Result.Overridden
elif state == 'failure':
self.result = Result.Failure
elif state == 'pending':
self.result = Result.Pending
elif state == 'error':
self.result = Result.Error
elif state == 'aborted':
self.result = Result.Aborted
else:
self.result = Result.Invalid
def main():
ob = OverrideBot()
ob.METHOD_NAME()
ob.get_ci_tests()
ob.nominate_lanes_for_override()
ob.comment_overrides()
ob.finish_time = datetime.datetime.now()
if __name__ == '__main__':
    main()
|
2,364 |
assert pixels equal
|
"""Test the final, drawn results and compare PNG images pixel per pixel."""
import io
import os
from itertools import zip_longest
from PIL import Image
from ..testing_utils import FakeHTML, resource_filename
# NOTE: "r" is not half red on purpose. In the pixel strings it has
# better contrast with "B" than does "R". eg. "rBBBrrBrB" vs "RBBBRRBRB".
PIXELS_BY_CHAR = dict(
_=(255, 255, 255), # white
R=(255, 0, 0), # red
B=(0, 0, 255), # blue
G=(0, 255, 0), # lime green
V=(191, 0, 64), # average of 1*B and 3*R.
S=(255, 63, 63), # R above R above _
K=(0, 0, 0), # black
r=(255, 0, 0), # red
g=(0, 128, 0), # half green
b=(0, 0, 128), # half blue
v=(128, 0, 128), # average of B and R.
s=(255, 127, 127), # R above _
t=(127, 255, 127), # G above _
u=(128, 0, 127), # r above B above _
h=(64, 0, 64), # half average of B and R.
a=(0, 0, 254), # R in lossy JPG
p=(192, 0, 63), # R above R above B above _
z=None,
)
def parse_pixels(pixels):
lines = (line.split('#')[0].strip() for line in pixels.splitlines())
lines = tuple(line for line in lines if line)
widths = {len(line) for line in lines}
assert len(widths) == 1, 'All lines of pixels must have the same width'
width = widths.pop()
height = len(lines)
pixels = tuple(PIXELS_BY_CHAR[char] for line in lines for char in line)
return width, height, pixels
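# Added illustrative example (not part of the original helpers): parse_pixels
# turns an ASCII-art block into (width, height, flat RGB tuples); '#' starts a
# comment and blank lines are ignored.
_EXAMPLE_WIDTH, _EXAMPLE_HEIGHT, _EXAMPLE_PIXELS = parse_pixels('''
    _R_  # one red pixel in an otherwise white 3x2 block
    ___
''')
assert (_EXAMPLE_WIDTH, _EXAMPLE_HEIGHT) == (3, 2)
assert _EXAMPLE_PIXELS[1] == PIXELS_BY_CHAR['R']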
def assert_pixels(name, expected_pixels, html):
"""Helper testing the size of the image and the pixels values."""
expected_width, expected_height, expected_pixels = parse_pixels(
expected_pixels)
width, height, pixels = html_to_pixels(html)
assert (expected_width, expected_height) == (width, height), (
'Images do not have the same sizes:\n'
f'- expected: {expected_width} × {expected_height}\n'
f'- result: {width} × {height}')
METHOD_NAME(name, width, height, pixels, expected_pixels)
def assert_same_renderings(name, *documents, tolerance=0):
"""Render HTML documents to PNG and check that they're the same."""
pixels_list = []
for html in documents:
width, height, pixels = html_to_pixels(html)
pixels_list.append(pixels)
reference = pixels_list[0]
for i, pixels in enumerate(pixels_list[1:], start=1):
METHOD_NAME(
f'{name}_{i}', width, height, pixels, reference, tolerance)
def assert_different_renderings(name, *documents):
"""Render HTML documents to PNG and check that they’re different."""
pixels_list = []
for html in documents:
width, height, pixels = html_to_pixels(html)
pixels_list.append(pixels)
for i, pixels_1 in enumerate(pixels_list, start=1):
for j, pixels_2 in enumerate(pixels_list[i:], start=i+1):
if pixels_1 == pixels_2: # pragma: no cover
name_1, name_2 = f'{name}_{i}', f'{name}_{j}'
write_png(name_1, pixels_1, width, height)
assert False, f'{name_1} and {name_2} are the same'
def METHOD_NAME(name, width, height, raw, expected_raw, tolerance=0):
"""Take 2 matrices of pixels and assert that they are the same."""
if raw != expected_raw: # pragma: no cover
pixels = zip_longest(raw, expected_raw, fillvalue=(-1, -1, -1))
for i, (value, expected) in enumerate(pixels):
if expected is None:
continue
if any(abs(value - expected) > tolerance
for value, expected in zip(value, expected)):
actual_height = len(raw) // width
write_png(name, raw, width, actual_height)
expected_raw = [
pixel or (255, 255, 255) for pixel in expected_raw]
write_png(f'{name}.expected', expected_raw, width, height)
x = i % width
y = i // width
assert 0, (
f'Pixel ({x}, {y}) in {name}: '
f'expected rgba{expected}, got rgba{value}')
def write_png(basename, pixels, width, height): # pragma: no cover
"""Take a pixel matrix and write a PNG file."""
directory = os.path.join(os.path.dirname(__file__), 'results')
if not os.path.isdir(directory):
os.mkdir(directory)
filename = os.path.join(directory, f'{basename}.png')
image = Image.new('RGB', (width, height))
image.putdata(pixels)
image.save(filename)
def html_to_pixels(html):
    """Render an HTML document to PNG, check its size and return pixel data."""
document = FakeHTML(
string=html,
# Dummy filename, but in the right directory.
base_url=resource_filename('<test>'))
return document_to_pixels(document)
def document_to_pixels(document):
"""Render an HTML document to PNG, check its size and return pixel data."""
image = Image.open(io.BytesIO(document.write_png()))
return image.width, image.height, image.getdata()
|
2,365 |
main
|
# Copyright © 2021 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved.
from io import StringIO
from os.path import dirname, join
from numpy import array
from numpy.testing import assert_array_equal
import pandas as pd
import sys
import unittest
from unittest.mock import patch
sys.path.append(join(dirname(__file__), "..")) # Needed to access examples
from examples import dataset, sim_battery_eol, ensemble, custom_model
from prog_models.datasets import nasa_cmapss, nasa_battery
"""
This file includes tests that are too long to be run as part of the automated tests. Instead, these tests are run manually as part of the release process.
"""
class TestManual(unittest.TestCase):
def setUp(self):
# set stdout (so it won't print)
sys.stdout = StringIO()
def tearDown(self):
sys.stdout = sys.__stdout__
def test_nasa_battery_download(self):
(desc, data) = nasa_battery.load_data(1)
# Verifying desc
self.assertEqual(desc['procedure'], "Uniform random walk discharge at room temperature with variable recharge duration")
self.assertEqual(desc['description'], "Experiment consisting of repeated iteration of a randomized series of discharging pulses followed by a recharging period of variable length. Batteries are charged and discharged at room temperature")
self.assertDictEqual(desc['runs'][0], {'type': 'D', 'desc': 'low current discharge at 0.04A', 'date': '30-Dec-2013 15:53:29'})
self.assertDictEqual(desc['runs'][8532], {'type': 'R', 'desc': 'rest (random walk)', 'date': '22-Feb-2014 07:45:49'})
self.assertDictEqual(desc['runs'][-1], {'type': 'D', 'desc': 'discharge (random walk)', 'date': '02-Jun-2014 16:43:48'})
# Verifying data
assert_array_equal(data[0].columns, pd.core.indexes.base.Index(['relativeTime', 'current', 'voltage', 'temperature'], dtype='object'))
self.assertEqual(data[0]['current'][15], 0.04)
assert_array_equal(data[0].iloc[-1], array([1.8897668e+05, 4.0000000e-02, 3.2000000e+00, 1.7886300e+01]))
assert_array_equal(data[8532].iloc[0], array([1.000000e-02, 0.000000e+00, 3.645000e+00, 3.124247e+01]))
assert_array_equal(data[8532].iloc[-1], array([0.54, 0, 3.716, 31.24247]))
assert_array_equal(data[-1].iloc[0], array([0.04, 3.004, 3.647, 28.08937]))
assert_array_equal(data[-1].iloc[-1], array([178.38, 3, 3.2, 32.53947]))
def test_nasa_cmapss_download(self):
(train, test, results) = nasa_cmapss.load_data(1)
# Testing train data
assert_array_equal(train.iloc[0], array([1.00000e+00, 1.00000e+00, 2.30000e-03, 3.00000e-04, 1.00000e+02, 5.18670e+02, 6.43020e+02, 1.58529e+03, 1.39821e+03, 1.46200e+01, 2.16100e+01, 5.53900e+02, 2.38804e+03, 9.05017e+03, 1.30000e+00, 4.72000e+01, 5.21720e+02, 2.38803e+03, 8.12555e+03, 8.40520e+00, 3.00000e-02, 3.92000e+02, 2.38800e+03, 1.00000e+02, 3.88600e+01, 2.33735e+01]))
assert_array_equal(train.iloc[-1], array([1.00000e+02, 1.98000e+02, 1.30000e-03, 3.00000e-04, 1.00000e+02, 5.18670e+02, 6.42950e+02, 1.60162e+03, 1.42499e+03, 1.46200e+01, 2.16100e+01, 5.52480e+02, 2.38806e+03, 9.15503e+03, 1.30000e+00, 4.78000e+01, 5.21070e+02, 2.38805e+03, 8.21464e+03, 8.49030e+00, 3.00000e-02, 3.96000e+02, 2.38800e+03, 1.00000e+02, 3.87000e+01, 2.31855e+01]))
assert_array_equal(train.iloc[6548], array([5.20000e+01, 6.60000e+01, -1.90000e-03, -0.00000e+00, 1.00000e+02, 5.18670e+02, 6.42070e+02, 1.58397e+03, 1.39125e+03, 1.46200e+01, 2.16100e+01, 5.54590e+02, 2.38804e+03, 9.05261e+03, 1.30000e+00, 4.71200e+01, 5.22480e+02, 2.38803e+03, 8.13633e+03, 8.39150e+00, 3.00000e-02, 3.92000e+02, 2.38800e+03, 1.00000e+02, 3.90500e+01, 2.34304e+01]))
# Testing test data
assert_array_equal(test.iloc[0], array([ 1.00000e+00, 1.00000e+00, -7.00000e-04, -4.00000e-04, 1.00000e+02, 5.18670e+02, 6.41820e+02, 1.58970e+03, 1.40060e+03, 1.46200e+01, 2.16100e+01, 5.54360e+02, 2.38806e+03, 9.04619e+03, 1.30000e+00, 4.74700e+01, 5.21660e+02, 2.38802e+03, 8.13862e+03, 8.41950e+00, 3.00000e-02, 3.92000e+02, 2.38800e+03, 1.00000e+02, 3.90600e+01, 2.34190e+01]))
assert_array_equal(test.iloc[-1], array([ 1.00000e+02, 2.00000e+02, -3.20000e-03, -5.00000e-04, 1.00000e+02, 5.18670e+02, 6.43850e+02, 1.60038e+03, 1.43214e+03, 1.46200e+01, 2.16100e+01, 5.50790e+02, 2.38826e+03, 9.06148e+03, 1.30000e+00, 4.82000e+01, 5.19300e+02, 2.38826e+03, 8.13733e+03, 8.50360e+00, 3.00000e-02, 3.96000e+02, 2.38800e+03, 1.00000e+02, 3.83700e+01, 2.30522e+01]))
assert_array_equal(test.iloc[6548], array([3.30000e+01, 1.37000e+02, 1.70000e-03, 2.00000e-04, 1.00000e+02, 5.18670e+02, 6.42380e+02, 1.58655e+03, 1.41089e+03, 1.46200e+01, 2.16100e+01, 5.53960e+02, 2.38807e+03, 9.06359e+03, 1.30000e+00, 4.74500e+01, 5.21950e+02, 2.38805e+03, 8.14151e+03, 8.43050e+00, 3.00000e-02, 3.91000e+02, 2.38800e+03, 1.00000e+02, 3.90000e+01, 2.33508e+01]))
# Testing results
assert_array_equal(results, array([112., 98., 69., 82., 91., 93., 91., 95., 111., 96., 97.,
124., 95., 107., 83., 84., 50., 28., 87., 16., 57., 111.,
113., 20., 145., 119., 66., 97., 90., 115., 8., 48., 106.,
7., 11., 19., 21., 50., 142., 28., 18., 10., 59., 109.,
114., 47., 135., 92., 21., 79., 114., 29., 26., 97., 137.,
15., 103., 37., 114., 100., 21., 54., 72., 28., 128., 14.,
77., 8., 121., 94., 118., 50., 131., 126., 113., 10., 34.,
107., 63., 90., 8., 9., 137., 58., 118., 89., 116., 115.,
136., 28., 38., 20., 85., 55., 128., 137., 82., 59., 117.,
20.]))
def test_dataset_example(self):
with patch('matplotlib.pyplot.show'):
dataset.run_example()
def test_sim_battery_eol_example(self):
with patch('matplotlib.pyplot.show'):
sim_battery_eol.run_example()
def test_ensemble_example(self):
with patch('matplotlib.pyplot.show'):
ensemble.run_example()
def test_custom_model_example(self):
with patch('matplotlib.pyplot.show'):
custom_model.run_example()
# This allows the module to be executed directly
def METHOD_NAME():
load_test = unittest.TestLoader()
runner = unittest.TextTestRunner()
print("\n\nTesting Manual")
result = runner.run(load_test.loadTestsFromTestCase(TestManual)).wasSuccessful()
if not result:
raise Exception("Failed test")
if __name__ == '__main__':
METHOD_NAME()
|
2,366 |
test is url in portal
|
from Acquisition import aq_parent
from plone.base.interfaces import ILoginSchema
from plone.registry.interfaces import IRegistry
from Products.CMFCore.tests.base.dummy import DummyContent
from Products.CMFCore.tests.base.dummy import DummyFolder
from Products.CMFCore.tests.base.dummy import DummySite
from zope.component import getSiteManager
import unittest
class DummyFolder(DummyFolder):
def absolute_url(self):
return "/".join([aq_parent(self).absolute_url(), self.getId()])
class DummyLoginSettings:
allow_external_login_sites = [
"http://external1",
"http://external2/",
"http://external3/site",
"http://external4/site/",
]
class DummyRegistry(DummyContent):
def __getitem__(self, name, default=None):
if name == "plone.allow_external_login_sites":
return DummyLoginSettings().allow_external_login_sites
return default
def forInterface(self, iface, prefix=""):
if iface == ILoginSchema:
return DummyLoginSettings()
class TestURLTool(unittest.TestCase):
def setUp(self):
self.site = DummySite(id="foo")
self.site._setObject("foo", DummyFolder(id="foo"))
self.site.foo._setObject("doc1", DummyContent(id="doc1"))
mock_registry = DummyRegistry(id="portal_registry")
self.site.portal_registry = mock_registry
sm = getSiteManager()
sm.registerUtility(component=mock_registry, provided=IRegistry)
def _makeOne(self, *args, **kw):
from Products.CMFPlone.URLTool import URLTool
url_tool = URLTool(*args, **kw)
return url_tool.__of__(self.site)
def METHOD_NAME(self):
# First test what the absolute url of the site is, otherwise these
# tests look really weird. Apparently our domain is www.foobar.com.
self.assertEqual(self.site.absolute_url(), "http://www.foobar.com/bar/foo")
url_tool = self._makeOne()
iURLiP = url_tool.isURLInPortal
self.assertTrue(iURLiP("http://www.foobar.com/bar/foo/folder"))
self.assertTrue(iURLiP("http://www.foobar.com/bar/foo"))
self.assertFalse(iURLiP("http://www.foobar.com/bar2/foo"))
self.assertTrue(iURLiP("https://www.foobar.com/bar/foo/folder"))
self.assertFalse(iURLiP("http://www.foobar.com:8080/bar/foo/folder"))
self.assertFalse(iURLiP("http://www.foobar.com/bar"))
self.assertTrue(iURLiP("//www.foobar.com/bar/foo"))
self.assertFalse(iURLiP("/images"))
self.assertTrue(iURLiP("/bar/foo/foo"))
def test_isURLInPortalRelative(self):
url_tool = self._makeOne()
iURLiP = url_tool.isURLInPortal
# non-root relative urls will need a current context to be passed in
self.assertTrue(iURLiP("images/img1.jpg"))
self.assertTrue(iURLiP("./images/img1.jpg"))
# /bar/foo/something
self.assertTrue(iURLiP("../something", self.site.foo.doc1))
# /bar/afolder
self.assertFalse(iURLiP("../../afolder", self.site.foo.doc1))
# /afolder
self.assertFalse(iURLiP("../../../afolder", self.site.foo.doc1))
# /../afolder? How do we have more ../'s than there are parts in
# the URL?
self.assertFalse(iURLiP("../../../../afolder", self.site.foo.doc1))
# /bar/foo/afolder
self.assertTrue(iURLiP("../../foo/afolder", self.site.foo.doc1))
def test_isURLInPortalExternal(self):
url_tool = self._makeOne()
iURLiP = url_tool.isURLInPortal
self.assertTrue(iURLiP("http://external1"))
self.assertTrue(iURLiP("http://external1/"))
self.assertTrue(iURLiP("http://external1/something"))
self.assertTrue(iURLiP("http://external2"))
self.assertTrue(iURLiP("http://external2/"))
self.assertTrue(iURLiP("http://external2/something"))
self.assertTrue(iURLiP("http://external3/site"))
self.assertTrue(iURLiP("http://external3/site/"))
self.assertTrue(iURLiP("http://external3/site/something"))
self.assertTrue(iURLiP("http://external4/site"))
self.assertTrue(iURLiP("http://external4/site/"))
self.assertTrue(iURLiP("http://external4/site/something"))
self.assertFalse(iURLiP("http://external3/other"))
self.assertFalse(iURLiP("http://external4/other"))
self.assertFalse(iURLiP("http://external5"))
self.assertFalse(iURLiP("http://external11"))
def test_script_tag_url_not_in_portal(self):
url_tool = self._makeOne()
iURLiP = url_tool.isURLInPortal
self.assertFalse(iURLiP('<script>alert("hi");</script>'))
self.assertFalse(iURLiP('<sCript>alert("hi");</script>'))
self.assertFalse(iURLiP("%3Cscript%3Ealert(%22hi%22)%3B%3C%2Fscript%3E"))
self.assertFalse(iURLiP("%3CsCript%3Ealert(%22hi%22)%3B%3C%2Fscript%3E"))
def test_inline_url_not_in_portal(self):
url_tool = self._makeOne()
iURLiP = url_tool.isURLInPortal
self.assertFalse(iURLiP("javascript%3Aalert(3)"))
self.assertFalse(iURLiP("jaVascript%3Aalert(3)"))
self.assertFalse(iURLiP("javascript:alert(3)"))
self.assertFalse(iURLiP("jaVascript:alert(3)"))
def test_double_back_slash(self):
url_tool = self._makeOne()
iURLiP = url_tool.isURLInPortal
self.assertFalse(iURLiP("\\\\www.example.com"))
def test_escape(self):
url_tool = self._makeOne()
iURLiP = url_tool.isURLInPortal
self.assertFalse(iURLiP(r"\/\/www.example.com"))
self.assertFalse(iURLiP(r"\%2F\%2Fwww.example.com"))
self.assertFalse(iURLiP(r"\%2f\%2fwww.example.com"))
self.assertFalse(iURLiP("%2F%2Fwww.example.com"))
self.assertFalse(iURLiP("%2f%2fwww.example.com"))
def test_regression_absolute_url_in_portal(self):
url_tool = self._makeOne()
iURLiP = url_tool.isURLInPortal
self.assertTrue(iURLiP(url_tool()))
self.assertTrue(iURLiP(url_tool() + "/shrubbery?knights=ni#ekki-ekki"))
def test_mailto_simple_not_in_portal(self):
url_tool = self._makeOne()
iURLiP = url_tool.isURLInPortal
self.assertFalse(iURLiP("mailto:[email protected]"))
def test_mailto_complex_not_in_portal(self):
url_tool = self._makeOne()
iURLiP = url_tool.isURLInPortal
self.assertFalse(
iURLiP(
"mailto:192.168.163.154:8080/Plone'"
""><html><svg onload=alert(document"
".domain)></html>"
)
)
def test_data_not_in_portal(self):
url_tool = self._makeOne()
iURLiP = url_tool.isURLInPortal
self.assertFalse(
iURLiP("data:text/html%3bbase64,PHNjcmlwdD5hbGVydCgnWFNTJyk8L3NjcmlwdD4K")
)
def test_double_slash(self):
# I wondered if this might be a problem after reading
# https://bugs.python.org/issue23505
# Apparently not, but let's test it.
url_tool = self._makeOne()
iURLiP = url_tool.isURLInPortal
self.assertFalse(iURLiP("//www.google.com"))
self.assertFalse(iURLiP("////www.google.com"))
|
2,367 |
test simple
|
#!/usr/bin/env python
import os
import re
import stat
import subprocess
import pytest
from subprocess import Popen
from pathlib import Path
import lief
from lief.ELF import Segment
from utils import get_sample, has_recent_glibc, is_linux, is_x86_64, is_aarch64
is_updated_linux = pytest.mark.skipif(not (is_linux() and is_x86_64() and has_recent_glibc()),
reason="needs a recent x86-64 Linux system")
is_linux_x64 = pytest.mark.skipif(not (is_linux() and is_x86_64()), reason="needs a Linux x86-64")
lief.logging.set_level(lief.logging.LOGGING_LEVEL.INFO)
CWD = Path(__file__).parent
@is_updated_linux
def METHOD_NAME(tmp_path: Path):
sample_path = get_sample('ELF/ELF64_x86-64_binary_ls.bin')
stub = lief.parse((CWD / "hello_lief.bin").as_posix())
output = tmp_path / "ls.segment"
target = lief.parse(sample_path)
for _ in range(4):
segment = stub.segments[0]
original_va = segment.virtual_address
segment.virtual_address = 0
segment = target.add(segment)
new_ep = (stub.header.entrypoint - original_va) + segment.virtual_address
target.header.entrypoint = new_ep
target.write(output.as_posix())
st = os.stat(output)
os.chmod(output, st.st_mode | stat.S_IEXEC)
with Popen(output.as_posix(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT) as P:
stdout = P.stdout.read().decode("utf8")
print(stdout)
assert re.search(r'LIEF is Working', stdout) is not None
@is_updated_linux
def test_gcc(tmp_path: Path):
sample_path = get_sample('ELF/ELF64_x86-64_binary_gcc.bin')
stub = lief.parse((CWD / "hello_lief.bin").as_posix())
output = tmp_path / "gcc.segment"
target = lief.parse(sample_path)
segment = stub.segments[0]
original_va = segment.virtual_address
segment.virtual_address = 0
segment = target.add(segment)
new_ep = (stub.header.entrypoint - original_va) + segment.virtual_address
target.header.entrypoint = new_ep
target.write(output.as_posix())
st = os.stat(output)
os.chmod(output, st.st_mode | stat.S_IEXEC)
with Popen(output.as_posix(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT) as P:
stdout = P.stdout.read().decode("utf8")
print(stdout)
assert re.search(r'LIEF is Working', stdout) is not None
@is_linux_x64
def test_static(tmp_path: Path):
sample_path = get_sample('ELF/ELF64_x86-64_binary_static-binary.bin')
stub = lief.parse((CWD / "hello_lief.bin").as_posix())
output = tmp_path / "static.segment"
target = lief.parse(sample_path)
segment = stub.segments[0]
original_va = segment.virtual_address
segment.virtual_address = 0
segment = target.add(segment)
new_ep = (stub.header.entrypoint - original_va) + segment.virtual_address
target.header.entrypoint = new_ep
target.write(output.as_posix())
st = os.stat(output)
os.chmod(output, st.st_mode | stat.S_IEXEC)
with Popen(output.as_posix(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT) as P:
stdout = P.stdout.read().decode("utf8")
print(stdout)
assert re.search(r'LIEF is Working', stdout) is not None
@pytest.mark.skipif(not is_linux(), reason="needs a Linux system")
@pytest.mark.parametrize("binpath", [
'/usr/bin/ls', '/bin/ls',
'/usr/bin/ssh', '/usr/bin/nm',
'/usr/bin/openssl', '/usr/bin/bc',
'/usr/bin/bzip2', '/bin/bzip2',
'/usr/bin/cp', '/bin/cp',
'/usr/bin/find', '/usr/bin/file',
])
def test_add_segment(tmp_path: Path, binpath):
target = Path(binpath)
if not target.is_file():
print(f"{target} does not exists. Skip!")
return
stub = None
if is_x86_64():
stub = lief.parse((CWD / "hello_lief.bin").as_posix())
elif is_aarch64():
stub = lief.parse((CWD / "hello_lief_aarch64.bin").as_posix())
name = target.name
target = lief.parse(target.as_posix())
output = tmp_path / f"{name}.segment"
for _ in range(6):
stub_segment = stub.segments[0]
segment = lief.ELF.Segment()
segment.content = stub.segments[0].content
segment.type = stub_segment.type
segment.alignment = stub_segment.alignment
segment.flags = stub_segment.flags
new_segment = target.add(segment)
new_ep = (stub.header.entrypoint - stub.imagebase - stub_segment.file_offset) + new_segment.virtual_address
target.header.entrypoint = new_ep
target.write(output.as_posix())
st = os.stat(output)
os.chmod(output, st.st_mode | stat.S_IEXEC)
with Popen(output.as_posix(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT) as P:
stdout = P.stdout.read().decode("utf8")
print(stdout)
assert re.search(r'LIEF is Working', stdout) is not None
|
2,368 |
real np types
|
import numpy as np
from numba.core.compiler import compile_isolated
from numba import jit
from numba.core import types
from numba.tests.support import TestCase, tag
import unittest
def dobool(a):
return bool(a)
def doint(a):
return int(a)
def dofloat(a):
return float(a)
def docomplex(a):
return complex(a)
def docomplex2(a, b):
return complex(a, b)
def complex_calc(a):
z = complex(a)
return z.real ** 2 + z.imag ** 2
def complex_calc2(a, b):
z = complex(a, b)
return z.real ** 2 + z.imag ** 2
def converter(tp):
def f(a):
return tp(a)
return f
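# Added note: converter(tp) builds a tiny casting function a -> tp(a); the
# checks below compile it with jit(nopython=True) and compare the compiled
# cast against the corresponding NumPy conversion for both Numba types
# (types.int64, ...) and NumPy scalar types (np.float32, ...).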
def METHOD_NAME():
for tp_name in ('int8', 'int16', 'int32', 'int64',
'uint8', 'uint16', 'uint32', 'uint64',
'intc', 'uintc', 'intp', 'uintp',
'float32', 'float64', 'bool_'):
yield tp_name
def complex_np_types():
for tp_name in ('complex64', 'complex128'):
yield tp_name
class TestScalarNumberCtor(TestCase):
"""
Test <number class>(some scalar)
"""
def check_int_constructor(self, pyfunc):
x_types = [
types.boolean, types.int32, types.int64, types.float32, types.float64
]
x_values = [1, 0, 1000, 12.2, 23.4]
for ty, x in zip(x_types, x_values):
cres = compile_isolated(pyfunc, [ty])
cfunc = cres.entry_point
self.assertPreciseEqual(pyfunc(x), cfunc(x))
def test_bool(self):
self.check_int_constructor(dobool)
def test_int(self):
self.check_int_constructor(doint)
def test_float(self):
pyfunc = dofloat
x_types = [
types.int32, types.int64, types.float32, types.float64
]
x_values = [1, 1000, 12.2, 23.4]
for ty, x in zip(x_types, x_values):
cres = compile_isolated(pyfunc, [ty])
cfunc = cres.entry_point
self.assertPreciseEqual(pyfunc(x), cfunc(x),
prec='single' if ty is types.float32 else 'exact')
def test_complex(self):
pyfunc = docomplex
x_types = [
types.int32, types.int64, types.float32, types.float64,
types.complex64, types.complex128,
]
x_values = [1, 1000, 12.2, 23.4, 1.5-5j, 1-4.75j]
for ty, x in zip(x_types, x_values):
cres = compile_isolated(pyfunc, [ty])
cfunc = cres.entry_point
got = cfunc(x)
expected = pyfunc(x)
self.assertPreciseEqual(pyfunc(x), cfunc(x),
prec='single' if ty is types.float32 else 'exact')
# Check that complex(float32) really creates a complex64,
# by checking the accuracy of computations.
pyfunc = complex_calc
x = 1.0 + 2**-50
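        # 2**-50 is representable in float64 but far below float32 precision (~2**-23),
        # so a genuine complex64 computation rounds x to exactly 1.0, while complex128
        # keeps a value slightly greater than 1.0.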
cres = compile_isolated(pyfunc, [types.float32])
cfunc = cres.entry_point
self.assertPreciseEqual(cfunc(x), 1.0)
# Control (complex128)
cres = compile_isolated(pyfunc, [types.float64])
cfunc = cres.entry_point
self.assertGreater(cfunc(x), 1.0)
def test_complex2(self):
pyfunc = docomplex2
x_types = [
types.int32, types.int64, types.float32, types.float64
]
x_values = [1, 1000, 12.2, 23.4]
y_values = [x - 3 for x in x_values]
for ty, x, y in zip(x_types, x_values, y_values):
cres = compile_isolated(pyfunc, [ty, ty])
cfunc = cres.entry_point
self.assertPreciseEqual(pyfunc(x, y), cfunc(x, y),
prec='single' if ty is types.float32 else 'exact')
# Check that complex(float32, float32) really creates a complex64,
# by checking the accuracy of computations.
pyfunc = complex_calc2
x = 1.0 + 2**-50
cres = compile_isolated(pyfunc, [types.float32, types.float32])
cfunc = cres.entry_point
self.assertPreciseEqual(cfunc(x, x), 2.0)
# Control (complex128)
cres = compile_isolated(pyfunc, [types.float64, types.float32])
cfunc = cres.entry_point
self.assertGreater(cfunc(x, x), 2.0)
def check_type_converter(self, tp, np_type, values):
pyfunc = converter(tp)
cfunc = jit(nopython=True)(pyfunc)
if issubclass(np_type, np.integer):
# Converting from a Python int to a small Numpy int on 32-bit
# builds can raise "OverflowError: Python int too large to
# convert to C long". Work around by going through a large
# Numpy int first.
np_converter = lambda x: np_type(np.int64(x))
else:
np_converter = np_type
dtype = np.dtype(np_type)
for val in values:
if dtype.kind == 'u' and isinstance(val, float) and val < 0.0:
# Converting negative float to unsigned int yields undefined
# behaviour (and concretely different on ARM vs. x86)
continue
expected = np_converter(val)
got = cfunc(val)
self.assertPreciseEqual(got, expected,
msg="for type %s with arg %s" % (np_type, val))
def check_number_types(self, tp_factory):
values = [0, 1, -1, 100003, 10000000000007, -100003, -10000000000007,
1.5, -3.5]
for tp_name in METHOD_NAME():
np_type = getattr(np, tp_name)
tp = tp_factory(tp_name)
self.check_type_converter(tp, np_type, values)
values.append(1.5+3j)
for tp_name in complex_np_types():
np_type = getattr(np, tp_name)
tp = tp_factory(tp_name)
self.check_type_converter(tp, np_type, values)
def test_numba_types(self):
"""
Test explicit casting to Numba number types.
"""
def tp_factory(tp_name):
return getattr(types, tp_name)
self.check_number_types(tp_factory)
def test_numpy_types(self):
"""
Test explicit casting to Numpy number types.
"""
def tp_factory(tp_name):
return getattr(np, tp_name)
self.check_number_types(tp_factory)
class TestArrayNumberCtor(TestCase):
"""
Test <number class>(some sequence)
"""
def check_type_constructor(self, np_type, values):
pyfunc = converter(np_type)
cfunc = jit(nopython=True)(pyfunc)
for val in values:
expected = np_type(val)
got = cfunc(val)
self.assertPreciseEqual(got, expected)
def test_1d(self):
values = [
(1.0, 2.5),
(1, 2.5),
[1.0, 2.5],
(),
]
for tp_name in METHOD_NAME():
np_type = getattr(np, tp_name)
self.check_type_constructor(np_type, values)
values = [
(1j, 2.5),
[1.0, 2.5],
]
for tp_name in complex_np_types():
np_type = getattr(np, tp_name)
self.check_type_constructor(np_type, values)
def test_2d(self):
values = [
((1.0, 2.5), (3.5, 4)),
[(1.0, 2.5), (3.5, 4.0)],
([1.0, 2.5], [3.5, 4.0]),
[(), ()],
]
for tp_name in METHOD_NAME():
np_type = getattr(np, tp_name)
self.check_type_constructor(np_type, values)
for tp_name in complex_np_types():
np_type = getattr(np, tp_name)
self.check_type_constructor(np_type, values)
if __name__ == '__main__':
unittest.main()
|
2,369 |
drop block2d
|
import torch
import torch.fx
import torch.nn.functional as F
from torch import nn, Tensor
from ..utils import _log_api_usage_once
def METHOD_NAME(
input: Tensor, p: float, block_size: int, inplace: bool = False, eps: float = 1e-06, training: bool = True
) -> Tensor:
"""
Implements DropBlock2d from `"DropBlock: A regularization method for convolutional networks"
<https://arxiv.org/abs/1810.12890>`.
Args:
        input (Tensor[N, C, H, W]): The input tensor of 4 dimensions, with the first one
            being its batch, i.e. a batch with ``N`` rows.
p (float): Probability of an element to be dropped.
block_size (int): Size of the block to drop.
inplace (bool): If set to ``True``, will do this operation in-place. Default: ``False``.
eps (float): A value added to the denominator for numerical stability. Default: 1e-6.
        training (bool): apply dropblock if ``True``. Default: ``True``.
Returns:
Tensor[N, C, H, W]: The randomly zeroed tensor after dropblock.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(METHOD_NAME)
if p < 0.0 or p > 1.0:
raise ValueError(f"drop probability has to be between 0 and 1, but got {p}.")
if input.ndim != 4:
raise ValueError(f"input should be 4 dimensional. Got {input.ndim} dimensions.")
if not training or p == 0.0:
return input
N, C, H, W = input.size()
block_size = min(block_size, W, H)
# compute the gamma of Bernoulli distribution
gamma = (p * H * W) / ((block_size**2) * ((H - block_size + 1) * (W - block_size + 1)))
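    # Each sampled center zeroes roughly block_size**2 elements, and centers can only fall
    # inside the (H - block_size + 1) x (W - block_size + 1) valid region, so this gamma
    # makes the expected fraction of dropped elements approximately equal to p.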
noise = torch.empty((N, C, H - block_size + 1, W - block_size + 1), dtype=input.dtype, device=input.device)
noise.bernoulli_(gamma)
noise = F.pad(noise, [block_size // 2] * 4, value=0)
noise = F.max_pool2d(noise, stride=(1, 1), kernel_size=(block_size, block_size), padding=block_size // 2)
noise = 1 - noise
normalize_scale = noise.numel() / (eps + noise.sum())
if inplace:
input.mul_(noise).mul_(normalize_scale)
else:
input = input * noise * normalize_scale
return input
def drop_block3d(
input: Tensor, p: float, block_size: int, inplace: bool = False, eps: float = 1e-06, training: bool = True
) -> Tensor:
"""
Implements DropBlock3d from `"DropBlock: A regularization method for convolutional networks"
<https://arxiv.org/abs/1810.12890>`.
Args:
        input (Tensor[N, C, D, H, W]): The input tensor of 5 dimensions, with the first one
            being its batch, i.e. a batch with ``N`` rows.
p (float): Probability of an element to be dropped.
block_size (int): Size of the block to drop.
inplace (bool): If set to ``True``, will do this operation in-place. Default: ``False``.
eps (float): A value added to the denominator for numerical stability. Default: 1e-6.
        training (bool): apply dropblock if ``True``. Default: ``True``.
Returns:
Tensor[N, C, D, H, W]: The randomly zeroed tensor after dropblock.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(drop_block3d)
if p < 0.0 or p > 1.0:
raise ValueError(f"drop probability has to be between 0 and 1, but got {p}.")
if input.ndim != 5:
raise ValueError(f"input should be 5 dimensional. Got {input.ndim} dimensions.")
if not training or p == 0.0:
return input
N, C, D, H, W = input.size()
block_size = min(block_size, D, H, W)
# compute the gamma of Bernoulli distribution
gamma = (p * D * H * W) / ((block_size**3) * ((D - block_size + 1) * (H - block_size + 1) * (W - block_size + 1)))
noise = torch.empty(
(N, C, D - block_size + 1, H - block_size + 1, W - block_size + 1), dtype=input.dtype, device=input.device
)
noise.bernoulli_(gamma)
noise = F.pad(noise, [block_size // 2] * 6, value=0)
noise = F.max_pool3d(
noise, stride=(1, 1, 1), kernel_size=(block_size, block_size, block_size), padding=block_size // 2
)
noise = 1 - noise
normalize_scale = noise.numel() / (eps + noise.sum())
if inplace:
input.mul_(noise).mul_(normalize_scale)
else:
input = input * noise * normalize_scale
return input
torch.fx.wrap("drop_block2d")
class DropBlock2d(nn.Module):
"""
See :func:`drop_block2d`.
"""
def __init__(self, p: float, block_size: int, inplace: bool = False, eps: float = 1e-06) -> None:
super().__init__()
self.p = p
self.block_size = block_size
self.inplace = inplace
self.eps = eps
def forward(self, input: Tensor) -> Tensor:
"""
Args:
input (Tensor): Input feature map on which some areas will be randomly
dropped.
Returns:
Tensor: The tensor after DropBlock layer.
"""
return METHOD_NAME(input, self.p, self.block_size, self.inplace, self.eps, self.training)
def __repr__(self) -> str:
s = f"{self.__class__.__name__}(p={self.p}, block_size={self.block_size}, inplace={self.inplace})"
return s
torch.fx.wrap("drop_block3d")
class DropBlock3d(DropBlock2d):
"""
See :func:`drop_block3d`.
"""
def __init__(self, p: float, block_size: int, inplace: bool = False, eps: float = 1e-06) -> None:
super().__init__(p, block_size, inplace, eps)
def forward(self, input: Tensor) -> Tensor:
"""
Args:
input (Tensor): Input feature map on which some areas will be randomly
dropped.
Returns:
Tensor: The tensor after DropBlock layer.
"""
return drop_block3d(input, self.p, self.block_size, self.inplace, self.eps, self.training)
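# Illustrative usage sketch (shapes and values are arbitrary examples, not part of the
# original module):
#   layer = DropBlock2d(p=0.1, block_size=3)
#   x = torch.randn(2, 8, 16, 16)   # Tensor[N, C, H, W]
#   y = layer(x)                    # same shape; contiguous square blocks zeroed in training mode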
|
2,370 |
schedule registration reminder
|
from django.conf import settings
from django.db.models.signals import post_delete, post_save
from django.utils import timezone
from events.models import Event, EventRegistration
from members.models import Member
from utils.models.signals import suspendingreceiver
from ..models import Category, EventStartReminderMessage, RegistrationReminderMessage
@suspendingreceiver(
post_save, sender=Event, dispatch_uid="schedule_event_start_reminder"
)
def schedule_event_start_reminder(sender, instance, **kwargs):
"""Create, update or delete a scheduled start reminder for the event if necessary."""
message = getattr(instance, "start_reminder", None)
if not instance.published:
# Remove existing not-sent notification if the event isn't published.
if message is not None and not message.sent:
instance.start_reminder = None
message.delete()
else:
reminder_time = instance.start - timezone.timedelta(hours=1)
# Delete reminder if the event is changed so that the reminder time has now passed.
if (
message is not None
and message.time != reminder_time
and reminder_time < timezone.now()
):
instance.start_reminder = None
message.delete()
return
# Don't update if the message has already been sent or the reminder time has passed.
if (message is not None and message.sent) or reminder_time < timezone.now():
return
if message is None:
message = EventStartReminderMessage(event=instance)
message.title = "Event"
message.body = f"'{instance.title}' starts in 1 hour"
message.url = f"{settings.BASE_URL}{instance.get_absolute_url()}"
message.category = Category.objects.get(key=Category.EVENT)
message.time = reminder_time
message.save()
if instance.registration_required:
message.users.set([r.member for r in instance.participants if r.member])
else:
message.users.set(Member.current_members.all())
@suspendingreceiver(
post_save, sender=Event, dispatch_uid="schedule_registration_reminder"
)
def METHOD_NAME(sender, instance, **kwargs):
"""Create, update or delete a registration reminder for the event if necessary."""
message = getattr(instance, "registration_reminder", None)
if not instance.published or not instance.registration_required:
# Remove existing not-sent notification if the event
# isn't published or registration isn't required.
if message is not None and not message.sent:
instance.registration_reminder = None
message.delete()
else:
reminder_time = instance.registration_start - timezone.timedelta(hours=1)
# Delete reminder if the event is changed so that the reminder time has now passed.
if (
message is not None
and message.time != reminder_time
and reminder_time < timezone.now()
):
instance.registration_reminder = None
message.delete()
return
# Don't update if the message has already been sent or the reminder time has passed.
if (message is not None and message.sent) or reminder_time < timezone.now():
return
if message is None:
message = RegistrationReminderMessage(event=instance)
message.title = "Event registration"
message.body = f"Registration for '{instance.title}' starts in 1 hour"
message.url = f"{settings.BASE_URL}{instance.get_absolute_url()}"
message.category = Category.objects.get(key=Category.EVENT)
message.time = reminder_time
message.save()
message.users.set(Member.current_members.all())
@suspendingreceiver(
post_save,
sender=EventRegistration,
dispatch_uid="update_event_start_reminder_users_on_registration_save",
)
def update_event_start_reminder_users_on_registration_save(sender, instance, **kwargs):
"""Add or remove the member from the event start reminder."""
message = getattr(instance.event, "start_reminder", None)
if message is None or message.sent:
return
if instance.member is not None:
if instance.event.registration_required:
if instance.date_cancelled:
message.users.remove(instance.member)
else:
message.users.add(instance.member)
@suspendingreceiver(
post_delete,
sender=EventRegistration,
dispatch_uid="update_event_start_reminder_users_on_registration_delete",
)
def update_event_start_reminder_users_on_registration_delete(
sender, instance, **kwargs
):
"""Remove the member from the event start reminder if registration is required."""
message = getattr(instance.event, "start_reminder", None)
if message is None or message.sent:
return
if instance.member is not None:
if instance.event.registration_required:
message.users.remove(instance.member)
|
2,371 |
resnet calibration reader
|
#
# Copyright (C) 2023, Advanced Micro Devices, Inc. All rights reserved.
# SPDX-License-Identifier: MIT
#
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------
import torch
from onnxruntime.quantization.calibrate import CalibrationDataReader
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
from torchvision.datasets import CIFAR10
import onnx
import onnxruntime
from onnxruntime.quantization import CalibrationDataReader, QuantType, QuantFormat, CalibrationMethod, quantize_static
import vai_q_onnx
class CIFAR10DataSet:
def __init__(
self,
data_dir,
**kwargs,
):
super().__init__()
self.train_path = data_dir
self.vld_path = data_dir
self.setup("fit")
def setup(self, stage: str):
transform = transforms.Compose(
[transforms.Pad(4), transforms.RandomHorizontalFlip(), transforms.RandomCrop(32), transforms.ToTensor()]
)
self.train_dataset = CIFAR10(root=self.train_path, train=True, transform=transform, download=False)
self.val_dataset = CIFAR10(root=self.vld_path, train=True, transform=transform, download=False)
class PytorchResNetDataset(Dataset):
def __init__(self, dataset):
self.dataset = dataset
def __len__(self):
return len(self.dataset)
def __getitem__(self, index):
sample = self.dataset[index]
input_data = sample[0]
label = sample[1]
return input_data, label
def create_dataloader(data_dir, batch_size):
cifar10_dataset = CIFAR10DataSet(data_dir)
_, val_set = torch.utils.data.random_split(cifar10_dataset.val_dataset, [49000, 1000])
benchmark_dataloader = DataLoader(PytorchResNetDataset(val_set), batch_size=batch_size, drop_last=True)
return benchmark_dataloader
class ResnetCalibrationDataReader(CalibrationDataReader):
def __init__(self, data_dir: str, batch_size: int = 16):
super().__init__()
self.iterator = iter(create_dataloader(data_dir, batch_size))
def get_next(self) -> dict:
try:
images, labels = next(self.iterator)
return {"input": images.numpy()}
        except StopIteration:
return None
def METHOD_NAME(data_dir, batch_size=16):
return ResnetCalibrationDataReader(data_dir, batch_size=batch_size)
def main():
# `input_model_path` is the path to the original, unquantized ONNX model.
input_model_path = "models/resnet_trained_for_cifar10.onnx"
# `output_model_path` is the path where the quantized model will be saved.
output_model_path = "models/resnet.qdq.U8S8.onnx"
# `calibration_dataset_path` is the path to the dataset used for calibration during quantization.
calibration_dataset_path = "data/"
    # `dr` (Data Reader) is an instance of ResnetCalibrationDataReader, which is a utility class that
# reads the calibration dataset and prepares it for the quantization process.
dr = METHOD_NAME(calibration_dataset_path)
# `quantize_static` is a function that applies static quantization to the model.
# The parameters of this function are:
# - `input_model_path`: the path to the original, unquantized model.
# - `output_model_path`: the path where the quantized model will be saved.
# - `dr`: an instance of a data reader utility, which provides data for model calibration.
# - `quant_format`: the format of quantization operators. Need to set to QDQ or QOperator.
# - `activation_type`: the data type of activation tensors after quantization. In this case, it's QUInt8 (Quantized Unsigned Int 8).
# - `weight_type`: the data type of weight tensors after quantization. In this case, it's QInt8 (Quantized Int 8).
vai_q_onnx.quantize_static(
input_model_path,
output_model_path,
dr,
quant_format=QuantFormat.QDQ,
calibrate_method=vai_q_onnx.PowerOfTwoMethod.MinMSE,
activation_type=QuantType.QUInt8,
weight_type=QuantType.QInt8,
)
print('Calibrated and quantized model saved at:', output_model_path)
if __name__ == '__main__':
main()
|
2,372 |
merge candidates
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Encode using wordpiece models.
Implements the segmentation algorithm described in the last paragraph of
p. 5150, in the following publication:
M. Schuster and K. Nakajima, "Japanese and Korean voice
search," 2012 IEEE International Conference on Acoustics,
Speech and Signal Processing, 2012
https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/37842.pdf
"""
import lingvo.compat as tf
from lingvo.core import ops
from lingvo.core import py_utils
import six
# Must be a large ID.
NO_TOKEN = 1 << 31 - 1
NO_TOKEN_STRING = '<unk>'
SENTENCE_START_STRING = '<s>'
SENTENCE_END_STRING = '</s>'
BOW_STR = '▁'
class WpmEncoder:
"""WPM encoder."""
def __init__(self, wpm_filepath, merge_prob=1.):
"""Create a WPM encoder.
Args:
wpm_filepath: a path to the file containing the vocabulary.
merge_prob: the probability of merging tokens while encoding.
"""
# Load vocabulary file.
lines = py_utils.ReadFileLines(wpm_filepath)
self._pieces = []
for line in lines:
if isinstance(line, bytes):
line = six.ensure_text(line, 'utf-8')
piece = line.strip().split('\t')[0]
self._pieces.append(piece)
self._merge_prob = merge_prob
def _TokenToString(self, token):
return ops.vocab_id_to_token(token, vocab=self._pieces)
def _StringToToken(self, tokstr):
return tf.where(
ops.token_in_vocab(tokstr, vocab=self._pieces),
ops.vocab_token_to_id(tokstr, vocab=self._pieces),
tf.broadcast_to(NO_TOKEN, tf.shape(tokstr)))
def _MergeTokens(self, tokens):
return self._StringToToken(
self._TokenToString(tokens[0]) + self._TokenToString(tokens[1]))
def _EncodeToIds(self, word):
# Below:
# * a token is a wordpiece ID.
# * the tokens array will be merged in-place.
# * the candidates array is an array of size len(tokens) - 1.
# It contains the token for the merged wordpiece, if it exists,
# -1 otherwise. For instance, candidate[3] = id(token[3] + token[4]).
# First, split into basic UTF-8 characters (letters).
chars = tf.strings.unicode_split(word, 'UTF-8')
tokens = self._StringToToken(chars)
tokens = tf.where(
tf.equal(tokens, NO_TOKEN),
# Unseen character.
tf.broadcast_to(self.unk_id, tf.shape(tokens)),
tokens)
# Create initial candidate list.
candidates = tf.map_fn(
self._MergeTokens, (tokens[:-1], tokens[1:]), dtype=tokens.dtype)
def _ShouldMerge(unused_tokens, candidates):
"""Merge until not possible, or we abort early according to merge_prob."""
return tf.math.logical_and(
tf.reduce_any(tf.not_equal(candidates, NO_TOKEN)),
tf.random.uniform([]) < self._merge_prob)
def _MergeOneToken(tokens, i):
return tf.expand_dims(
self._MergeTokens((tokens[i], tokens[i + 1])), axis=-1)
def METHOD_NAME(tokens, candidates):
"""Merge in the reverse binary tree."""
best_id = tf.argmin(candidates, output_type=tf.int32)
# Perform the merge at position best_id.
tokens = tf.concat(
[tokens[:best_id], [candidates[best_id]], tokens[best_id + 2:]],
axis=0)
# Recompute the merge candidates.
# Only the neighbors of best_id need to be recomputed.
empty = tf.zeros([0], dtype=candidates.dtype)
def _MergeLeft():
return tf.concat(
[candidates[:best_id - 1],
_MergeOneToken(tokens, best_id - 1)],
axis=0)
left_candidates = tf.cond(tf.equal(best_id, 0), lambda: empty, _MergeLeft)
def _MergeRight():
return tf.concat(
[_MergeOneToken(tokens, best_id), candidates[best_id + 2:]], axis=0)
right_candidates = tf.cond(
tf.greater_equal(best_id,
tf.size(tokens) - 1), lambda: empty, _MergeRight)
candidates = tf.concat([left_candidates, right_candidates], axis=0)
return tokens, candidates
return tf.while_loop(
_ShouldMerge,
METHOD_NAME, (tokens, candidates),
parallel_iterations=1,
back_prop=False)[0]
def Encode(self, text):
"""Converts string `text` to integer ids and the encoded string.
Encoding includes prefixing the beginning-of-word token to each word.
Returns:
(ids, tokens) where ids is the encoded integer ids and tokens is the
encoded string.
"""
words = tf.sparse.to_dense(tf.strings.split([text]), default_value='')[0]
num_words = tf.size(words)
ids_ta = tf.TensorArray(tf.int32, 0, dynamic_size=True)
def _WordsToIds(i, words, ids_ta):
encoded_ids = self._EncodeToIds(BOW_STR + words[i])
ids_ta = ids_ta.scatter(
tf.range(ids_ta.size(),
ids_ta.size() + tf.size(encoded_ids)), encoded_ids)
return i + 1, words, ids_ta
_, _, ids_ta = tf.while_loop(
lambda i, *_: i < num_words,
_WordsToIds,
loop_vars=(tf.constant(0, tf.int32), words, ids_ta),
parallel_iterations=30,
back_prop=False)
ids = ids_ta.stack()
return ids, self._TokenToString(ids)
def Decode(self, ids):
txt = tf.strings.reduce_join(self._TokenToString(ids))
txt = tf.strings.regex_replace(txt, BOW_STR, ' ')
# Note that this strips spaces from the end of the input as well.
# We assume no inputs rely on the existence of trailing whitespace.
txt = tf.strings.strip(txt)
return txt
@property
def sentence_start_id(self):
return self._pieces.index(SENTENCE_START_STRING)
@property
def sentence_start_string(self):
return SENTENCE_START_STRING
@property
def sentence_end_id(self):
return self._pieces.index(SENTENCE_END_STRING)
@property
def sentence_end_string(self):
return SENTENCE_END_STRING
@property
def unk_id(self):
return self._pieces.index(NO_TOKEN_STRING)
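# Illustrative usage sketch (the vocabulary path is a placeholder, not part of this module):
#   encoder = WpmEncoder('/path/to/wpm_vocab.txt', merge_prob=1.)
#   ids, pieces = encoder.Encode(tf.constant('hello world'))
#   text = encoder.Decode(ids)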
|
2,373 |
test is ipaddress false
|
from __future__ import annotations
import ssl
import typing
from unittest import mock
import pytest
from urllib3.exceptions import ProxySchemeUnsupported, SSLError
from urllib3.util import ssl_
class TestSSL:
@pytest.mark.parametrize(
"addr",
[
# IPv6
"::1",
"::",
"FE80::8939:7684:D84b:a5A4%251",
# IPv4
"127.0.0.1",
"8.8.8.8",
b"127.0.0.1",
# IPv6 w/ Zone IDs
"FE80::8939:7684:D84b:a5A4%251",
b"FE80::8939:7684:D84b:a5A4%251",
"FE80::8939:7684:D84b:a5A4%19",
b"FE80::8939:7684:D84b:a5A4%19",
],
)
def test_is_ipaddress_true(self, addr: bytes | str) -> None:
assert ssl_.is_ipaddress(addr)
@pytest.mark.parametrize(
"addr",
[
"www.python.org",
b"www.python.org",
"v2.sg.media-imdb.com",
b"v2.sg.media-imdb.com",
],
)
def METHOD_NAME(self, addr: bytes | str) -> None:
assert not ssl_.is_ipaddress(addr)
def test_create_urllib3_context_set_ciphers(
self, monkeypatch: pytest.MonkeyPatch
) -> None:
ciphers = "ECDH+AESGCM:ECDH+CHACHA20"
context = mock.create_autospec(ssl_.SSLContext)
context.set_ciphers = mock.Mock()
context.options = 0
monkeypatch.setattr(ssl_, "SSLContext", lambda *_, **__: context)
assert ssl_.create_urllib3_context(ciphers=ciphers) is context
assert context.set_ciphers.call_count == 1
assert context.set_ciphers.call_args == mock.call(ciphers)
def test_create_urllib3_no_context(self) -> None:
with mock.patch("urllib3.util.ssl_.SSLContext", None):
with pytest.raises(TypeError):
ssl_.create_urllib3_context()
def test_wrap_socket_given_context_no_load_default_certs(self) -> None:
context = mock.create_autospec(ssl_.SSLContext)
context.load_default_certs = mock.Mock()
sock = mock.Mock()
ssl_.ssl_wrap_socket(sock, ssl_context=context)
context.load_default_certs.assert_not_called()
def test_wrap_socket_given_ca_certs_no_load_default_certs(
self, monkeypatch: pytest.MonkeyPatch
) -> None:
context = mock.create_autospec(ssl_.SSLContext)
context.load_default_certs = mock.Mock()
context.options = 0
monkeypatch.setattr(ssl_, "SSLContext", lambda *_, **__: context)
sock = mock.Mock()
ssl_.ssl_wrap_socket(sock, ca_certs="/tmp/fake-file")
context.load_default_certs.assert_not_called()
context.load_verify_locations.assert_called_with("/tmp/fake-file", None, None)
def test_wrap_socket_default_loads_default_certs(
self, monkeypatch: pytest.MonkeyPatch
) -> None:
context = mock.create_autospec(ssl_.SSLContext)
context.load_default_certs = mock.Mock()
context.options = 0
monkeypatch.setattr(ssl_, "SSLContext", lambda *_, **__: context)
sock = mock.Mock()
ssl_.ssl_wrap_socket(sock)
context.load_default_certs.assert_called_with()
def test_wrap_socket_no_ssltransport(self) -> None:
with mock.patch("urllib3.util.ssl_.SSLTransport", None):
with pytest.raises(ProxySchemeUnsupported):
sock = mock.Mock()
ssl_.ssl_wrap_socket(sock, tls_in_tls=True)
@pytest.mark.parametrize(
["pha", "expected_pha"], [(None, None), (False, True), (True, True)]
)
def test_create_urllib3_context_pha(
self,
monkeypatch: pytest.MonkeyPatch,
pha: bool | None,
expected_pha: bool | None,
) -> None:
context = mock.create_autospec(ssl_.SSLContext)
context.set_ciphers = mock.Mock()
context.options = 0
context.post_handshake_auth = pha
monkeypatch.setattr(ssl_, "SSLContext", lambda *_, **__: context)
assert ssl_.create_urllib3_context() is context
assert context.post_handshake_auth == expected_pha
def test_create_urllib3_context_default_ciphers(
self, monkeypatch: pytest.MonkeyPatch
) -> None:
context = mock.create_autospec(ssl_.SSLContext)
context.set_ciphers = mock.Mock()
context.options = 0
monkeypatch.setattr(ssl_, "SSLContext", lambda *_, **__: context)
ssl_.create_urllib3_context()
context.set_ciphers.assert_not_called()
@pytest.mark.parametrize(
"kwargs",
[
{
"ssl_version": ssl.PROTOCOL_TLSv1,
"ssl_minimum_version": ssl.TLSVersion.MINIMUM_SUPPORTED,
},
{
"ssl_version": ssl.PROTOCOL_TLSv1,
"ssl_maximum_version": ssl.TLSVersion.TLSv1,
},
{
"ssl_version": ssl.PROTOCOL_TLSv1,
"ssl_minimum_version": ssl.TLSVersion.MINIMUM_SUPPORTED,
"ssl_maximum_version": ssl.TLSVersion.MAXIMUM_SUPPORTED,
},
],
)
def test_create_urllib3_context_ssl_version_and_ssl_min_max_version_errors(
self, kwargs: dict[str, typing.Any]
) -> None:
with pytest.raises(ValueError) as e:
ssl_.create_urllib3_context(**kwargs)
assert str(e.value) == (
"Can't specify both 'ssl_version' and either 'ssl_minimum_version' or 'ssl_maximum_version'"
)
@pytest.mark.parametrize(
"kwargs",
[
{
"ssl_version": ssl.PROTOCOL_TLS,
"ssl_minimum_version": ssl.TLSVersion.MINIMUM_SUPPORTED,
},
{
"ssl_version": ssl.PROTOCOL_TLS_CLIENT,
"ssl_minimum_version": ssl.TLSVersion.MINIMUM_SUPPORTED,
},
{
"ssl_version": None,
"ssl_minimum_version": ssl.TLSVersion.MINIMUM_SUPPORTED,
},
],
)
def test_create_urllib3_context_ssl_version_and_ssl_min_max_version_no_warning(
self, kwargs: dict[str, typing.Any]
) -> None:
ssl_.create_urllib3_context(**kwargs)
@pytest.mark.parametrize(
"kwargs",
[
{"ssl_version": ssl.PROTOCOL_TLSv1, "ssl_minimum_version": None},
{"ssl_version": ssl.PROTOCOL_TLSv1, "ssl_maximum_version": None},
{
"ssl_version": ssl.PROTOCOL_TLSv1,
"ssl_minimum_version": None,
"ssl_maximum_version": None,
},
],
)
def test_create_urllib3_context_ssl_version_and_ssl_min_max_version_no_error(
self, kwargs: dict[str, typing.Any]
) -> None:
with pytest.warns(
DeprecationWarning,
match=r"'ssl_version' option is deprecated and will be removed in "
r"urllib3 v2\.1\.0\. Instead use 'ssl_minimum_version'",
):
ssl_.create_urllib3_context(**kwargs)
def test_assert_fingerprint_raises_exception_on_none_cert(self) -> None:
with pytest.raises(SSLError):
ssl_.assert_fingerprint(
cert=None, fingerprint="55:39:BF:70:05:12:43:FA:1F:D1:BF:4E:E8:1B:07:1D"
)
|
2,374 |
type
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetVideoAnalyzerResult',
'AwaitableGetVideoAnalyzerResult',
'get_video_analyzer',
'get_video_analyzer_output',
]
@pulumi.output_type
class GetVideoAnalyzerResult:
"""
A Video Analyzer account.
"""
def __init__(__self__, encryption=None, endpoints=None, id=None, identity=None, location=None, name=None, storage_accounts=None, system_data=None, tags=None, METHOD_NAME=None):
if encryption and not isinstance(encryption, dict):
raise TypeError("Expected argument 'encryption' to be a dict")
pulumi.set(__self__, "encryption", encryption)
if endpoints and not isinstance(endpoints, list):
raise TypeError("Expected argument 'endpoints' to be a list")
pulumi.set(__self__, "endpoints", endpoints)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if identity and not isinstance(identity, dict):
raise TypeError("Expected argument 'identity' to be a dict")
pulumi.set(__self__, "identity", identity)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if storage_accounts and not isinstance(storage_accounts, list):
raise TypeError("Expected argument 'storage_accounts' to be a list")
pulumi.set(__self__, "storage_accounts", storage_accounts)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", METHOD_NAME)
@property
@pulumi.getter
def encryption(self) -> 'outputs.AccountEncryptionResponse':
"""
The account encryption properties.
"""
return pulumi.get(self, "encryption")
@property
@pulumi.getter
def endpoints(self) -> Sequence['outputs.EndpointResponse']:
"""
The list of endpoints associated with this resource.
"""
return pulumi.get(self, "endpoints")
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def identity(self) -> Optional['outputs.VideoAnalyzerIdentityResponse']:
"""
The set of managed identities associated with the Video Analyzer resource.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter
def location(self) -> str:
"""
The geo-location where the resource lives
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="storageAccounts")
def storage_accounts(self) -> Sequence['outputs.StorageAccountResponse']:
"""
The storage accounts for this resource.
"""
return pulumi.get(self, "storage_accounts")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
The system data of the Video Analyzer account.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetVideoAnalyzerResult(GetVideoAnalyzerResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetVideoAnalyzerResult(
encryption=self.encryption,
endpoints=self.endpoints,
id=self.id,
identity=self.identity,
location=self.location,
name=self.name,
storage_accounts=self.storage_accounts,
system_data=self.system_data,
tags=self.tags,
METHOD_NAME=self.METHOD_NAME)
def get_video_analyzer(account_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVideoAnalyzerResult:
"""
Get the details of the specified Video Analyzer account
:param str account_name: The Video Analyzer account name.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
"""
__args__ = dict()
__args__['accountName'] = account_name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:videoanalyzer/v20210501preview:getVideoAnalyzer', __args__, opts=opts, typ=GetVideoAnalyzerResult).value
return AwaitableGetVideoAnalyzerResult(
encryption=pulumi.get(__ret__, 'encryption'),
endpoints=pulumi.get(__ret__, 'endpoints'),
id=pulumi.get(__ret__, 'id'),
identity=pulumi.get(__ret__, 'identity'),
location=pulumi.get(__ret__, 'location'),
name=pulumi.get(__ret__, 'name'),
storage_accounts=pulumi.get(__ret__, 'storage_accounts'),
system_data=pulumi.get(__ret__, 'system_data'),
tags=pulumi.get(__ret__, 'tags'),
METHOD_NAME=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_video_analyzer)
def get_video_analyzer_output(account_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetVideoAnalyzerResult]:
"""
Get the details of the specified Video Analyzer account
:param str account_name: The Video Analyzer account name.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
"""
...
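# Illustrative usage sketch (the account and resource group names are placeholders):
#   result = get_video_analyzer(account_name="myAnalyzer", resource_group_name="my-rg")
#   pulumi.export("videoAnalyzerId", result.id)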
|
2,375 |
parse simple
|
from __future__ import annotations
import contextlib
import os
import re
import urllib.parse
from pathlib import Path
from typing import TYPE_CHECKING
from typing import Dict
from typing import List
from typing import TypeVar
from typing import Union
from typing import cast
from poetry.core.packages.dependency import Dependency
from tomlkit.items import InlineTable
from poetry.packages.direct_origin import DirectOrigin
if TYPE_CHECKING:
from poetry.core.packages.vcs_dependency import VCSDependency
from poetry.utils.cache import ArtifactCache
from poetry.utils.env import Env
DependencySpec = Dict[str, Union[str, bool, Dict[str, Union[str, bool]], List[str]]]
BaseSpec = TypeVar("BaseSpec", DependencySpec, InlineTable)
GIT_URL_SCHEMES = {"git+http", "git+https", "git+ssh"}
def dependency_to_specification(
dependency: Dependency, specification: BaseSpec
) -> BaseSpec:
if dependency.is_vcs():
dependency = cast("VCSDependency", dependency)
assert dependency.source_url is not None
specification[dependency.vcs] = dependency.source_url
if dependency.reference:
specification["rev"] = dependency.reference
elif dependency.is_file() or dependency.is_directory():
assert dependency.source_url is not None
specification["path"] = dependency.source_url
elif dependency.is_url():
assert dependency.source_url is not None
specification["url"] = dependency.source_url
elif dependency.pretty_constraint != "*" and not dependency.constraint.is_empty():
specification["version"] = dependency.pretty_constraint
if not dependency.marker.is_any():
specification["markers"] = str(dependency.marker)
if dependency.extras:
specification["extras"] = sorted(dependency.extras)
return specification
class RequirementsParser:
def __init__(
self,
*,
artifact_cache: ArtifactCache,
env: Env | None = None,
cwd: Path | None = None,
) -> None:
self._direct_origin = DirectOrigin(artifact_cache)
self._env = env
self._cwd = cwd or Path.cwd()
def parse(self, requirement: str) -> DependencySpec:
requirement = requirement.strip()
specification = self._parse_pep508(requirement)
if specification is not None:
return specification
extras = []
extras_m = re.search(r"\[([\w\d,-_ ]+)\]$", requirement)
if extras_m:
extras = [e.strip() for e in extras_m.group(1).split(",")]
requirement, _ = requirement.split("[")
specification = (
self._parse_url(requirement)
or self._parse_path(requirement)
or self.METHOD_NAME(requirement)
)
if specification:
if extras and "extras" not in specification:
specification["extras"] = extras
return specification
raise ValueError(f"Invalid dependency specification: {requirement}")
def _parse_pep508(self, requirement: str) -> DependencySpec | None:
if " ; " not in requirement and re.search(r"@[\^~!=<>\d]", requirement):
# this is of the form package@<semver>, do not attempt to parse it
return None
with contextlib.suppress(ValueError):
dependency = Dependency.create_from_pep_508(requirement)
specification: DependencySpec = {}
specification = dependency_to_specification(dependency, specification)
if specification:
specification["name"] = dependency.name
return specification
return None
def _parse_git_url(self, requirement: str) -> DependencySpec | None:
from poetry.core.vcs.git import Git
from poetry.core.vcs.git import ParsedUrl
parsed = ParsedUrl.parse(requirement)
url = Git.normalize_url(requirement)
pair = {"name": parsed.name, "git": url.url}
if parsed.rev:
pair["rev"] = url.revision
if parsed.subdirectory:
pair["subdirectory"] = parsed.subdirectory
source_root = self._env.path.joinpath("src") if self._env else None
package = self._direct_origin.get_package_from_vcs(
"git",
url=url.url,
rev=pair.get("rev"),
subdirectory=parsed.subdirectory,
source_root=source_root,
)
pair["name"] = package.name
return pair
def _parse_url(self, requirement: str) -> DependencySpec | None:
url_parsed = urllib.parse.urlparse(requirement)
if not (url_parsed.scheme and url_parsed.netloc):
return None
if url_parsed.scheme in GIT_URL_SCHEMES:
return self._parse_git_url(requirement)
if url_parsed.scheme in ["http", "https"]:
package = self._direct_origin.get_package_from_url(requirement)
assert package.source_url is not None
return {"name": package.name, "url": package.source_url}
return None
def _parse_path(self, requirement: str) -> DependencySpec | None:
if (os.path.sep in requirement or "/" in requirement) and (
self._cwd.joinpath(requirement).exists()
or Path(requirement).expanduser().exists()
and Path(requirement).expanduser().is_absolute()
):
path = Path(requirement).expanduser()
is_absolute = path.is_absolute()
if not path.is_absolute():
path = self._cwd.joinpath(requirement)
if path.is_file():
package = self._direct_origin.get_package_from_file(path.resolve())
else:
package = self._direct_origin.get_package_from_directory(path.resolve())
return {
"name": package.name,
"path": (
path.relative_to(self._cwd).as_posix()
if not is_absolute
else path.as_posix()
),
}
return None
def METHOD_NAME(
self,
requirement: str,
) -> DependencySpec | None:
extras: list[str] = []
pair = re.sub(
"^([^@=: ]+)(?:@|==|(?<![<>~!])=|:| )(.*)$", "\\1 \\2", requirement
)
pair = pair.strip()
require: DependencySpec = {}
if " " in pair:
name, version = pair.split(" ", 1)
extras_m = re.search(r"\[([\w\d,-_]+)\]$", name)
if extras_m:
extras = [e.strip() for e in extras_m.group(1).split(",")]
name, _ = name.split("[")
require["name"] = name
if version != "latest":
require["version"] = version
else:
m = re.match(
r"^([^><=!: ]+)((?:>=|<=|>|<|!=|~=|~|\^).*)$", requirement.strip()
)
if m:
name, constraint = m.group(1), m.group(2)
extras_m = re.search(r"\[([\w\d,-_]+)\]$", name)
if extras_m:
extras = [e.strip() for e in extras_m.group(1).split(",")]
name, _ = name.split("[")
require["name"] = name
require["version"] = constraint
else:
extras_m = re.search(r"\[([\w\d,-_]+)\]$", pair)
if extras_m:
extras = [e.strip() for e in extras_m.group(1).split(",")]
pair, _ = pair.split("[")
require["name"] = pair
if extras:
require["extras"] = extras
return require
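# Illustrative examples of the specs this simple parser produces (the inputs are made up
# for illustration, not taken from this module):
#   "foo 1.2.3"  -> {"name": "foo", "version": "1.2.3"}
#   "foo>=1.0"   -> {"name": "foo", "version": ">=1.0"}
#   "foo[bar]"   -> {"name": "foo", "extras": ["bar"]}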
|
2,376 |
test traffic violation max statute
|
"""
Rules for 800 Level charges are as follows:
800 level misdemeanors and felonies are eligible only if the case was dismissed
800 level cases of any kind that are convicted are not eligible
800 level infractions do not block other cases
800 level misdemeanor and felony convictions do block
800 level misdemeanor and felony arrests block like other arrests
800 level convictions of any kind are not type eligible
"""
from expungeservice.models.charge_types.dismissed_charge import DismissedCharge
from expungeservice.models.disposition import DispositionCreator
from expungeservice.models.expungement_result import EligibilityStatus
from expungeservice.models.charge_types.traffic_violation import TrafficViolation
from expungeservice.models.charge_types.duii import DivertedDuii
from tests.factories.charge_factory import ChargeFactory
from tests.models.test_charge import Dispositions
# TODO: we can separate these three types to different test files too.
def test_traffic_violation_min_statute():
charge = ChargeFactory.create(statute="801.000", level="Violation")
assert isinstance(charge.charge_type, TrafficViolation)
def METHOD_NAME():
charge = ChargeFactory.create(statute="825.999", level="Violation")
assert isinstance(charge.charge_type, TrafficViolation)
def test_convicted_violation_is_not_type_eligible():
charge = ChargeFactory.create(
statute="801.000", level="Class C Traffic Violation", disposition=Dispositions.CONVICTED
)
assert isinstance(charge.charge_type, TrafficViolation)
assert charge.type_eligibility.status is EligibilityStatus.INELIGIBLE
assert charge.type_eligibility.reason == "Ineligible under 137.225(7)(a)"
assert not charge.charge_type.blocks_other_charges
def test_dismissed_violation_is_not_type_eligible():
charge = ChargeFactory.create(
statute="801.000", level="Class C Traffic Violation", disposition=Dispositions.DISMISSED
)
assert isinstance(charge.charge_type, TrafficViolation)
assert charge.type_eligibility.status is EligibilityStatus.NEEDS_MORE_ANALYSIS
assert (
charge.type_eligibility.reason
== "Dismissed violations are eligible under 137.225(1)(b) but administrative reasons may make this difficult to expunge."
)
assert not charge.charge_type.blocks_other_charges
def test_convicted_infraction_is_not_type_eligible():
charge = ChargeFactory.create(statute="811135", level="Infraction Class B", disposition=Dispositions.CONVICTED)
assert isinstance(charge.charge_type, TrafficViolation)
assert charge.type_eligibility.status is EligibilityStatus.INELIGIBLE
assert charge.type_eligibility.reason == "Ineligible under 137.225(7)(a)"
assert not charge.charge_type.blocks_other_charges
def test_dismissed_infraction_is_not_type_eligible():
charge = ChargeFactory.create(statute="811135", level="Infraction Class B", disposition=Dispositions.DISMISSED)
assert isinstance(charge.charge_type, TrafficViolation)
assert charge.type_eligibility.status is EligibilityStatus.NEEDS_MORE_ANALYSIS
assert (
charge.type_eligibility.reason
== "Dismissed violations are eligible under 137.225(1)(b) but administrative reasons may make this difficult to expunge."
)
assert not charge.charge_type.blocks_other_charges
def test_traffic_infraction_without_statute():
charge = ChargeFactory.create(level="Infraction Class B")
assert isinstance(charge.charge_type, TrafficViolation)
def test_old_traffic_statute():
charge = ChargeFactory.create(
statute="483050", name="Defective Equipment", level="Infraction Class B", disposition=Dispositions.DISMISSED
)
assert isinstance(charge.charge_type, TrafficViolation)
assert charge.type_eligibility.status is EligibilityStatus.NEEDS_MORE_ANALYSIS
assert (
charge.type_eligibility.reason
== "Dismissed violations are eligible under 137.225(1)(b) but administrative reasons may make this difficult to expunge."
)
assert not charge.charge_type.blocks_other_charges
"""
800 level misdemeanors and felonies are eligible only if the case was dismissed
"""
def test_misdemeanor_conviction_is_not_eligible():
charge = ChargeFactory.create(statute="814.010(4)", level="Misdemeanor Class A", disposition=Dispositions.CONVICTED)
assert charge.type_eligibility.status is EligibilityStatus.INELIGIBLE
assert charge.type_eligibility.reason == "Ineligible under 137.225(7)(a)"
assert charge.charge_type.blocks_other_charges
def test_misdemeanor_dismissal_is_eligible():
charge = ChargeFactory.create(statute="814.010(4)", level="Misdemeanor Class A", disposition=Dispositions.DISMISSED)
assert charge.type_eligibility.status is EligibilityStatus.ELIGIBLE
assert charge.type_eligibility.reason == "Dismissals are generally eligible under 137.225(1)(d)"
assert charge.charge_type.blocks_other_charges
def test_felony_conviction_is_not_eligible():
charge = ChargeFactory.create(statute="819.300", level="Felony Class C", disposition=Dispositions.CONVICTED)
assert charge.type_eligibility.status is EligibilityStatus.INELIGIBLE
assert charge.type_eligibility.reason == "Ineligible under 137.225(7)(a)"
assert charge.charge_type.blocks_other_charges
def test_felony_dismissal_is_eligible():
charge = ChargeFactory.create(statute="819.300", level="Felony Class C", disposition=Dispositions.DISMISSED)
assert charge.type_eligibility.status is EligibilityStatus.ELIGIBLE
assert charge.type_eligibility.reason == "Dismissals are generally eligible under 137.225(1)(d)"
assert charge.charge_type.blocks_other_charges
def test_duii():
charges = ChargeFactory.create_ambiguous_charge(statute="813.010", disposition=Dispositions.DISMISSED)
assert isinstance(charges[0].charge_type, DivertedDuii)
assert isinstance(charges[1].charge_type, DismissedCharge)
def test_pedestrian_jwalking():
charge = ChargeFactory.create(name="Pedestrian J-Walking", statute="1634020", level="Infraction Unclassified")
assert isinstance(charge.charge_type, TrafficViolation)
|
2,377 |
fieldvalue test1
|
import os
import sys
from shapeworks import *
import numpy as np
success = True
def METHOD_NAME():
dist = Mesh(os.environ["DATA"] + "/meshdistance2.vtk")
a = dist.getFieldValue("distance", 0)
b = dist.getFieldValue("distance", 1000)
c = dist.getFieldValue("distance", dist.numPoints()-1)
return abs(a - 0.375761) < 1e-4 and abs(b - 2.18114) < 1e-4 and abs(c - 6.915) < 1e-4
success &= utils.test(METHOD_NAME)
def fieldvalueTest2():
mesh = Mesh(os.environ["DATA"] + "/mesh1.vtk")
a = mesh.getFieldValue("scalars", 0)
b = mesh.getFieldValue("scalars", 1000)
c = mesh.getFieldValue("Normals", 4231) # returns first value of vector fields
d = mesh.getFieldValue("Normals", 5634)
return (abs(a - 0.35219) < 1e-4 and
abs(b - 0.46825) < 1e-4 and
abs(c - 0.85125) < 1e-4 and
abs(d - -0.47862) < 1e-4)
success &= utils.test(fieldvalueTest2)
def multifieldvalueTest():
mesh = Mesh(os.environ["DATA"] + "/mesh1.vtk")
a = mesh.getMultiFieldValue("Normals", 1024)
c = mesh.getMultiFieldValue("Normals", 768)
return (np.allclose(a, np.array([ 0.21653531, 0.34913558, -0.91171086])) and
np.allclose(c, np.array([ 0., 0., -1. ])))
success &= utils.test(multifieldvalueTest)
def fieldrangeTest():
mesh = Mesh(os.environ["DATA"] + "/mesh1.vtk")
scalarRange = range(mesh.getField("scalars", Mesh.Point))
return abs(scalarRange[0] - -4.21119) < 1e-4 and abs(scalarRange[1] - 4.52366) < 1e-4
success &= utils.test(fieldrangeTest)
def missingfieldTest():
mesh = Mesh(os.environ["DATA"] + "/ellipsoid_01.vtk")
field = range(mesh.getField("nonexistent_fieldname", Mesh.Point))
success &= utils.expectException(missingfieldTest, ValueError)
def getfieldTest1():
mesh = Mesh(os.environ["DATA"] + "/mesh1.vtk")
field = mesh.getField("scalars", Mesh.Point)
return abs(field[640] - -0.91761) < 1e-4 and abs(field[4800] - 0.56277) < 1e-4
success &= utils.test(getfieldTest1)
def getfieldTest2():
mesh = Mesh(os.environ["DATA"] + "/mesh1.vtk")
field = mesh.getField("scalars", Mesh.Point)
return field.shape == (36599,)
success &= utils.test(getfieldTest2)
def getfieldTest3():
mesh = Mesh(os.environ["DATA"] + "/ellipsoid_01.vtk")
field = mesh.getField("Normals", Mesh.Point)
return field.shape == (530, 3)
success &= utils.test(getfieldTest3)
def getfieldTest4():
mesh = Mesh(os.environ["DATA"] + "/ellipsoid_01.vtk")
field = mesh.getField("Normals", Mesh.Point)
return field.strides == (12, 4)
success &= utils.test(getfieldTest4)
def getfieldTest5():
mesh = Mesh(os.environ["DATA"] + "/ellipsoid_01.vtk")
field = mesh.getField("Normals", Mesh.Point)
return field.flags["OWNDATA"] == False
success &= utils.test(getfieldTest5)
def getfieldTest6():
mesh = Mesh(os.environ["DATA"] + "/ellipsoid_01.vtk")
field = mesh.getField("Normals", Mesh.Point)
return field.flags["C_CONTIGUOUS"] == True and field.flags["F_CONTIGUOUS"] == False
success &= utils.test(getfieldTest6)
def getfieldTest7():
mesh = Mesh(os.environ["DATA"] + "/mesh1.vtk")
field = mesh.getField("scalars", Mesh.Point)
return field[3456] == mesh.getFieldValue("scalars", 3456) # (it's 0.1789221419275439)
success &= utils.test(getfieldTest7)
def getfieldTest8():
mesh = Mesh(os.environ["DATA"] + "/mesh1.vtk")
field = mesh.getField("scalars", Mesh.Point)
origval = field[6543]
field[6543] = 42
return origval != mesh.getFieldValue("scalars", 6543) and mesh.getFieldValue("scalars", 6543) == 42
success &= utils.test(getfieldTest8)
def setfieldTest1():
mesh = Mesh(os.environ["DATA"] + "/ellipsoid_01.vtk")
field = mesh.getField("Normals", Mesh.Point)
mesh.setField("newfieldname", field, Mesh.Point) # python doesn't own field, so it can't be transferred
success &= utils.expectException(setfieldTest1, ValueError)
def setfieldTest2():
mesh = Mesh(os.environ["DATA"] + "/ellipsoid_01.vtk")
field = mesh.getField("Normals", Mesh.Point)
mesh.setField("newfieldname", field.copy(), Mesh.Point)
return "newfieldname" in mesh.getFieldNames()
success &= utils.test(setfieldTest2)
def setfieldTest3():
mesh = Mesh(os.environ["DATA"] + "/ellipsoid_01.vtk")
field = np.zeros(1000)
assert(field.flags["OWNDATA"] == True)
mesh.setField("newfieldname", field, Mesh.Point)
return field.flags["OWNDATA"] == False and "newfieldname" in mesh.getFieldNames()
success &= utils.test(setfieldTest3)
sys.exit(not success)
|
2,378 |
selected repositories
|
############################ Copyrights and license ############################
# #
# Copyright 2023 Mauricio Martinez <[email protected]> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from datetime import datetime
from typing import Any, Dict
from github.GithubObject import Attribute, NotSet
from github.PaginatedList import PaginatedList
from github.Repository import Repository
from github.Variable import Variable
class OrganizationVariable(Variable):
"""
    This class represents an org-level GitHub variable. The reference can be found here: https://docs.github.com/en/rest/actions/variables
"""
def _initAttributes(self) -> None:
self._name: Attribute[str] = NotSet
self._created_at: Attribute[datetime] = NotSet
self._updated_at: Attribute[datetime] = NotSet
self._visibility: Attribute[str] = NotSet
self._selected_repositories: Attribute[PaginatedList[Repository]] = NotSet
self._selected_repositories_url: Attribute[str] = NotSet
self._url: Attribute[str] = NotSet
@property
def visibility(self) -> str:
"""
:type: string
"""
self._completeIfNotSet(self._visibility)
return self._visibility.value
@property
def METHOD_NAME(self) -> PaginatedList[Repository]:
return PaginatedList(
Repository,
self._requester,
self._selected_repositories_url.value,
None,
list_item="repositories",
)
def edit(
self,
value: str,
visibility: str = "all",
) -> bool:
"""
:calls: `PATCH /orgs/{org}/actions/variables/{variable_name} <https://docs.github.com/en/rest/reference/actions/variables#update-an-organization-variable>`_
:param value: string
:param visibility: string
:rtype: bool
"""
assert isinstance(value, str), value
assert isinstance(visibility, str), visibility
patch_parameters: Dict[str, Any] = {
"name": self.name,
"value": value,
"visibility": visibility,
}
status, _, _ = self._requester.requestJson(
"PATCH",
f"{self.url}/actions/variables/{self.name}",
input=patch_parameters,
)
return status == 204
def add_repo(self, repo: Repository) -> bool:
"""
        :calls: `PUT {org_url}/actions/variables/{variable_name}/repositories/{repo_id} <https://docs.github.com/en/rest/actions/variables#add-selected-repository-to-an-organization-variable>`_
:param repo: github.Repository.Repository
:rtype: bool
"""
if self.visibility != "selected":
return False
self._requester.requestJsonAndCheck("PUT", f"{self._selected_repositories_url.value}/{repo.id}")
return True
def remove_repo(self, repo: Repository) -> bool:
"""
        :calls: `DELETE {org_url}/actions/variables/{variable_name}/repositories/{repo_id} <https://docs.github.com/en/rest/actions/variables#remove-selected-repository-from-an-organization-variable>`_
:param repo: github.Repository.Repository
:rtype: bool
"""
if self.visibility != "selected":
return False
self._requester.requestJsonAndCheck("DELETE", f"{self._selected_repositories_url.value}/{repo.id}")
return True
def _useAttributes(self, attributes: Dict[str, Any]) -> None:
if "name" in attributes:
self._name = self._makeStringAttribute(attributes["name"])
if "created_at" in attributes:
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "updated_at" in attributes:
self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
if "visibility" in attributes:
self._visibility = self._makeStringAttribute(attributes["visibility"])
if "selected_repositories_url" in attributes:
self._selected_repositories_url = self._makeStringAttribute(attributes["selected_repositories_url"])
if "url" in attributes:
self._url = self._makeStringAttribute(attributes["url"])
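# Illustrative usage sketch, assuming `var` is an OrganizationVariable and `repo` a
# github.Repository.Repository obtained elsewhere:
#   var.edit("new-value", visibility="selected")
#   var.add_repo(repo)      # only has an effect when visibility == "selected"
#   var.remove_repo(repo)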
|
2,379 |
save image
|
# This file is part of Trackma.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import threading
import urllib.request
from io import BytesIO
from gi.repository import GLib, GdkPixbuf, Gtk
from trackma import utils
try:
from PIL import Image
imaging_available = True
except ImportError:
imaging_available = False
class ImageThread(threading.Thread):
def __init__(self, url, filename, width, height, callback):
threading.Thread.__init__(self)
self._url = url
self._filename = filename
self._width = width
self._height = height
self._callback = callback
self._stop_request = threading.Event()
def run(self):
self.METHOD_NAME(self._download_file())
if self._stop_request.is_set():
return
if os.path.exists(self._filename):
GLib.idle_add(self._callback, self._filename)
def _download_file(self):
request = urllib.request.Request(self._url)
request.add_header(
"User-Agent", "TrackmaImage/{}".format(utils.VERSION))
return BytesIO(urllib.request.urlopen(request).read())
def METHOD_NAME(self, img_bytes):
if imaging_available:
image = Image.open(img_bytes)
image.thumbnail((self._width, self._height), Image.BICUBIC)
image.convert("RGB").save(self._filename)
else:
with open(self._filename, 'wb') as img_file:
img_file.write(img_bytes.read())
def stop(self):
self._stop_request.set()
class ImageBox(Gtk.HBox):
def __init__(self, width, height):
Gtk.HBox.__init__(self)
self._width = width
self._height = height
self._image = Gtk.Image()
self._image.set_size_request(width, height)
self._label_holder = Gtk.Label()
self._label_holder.set_size_request(width, height)
self._image_thread = None
if imaging_available:
self.pack_start(self._label_holder, False, False, 0)
self.pack_start(self._image, False, False, 0)
else:
self.pack_start(self._label_holder, False, False, 0)
self.reset()
def reset(self):
if imaging_available:
self.set_image(utils.DATADIR + '/icon.png')
else:
self.set_text("PIL library\nnot available")
def set_text(self, text):
self._label_holder.set_text(text)
self._label_holder.show()
self._image.hide()
def set_image(self, filename):
if not imaging_available:
return
pixbuf = GdkPixbuf.Pixbuf.new_from_file(filename)
width, height = scale(pixbuf.get_width(),
pixbuf.get_height(), self._width, self._height)
scaled_buf = pixbuf.scale_simple(
width, height, GdkPixbuf.InterpType.BILINEAR)
self._image.set_from_pixbuf(scaled_buf)
self._image.show()
self._label_holder.hide()
def set_image_remote(self, url, filename):
if not imaging_available:
return
if self._image_thread:
self._image_thread.stop()
self.set_text("Loading...")
self._image_thread = ImageThread(
url, filename, self._width, self._height, self.set_image)
self._image_thread.start()
def scale(w, h, x, y, maximum=True):
nw = y * w / h
nh = x * h / w
if maximum ^ (nw >= x):
return nw or 1, y
return x, nh or 1
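# Worked example (illustrative, not part of the original module): fitting a 1000x500
# image into a 300x200 box preserves the aspect ratio:
#   scale(1000, 500, 300, 200)                 -> (300, 150.0)   fits inside the box
#   scale(1000, 500, 300, 200, maximum=False)  -> (400.0, 200)   covers the box instead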
|
2,380 |
outputs format
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from functools import partial
from django.utils.translation import ugettext_lazy as _
from .execute_task_base import JobExecuteTaskServiceBase
from ..base import GetJobHistoryResultMixin, get_job_tagged_ip_dict_complex
from pipeline.component_framework.component import Component
from pipeline.core.flow.io import StringItemSchema
from gcloud.conf import settings
from gcloud.utils.handlers import handle_api_error
__group_name__ = _("作业平台(JOB)")
get_client_by_user = settings.ESB_GET_CLIENT_BY_USER
job_handle_api_error = partial(handle_api_error, __group_name__)
class JobExecuteTaskService(JobExecuteTaskServiceBase, GetJobHistoryResultMixin):
need_get_sops_var = True
need_is_tagged_ip = True
def METHOD_NAME(self):
return super().METHOD_NAME() + [
self.OutputItem(
name=_("JOB执行IP分组"),
key="job_tagged_ip_dict",
type="string",
schema=StringItemSchema(
description=_(
'按照执行结果将 IP 进行分组:1. 使用 job_tagged_ip_dict["value"]["SUCCESS"]["TAGS"]["ALL"] 获取「执行成功」的 IP, '
"ALL 代表所有 IP,可指定分组名获取特定分组的 IP ;"
'2. 使用 job_tagged_ip_dict["value"]["SCRIPT_NOT_ZERO_EXIT_CODE"]["TAGS"]["ALL"]'
" 获取「脚本返回值非零」的 IP"
)
),
),
]
def is_need_log_outputs_even_fail(self, data):
"""
Extracting output variables even when the job fails is enabled by default.
"""
return True
def check_ip_is_exist(self, data):
"""
IP existence is not validated by default.
"""
return False
def build_ip_list(self, biz_across, val, executor, biz_cc_id, data, ip_is_exist):
result, ip_list = self.get_target_server_hybrid(executor, biz_cc_id, data, val, logger_handle=self.logger)
if not result:
return {}
return ip_list
def get_tagged_ip_dict(self, data, parent_data, job_instance_id):
"""
Use the new-style IP tag grouping by default.
"""
result, tagged_ip_dict = get_job_tagged_ip_dict_complex(
data.outputs.client,
self.logger,
job_instance_id,
data.get_one_of_inputs("biz_cc_id", parent_data.inputs.biz_cc_id),
job_scope_type=self.biz_scope_type,
)
return result, tagged_ip_dict
def execute(self, data, parent_data):
job_success_id = data.get_one_of_inputs("job_success_id")
if not job_success_id:
return super().execute(data, parent_data)
history_result = self.get_job_history_result(data, parent_data)
self.logger.info(history_result)
if history_result:
self.__need_schedule__ = False
return history_result
class JobExecuteTaskComponent(Component):
name = _("执行作业")
code = "job_execute_task"
bound_service = JobExecuteTaskService
form = "%scomponents/atoms/job/execute_task/v1_2.js" % settings.STATIC_URL
output_form = "%scomponents/atoms/job/job_execute_task_output.js" % settings.STATIC_URL
version = "1.2"
desc = _(
"1.当用户选择JOB成功历史后,插件将不再创建新的JOB实例,直接继承JOB成功状态. \n"
"2.在接收到用户编辑的全局变量后,v1.0及v1.1会默认用英文双引号将默认变量值包裹起来,再将得到的字符串作为一个整体在调用API时进行传参。\n"
"如果不需要双引号包裹,可以使用legacy或v1.2版本插件,也可以手动在表格中去掉。\n"
"3. 去除IP存在性校验,默认开启新版IP tag分组, 默认开启失败时提取变量,job成功历史调整为只在重试时显示"
)
|
2,381 |
get node config
|
import h5py
import json
from nndct_shared.nndct_graph.base_tensor import Tensor
def save_graph(nndct_graph, hdf5_path='graph.hdf5'):
GraphHDF5Saver(nndct_graph).save(hdf5_path)
class GraphHDF5Saver():
def __init__(self, nndct_graph):
self.graph = nndct_graph
def METHOD_NAME(self, node):
node_info = dict()
node_info['idx'] = node.idx
node_info['name'] = node.name
node_info['dtype'] = str(node.dtype)
for idx, tensor in enumerate(node.in_tensors):
node_info['in_tensors{}.name'.format(idx)] = tensor.name
node_info['in_tensors{}.shape'.format(idx)] = tensor.shape
node_info['in_tensors{}.dtype'.format(idx)] = tensor.dtype
for idx, tensor in enumerate(node.out_tensors):
node_info['out_tensors{}.name'.format(idx)] = tensor.name
node_info['out_tensors{}.shape'.format(idx)] = tensor.shape
node_info['out_tensors{}.dtype'.format(idx)] = tensor.dtype
for attr_enum, attr in node.op.attrs.items():
if isinstance(attr.value, Tensor):
continue
elif isinstance(attr.value, (tuple, list)):
has_tensor = False
for val in attr.value:
if isinstance(val, Tensor):
has_tensor = True
break
if not has_tensor:
node_info['Attr.{}'.format(attr_enum.name)] = attr.value
else:
node_info['Attr.{}'.format(attr_enum.name)] = attr.value
return node_info
def get_model_config(self):
model_config = {'name': self.graph.name}
model_config['layers'] = list()
for node in self.graph.nodes:
node_info = dict()
node_info['class_name'] = node.op_type
node_info['name'] = node.name
node_info['inbound_nodes'] = [[[i, 0, 0, {}] for i in node.in_nodes]]
node_info['config'] = self.METHOD_NAME(node)
model_config['layers'].append(node_info)
return model_config
def save(self, hdf5_path):
config = self.get_model_config()
model_config = {'class_name': 'Functional', 'config': config}
metadata = dict(model_config=model_config)
f = h5py.File(hdf5_path, mode='w')
try:
for k, v in metadata.items():
if isinstance(v, (dict, list, tuple)):
f.attrs[k] = json.dumps(v).encode('utf8')
else:
f.attrs[k] = v
f.flush()
finally:
f.close()
class GraphConvertToCFG():
# Visualizing network structure with multiple inputs is not supported.
def __init__(self, nndct_graph, cfg_savepath='test.cfg'):
self.graph = nndct_graph
self.cfg_path = cfg_savepath
self.content = []
def read_node(self, node):
self.content.append('name={}'.format(node.name))
self.content.append('scope_name={}'.format(node.scope_name))
self.content.append('idx={}'.format(str(node.idx)))
self.content.append('dtype={}'.format(node.dtype))
self.content.append('in_nodes={}'.format(str(node.in_nodes)))
self.content.append('out_nodes={}'.format(str(node.out_nodes)))
for idx, tensor in enumerate(node.in_tensors):
self.content.append('in_tensors{}.name={}'.format(str(idx), tensor.name))
self.content.append('in_tensors{}.shape={}'.format(
str(idx), str(tensor.shape)))
self.content.append('in_tensors{}.dtype={}'.format(
str(idx), str(tensor.dtype)))
for idx, tensor in enumerate(node.out_tensors):
self.content.append('out_tensors{}.name={}'.format(str(idx), tensor.name))
self.content.append('out_tensors{}.shape={}'.format(
str(idx), str(tensor.shape)))
self.content.append('out_tensors{}.dtype={}'.format(
str(idx), str(tensor.dtype)))
for name, attr in node.op.attrs.items():
self.content.append('op_{}={}'.format(name, attr.value))
def convert(self):
# Traverse every node in graph sequentially once. And all input nodes of the current node must have been traversed before.
index = 0
nodename_index = {}
last_nodename = None
is_first_layer = True
for node in self.graph.nodes:
nodename = node.name
op_type = node.op.type
if is_first_layer:
self.content.append('height={}'.format(node.out_tensors[0].shape[1]))
self.content.append('width={}'.format(node.out_tensors[0].shape[2]))
self.content.append('channels={}'.format(node.out_tensors[0].shape[3]))
self.read_node(node)
is_first_layer = False
last_nodename = nodename
continue
num_innodes = len(node.in_nodes)
nodename_index[nodename] = index
index += 1
if num_innodes == 0:
self.content.append('[{}]'.format(op_type))
self.read_node(node)
elif num_innodes == 1:
in_nodename = node.in_nodes[0]
if in_nodename == last_nodename:
self.content.append('[{}]'.format(op_type))
self.read_node(node)
else:
self.content.append('[route]')
self.content.append('layers={}'.format(
str(nodename_index[in_nodename])))
nodename_index[nodename] = index
index += 1
self.content.append('[{}]'.format(op_type))
self.read_node(node)
else:
self.content.append('[route]')
str_layers = 'layers='
for i in range(len(node.in_nodes)):
if i == 0:
str_layers += str(nodename_index[node.in_nodes[i]])
else:
str_layers += ',' + str(nodename_index[node.in_nodes[i]])
self.content.append(str_layers)
self.content.append('op_type={}'.format(op_type))
self.read_node(node)
last_nodename = nodename
with open(self.cfg_path, 'w') as f:
f.write('[net]' + '\n')
f.writelines(line + '\n' for line in self.content)
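# Illustrative usage of this module (graph construction elided; `graph` is assumed to be an nndct graph instance):
#   save_graph(graph, 'graph.hdf5')                 # serialize node/tensor metadata to HDF5
#   GraphConvertToCFG(graph, 'net.cfg').convert()   # dump a darknet-style .cfg view of the graph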
|
2,382 |
setup model loss criterion
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import random
import unittest
from multiprocessing import Manager
import torch
import torch.nn as nn
from fairseq import distributed_utils, optim
from omegaconf import OmegaConf
class Model(nn.Module):
def __init__(self, input_size, output_size):
super(Model, self).__init__()
self.fc = nn.Linear(input_size, output_size)
def forward(self, input):
output = self.fc(input)
return output
def METHOD_NAME(cfg, args, rank, is_cuda):
"""
setup model, criterion and optimizer based on input args
"""
args.distributed_rank = rank
cfg.distributed_training.distributed_rank = args.distributed_rank
if cfg.distributed_training.distributed_world_size > 1:
distributed_utils.distributed_init(cfg)
torch.manual_seed(1)
model = Model(args.input_size, args.nb_classes)
loss_fn = nn.CrossEntropyLoss()
if is_cuda:
model = model.cuda()
loss_fn = loss_fn.cuda()
optimizer = optim.sgd.SGD(args, model.parameters())
optimizer = optim.FairseqBMUF(
cfg=cfg.bmuf,
optimizer=optimizer
)
return model, loss_fn, optimizer
def train_step(input, target, model, loss_fn, optimizer, **unused):
"""Do forward, backward and parameter update."""
model.train()
output = model(input)
loss = loss_fn(output, target)
optimizer.backward(loss)
optimizer.step()
def single_gpu_training(cfg, args, rank, iterations, shared_results):
is_cuda = torch.cuda.is_available()
if is_cuda:
torch.cuda.set_device(rank)
model, loss_fn, optimizer = METHOD_NAME(cfg, args, rank, is_cuda)
for _ in range(iterations):
input = torch.randn(1, args.input_size)
target = torch.empty(args.batch_size, dtype=torch.long).random_(args.nb_classes)
if is_cuda:
input = input.cuda()
target = target.cuda()
train_step(input, target, model, loss_fn, optimizer)
results = []
for param in model.parameters():
if len(results) == 0:
results = param.flatten().cpu().data
else:
results = torch.cat((results, param.flatten().cpu().data), 0)
shared_results[rank] = results
def setup_args():
args = argparse.Namespace()
args.global_sync_iter = 20
args.block_momentum = 0.875
args.block_lr = 0.5
args.input_size = 5
args.nb_classes = 2
args.batch_size = 1
args.lr = [1e-3]
args.momentum = 0
args.weight_decay = 0
args.warmup_iterations = 0
args.use_nbm = True
args.average_sync = True
args.global_sync_iter = 1
args.model_parallel_size = 1
args.distributed_backend = "gloo"
args.distributed_world_size = 2
port = random.randint(10000, 20000)
args.distributed_init_method = "tcp://localhost:{port}".format(port=port)
args.distributed_init_host = "localhost"
args.distributed_port = port + 1
args.local_world_size = args.distributed_world_size
cfg = OmegaConf.create()
cfg.optimization = OmegaConf.create()
cfg.common = OmegaConf.create()
cfg.distributed_training = OmegaConf.create()
cfg.dataset = OmegaConf.create()
cfg.bmuf = OmegaConf.create()
cfg.optimizer = OmegaConf.create()
cfg.bmuf.global_sync_iter = args.global_sync_iter
cfg.bmuf.block_momentum = args.block_momentum
cfg.bmuf.block_lr = args.block_lr
cfg.dataset.batch_size = args.batch_size
cfg.optimization.lr = args.lr
cfg.optimizer.momentum = args.momentum
cfg.optimizer.weight_decay = args.weight_decay
cfg.bmuf.warmup_iterations = args.warmup_iterations
cfg.bmuf.use_nbm = args.use_nbm
cfg.bmuf.average_sync = args.average_sync
cfg.common.model_parallel_size = args.model_parallel_size
cfg.distributed_training.distributed_backend = args.distributed_backend
cfg.distributed_training.distributed_world_size = args.distributed_world_size
cfg.bmuf.distributed_world_size = args.distributed_world_size
cfg.distributed_training.distributed_init_method = args.distributed_init_method
cfg.distributed_training.distributed_port = args.distributed_port
return cfg, args
@unittest.skipIf(torch.cuda.device_count() < 2, "test requires 2 GPUs")
class TestBMUF(unittest.TestCase):
def bmuf_process(self, cfg, args, iterations):
processes = []
results = Manager().dict()
ctx = torch.multiprocessing.get_context("spawn")
for rank in range(args.distributed_world_size):
p = ctx.Process(
target=single_gpu_training, args=(cfg, args, rank, iterations, results)
)
p.start()
processes.append(p)
for p in processes:
p.join()
return results
def test_bmuf_sync(self):
# Train model for 1 iteration and do bmuf sync without doing warmup
cfg, args = setup_args()
iterations = 1
results = self.bmuf_process(cfg, args, iterations)
# Make sure params in both machines are same
assert len(results) == 2
self.assertAlmostEqual(results[0], results[1])
def test_warmup_sync(self):
# Train model for 20 iteration and do warmup sync without doing bmuf sync
cfg, args = setup_args()
args.warmup_iterations = 20
cfg.bmuf.warmup_iterations = args.warmup_iterations
iterations = 20
results = self.bmuf_process(cfg, args, iterations)
# Make sure params in both machines are same
assert len(results) == 2
self.assertAlmostEqual(results[0], results[1])
def test_warmup_sync_bmuf_sync(self):
# Train model for 25 iteration and do warmup sync after 20 iteration
# and bmuf sync after 25 iteration
cfg, args = setup_args()
args.warmup_iterations = 20
args.global_sync_iter = 5
cfg.bmuf.warmup_iterations = args.warmup_iterations
cfg.bmuf.global_sync_iter = args.global_sync_iter
iterations = 25
results = self.bmuf_process(cfg, args, iterations)
# Make sure params in both machines are same
assert len(results) == 2
self.assertAlmostEqual(results[0], results[1])
def test_single_gpu_bmuf(self):
# Train model for 5 iterations and use GPU 1
cfg, args = setup_args()
args.distributed_world_size = 1
args.warmup_iterations = 5
cfg.distributed_training.distributed_world_size = args.distributed_world_size
cfg.bmuf.distributed_world_size = args.distributed_world_size
cfg.bmuf.warmup_iterations = args.warmup_iterations
iterations = 20
results = self.bmuf_process(cfg, args, iterations)
assert len(results) == 1
def assertAlmostEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertLess((t1 - t2).abs().max(), 1e-4)
if __name__ == "__main__":
unittest.main()
|
2,383 |
normalize analyzer type
|
import logging
import re
from pathlib import Path
from pprint import pformat
from typing import Union, Sequence
import cpyMSpec
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal
logger = logging.getLogger(__name__)
PolarityType = Literal['positive', 'negative']
ANALYZER_TYPES = ('tof', 'orbitrap', 'ft-icr')
AnalyzerType = Literal['tof', 'orbitrap', 'ft-icr']
DefaultType = Literal['default']
DEFAULT: DefaultType = 'default'
DB_ROOT = Path(__file__).parent / 'dbs'
MATRIX_DBS = [f.stem for f in DB_ROOT.glob('matrix_*.csv')]
PARSED_MATRIX_DBS = sorted([matrix_db.split('_')[1:] for matrix_db in MATRIX_DBS])
MATRIXES = set(matrix for matrix, polarity in PARSED_MATRIX_DBS)
FORMATTED_MATRIXES = ', '.join(
f'{matrix} ({"/".join(pols[::-1])})'
for matrix in MATRIXES
for pols in [[p for m, p in PARSED_MATRIX_DBS if m == matrix]]
)
BUILTIN_DBS = {
'hmdb': DB_ROOT / 'HMDB-v4.csv',
'cm3': DB_ROOT / 'CoreMetabolome-v3.csv',
'lipid_maps': DB_ROOT / 'lipidmaps_2017-12-12-v2.tsv',
**{matrix_db: DB_ROOT / f'{matrix_db}.csv' for matrix_db in MATRIX_DBS},
}
def METHOD_NAME(analyzer) -> AnalyzerType:
"""Detects analyzer type from a string and returns an MSIWarp-compatible analyzer string"""
analyzer = (analyzer or '').lower()
if 'orbitrap' in analyzer:
return 'orbitrap'
if any(phrase in analyzer for phrase in ['fticr', 'ft-icr', 'ftms', 'ft-ms']):
return 'ft-icr'
if 'tof' in analyzer:
return 'tof'
raise AssertionError(f'Unrecognized analyzer type "{analyzer}"')
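# Illustrative behaviour (example instrument strings are assumptions):
#   METHOD_NAME('Orbitrap Fusion Lumos') -> 'orbitrap'
#   METHOD_NAME('Bruker solariX FTMS')   -> 'ft-icr'
#   METHOD_NAME('timsTOF fleX')          -> 'tof'
#   METHOD_NAME('quadrupole')            -> raises AssertionError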
def _default_adducts(polarity, source):
if polarity.lower() == 'positive':
if source.lower() == 'maldi':
return ['', '+H', '+Na', '+K']
else:
return ['', '+H', '+Na', '-Cl', '+NH4']
else:
if source.lower() == 'maldi':
return ['', '-H', '+Cl']
else:
return ['', '-H', '+Cl', '+HCO2']
class RecalParams:
def __init__(
self,
analyzer: str = 'orbitrap',
source: str = 'maldi',
matrix: str = DEFAULT,
polarity: PolarityType = 'positive',
rp: float = 140000.0,
base_mz: float = 200.0,
peak_width_ppm: Union[float, DefaultType] = DEFAULT,
jitter_ppm: float = 3.0,
adducts: Union[Sequence[str], DefaultType] = DEFAULT,
profile_mode: bool = False,
dbs: Union[Sequence[Union[Path, str]], DefaultType] = DEFAULT,
targeted_dbs: Union[Sequence[Union[Path, str]], DefaultType] = (),
transforms: Union[Sequence[Sequence[str]], DefaultType] = DEFAULT,
):
from msi_recal.math import ppm_to_sigma_1 # Avoid circular import
assert polarity in ('positive', 'negative'), f'Invalid polarity "{polarity}"'
self.analyzer = analyzer = METHOD_NAME(analyzer)
self.rp = rp
self.base_mz = base_mz
if peak_width_ppm is DEFAULT:
self.peak_width_ppm = 15 if profile_mode else 0
else:
self.peak_width_ppm = peak_width_ppm
self.peak_width_sigma_1 = ppm_to_sigma_1(self.peak_width_ppm, analyzer, base_mz)
self.jitter_ppm = jitter_ppm
self.jitter_sigma_1 = ppm_to_sigma_1(self.jitter_ppm, analyzer, base_mz)
if adducts is DEFAULT:
adducts = _default_adducts(polarity, source)
if matrix is DEFAULT:
if 'maldi' in source.lower():
matrix = {'positive': 'dhb', 'negative': 'dan'}.get(polarity.lower())
else:
matrix = None
if dbs is DEFAULT:
dbs = ['cm3']
if matrix is not None and matrix.lower() != 'none':
for mat in re.split('[,;/|]', matrix):
norm_mat = mat.lower().strip()
norm_mat = {'norharmane': 'nor'}.get(norm_mat, norm_mat)
matrix_db = f'matrix_{norm_mat}_{polarity[:3]}'
if matrix_db in BUILTIN_DBS:
dbs.append(matrix_db)
else:
logger.warning(
f'No peak database available for matrix {mat}. Supported MALDI matrices:'
+ FORMATTED_MATRIXES
)
self.charge = {'positive': 1, 'negative': -1}[polarity]
self.adducts = adducts
self.profile_mode = profile_mode
self.db_paths = [Path(BUILTIN_DBS.get(db, db)) for db in dbs]
for db_path in self.db_paths:
assert db_path.exists(), f'{db_path} not found'
self.targeted_dbs = [Path(BUILTIN_DBS.get(db, db)) for db in targeted_dbs]
if transforms is DEFAULT:
transforms = [
['align_msiwarp', '5', '1', '0.2'],
['recal_ransac', '50'],
# ['recal_msiwarp', '20', '4', '0.1'],
]
self.transforms = transforms
if analyzer == 'ft-icr':
self.instrument_model = cpyMSpec.InstrumentModel('fticr', rp, base_mz)
else:
self.instrument_model = cpyMSpec.InstrumentModel(analyzer, rp, base_mz)
def __repr__(self):
return 'RecalParams ' + pformat(self.__dict__, sort_dicts=False)
|
2,384 |
get insight selectors
|
"""Handles incoming cloudtrail requests, invokes methods, returns responses."""
import json
from typing import Any, Dict
from moto.core.responses import BaseResponse
from .models import cloudtrail_backends, CloudTrailBackend
from .exceptions import InvalidParameterCombinationException
class CloudTrailResponse(BaseResponse):
"""Handler for CloudTrail requests and responses."""
def __init__(self) -> None:
super().__init__(service_name="cloudtrail")
@property
def cloudtrail_backend(self) -> CloudTrailBackend:
"""Return backend instance specific for this region."""
return cloudtrail_backends[self.current_account][self.region]
def create_trail(self) -> str:
name = self._get_param("Name")
bucket_name = self._get_param("S3BucketName")
is_global = self._get_bool_param("IncludeGlobalServiceEvents", True)
is_multi_region = self._get_bool_param("IsMultiRegionTrail", False)
if not is_global and is_multi_region:
raise InvalidParameterCombinationException(
"Multi-Region trail must include global service events."
)
s3_key_prefix = self._get_param("S3KeyPrefix")
sns_topic_name = self._get_param("SnsTopicName")
log_validation = self._get_bool_param("EnableLogFileValidation", False)
is_org_trail = self._get_bool_param("IsOrganizationTrail", False)
cw_log_group_arn = self._get_param("CloudWatchLogsLogGroupArn")
cw_role_arn = self._get_param("CloudWatchLogsRoleArn")
kms_key_id = self._get_param("KmsKeyId")
tags_list = self._get_param("TagsList", [])
trail = self.cloudtrail_backend.create_trail(
name,
bucket_name,
s3_key_prefix,
sns_topic_name,
is_global,
is_multi_region,
log_validation,
is_org_trail,
cw_log_group_arn,
cw_role_arn,
kms_key_id,
tags_list,
)
return json.dumps(trail.description())
def get_trail(self) -> str:
name = self._get_param("Name")
trail = self.cloudtrail_backend.get_trail(name)
return json.dumps({"Trail": trail.description()})
def get_trail_status(self) -> str:
name = self._get_param("Name")
status = self.cloudtrail_backend.get_trail_status(name)
return json.dumps(status.description())
def describe_trails(self) -> str:
include_shadow_trails = self._get_bool_param("includeShadowTrails", True)
trails = self.cloudtrail_backend.describe_trails(include_shadow_trails)
return json.dumps(
{"trailList": [t.description(include_region=True) for t in trails]}
)
def list_trails(self) -> str:
all_trails = self.cloudtrail_backend.list_trails()
return json.dumps({"Trails": [t.short() for t in all_trails]})
def start_logging(self) -> str:
name = self._get_param("Name")
self.cloudtrail_backend.start_logging(name)
return json.dumps({})
def stop_logging(self) -> str:
name = self._get_param("Name")
self.cloudtrail_backend.stop_logging(name)
return json.dumps({})
def delete_trail(self) -> str:
name = self._get_param("Name")
self.cloudtrail_backend.delete_trail(name)
return json.dumps({})
def update_trail(self) -> str:
name = self._get_param("Name")
s3_bucket_name = self._get_param("S3BucketName")
s3_key_prefix = self._get_param("S3KeyPrefix")
sns_topic_name = self._get_param("SnsTopicName")
include_global_service_events = self._get_param("IncludeGlobalServiceEvents")
is_multi_region_trail = self._get_param("IsMultiRegionTrail")
enable_log_file_validation = self._get_param("EnableLogFileValidation")
is_organization_trail = self._get_param("IsOrganizationTrail")
cw_log_group_arn = self._get_param("CloudWatchLogsLogGroupArn")
cw_role_arn = self._get_param("CloudWatchLogsRoleArn")
kms_key_id = self._get_param("KmsKeyId")
trail = self.cloudtrail_backend.update_trail(
name=name,
s3_bucket_name=s3_bucket_name,
s3_key_prefix=s3_key_prefix,
sns_topic_name=sns_topic_name,
include_global_service_events=include_global_service_events,
is_multi_region_trail=is_multi_region_trail,
enable_log_file_validation=enable_log_file_validation,
is_organization_trail=is_organization_trail,
cw_log_group_arn=cw_log_group_arn,
cw_role_arn=cw_role_arn,
kms_key_id=kms_key_id,
)
return json.dumps(trail.description())
def put_event_selectors(self) -> str:
params = json.loads(self.body)
trail_name = params.get("TrailName")
event_selectors = params.get("EventSelectors")
advanced_event_selectors = params.get("AdvancedEventSelectors")
(
trail_arn,
event_selectors,
advanced_event_selectors,
) = self.cloudtrail_backend.put_event_selectors(
trail_name=trail_name,
event_selectors=event_selectors,
advanced_event_selectors=advanced_event_selectors,
)
return json.dumps(
dict(
TrailARN=trail_arn,
EventSelectors=event_selectors,
AdvancedEventSelectors=advanced_event_selectors,
)
)
def get_event_selectors(self) -> str:
params = json.loads(self.body)
trail_name = params.get("TrailName")
(
trail_arn,
event_selectors,
advanced_event_selectors,
) = self.cloudtrail_backend.get_event_selectors(trail_name=trail_name)
return json.dumps(
dict(
TrailARN=trail_arn,
EventSelectors=event_selectors,
AdvancedEventSelectors=advanced_event_selectors,
)
)
def add_tags(self) -> str:
params = json.loads(self.body)
resource_id = params.get("ResourceId")
tags_list = params.get("TagsList")
self.cloudtrail_backend.add_tags(resource_id=resource_id, tags_list=tags_list)
return json.dumps(dict())
def remove_tags(self) -> str:
resource_id = self._get_param("ResourceId")
tags_list = self._get_param("TagsList")
self.cloudtrail_backend.remove_tags(
resource_id=resource_id, tags_list=tags_list
)
return json.dumps(dict())
def list_tags(self) -> str:
params = json.loads(self.body)
resource_id_list = params.get("ResourceIdList")
resource_tag_list = self.cloudtrail_backend.list_tags(
resource_id_list=resource_id_list
)
return json.dumps(dict(ResourceTagList=resource_tag_list))
def put_insight_selectors(self) -> str:
trail_name = self._get_param("TrailName")
insight_selectors = self._get_param("InsightSelectors")
trail_arn, insight_selectors = self.cloudtrail_backend.put_insight_selectors(
trail_name=trail_name, insight_selectors=insight_selectors
)
return json.dumps(dict(TrailARN=trail_arn, InsightSelectors=insight_selectors))
def METHOD_NAME(self) -> str:
trail_name = self._get_param("TrailName")
trail_arn, insight_selectors = self.cloudtrail_backend.METHOD_NAME(
trail_name=trail_name
)
resp: Dict[str, Any] = {"TrailARN": trail_arn}
if insight_selectors:
resp["InsightSelectors"] = insight_selectors
return json.dumps(resp)
|
2,385 |
generate arguments
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests fuzzers.builtin."""
import os
import unittest
import parameterized
from pyfakefs import fake_filesystem_unittest
from clusterfuzz._internal.bot.fuzzers import builtin
from clusterfuzz._internal.system import environment
from clusterfuzz._internal.tests.test_libs import helpers
from clusterfuzz._internal.tests.test_libs import test_utils
class TestEngineFuzzer(builtin.EngineFuzzer):
"""A test engine fuzzer."""
def METHOD_NAME(self, *_): # pylint: disable=arguments-differ
return '-arg1 -arg2'
class BaseEngineFuzzerTest(fake_filesystem_unittest.TestCase):
"""Engine fuzzer tests."""
def setUp(self):
"""Setup for base engine fuzzer test."""
helpers.patch_environ(self)
helpers.patch(self, [
'clusterfuzz._internal.base.utils.default_project_name',
'clusterfuzz._internal.bot.fuzzers.builtin.fuzzers_utils.get_fuzz_targets'
])
test_utils.set_up_pyfakefs(self)
self.fs.create_dir('/input')
self.fs.create_dir('/output')
environment.set_value('BUILD_DIR', '/build_dir')
environment.set_value('FAIL_RETRIES', 1)
environment.set_value('PROJECT_NAME', 'proj')
self.mock.default_project_name.return_value = 'default-proj'
self.mock.get_fuzz_targets.return_value = [
'/build_dir/target',
]
self.fs.create_file(
'/build_dir/target.owners',
contents='[email protected]\[email protected]')
class EngineFuzzerTest(BaseEngineFuzzerTest):
"""Engine fuzzer tests."""
def test_run(self):
"""Test running an engine fuzzer."""
fuzzer = TestEngineFuzzer()
result = fuzzer.run('/input', '/output', 1)
self.assertEqual(
'Generated 1 testcase for fuzzer target.\n'
'metadata::fuzzer_binary_name: target\n'
'metadata::issue_owners: [email protected],[email protected]\n',
result.output)
self.assertEqual('/input/proj_target', result.corpus_directory)
self.assertTrue(os.path.exists('/output/fuzz-0'))
self.assertTrue(os.path.exists('/output/flags-0'))
with open('/output/fuzz-0') as f:
self.assertEqual(' ', f.read())
with open('/output/flags-0') as f:
self.assertEqual('%TESTCASE% target -arg1 -arg2', f.read())
def test_run_with_labels(self):
"""Test running an engine fuzzer with a labels file."""
self.fs.create_file('/build_dir/target.labels', contents='label1\nlabel2\n')
fuzzer = TestEngineFuzzer()
result = fuzzer.run('/input', '/output', 1)
self.assertEqual(
'Generated 1 testcase for fuzzer target.\n'
'metadata::fuzzer_binary_name: target\n'
'metadata::issue_owners: [email protected],[email protected]\n'
'metadata::issue_labels: label1,label2\n', result.output)
def test_run_no_build_dir(self):
"""Test running without a build dir."""
environment.set_value('BUILD_DIR', '')
fuzzer = TestEngineFuzzer()
with self.assertRaisesRegex(builtin.BuiltinFuzzerError, 'BUILD_DIR'):
fuzzer.run('/input', '/output', 1)
def test_run_no_fuzzers(self):
"""Test running without fuzzers."""
self.mock.get_fuzz_targets.return_value = []
fuzzer = TestEngineFuzzer()
with self.assertRaises(builtin.BuiltinFuzzerError):
fuzzer.run('/input', '/output', 1)
def _generate_targets_list(self, count):
"""Generate a targets list."""
fake_targets_list = []
for i in range(count):
fake_targets_list.append('/build_dir/target' + str(i))
return fake_targets_list
def test_run_chosen_fuzz_target(self):
"""Test running with chosen fuzz target."""
os.environ['FUZZ_TARGET'] = 'chosen_target'
fake_targets_list = self._generate_targets_list(100)
fake_targets_list.append('/build_dir/chosen_target')
self.mock.get_fuzz_targets.return_value = fake_targets_list
fuzzer = TestEngineFuzzer()
result = fuzzer.run('/input', '/output', 1)
self.assertEqual(
'Generated 1 testcase for fuzzer chosen_target.\n'
'metadata::fuzzer_binary_name: chosen_target\n', result.output)
self.assertEqual('/input/proj_chosen_target', result.corpus_directory)
def test_sanitizer_options_from_options_file(self):
"""Tests that sanitizer options are set in *SAN_OPTIONS using the overrides
provided in .options file."""
environment.set_value('ASAN_OPTIONS', 'fake_option1=1')
with open('/build_dir/target.options', 'w') as f:
f.write('[asan]\nfake_option2=1\n[msan]\nfake_options3=1')
fuzzer = TestEngineFuzzer()
fuzzer.run('/input', '/output', 1)
self.assertEqual('fake_option1=1:fake_option2=1',
environment.get_value('ASAN_OPTIONS'))
self.assertEqual(None, environment.get_value('MSAN_OPTIONS'))
class GetFuzzerPath(unittest.TestCase):
"""_get_fuzzer_path Tests."""
@parameterized.parameterized.expand([('fuzzer', 'LINUX'), ('fuzzer.exe', 'WINDOWS')])
def test_get_fuzzer_path(self, target_name, mock_platform):
"""Test that get_fuzzer_path returns the path of a fuzzer."""
target_path = os.path.join('path', 'to', target_name)
helpers.patch(self, ['clusterfuzz._internal.system.environment.platform'])
self.mock.platform.return_value = mock_platform
result = builtin._get_fuzzer_path(['a', target_path], 'fuzzer') # pylint: disable=protected-access
self.assertEqual(result, target_path)
|
2,386 |
test half
|
################################################################################
#
# Copyright (C) 2019-2022 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
################################################################################
from __future__ import print_function
import pytest
from Tensile.DataType import DataType
def test_init_single():
expected = DataType('S')
assert DataType('single') == expected
assert DataType('Float') == expected
assert DataType('tensileDataTypeFloat') == expected
def test_init_double():
expected = DataType('D')
assert DataType('double') == expected
assert DataType('Double') == expected
assert DataType('tensileDataTypeDouble') == expected
def test_init_complexSingle():
expected = DataType('C')
assert DataType('complexSingle') == expected
assert DataType('complexFloat') == expected
assert DataType('tensileDataTypeComplexFloat') == expected
def test_init_complexDouble():
expected = DataType('Z')
assert DataType('complexDouble') == expected
assert DataType('ComplexDouble') == expected
assert DataType('tensileDataTypeComplexDouble') == expected
def test_init_half():
expected = DataType('H')
assert DataType('half') == expected
assert DataType('Half') == expected
assert DataType('tensileDataTypeHalf') == expected
def test_init_i8():
expected = DataType('4xi8')
assert DataType('int8x4') == expected
assert DataType('Int8x4') == expected
assert DataType('tensileDataTypeInt8x4') == expected
def test_init_i32():
expected = DataType('I')
assert DataType('int32') == expected
assert DataType('Int32') == expected
assert DataType('tensileDataTypeInt32') == expected
def test_single():
obj = DataType(0)
assert obj.toChar() == 'S'
assert obj.toName() == 'single'
assert obj.toEnum() == 'Float'
assert obj.toOpenCL() == 'float'
assert obj.toHIP() == 'float'
assert obj.toDevice("") == 'float'
assert obj.toCpp() == 'float'
assert obj.getLibString() == 'tensileDataTypeFloat'
assert obj.numBytes() == 4
assert obj.isReal()
def test_double():
obj = DataType(1)
assert obj.toChar() == 'D'
assert obj.toName() == 'double'
assert obj.toEnum() == 'Double'
assert obj.toOpenCL() == 'double'
assert obj.toHIP() == 'double'
assert obj.toDevice("") == 'double'
assert obj.toCpp() == 'double'
assert obj.getLibString() == 'tensileDataTypeDouble'
assert obj.numBytes() == 8
assert obj.isReal()
def test_complexSingle():
obj = DataType(2)
assert obj.toChar() == 'C'
assert obj.toName() == 'complexSingle'
assert obj.toEnum() == 'ComplexFloat'
assert obj.toOpenCL() == 'float2'
assert obj.toHIP() == 'TensileComplexFloat'
assert obj.toDevice("") == 'TensileComplexFloat'
assert obj.toCpp() == 'TensileComplexFloat'
assert obj.getLibString() == 'tensileDataTypeComplexFloat'
assert obj.numBytes() == 8
assert not obj.isReal()
def test_complexDouble():
obj = DataType(3)
assert obj.toChar() == 'Z'
assert obj.toName() == 'complexDouble'
assert obj.toEnum() == 'ComplexDouble'
assert obj.toOpenCL() == 'double2'
assert obj.toHIP() == 'TensileComplexDouble'
assert obj.toDevice("") == 'TensileComplexDouble'
assert obj.toCpp() == 'TensileComplexDouble'
assert obj.getLibString() == 'tensileDataTypeComplexDouble'
assert obj.numBytes() == 16
assert not obj.isReal()
def METHOD_NAME():
obj = DataType(4)
assert obj.toChar() == 'H'
assert obj.toName() == 'half'
assert obj.toEnum() == 'Half'
assert obj.toOpenCL() == 'ERROR'
assert obj.toHIP() == 'tensile_half'
assert obj.toDevice("OCL") == 'ERROR'
assert obj.toDevice("") == 'tensile_half'
assert obj.toCpp() == 'TensileHalf'
assert obj.getLibString() == 'tensileDataTypeHalf'
assert obj.numBytes() == 2
assert obj.isReal()
def test_int8():
obj = DataType(5)
assert obj.toChar() == '4xi8'
assert obj.toName() == 'int8x4'
assert obj.toEnum() == 'Int8x4'
assert obj.toOpenCL() == 'ERROR'
assert obj.toHIP() == 'uint32_t'
assert obj.toDevice("OCL") == 'ERROR'
assert obj.toDevice("") == 'uint32_t'
assert obj.toCpp() == 'TensileInt8x4'
assert obj.getLibString() == 'tensileDataTypeInt8x4'
assert obj.numBytes() == 4
assert obj.isReal()
def test_int32():
obj = DataType(6)
assert obj.toChar() == 'I'
assert obj.toName() == 'int32'
assert obj.toEnum() == 'Int32'
assert obj.toOpenCL() == 'ERROR'
assert obj.toHIP() == 'int32_t'
assert obj.toDevice("OCL") == 'ERROR'
assert obj.toDevice("") == 'int32_t'
assert obj.toCpp() == 'TensileInt32'
assert obj.getLibString() == 'tensileDataTypeInt32'
assert obj.numBytes() == 4
assert obj.isReal()
def test_cmp():
assert DataType('single') == DataType('S')
assert not DataType('S') != DataType(0)
assert DataType('Float') < DataType('Double')
assert not DataType('tensileDataTypeFloat') > DataType('Z')
assert DataType('half') >= DataType('ComplexFloat')
assert not DataType('int32') <= DataType('tensileDataTypeInt8x4')
def test_bounds():
with pytest.raises(Exception):
DataType(14)
|
2,387 |
test get weight channel axis
|
# Copyright (c) 2023 Intel Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
from typing import Callable, Dict
import numpy as np
import openvino.runtime as ov
import pytest
import torch
from nncf.openvino.graph.layer_attributes import OVLayerAttributes
from nncf.openvino.graph.metatypes.openvino_metatypes import OVConvolutionMetatype
from nncf.openvino.graph.metatypes.openvino_metatypes import OVMatMulMetatype
from nncf.quantization.algorithms.smooth_quant.openvino_backend import OVSmoothQuantAlgoBackend
from tests.post_training.test_templates.test_smooth_quant import TemplateTestSQAlgorithm
from tests.shared.command import Command
class TestOVSQAlgorithm(TemplateTestSQAlgorithm):
@staticmethod
def fn_to_type(tensor) -> np.ndarray:
return np.array(tensor)
@staticmethod
def get_transform_fn() -> Callable:
def transform_fn(data_item):
tensor, _ = data_item
return {"input.1": tensor}
return transform_fn
@staticmethod
def get_backend() -> OVSmoothQuantAlgoBackend:
return OVSmoothQuantAlgoBackend()
@staticmethod
def backend_specific_model(model: torch.nn.Module, tmp_dir: str) -> ov.Model:
onnx_path = Path(f"{tmp_dir}/model.onnx")
torch.onnx.export(model, torch.rand(model.INPUT_SIZE), onnx_path, opset_version=13, input_names=["input.1"])
ov_path = Path(f"{tmp_dir}/model.xml")
runner = Command(f"mo -m {onnx_path} -o {tmp_dir} -n model --compress_to_fp16=False")
runner.run()
core = ov.Core()
ov_model = core.read_model(ov_path)
return ov_model
@staticmethod
def check_scales(model: ov.Model, reference_values: Dict[str, np.ndarray]) -> None:
ops_list = {op.get_friendly_name(): op for op in model.get_ops()}
for ref_name, ref_value in reference_values.items():
node = ops_list[ref_name]
const_node = node.input(1).get_source_output().get_node()
assert const_node.get_type_name() == "Constant"
value = const_node.data
ref_value = np.array(ref_value)
assert value.shape == ref_value.shape
assert np.all(np.isclose(value, ref_value, atol=0.0001)), f"{value} != {ref_value}"
@pytest.mark.parametrize(
"node_metatype, layer_attributes, port_id, reference_value",
(
(OVMatMulMetatype, OVLayerAttributes({}, inputs_attributes={"transpose": False}), 0, -1),
(OVMatMulMetatype, OVLayerAttributes({}, inputs_attributes={"transpose": True}), 0, -2),
(OVMatMulMetatype, OVLayerAttributes({}, inputs_attributes={"transpose": False}), 1, -2),
(OVMatMulMetatype, OVLayerAttributes({}, inputs_attributes={"transpose": True}), 1, -1),
(OVMatMulMetatype, OVLayerAttributes({}, inputs_attributes={"transpose": False}), 2, RuntimeError),
(OVConvolutionMetatype, OVLayerAttributes({}, inputs_attributes={}), 0, 1),
),
)
def test_get_activation_channel_axis(self, node_metatype, layer_attributes, port_id, reference_value):
return super().test_get_activation_channel_axis(node_metatype, layer_attributes, port_id, reference_value)
@pytest.mark.parametrize(
"node_metatype, layer_attributes, port_id, reference_value",
(
(OVMatMulMetatype, OVLayerAttributes({1: {"transpose": False}}), 1, -2),
(OVMatMulMetatype, OVLayerAttributes({1: {"transpose": True}}), 1, -1),
(OVMatMulMetatype, OVLayerAttributes({0: {"transpose": False}}), 0, -1),
(OVMatMulMetatype, OVLayerAttributes({0: {"transpose": True}}), 0, -2),
(OVMatMulMetatype, OVLayerAttributes({1: {"transpose": False}}), 2, RuntimeError),
(OVConvolutionMetatype, OVLayerAttributes({1: {}}), 1, 0),
),
)
def METHOD_NAME(self, node_metatype, layer_attributes, port_id, reference_value):
return super().METHOD_NAME(node_metatype, layer_attributes, port_id, reference_value)
@staticmethod
def get_matmul_metatype():
return OVMatMulMetatype
|
2,388 |
handle old salt host resource
|
"""
Dynamic roster from terraform current state
===========================================
This roster module allows you dynamically generate the roster from the terraform
resources defined with the `Terraform Salt`_ provider.
It exposes all salt_host resources with the same attributes to the salt-ssh
roster, making it completely independent of the type of terraform resource, and
providing the integration using terraform constructs with interpolation.
Basic Example
-------------
Given a simple salt-ssh tree with a Saltfile:
.. code-block:: yaml
salt-ssh:
config_dir: etc/salt
max_procs: 30
wipe_ssh: True
and ``etc/salt/master``:
.. code-block:: yaml
root_dir: .
file_roots:
base:
- srv/salt
pillar_roots:
base:
- srv/pillar
roster: terraform
In the same folder as your ``Saltfile``, create terraform file with resources
like cloud instances, virtual machines, etc. For every single one of those that
you want to manage with Salt, create a ``salt_host`` resource:
.. code-block:: text
resource "salt_host" "dbminion" {
salt_id = "dbserver"
host = "${libvirt_domain.vm-db.network_interface.0.addresses.0}"
user = "root"
passwd = "linux"
}
You can use the count attribute to create multiple roster entries with a single
definition. Please refer to the `Terraform Salt`_ provider for more detailed
examples.
.. _Terraform Salt: https://github.com/dmacvicar/terraform-provider-salt
"""
import logging
import os.path
import salt.utils.files
import salt.utils.json
log = logging.getLogger(__name__)
TF_OUTPUT_PREFIX = "salt.roster."
TF_ROSTER_ATTRS = {
"host": "s",
"user": "s",
"passwd": "s",
"port": "i",
"sudo": "b",
"sudo_user": "s",
"tty": "b",
"priv": "s",
"timeout": "i",
"minion_opts": "m",
"thin_dir": "s",
"cmd_umask": "i",
}
MINION_ID = "salt_id"
def METHOD_NAME(resource):
"""
Handles salt_host resources.
See https://github.com/dmacvicar/terraform-provider-salt
Returns roster attributes for the resource or None
"""
ret = {}
attrs = resource.get("primary", {}).get("attributes", {})
ret[MINION_ID] = attrs.get(MINION_ID)
valid_attrs = set(attrs.keys()).intersection(TF_ROSTER_ATTRS.keys())
for attr in valid_attrs:
ret[attr] = _cast_output_to_type(attrs.get(attr), TF_ROSTER_ATTRS.get(attr))
return ret
def _handle_new_salt_host_resource(resource):
"""
Handles salt_host resources.
See https://github.com/dmacvicar/terraform-provider-salt
Returns roster attributes for the resource or None
"""
rets = []
instances = resource.get("instances", [])
for instance in instances:
ret = {}
attrs = instance.get("attributes", {})
ret[MINION_ID] = attrs.get(MINION_ID)
valid_attrs = set(attrs.keys()).intersection(TF_ROSTER_ATTRS.keys())
for attr in valid_attrs:
ret[attr] = _cast_output_to_type(attrs.get(attr), TF_ROSTER_ATTRS.get(attr))
log.info(ret)
rets.append(ret)
return rets
def _add_ssh_key(ret):
"""
Setups the salt-ssh minion to be accessed with salt-ssh default key
"""
priv = None
if __opts__.get("ssh_use_home_key") and os.path.isfile(
os.path.expanduser("~/.ssh/id_rsa")
):
priv = os.path.expanduser("~/.ssh/id_rsa")
else:
priv = __opts__.get(
"ssh_priv",
os.path.abspath(os.path.join(__opts__["pki_dir"], "ssh", "salt-ssh.rsa")),
)
if priv and os.path.isfile(priv):
ret["priv"] = priv
def _cast_output_to_type(value, typ):
"""cast the value depending on the terraform type"""
if value is None:
return value
if typ == "b":
return bool(value)
if typ == "i":
return int(value)
return value
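# Illustrative behaviour (examples only): terraform state stores attribute values as strings,
# so 'b' and 'i' typed attributes are coerced here:
#   _cast_output_to_type('2222', 'i')  -> 2222
#   _cast_output_to_type('1', 'b')     -> True    (note: any non-empty string is truthy)
#   _cast_output_to_type(None, 'i')    -> None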
def _parse_state_file(state_file_path="terraform.tfstate"):
"""
Parses the terraform state file passing different resource types to the right handler
"""
with salt.utils.files.fopen(state_file_path, "r") as fh_:
tfstate = salt.utils.json.load(fh_)
if "resources" in tfstate:
return _do_parse_new_state_file(tfstate)
elif "modules" in tfstate:
return _do__parse_old_state_file(tfstate)
else:
log.error("Malformed tfstate file.")
return {}
def _do_parse_new_state_file(tfstate):
"""
Parses the terraform state file passing different resource types to the right handler terraform version >= v0.13.0
"""
ret = {}
resources = tfstate.get("resources")
if not resources:
log.error("Malformed tfstate file. No resources found")
return ret
for resource in resources:
if resource["type"] == "salt_host":
roster_entrys = _handle_new_salt_host_resource(resource)
if not roster_entrys or len(roster_entrys) < 1:
continue
for roster_entry in roster_entrys:
if not roster_entry:
continue
minion_id = roster_entry.get(MINION_ID, resource.get("id"))
if not minion_id:
continue
if MINION_ID in roster_entry:
del roster_entry[MINION_ID]
_add_ssh_key(roster_entry)
ret[minion_id] = roster_entry
return ret
def _do__parse_old_state_file(tfstate):
"""
Parses the terraform state file passing different resource types to the right handler terraform version < v0.13.0
"""
ret = {}
modules = tfstate.get("modules")
if not modules:
log.error("Malformed tfstate file. No modules found")
return ret
for module in modules:
resources = module.get("resources", [])
for resource_name, resource in resources.items():
roster_entry = None
if resource["type"] == "salt_host":
roster_entry = METHOD_NAME(resource)
if not roster_entry:
continue
minion_id = roster_entry.get(MINION_ID, resource.get("id"))
if not minion_id:
continue
if MINION_ID in roster_entry:
del roster_entry[MINION_ID]
_add_ssh_key(roster_entry)
ret[minion_id] = roster_entry
return ret
def targets(tgt, tgt_type="glob", **kwargs): # pylint: disable=W0613
"""
Returns the roster from the terraform state file, checks opts for location, but defaults to terraform.tfstate
"""
roster_file = os.path.abspath("terraform.tfstate")
if __opts__.get("roster_file"):
roster_file = os.path.abspath(__opts__["roster_file"])
if not os.path.isfile(roster_file):
log.error("Can't find terraform state file '%s'", roster_file)
return {}
log.debug("terraform roster: using %s state file", roster_file)
if not roster_file.endswith(".tfstate"):
log.error("Terraform roster can only be used with terraform state files")
return {}
raw = _parse_state_file(roster_file)
log.debug("%s hosts in terraform state file", len(raw))
return __utils__["roster_matcher.targets"](raw, tgt, tgt_type, "ipv4")
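# Illustrative roster produced for the docstring's "dbminion" example (host/priv values are assumptions):
#   {'dbserver': {'host': '192.0.2.10', 'user': 'root', 'passwd': 'linux',
#                 'priv': '<path to salt-ssh.rsa or ~/.ssh/id_rsa>'}}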
|
2,389 |
get token
|
import json
from rest_framework.exceptions import APIException
from django.conf import settings
from users.utils import construct_user_email
from common.utils.common import get_logger
from common.sdk.im.utils import digest
from common.sdk.im.mixin import RequestMixin, BaseRequest
logger = get_logger(__name__)
class URL:
# https://open.feishu.cn/document/ukTMukTMukTM/uEDO4UjLxgDO14SM4gTN
@property
def host(self):
if settings.FEISHU_VERSION == 'feishu':
h = 'https://open.feishu.cn'
else:
h = 'https://open.larksuite.com'
return h
@property
def authen(self):
return f'{self.host}/open-apis/authen/v1/index'
@property
def METHOD_NAME(self):
return f'{self.host}/open-apis/auth/v3/tenant_access_token/internal/'
@property
def get_user_info_by_code(self):
return f'{self.host}/open-apis/authen/v1/access_token'
@property
def send_message(self):
return f'{self.host}/open-apis/im/v1/messages'
def get_user_detail(self, user_id):
return f'{self.host}/open-apis/contact/v3/users/{user_id}'
class ErrorCode:
INVALID_APP_ACCESS_TOKEN = 99991664
INVALID_USER_ACCESS_TOKEN = 99991668
INVALID_TENANT_ACCESS_TOKEN = 99991663
class FeishuRequests(BaseRequest):
"""
Handles system-level errors by raising API exceptions that translate directly into HTTP responses, so business code does not need to care about them:
- ensure status_code == 200
- retry when the access_token is invalid
"""
invalid_token_errcodes = (
ErrorCode.INVALID_USER_ACCESS_TOKEN, ErrorCode.INVALID_TENANT_ACCESS_TOKEN,
ErrorCode.INVALID_APP_ACCESS_TOKEN
)
code_key = 'code'
msg_key = 'msg'
def __init__(self, app_id, app_secret, timeout=None):
self._app_id = app_id
self._app_secret = app_secret
super().__init__(timeout=timeout)
def get_access_token_cache_key(self):
return digest(self._app_id, self._app_secret)
def request_access_token(self):
data = {'app_id': self._app_id, 'app_secret': self._app_secret}
response = self.raw_request('post', url=URL().METHOD_NAME, data=data)
self.check_errcode_is_0(response)
access_token = response['tenant_access_token']
expires_in = response['expire']
return access_token, expires_in
def add_token(self, kwargs: dict):
headers = kwargs.setdefault('headers', {})
headers['Authorization'] = f'Bearer {self.access_token}'
class FeiShu(RequestMixin):
"""
Errors not caused by business data raise exceptions directly: they indicate a system configuration problem, so business code can ignore them.
"""
def __init__(self, app_id, app_secret, timeout=None):
self._app_id = app_id or ''
self._app_secret = app_secret or ''
self._requests = FeishuRequests(
app_id=app_id,
app_secret=app_secret,
timeout=timeout
)
def get_user_id_by_code(self, code):
# https://open.feishu.cn/document/ukTMukTMukTM/uEDO4UjLxgDO14SM4gTN
body = {
'grant_type': 'authorization_code',
'code': code
}
data = self._requests.post(URL().get_user_info_by_code, json=body, check_errcode_is_0=False)
self._requests.check_errcode_is_0(data)
return data['data']['user_id'], data['data']
def send_text(self, user_ids, msg):
params = {
'receive_id_type': 'user_id'
}
"""
https://open.feishu.cn/document/common-capabilities/message-card/message-cards-content
/using-markdown-tags
"""
body = {
'msg_type': 'interactive',
'content': json.dumps({'elements': [{'tag': 'markdown', 'content': msg}]})
}
invalid_users = []
for user_id in user_ids:
body['receive_id'] = user_id
try:
logger.info(f'Feishu send text: user_ids={user_ids} msg={msg}')
self._requests.post(URL().send_message, params=params, json=body)
except APIException as e:
# Only handle predictable errors here
logger.exception(e)
invalid_users.append(user_id)
return invalid_users
@staticmethod
def get_user_detail(user_id, **kwargs):
# get_user_id_by_code already returned the profile info, so just parse it here
data = kwargs['other_info']
username = user_id
name = data.get('name', username)
email = data.get('email') or data.get('enterprise_email')
email = construct_user_email(username, email)
return {
'username': username, 'name': name, 'email': email
}
|
2,390 |
getcalculate sigma
|
#-------------------------------------------------------------------------------
# ArtificialViscosity base class
#-------------------------------------------------------------------------------
from PYB11Generator import *
from ArtificialViscosityAbstractMethods import *
from RestartMethods import *
@PYB11template("Dimension")
@PYB11module("SpheralArtificialViscosity")
class ArtificialViscosity:
PYB11typedefs = """
typedef typename %(Dimension)s::Scalar Scalar;
typedef typename %(Dimension)s::Vector Vector;
typedef typename %(Dimension)s::Tensor Tensor;
typedef typename %(Dimension)s::SymTensor SymTensor;
typedef typename %(Dimension)s::ThirdRankTensor ThirdRankTensor;
"""
#...........................................................................
# Constructors
def pyinit(self,
Clinear = "const Scalar",
Cquadratic = "const Scalar",
QcorrectionOrder = ("const RKOrder", "RKOrder::LinearOrder")):
"ArtificialViscosity constructor"
#...........................................................................
# Methods
@PYB11const
def curlVelocityMagnitude(self, DvDx="const Tensor&"):
"Calculate the curl of the velocity given the stress tensor."
return "Scalar"
@PYB11const
def calculateLimiter(self,
vi = "const Vector&",
vj = "const Vector&",
ci = "const Scalar",
cj = "const Scalar",
hi = "const Scalar",
hj = "const Scalar",
nodeListID = "const int",
nodeID = "const int"):
"Method to return the limiter magnitude for the given node."
return "Tensor"
@PYB11const
def shockDirection(self,
ci = "const Scalar",
hi = "const Scalar",
nodeListID = "const int",
nodeID = "const int"):
"Helper for the limiter, calculate the unit grad div v term for the given node"
return "Vector"
@PYB11const
def sigmaWeighting(self, r="const Vector&"):
"Helper method to calculate the weighting based on the given position for use in the sigma calculation."
return "Vector"
@PYB11const
def sigmaij(self,
rji = "const Vector&",
rjiUnit = "const Vector&",
vji = "const Vector&",
hi2 = "const Scalar&",
nodeListID = "const int",
nodeID = "const int"):
"Figure out the total stress-strain tensor for a given node pair based on the stored average value and the given (position, velocity) pair."
return "Tensor"
#...........................................................................
# Properties
Cl = PYB11property("Scalar", "Cl", "Cl",
doc="The linear coefficient")
Cq = PYB11property("Scalar", "Cq", "Cq",
doc="The quadratic coefficient")
QcorrectionOrder = PYB11property("RKOrder", "QcorrectionOrder", "QcorrectionOrder",
doc="The RK correction order used for computing gradients in the viscosity")
balsaraShearCorrection = PYB11property("bool", "balsaraShearCorrection", "balsaraShearCorrection",
doc="Toggle whether to use the Balsara suppression for shear flows")
ClMultiplier = PYB11property("const FieldList<%(Dimension)s, Scalar>&", "ClMultiplier",
doc="Correction multiplier for the linear term")
CqMultiplier = PYB11property("const FieldList<%(Dimension)s, Scalar>&", "CqMultiplier",
doc="Correction multiplier for the quadratic term")
shearCorrection = PYB11property("const FieldList<%(Dimension)s, Scalar>&", "shearCorrection",
doc="Correction multiplier for Balsara shear suppression")
sigma = PYB11property("const FieldList<%(Dimension)s, Tensor>&", "sigma",
doc="Access the internally computed estimate of sigma: sig^ab = partial v^a / partial x^b")
gradDivVelocity = PYB11property("const FieldList<%(Dimension)s, Vector>&", "gradDivVelocity",
doc="Access the internally computed estimate of the velocity gradient and grad div velocity")
limiter = PYB11property("bool", "limiter", "limiter",
doc="Toggle whether to apply the del^2 velocity limiter")
epsilon2 = PYB11property("Scalar", "epsilon2", "epsilon2",
doc="Safety factor in denominator for Q")
negligibleSoundSpeed = PYB11property("Scalar", "negligibleSoundSpeed", "negligibleSoundSpeed",
doc="The negligible sound speed parameter for use in the limiter")
csMultiplier = PYB11property("Scalar", "csMultiplier", "csMultiplier",
doc="The multiplier for sound speed in the limiter")
energyMultiplier = PYB11property("Scalar", "energyMultiplier", "energyMultiplier",
doc="The multiplier for energy in the limiter.")
# This one is a protected property!
@PYB11const
@PYB11ignore
@PYB11protected
@PYB11cppname("calculateSigma")
def METHOD_NAME(self):
return "bool"
@PYB11ignore
@PYB11protected
@PYB11cppname("calculateSigma")
def setcalculateSigma(self, val="bool"):
return "void"
calculateSigma = property(METHOD_NAME, setcalculateSigma, doc="Toggle if sigma should be computed")
#-------------------------------------------------------------------------------
# Inject abstract interface
#-------------------------------------------------------------------------------
PYB11inject(ArtificialViscosityAbstractMethods, ArtificialViscosity, pure_virtual=True)
PYB11inject(RestartMethods, ArtificialViscosity)
|
2,391 |
test property diffuse color
|
import pytest
import pyvista as pv
from pyvista.plotting._property import _check_range
@pytest.fixture()
def prop():
return pv.Property()
def test_check_range():
with pytest.raises(ValueError, match="outside the acceptable"):
_check_range(-1, (0, 1), 'parm')
with pytest.raises(ValueError, match="outside the acceptable"):
_check_range(2, (0, 1), 'parm')
assert _check_range(0, (0, 1), 'parm') is None
def test_property_init():
prop = pv.Property()
# copy but equal
assert prop._theme is not pv.global_theme
assert prop._theme == pv.global_theme
def test_property_style(prop):
style = 'Surface'
prop.style = style
assert prop.style == style
def test_property_edge_color(prop):
prop.edge_color = 'b'
assert prop.edge_color.float_rgb == (0, 0, 1)
def test_property_opacity(prop):
opacity = 0.5
prop.opacity = opacity
assert prop.opacity == opacity
with pytest.raises(ValueError):
prop.opacity = 2
def test_property_show_edges(prop):
value = False
prop.show_edges = value
assert prop.show_edges == value
def test_property_lighting(prop):
value = False
prop.lighting = value
assert prop.lighting == value
def test_property_ambient(prop):
value = 0.45
prop.ambient = value
assert prop.ambient == value
with pytest.raises(ValueError):
prop.ambient = -1
def test_property_diffuse(prop):
value = 0.5
prop.diffuse = value
assert prop.diffuse == value
with pytest.raises(ValueError):
prop.diffuse = 2
def test_property_specular(prop):
value = 0.5
prop.specular = value
assert prop.specular == value
with pytest.raises(ValueError):
prop.specular = 2
def test_property_specular_power(prop):
value = 0.5
prop.specular_power = value
assert prop.specular_power == value
with pytest.raises(ValueError):
prop.specular_power = 200
def test_property_metallic(prop):
value = 0.1
prop.metallic = value
assert prop.metallic == value
with pytest.raises(ValueError):
prop.metallic = -1
def test_property_roughness(prop):
value = 0.1
prop.roughness = value
assert prop.roughness == value
def test_property_interpolation(prop):
value = 'Gouraud'
prop.interpolation = value
assert prop.interpolation == pv.opts.InterpolationType.from_any(value)
with pytest.raises(ValueError, match='InterpolationType has no value matching'):
prop.interpolation = 'foo'
def test_property_render_points_as_spheres(prop):
value = True
prop.render_points_as_spheres = value
assert prop.render_points_as_spheres is value
def test_property_render_lines_as_tubes(prop):
value = True
prop.render_lines_as_tubes = value
assert prop.render_lines_as_tubes is value
def test_property_point_size(prop):
value = 10.0
prop.point_size = value
assert prop.point_size == value
def test_property_line_width(prop):
assert isinstance(prop.line_width, float)
value = 10.0
prop.line_width = value
assert prop.line_width == value
@pytest.mark.parametrize("value", ['back', 'front', 'none'])
def test_property_culling(prop, value):
prop.culling = value
assert prop.culling == value
with pytest.raises(ValueError, match='Invalid culling'):
prop.culling = 'foo'
def METHOD_NAME(prop):
prop.diffuse_color = 'b'
assert prop.diffuse_color.float_rgb == (0, 0, 1)
def test_property_ambient_color(prop):
prop.ambient_color = 'b'
assert prop.ambient_color.float_rgb == (0, 0, 1)
def test_property_specular_color(prop):
prop.specular_color = 'b'
assert prop.specular_color.float_rgb == (0, 0, 1)
def test_property_anisotropy(prop):
value = 0.1
if pv.vtk_version_info < (9, 1, 0):
with pytest.raises(pv.core.errors.VTKVersionError):
prop.anisotropy = value
return
assert isinstance(prop.anisotropy, float)
prop.anisotropy = value
assert prop.anisotropy == value
|
2,392 |
test perturb real volatility below
|
import numpy
import pytest
from base import RNGStub, TrialStub
from orion.algo.pbt.explore import PerturbExplore, PipelineExplore, ResampleExplore
from orion.algo.space import Categorical, Dimension
from orion.core.utils.flatten import flatten
class TestPipelineExplore:
def test_no_explore(self):
params = object()
assert PipelineExplore([])(RNGStub(), None, params) is params
def test_explore_otherwise_next(self):
for i in range(4):
explore = PipelineExplore(
[
dict(of_type="explorestub", rval=None if j < i else i, some="args")
for j in range(4)
]
)
assert explore(RNGStub(), TrialStub(), None) == i
def test_configuration(self):
explore_configs = [
dict(of_type="explorestub", some="args", rval=1, no_call=False),
dict(of_type="explorestub", other="args", rval=None, no_call=True),
]
explore = PipelineExplore(explore_configs)
assert explore.configuration == dict(
of_type="pipelineexplore", explore_configs=explore_configs
)
class TestPerturb:
@pytest.mark.parametrize("factor", [0.5, 1, 1.5])
def test_perturb_real_factor(self, factor):
explore = PerturbExplore(factor=factor)
rng = RNGStub()
rng.random = lambda: 1.0
assert explore.perturb_real(rng, 1.0, (0.1, 2.0)) == factor
rng.random = lambda: 0.0
assert explore.perturb_real(rng, 1.0, (0.1, 2.0)) == 1.0 / factor
def test_perturb_real_below_interval_cap(self):
explore = PerturbExplore(factor=0.0, volatility=0)
rng = RNGStub()
rng.random = lambda: 1.0
rng.normal = lambda mean, variance: variance
assert explore.perturb_real(rng, 0.0, (1.0, 2.0)) == 1.0
explore.volatility = 1000
assert explore.perturb_real(rng, 0.0, (1.0, 2.0)) == 2.0
def test_perturb_real_above_interval_cap(self):
explore = PerturbExplore(factor=1.0, volatility=0)
rng = RNGStub()
rng.random = lambda: 1.0
rng.normal = lambda mean, variance: variance
assert explore.perturb_real(rng, 3.0, (1.0, 2.0)) == 2.0
explore.volatility = 1000
assert explore.perturb_real(rng, 3.0, (1.0, 2.0)) == 1.0
@pytest.mark.parametrize("volatility", [0.0, 0.05, 1.0])
def METHOD_NAME(self, volatility):
explore = PerturbExplore(factor=1.0, volatility=volatility)
rng = RNGStub()
rng.random = lambda: 1.0
rng.normal = lambda mean, variance: variance
assert explore.perturb_real(rng, 0.0, (1.0, 2.0)) == 1.0 + volatility
@pytest.mark.parametrize("volatility", [0.0, 0.05, 1.0])
def test_perturb_real_volatility_above(self, volatility):
explore = PerturbExplore(factor=1.0, volatility=volatility)
rng = RNGStub()
rng.random = lambda: 1.0
rng.normal = lambda mean, variance: variance
assert explore.perturb_real(rng, 3.0, (1.0, 2.0)) == 2.0 - volatility
@pytest.mark.parametrize("factor", [0.5, 0.75, 1, 1.5])
def test_perturb_int_factor(self, factor):
explore = PerturbExplore(factor=factor)
rng = RNGStub()
rng.random = lambda: 1.0
assert explore.perturb_int(rng, 5, (0, 10)) == int(numpy.round(5 * factor))
rng.random = lambda: 0.0
assert explore.perturb_int(rng, 5, (0, 10)) == int(numpy.round(5 / factor))
def test_perturb_int_duplicate_equal(self):
explore = PerturbExplore(factor=1.0)
rng = RNGStub()
rng.random = lambda: 1.0
assert explore.perturb_int(rng, 1, (0, 10)) == 1
def test_perturb_int_no_duplicate_below(self):
explore = PerturbExplore(factor=0.75)
rng = RNGStub()
rng.random = lambda: 1.0
assert explore.perturb_int(rng, 1, (0, 10)) == 0
def test_perturb_int_no_duplicate_above(self):
explore = PerturbExplore(factor=0.75)
rng = RNGStub()
rng.random = lambda: 0.0
assert explore.perturb_int(rng, 1, (0, 10)) == 2
def test_perturb_int_no_out_of_bounds(self):
explore = PerturbExplore(factor=0.75, volatility=0)
rng = RNGStub()
rng.random = lambda: 1.0
rng.normal = lambda mean, variance: variance
assert explore.perturb_int(rng, 0, (0, 10)) == 0
rng.random = lambda: 0.0
rng.normal = lambda mean, variance: variance
assert explore.perturb_int(rng, 10, (0, 10)) == 10
def test_perturb_cat(self):
explore = PerturbExplore()
rng = RNGStub()
rng.randint = lambda low, high=None, size=None: [1] if size else 1
dim = Categorical("name", ["one", "two", 3, 4.0])
assert explore.perturb_cat(rng, "whatever", dim) in dim
def test_perturb(self, space):
explore = PerturbExplore()
rng = RNGStub()
rng.randint = lambda low, high=None, size=None: numpy.ones(size) if size else 1
rng.random = lambda: 1.0
rng.normal = lambda mean, variance: 0.0
params = {"x": 1.0, "y": 2, "z": 0, "f": 10}
new_params = explore(rng, space, params)
for key in space.keys():
assert new_params[key] in space[key]
def test_perturb_hierarchical_params(self, hspace):
explore = PerturbExplore()
rng = RNGStub()
rng.randint = lambda low, high=None, size=None: numpy.ones(size) if size else 1
rng.random = lambda: 1.0
rng.normal = lambda mean, variance: 0.0
params = {"numerical": {"x": 1.0, "y": 2, "f": 10}, "z": 0}
new_params = explore(rng, hspace, params)
assert "numerical" in new_params
assert "x" in new_params["numerical"]
for key in hspace.keys():
assert flatten(new_params)[key] in hspace[key]
def test_perturb_with_invalid_dim(self, space, monkeypatch):
explore = PerturbExplore()
monkeypatch.setattr(Dimension, "type", "type_that_dont_exist")
with pytest.raises(
ValueError, match="Unsupported dimension type type_that_dont_exist"
):
explore(RNGStub(), space, {"x": 1.0, "y": 2, "z": 0, "f": 10})
def test_configuration(self):
explore = PerturbExplore(factor=2.0, volatility=10.0)
assert explore.configuration == dict(
of_type="perturbexplore", factor=2.0, volatility=10.0
)
class TestResample:
def test_resample_probability(self, space):
explore = ResampleExplore(probability=0.5)
rng = RNGStub()
rng.randint = lambda low, high, size: [1]
rng.random = lambda: 0.5
params = {"x": 1.0, "y": 2, "z": 0, "f": 10}
assert explore(rng, space, params) is params
rng.random = lambda: 0.4
assert explore(rng, space, params) is not params
def test_configuration(self):
explore = ResampleExplore(probability=0.5)
assert explore.configuration == dict(of_type="resampleexplore", probability=0.5)
|
2,393 |
id
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetPrivateEndpointConnectionResult',
'AwaitableGetPrivateEndpointConnectionResult',
'get_private_endpoint_connection',
'get_private_endpoint_connection_output',
]
@pulumi.output_type
class GetPrivateEndpointConnectionResult:
def __init__(__self__, group_ids=None, METHOD_NAME=None, name=None, private_endpoint=None, private_link_service_connection_state=None, provisioning_state=None, type=None):
if group_ids and not isinstance(group_ids, list):
raise TypeError("Expected argument 'group_ids' to be a list")
pulumi.set(__self__, "group_ids", group_ids)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", METHOD_NAME)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if private_endpoint and not isinstance(private_endpoint, dict):
raise TypeError("Expected argument 'private_endpoint' to be a dict")
pulumi.set(__self__, "private_endpoint", private_endpoint)
if private_link_service_connection_state and not isinstance(private_link_service_connection_state, dict):
raise TypeError("Expected argument 'private_link_service_connection_state' to be a dict")
pulumi.set(__self__, "private_link_service_connection_state", private_link_service_connection_state)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="groupIds")
def group_ids(self) -> Optional[Sequence[str]]:
"""
GroupIds from the private link service resource.
"""
return pulumi.get(self, "group_ids")
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
Fully qualified identifier of the resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
Name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="privateEndpoint")
def private_endpoint(self) -> Optional['outputs.PrivateEndpointResponse']:
"""
The Private Endpoint resource for this Connection.
"""
return pulumi.get(self, "private_endpoint")
@property
@pulumi.getter(name="privateLinkServiceConnectionState")
def private_link_service_connection_state(self) -> Optional['outputs.ConnectionStateResponse']:
"""
Details about the state of the connection.
"""
return pulumi.get(self, "private_link_service_connection_state")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
Provisioning state of the Private Endpoint Connection.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def type(self) -> str:
"""
Type of the resource.
"""
return pulumi.get(self, "type")
class AwaitableGetPrivateEndpointConnectionResult(GetPrivateEndpointConnectionResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetPrivateEndpointConnectionResult(
group_ids=self.group_ids,
METHOD_NAME=self.METHOD_NAME,
name=self.name,
private_endpoint=self.private_endpoint,
private_link_service_connection_state=self.private_link_service_connection_state,
provisioning_state=self.provisioning_state,
type=self.type)
def get_private_endpoint_connection(parent_name: Optional[str] = None,
parent_type: Optional[str] = None,
private_endpoint_connection_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPrivateEndpointConnectionResult:
"""
Get a specific private endpoint connection under a topic, domain, or partner namespace or namespace.
:param str parent_name: The name of the parent resource (namely, the topic name, domain name, partner namespace name, or namespace name).
:param str parent_type: The type of the parent resource. This can be \\'topics\\', \\'domains\\', \\'partnerNamespaces\\', or \\'namespaces\\'.
:param str private_endpoint_connection_name: The name of the private endpoint connection.
:param str resource_group_name: The name of the resource group within the user's subscription.
"""
__args__ = dict()
__args__['parentName'] = parent_name
__args__['parentType'] = parent_type
__args__['privateEndpointConnectionName'] = private_endpoint_connection_name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:eventgrid/v20230601preview:getPrivateEndpointConnection', __args__, opts=opts, typ=GetPrivateEndpointConnectionResult).value
return AwaitableGetPrivateEndpointConnectionResult(
group_ids=pulumi.get(__ret__, 'group_ids'),
METHOD_NAME=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
private_endpoint=pulumi.get(__ret__, 'private_endpoint'),
private_link_service_connection_state=pulumi.get(__ret__, 'private_link_service_connection_state'),
provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_private_endpoint_connection)
def get_private_endpoint_connection_output(parent_name: Optional[pulumi.Input[str]] = None,
parent_type: Optional[pulumi.Input[str]] = None,
private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetPrivateEndpointConnectionResult]:
"""
Get a specific private endpoint connection under a topic, domain, or partner namespace or namespace.
:param str parent_name: The name of the parent resource (namely, the topic name, domain name, partner namespace name, or namespace name).
:param str parent_type: The type of the parent resource. This can be \\'topics\\', \\'domains\\', \\'partnerNamespaces\\', or \\'namespaces\\'.
:param str private_endpoint_connection_name: The name of the private endpoint connection.
:param str resource_group_name: The name of the resource group within the user's subscription.
"""
...
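# Illustrative usage sketch (not part of the generated module): the resource
# names below are hypothetical placeholders, not values taken from this file.
#
#     conn = get_private_endpoint_connection(
#         parent_name="example-topic",
#         parent_type="topics",
#         private_endpoint_connection_name="example-connection",
#         resource_group_name="example-rg")
#     pulumi.export("provisioningState", conn.provisioning_state)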
|
2,394 |
get
|
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Optional, TypeVar, Union
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._flux_config_operation_status_operations import build_get_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class FluxConfigOperationStatusOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.kubernetesconfiguration.v2021_11_01_preview.aio.SourceControlConfigurationClient`'s
:attr:`flux_config_operation_status` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace_async
async def METHOD_NAME(
self,
resource_group_name: str,
cluster_rp: Union[str, _models.Enum0],
cluster_resource_name: Union[str, _models.Enum1],
cluster_name: str,
flux_configuration_name: str,
operation_id: str,
**kwargs: Any
) -> _models.OperationStatusResult:
"""Get Async Operation status.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param cluster_rp: The Kubernetes cluster RP - either Microsoft.ContainerService (for AKS
clusters) or Microsoft.Kubernetes (for OnPrem K8S clusters). Known values are:
"Microsoft.ContainerService" and "Microsoft.Kubernetes". Required.
:type cluster_rp: str or ~azure.mgmt.kubernetesconfiguration.v2021_11_01_preview.models.Enum0
:param cluster_resource_name: The Kubernetes cluster resource name - either managedClusters
(for AKS clusters) or connectedClusters (for OnPrem K8S clusters). Known values are:
"managedClusters" and "connectedClusters". Required.
:type cluster_resource_name: str or
~azure.mgmt.kubernetesconfiguration.v2021_11_01_preview.models.Enum1
:param cluster_name: The name of the kubernetes cluster. Required.
:type cluster_name: str
:param flux_configuration_name: Name of the Flux Configuration. Required.
:type flux_configuration_name: str
:param operation_id: operation Id. Required.
:type operation_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: OperationStatusResult or the result of cls(response)
:rtype: ~azure.mgmt.kubernetesconfiguration.v2021_11_01_preview.models.OperationStatusResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2021-11-01-preview"))
cls: ClsType[_models.OperationStatusResult] = kwargs.pop("cls", None)
request = build_get_request(
resource_group_name=resource_group_name,
cluster_rp=cluster_rp,
cluster_resource_name=cluster_resource_name,
cluster_name=cluster_name,
flux_configuration_name=flux_configuration_name,
operation_id=operation_id,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.METHOD_NAME.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("OperationStatusResult", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
METHOD_NAME.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/fluxConfigurations/{fluxConfigurationName}/operations/{operationId}"
}
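# Illustrative usage sketch (not part of the generated client): the client
# object and resource names below are hypothetical, and the masked operation is
# the one defined above, normally reached via
# SourceControlConfigurationClient.flux_config_operation_status.
#
#     status = await client.flux_config_operation_status.METHOD_NAME(
#         resource_group_name="example-rg",
#         cluster_rp="Microsoft.Kubernetes",
#         cluster_resource_name="connectedClusters",
#         cluster_name="example-cluster",
#         flux_configuration_name="example-flux-config",
#         operation_id="00000000-0000-0000-0000-000000000000")
#     print(status.status)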
|
2,395 |
cla
|
#import sys;sys.path.insert(0,'/home/ccsmm/workdir/ccsmmutils');import img_utils as myiu
import cv2
import numpy as np
import matplotlib.pyplot as plt
from ipdb import set_trace as BP
from PIL import Image
def image_crop(infilename, save_path):
"""
Takes an image file and a directory in which to save the cropped images, and saves each crop_img there.
:param infilename:
the image file to be cropped.
:param save_path:
the directory path where the cropped image files are saved.
:return:
"""
img = Image.open( infilename )
(img_h, img_w) = img.size
print(img.size)
# crop size: grid_w, grid_h
grid_w = 96 # crop width
grid_h = 96 # crop height
range_w = (int)(img_w/grid_w)
range_h = (int)(img_h/grid_h)
print(range_w, range_h)
i = 0
for w in range(range_w):
for h in range(range_h):
bbox = (h*grid_h, w*grid_w, (h+1)*(grid_h), (w+1)*(grid_w))
print(h*grid_h, w*grid_w, (h+1)*(grid_h), (w+1)*(grid_w))
# horizontal and vertical start, horizontal and vertical end
crop_img = img.crop(bbox)
fname = "{}.jpg".format("{0:05d}".format(i))
savename = save_path + fname
crop_img.save(savename)
print('save file ' + savename + '....')
i += 1
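# Illustrative usage sketch for image_crop() (the paths below are hypothetical
# placeholders, not files referenced anywhere in this module):
#
#     image_crop("input.jpg", "crops/")
#     # writes crops/00000.jpg, crops/00001.jpg, ... as 96x96 tiles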
def concat_images(imga, imgb):
"""
Combines two color image ndarrays side-by-side.
"""
imga=imgreshape(imga)
imgb=imgreshape(imgb)
ha,wa = imga.shape[:2]
hb,wb = imgb.shape[:2]
max_height = np.max([ha, hb])
total_width = wa+wb
new_img = np.zeros(shape=(max_height, total_width, 3))
new_img[:ha,:wa]=imga
new_img[:hb,wa:wa+wb]=imgb
return new_img
def concat_n_images(image_path_list):
"""
Combines N color images from a list of image paths.
"""
output = None
for i, img_path in enumerate(image_path_list):
img = plt.imread(img_path)[:,:,:3]
if i==0:
output = img
else:
output = concat_images(output, img)
return output
def tonumpy(data):
import numpy as np
import torch
try: data.type()
except :
return data
# if ( ('torch' in type(data)) and ('Tensor' in type(data)) ):
# if ('cuda' in type(data)):
if ( ('torch' in data.type()) and ('Tensor' in data.type()) ):
if ('cuda' in data.type()):
data=data.detach().cpu().numpy()
else:
data=data.detach().numpy()
return data
def subplot(sp=111):
plt.subplot(sp)
def title(title="No title"):
plt.title(title)
def plot(x,sp=111,title="No title",t=0.000001,fdir='./',save=0):
import matplotlib.pylab as plt
import numpy as np
import os
import torch
if len(np.shape(x)) != 1:
print("dimension not matched, getting last element")
x = x[-1]
# return 0
data = tonumpy(x)
plt.subplot(sp)
plt.plot(data)
plt.title(title)
if(t>0):
plt.draw()
plt.pause(t)
else:
plt.show()
if save:
fname=os.path.join(fdir,title+'.png')
plt.savefig(fname,bbox_inches='tight')
def imshow(img,sp=111,title="No title",t=0.000001,fdir='./',save=0):
import matplotlib.pylab as plt
import numpy as np
import os
import torch
if len(np.shape(img)) == 4: #batch_size * rgb
img=img[-1] #Take last image(3,480,640)
elif len(np.shape(img)) == 3: #rgb
img=img
elif len(np.shape(img)) == 2: #gray
img=img
else:
print("dimension not matched")
return 0
if isinstance(img, torch.Tensor):
img=tonumpy(img)
img=imgreshape(img)
plt.subplot(sp)
plt.imshow(img)
plt.title(title)
if(t>0):
plt.draw()
plt.pause(t)
else:
plt.show()
if save:
fname=os.path.join(fdir,title+'.png')
plt.savefig(fname,bbox_inches='tight')
def fig(num=0):
if num:
plt.figure(num)
else:
plt.figure()
def METHOD_NAME():
plt.METHOD_NAME()
def clf():
plt.clf()
def imsshow(imgs):
import torchvision
imshow(torchvision.utils.make_grid(imgs))
def imgreshape(img):
"""img must be a tensor type data"""
import numpy as np
import torch
if isinstance(img,torch.Tensor):
img=img.squeeze()
if len(img.shape) == 2:
# img=img.unsqueeze(0)
np.reshape(img,[1,np.shape(img)[0],np.shape(img)[1]])
if len(img.shape) == 3:
if img.shape[0] <= 3:
img=np.transpose(img,(1,2,0))
return img
def imgnormalize(img):
import cv2
return cv2.normalize(img,None,0,255,cv2.NORM_MINMAX)
#Default color space in OpenCV is BGR, but matplotlib's is RGB.
#So when we use matplotlib to disply it, we need to change the color space from bgr to rgb
def bgr2rgb(img):
return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
def rgb2bgr(img):
return cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
def test_dispgen(model,imgL,imgR,idx):
model.eval()
# imgL = torch.FloatTensor(imgL).cuda()
# imgR = torch.FloatTensor(imgR).cuda()
imgL = imresize(imgL,(368,1232))
imgR = imresize(imgR,(368,1232))
imgL=imgL.astype(np.float32)/256
imgR=imgR.astype(np.float32)/256
imgL = imgL.transpose(2,0,1)
imgR = imgR.transpose(2,0,1)
imgL=imgL.reshape(1,3,368,1232)
imgR=imgR.reshape(1,3,368,1232)
imgL = Variable(torch.FloatTensor(imgL).cuda())
imgR = Variable(torch.FloatTensor(imgR).cuda())
with torch.no_grad():
output = model(imgL,imgR)
output = torch.squeeze(output)
pred_disp = output.data.cpu().numpy()
display_save(imgL,imgR,pred_disp,idx)
return pred_disp
def display_save(imgL,imgR,dispL,inx):
import os
import skimage
import skimage.io
import skimage.transform
# output_dir='output_eval_disp'
output_dir=argsglb.output_dir
plt.clf()
img_utils.imshow(imgL,sp=221,title='Left')
img_utils.imshow(imgR,222,'Right')
img_utils.imshow(dispL.astype('uint16'),223,'Disp(est)')
# img_utils.imshow(dispL_GT.astype('uint16'),224,'Disp(GT)')
fname=os.path.join(output_dir,'psmnet_all_{}'.format(inx))
plt.savefig(fname,bbox_inches='tight')
fname=os.path.join(output_dir,'psmnet_disp_{}.png'.format(inx))
# skimage.io.imsave(fname,(dispL.squeeze()*256).astype('uint16'))
return 0
|
2,396 |
error
|
#!/usr/bin/env python3
#
# Copyright (c) 2017 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
"""
gperf C file post-processor
We use gperf to build up a perfect hashtable of pointer values. The way gperf
does this is to create a table 'wordlist' indexed by a string representation
of a pointer address, and then doing memcmp() on a string passed in for
comparison
We are exclusively working with fixed-size (here 8-byte) pointer values. This script adjusts
the generated code so that we work with pointers directly and not strings.
This saves a considerable amount of space.
"""
import sys
import argparse
import os
import re
from packaging import version
# --- debug stuff ---
def debug(text):
if not args.verbose:
return
sys.stdout.write(os.path.basename(sys.argv[0]) + ": " + text + "\n")
def METHOD_NAME(text):
sys.exit(os.path.basename(sys.argv[0]) + " ERROR: " + text)
def warn(text):
sys.stdout.write(
os.path.basename(
sys.argv[0]) +
" WARNING: " +
text +
"\n")
def reformat_str(match_obj):
addr_str = match_obj.group(0)
# Nip quotes
addr_str = addr_str[1:-1]
addr_vals = [0, 0, 0, 0, 0, 0, 0, 0]
ctr = 7
i = 0
while True:
if i >= len(addr_str):
break
if addr_str[i] == "\\":
if addr_str[i + 1].isdigit():
# Octal escape sequence
val_str = addr_str[i + 1:i + 4]
addr_vals[ctr] = int(val_str, 8)
i += 4
else:
# Char value that had to be escaped by C string rules
addr_vals[ctr] = ord(addr_str[i + 1])
i += 2
else:
addr_vals[ctr] = ord(addr_str[i])
i += 1
ctr -= 1
return "(char *)0x%02x%02x%02x%02x%02x%02x%02x%02x" % tuple(addr_vals)
def process_line(line, fp):
if line.startswith("#"):
fp.write(line)
return
# Set the lookup function to static inline so it gets rolled into
# z_object_find(), nothing else will use it
if re.search(args.pattern + " [*]$", line):
fp.write("static inline " + line)
return
m = re.search("gperf version (.*) [*][/]$", line)
if m:
v = version.parse(m.groups()[0])
v_lo = version.parse("3.0")
v_hi = version.parse("3.1")
if (v < v_lo or v > v_hi):
warn("gperf %s is not tested, versions %s through %s supported" %
(v, v_lo, v_hi))
# Replace length lookups with constant len since we're always
# looking at pointers
line = re.sub(r'lengthtable\[key\]', r'sizeof(void *)', line)
# Empty wordlist entries to have NULLs instead of ""
line = re.sub(r'[{]["]["][}]', r'{}', line)
# Suppress a compiler warning since this table is no longer necessary
line = re.sub(r'static unsigned char lengthtable',
r'static unsigned char __unused lengthtable', line)
# drop all use of register keyword, let compiler figure that out,
# we have to do this since we change stuff to take the address of some
# parameters
line = re.sub(r'register', r'', line)
# Hashing the address of the string
line = re.sub(r"hash [(]str, len[)]",
r"hash((const char *)&str, len)", line)
# Just compare pointers directly instead of using memcmp
if re.search("if [(][*]str", line):
fp.write(" if (str == s)\n")
return
# Take the strings with the binary information for the pointer values,
# and just turn them into pointers
line = re.sub(r'["].*["]', reformat_str, line)
fp.write(line)
def parse_args():
global args
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
allow_abbrev=False)
parser.add_argument("-i", "--input", required=True,
help="Input C file from gperf")
parser.add_argument("-o", "--output", required=True,
help="Output C file with processing done")
parser.add_argument("-p", "--pattern", required=True,
help="Search pattern for objects")
parser.add_argument("-v", "--verbose", action="store_true",
help="Print extra debugging information")
args = parser.parse_args()
if "VERBOSE" in os.environ:
args.verbose = 1
def main():
parse_args()
with open(args.input, "r") as in_fp, open(args.output, "w") as out_fp:
for line in in_fp.readlines():
process_line(line, out_fp)
if __name__ == "__main__":
main()
|
2,397 |
id for
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the Apache 2.0 License.
import infra.e2e_args
import infra.network
import infra.proc
import infra.commit
import http
import cimetrics.upload
from concurrent import futures
from infra.log_capture import flush_info
from infra.snp import IS_SNP
import infra.jwt_issuer
import time
from loguru import logger as LOG
DEFAULT_TIMEOUT_S = 10 if IS_SNP else 5
def submit_range(primary, id_pattern, start, end, format_width):
LOG.info(f"Starting submission of {start:>{format_width}} to {end:>{format_width}}")
def METHOD_NAME(i):
return id_pattern[i % len(id_pattern)]
first_seqno = None
last_seqno = None
view = None
seqno = None
with primary.client("user0") as c:
for i in range(start, end):
idx = METHOD_NAME(i)
msg = f"Unique message {i}"
r = c.post(
"/app/log/public",
{
"id": idx,
"msg": msg,
},
# Print logs for every 1000th submission, to show progress
log_capture=None if i % 1000 == 500 else [],
)
assert r.status_code == http.HTTPStatus.OK
seqno = r.seqno
view = r.view
if first_seqno is None:
first_seqno = seqno
last_seqno = seqno
return (first_seqno, view, last_seqno)
def get_all_entries(
client,
target_id,
from_seqno=None,
to_seqno=None,
timeout=DEFAULT_TIMEOUT_S,
log_on_success=False,
headers=None,
):
LOG.info(
f"Getting historical entries{f' from {from_seqno}' if from_seqno is not None else ''}{f' to {to_seqno}' if to_seqno is not None else ''} for id {target_id}"
)
logs = None if log_on_success else []
start_time = time.time()
end_time = start_time + timeout
entries = []
path = f"/app/log/public/historical/range?id={target_id}"
if from_seqno is not None:
path += f"&from_seqno={from_seqno}"
if to_seqno is not None:
path += f"&to_seqno={to_seqno}"
while time.time() < end_time:
r = client.get(path, headers=headers or {}) # , log_capture=logs)
if r.status_code == http.HTTPStatus.OK:
j_body = r.body.json()
entries += j_body["entries"]
if "@nextLink" in j_body:
path = j_body["@nextLink"]
continue
else:
# No @nextLink means we've reached end of range
duration = time.time() - start_time
LOG.info(f"Done! Fetched {len(entries)} entries in {duration:0.2f}s")
return entries, duration
elif r.status_code == http.HTTPStatus.ACCEPTED:
# Ignore retry-after header, retry soon
time.sleep(0.1)
continue
else:
LOG.error("Printing historical/range logs on unexpected status")
flush_info(logs, None)
raise ValueError(
f"""
Unexpected status code from historical range query: {r.status_code}
{r.body}
"""
)
LOG.error("Printing historical/range logs on timeout")
flush_info(logs, None)
raise TimeoutError(f"Historical range not available after {timeout}s")
def test_historical_query_range(network, args):
id_a = 2
id_b = 3
id_c = 4
# NB: Because we submit from multiple concurrent threads, the actual pattern
# on the ledger will not match this but will be interleaved. But the final
# ratio of transactions will match this
id_pattern = [id_a, id_a, id_a, id_b, id_b, id_c]
n_entries = 30000
format_width = len(str(n_entries))
jwt_issuer = infra.jwt_issuer.JwtIssuer()
jwt_issuer.register(network)
jwt = jwt_issuer.issue_jwt()
primary, _ = network.find_primary()
# Submit many transactions, overwriting the same IDs
LOG.info(f"Submitting {n_entries} entries")
submissions_per_job = 1000
assigned = 0
fs = []
with futures.ThreadPoolExecutor() as executor:
while assigned < n_entries:
start = assigned
end = min(n_entries, assigned + submissions_per_job)
fs.append(
executor.submit(
submit_range, primary, id_pattern, start, end, format_width
)
)
assigned = end
results = [f.result() for f in fs]
first_seqno = min(res[0] for res in results)
view = max(res[1] for res in results)
last_seqno = max(res[2] for res in results)
with primary.client("user0") as c:
infra.commit.wait_for_commit(c, seqno=last_seqno, view=view, timeout=3)
LOG.info(
f"Total ledger contains {last_seqno} entries, of which we expect our transactions to be spread over a range of ~{last_seqno - first_seqno} transactions"
)
# Total fetch time depends on number of entries. We expect to be much faster than this, but
# to set a safe timeout allow for a rate as low as 100 fetches per second
timeout = n_entries / 100
# Ensure all nodes have reached committed state before querying a backup for historical state
network.wait_for_all_nodes_to_commit(primary=primary)
entries = {}
node = network.find_node_by_role(role=infra.network.NodeRole.BACKUP, log_capture=[])
with node.client(common_headers={"authorization": f"Bearer {jwt}"}) as c:
# Index is currently built lazily to avoid impacting other perf tests using the same app
# So pre-fetch to ensure index is fully constructed
get_all_entries(c, id_a, timeout=timeout)
get_all_entries(c, id_b, timeout=timeout)
get_all_entries(c, id_c, timeout=timeout)
entries[id_a], duration_a = get_all_entries(c, id_a, timeout=timeout)
entries[id_b], duration_b = get_all_entries(c, id_b, timeout=timeout)
entries[id_c], duration_c = get_all_entries(c, id_c, timeout=timeout)
c.get("/node/memory")
id_a_fetch_rate = len(entries[id_a]) / duration_a
id_b_fetch_rate = len(entries[id_b]) / duration_b
id_c_fetch_rate = len(entries[id_c]) / duration_c
average_fetch_rate = (id_a_fetch_rate + id_b_fetch_rate + id_c_fetch_rate) / 3
with cimetrics.upload.metrics(complete=False) as metrics:
upload_name = "hist_sgx_cft^"
LOG.debug(f"Uploading metric: {upload_name} = {average_fetch_rate}")
metrics.put(upload_name, average_fetch_rate)
# NB: The similar test in e2e_logging checks correctness, so we make no duplicate
# assertions here
return network
def run(args):
with infra.network.network(
args.nodes, args.binary_dir, args.debug_nodes, args.perf_nodes, pdb=args.pdb
) as network:
network.start_and_open(args)
network = test_historical_query_range(network, args)
if __name__ == "__main__":
def add(parser):
pass
args = infra.e2e_args.cli_args(add=add)
args.package = "samples/apps/logging/liblogging"
args.nodes = infra.e2e_args.max_nodes(args, f=0)
args.initial_member_count = 1
args.sig_ms_interval = 1000 # Set to cchost default value
run(args)
|
2,398 |
set up
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from copy import deepcopy
from unittest import TestCase
from unittest.mock import MagicMock
from mmengine.registry import init_default_scope
from mmocr.datasets import ConcatDataset, OCRDataset
from mmocr.registry import TRANSFORMS
class TestConcatDataset(TestCase):
@TRANSFORMS.register_module()
class MockTransform:
def __init__(self, return_value):
self.return_value = return_value
def __call__(self, *args, **kwargs):
return self.return_value
def METHOD_NAME(self):
init_default_scope('mmocr')
dataset = OCRDataset
# create dataset_a
data_info = dict(filename='img_1.jpg', height=720, width=1280)
dataset.parse_data_info = MagicMock(return_value=data_info)
self.dataset_a = dataset(
data_root=osp.join(
osp.dirname(__file__), '../data/det_toy_dataset'),
data_prefix=dict(img_path='imgs'),
ann_file='textdet_test.json')
self.dataset_a_with_pipeline = dataset(
data_root=osp.join(
osp.dirname(__file__), '../data/det_toy_dataset'),
data_prefix=dict(img_path='imgs'),
ann_file='textdet_test.json',
pipeline=[dict(type='MockTransform', return_value=1)])
# create dataset_b
data_info = dict(filename='img_2.jpg', height=720, width=1280)
dataset.parse_data_info = MagicMock(return_value=data_info)
self.dataset_b = dataset(
data_root=osp.join(
osp.dirname(__file__), '../data/det_toy_dataset'),
data_prefix=dict(img_path='imgs'),
ann_file='textdet_test.json')
self.dataset_b_with_pipeline = dataset(
data_root=osp.join(
osp.dirname(__file__), '../data/det_toy_dataset'),
data_prefix=dict(img_path='imgs'),
ann_file='textdet_test.json',
pipeline=[dict(type='MockTransform', return_value=2)])
def test_init(self):
with self.assertRaises(TypeError):
ConcatDataset(datasets=[0])
with self.assertRaises(ValueError):
ConcatDataset(
datasets=[
deepcopy(self.dataset_a_with_pipeline),
deepcopy(self.dataset_b)
],
pipeline=[dict(type='MockTransform', return_value=3)])
with self.assertRaises(ValueError):
ConcatDataset(
datasets=[
deepcopy(self.dataset_a),
deepcopy(self.dataset_b_with_pipeline)
],
pipeline=[dict(type='MockTransform', return_value=3)])
with self.assertRaises(ValueError):
dataset_a = deepcopy(self.dataset_a)
dataset_b = OCRDataset(
metainfo=dict(dummy='dummy'),
data_root=osp.join(
osp.dirname(__file__), '../data/det_toy_dataset'),
data_prefix=dict(img_path='imgs'),
ann_file='textdet_test.json')
ConcatDataset(datasets=[dataset_a, dataset_b])
# test lazy init
ConcatDataset(
datasets=[deepcopy(self.dataset_a),
deepcopy(self.dataset_b)],
pipeline=[dict(type='MockTransform', return_value=3)],
lazy_init=True)
def test_getitem(self):
cat_datasets = ConcatDataset(
datasets=[deepcopy(self.dataset_a),
deepcopy(self.dataset_b)],
pipeline=[dict(type='MockTransform', return_value=3)])
for datum in cat_datasets:
self.assertEqual(datum, 3)
cat_datasets = ConcatDataset(
datasets=[
deepcopy(self.dataset_a_with_pipeline),
deepcopy(self.dataset_b)
],
pipeline=[dict(type='MockTransform', return_value=3)],
force_apply=True)
for datum in cat_datasets:
self.assertEqual(datum, 3)
cat_datasets = ConcatDataset(datasets=[
deepcopy(self.dataset_a_with_pipeline),
deepcopy(self.dataset_b_with_pipeline)
])
self.assertEqual(cat_datasets[0], 1)
self.assertEqual(cat_datasets[-1], 2)
|
2,399 |
worker
|
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Implements ThreadPoolExecutor."""
import atexit
from concurrent.futures import _base
import itertools
import Queue as queue
import threading
import weakref
import sys
try:
from multiprocessing import cpu_count
except ImportError:
# some platforms don't have multiprocessing
def cpu_count():
return None
__author__ = 'Brian Quinlan ([email protected])'
# Workers are created as daemon threads. This is done to allow the interpreter
# to exit when there are still idle threads in a ThreadPoolExecutor's thread
# pool (i.e. shutdown() was not called). However, allowing workers to die with
# the interpreter has two undesirable properties:
# - The workers would still be running during interpreter shutdown,
# meaning that they would fail in unpredictable ways.
# - The workers could be killed while evaluating a work item, which could
# be bad if the callable being evaluated has external side-effects e.g.
# writing to a file.
#
# To work around this problem, an exit handler is installed which tells the
# workers to exit when their work queues are empty and then waits until the
# threads finish.
_threads_queues = weakref.WeakKeyDictionary()
_shutdown = False
def _python_exit():
global _shutdown
_shutdown = True
items = list(_threads_queues.items()) if _threads_queues else ()
for t, q in items:
q.put(None)
for t, q in items:
t.join(sys.maxint)
atexit.register(_python_exit)
class _WorkItem(object):
def __init__(self, future, fn, args, kwargs):
self.future = future
self.fn = fn
self.args = args
self.kwargs = kwargs
def run(self):
if not self.future.set_running_or_notify_cancel():
return
try:
result = self.fn(*self.args, **self.kwargs)
except:
e, tb = sys.exc_info()[1:]
self.future.set_exception_info(e, tb)
else:
self.future.set_result(result)
def METHOD_NAME(executor_reference, work_queue, initializer, initargs):
if initializer is not None:
try:
initializer(*initargs)
except BaseException:
_base.LOGGER.critical('Exception in initializer:', exc_info=True)
executor = executor_reference()
if executor is not None:
executor._initializer_failed()
return
try:
while True:
work_item = work_queue.get(block=True)
if work_item is not None:
work_item.run()
# Delete references to object. See issue16284
del work_item
# attempt to increment idle count
executor = executor_reference()
if executor is not None:
executor._idle_semaphore.release()
del executor
continue
executor = executor_reference()
# Exit if:
# - The interpreter is shutting down OR
# - The executor that owns the worker has been collected OR
# - The executor that owns the worker has been shutdown.
if _shutdown or executor is None or executor._shutdown:
# Notice other workers
work_queue.put(None)
return
del executor
except:
_base.LOGGER.critical('Exception in worker', exc_info=True)
class BrokenThreadPool(_base.BrokenExecutor):
"""
Raised when a worker thread in a ThreadPoolExecutor failed initializing.
"""
class ThreadPoolExecutor(_base.Executor):
# Used to assign unique thread names when thread_name_prefix is not supplied.
_counter = itertools.count().next
def __init__(self, max_workers=None, thread_name_prefix='', initializer=None, initargs=()):
"""Initializes a new ThreadPoolExecutor instance.
Args:
max_workers: The maximum number of threads that can be used to
execute the given calls.
thread_name_prefix: An optional name prefix to give our threads.
"""
if max_workers is None:
# Use this number because ThreadPoolExecutor is often
# used to overlap I/O instead of CPU work.
max_workers = (cpu_count() or 1) * 5
if max_workers <= 0:
raise ValueError("max_workers must be greater than 0")
self._max_workers = max_workers
self._initializer = initializer
self._initargs = initargs
self._work_queue = queue.Queue()
self._idle_semaphore = threading.Semaphore(0)
self._threads = set()
self._broken = False
self._shutdown = False
self._shutdown_lock = threading.Lock()
self._thread_name_prefix = (thread_name_prefix or
("ThreadPoolExecutor-%d" % self._counter()))
def submit(self, fn, *args, **kwargs):
with self._shutdown_lock:
if self._broken:
raise BrokenThreadPool(self._broken)
if self._shutdown:
raise RuntimeError('cannot schedule new futures after shutdown')
f = _base.Future()
w = _WorkItem(f, fn, args, kwargs)
self._work_queue.put(w)
self._adjust_thread_count()
return f
submit.__doc__ = _base.Executor.submit.__doc__
def _adjust_thread_count(self):
# if idle threads are available, don't spin new threads
if self._idle_semaphore.acquire(False):
return
# When the executor gets lost, the weakref callback will wake up
# the worker threads.
def weakref_cb(_, q=self._work_queue):
q.put(None)
num_threads = len(self._threads)
if num_threads < self._max_workers:
thread_name = '%s_%d' % (self._thread_name_prefix or self,
num_threads)
t = threading.Thread(name=thread_name, target=METHOD_NAME,
args=(weakref.ref(self, weakref_cb),
self._work_queue, self._initializer, self._initargs))
t.daemon = True
t.start()
self._threads.add(t)
_threads_queues[t] = self._work_queue
def _initializer_failed(self):
with self._shutdown_lock:
self._broken = ('A thread initializer failed, the thread pool '
'is not usable anymore')
# Drain work queue and mark pending futures failed
while True:
try:
work_item = self._work_queue.get_nowait()
except queue.Empty:
break
if work_item is not None:
work_item.future.set_exception(BrokenThreadPool(self._broken))
def shutdown(self, wait=True):
with self._shutdown_lock:
self._shutdown = True
self._work_queue.put(None)
if wait:
for t in self._threads:
t.join(sys.maxint)
shutdown.__doc__ = _base.Executor.shutdown.__doc__
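# Illustrative usage sketch (not part of the backport itself; pow and the
# worker count are arbitrary examples):
#
#     with ThreadPoolExecutor(max_workers=4) as executor:
#         futures = [executor.submit(pow, 2, n) for n in range(8)]
#         results = [f.result() for f in futures]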
|