id (int64, 0-6k) | code (string, length 4k-8k) | code_compressed (null)
---|---|---|
1,300 |
import os
import subprocess
import sys
import xml.etree.ElementTree
import pytest
import tiledb
from .common import DiskTestCase
# Wrapper to execute specific code in subprocess so that we can ensure the thread count
# init is correct. Necessary because multiprocessing.get_context is only available in Python 3.4+,
# and the multiprocessing method may be set to fork by other tests (e.g. dask).
def init_test_wrapper(cfg=None):
python_exe = sys.executable
cmd = (
f"from tiledb.tests.test_context_and_config import init_test_helper; "
f"init_test_helper({cfg})"
)
test_path = os.path.dirname(os.path.abspath(__file__))
sp_output = subprocess.check_output([python_exe, "-c", cmd], cwd=test_path)
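# the helper prints the effective io concurrency level on stdout; parse it back as an int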
return int(sp_output.decode("UTF-8").strip())
def init_test_helper(cfg=None):
tiledb.default_ctx(cfg)
concurrency_level = tiledb.default_ctx().config()["sm.io_concurrency_level"]
print(int(concurrency_level))
class ContextTest(DiskTestCase):
def test_default_ctx(self):
ctx = tiledb.default_ctx()
self.assertIsInstance(ctx, tiledb.Ctx)
assert isinstance(ctx.config(), tiledb.libtiledb.Config)
def test_default_ctx_errors(self):
config = tiledb.Config()
ctx = tiledb.Ctx(config=config)
with pytest.raises(ValueError) as excinfo:
tiledb.default_ctx(ctx)
assert (
"default_ctx takes in `tiledb.Config` object or dictionary with "
"config parameters."
) == str(excinfo.value)
def test_scope_ctx(self):
key = "sm.memory_budget"
ctx0 = tiledb.default_ctx()
new_config_dict = {key: 42}
new_config = tiledb.Config({key: 78})
new_ctx = tiledb.Ctx({key: 61})
assert tiledb.default_ctx() is ctx0
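# 5368709120 == 5 GiB, the default value of sm.memory_budget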
assert tiledb.default_ctx().config()[key] == "5368709120"
with tiledb.scope_ctx(new_config_dict) as ctx1:
assert tiledb.default_ctx() is ctx1
assert tiledb.default_ctx().config()[key] == "42"
with tiledb.scope_ctx(new_config) as ctx2:
assert tiledb.default_ctx() is ctx2
assert tiledb.default_ctx().config()[key] == "78"
with tiledb.scope_ctx(new_ctx) as ctx3:
assert tiledb.default_ctx() is ctx3 is new_ctx
assert tiledb.default_ctx().config()[key] == "61"
assert tiledb.default_ctx() is ctx2
assert tiledb.default_ctx().config()[key] == "78"
assert tiledb.default_ctx() is ctx1
assert tiledb.default_ctx().config()[key] == "42"
assert tiledb.default_ctx() is ctx0
assert tiledb.default_ctx().config()[key] == "5368709120"
def test_scope_ctx_error(self):
with pytest.raises(ValueError) as excinfo:
with tiledb.scope_ctx([]):
pass
assert (
"scope_ctx takes in `tiledb.Ctx` object, `tiledb.Config` object, "
"or dictionary with config parameters."
) == str(excinfo.value)
@pytest.mark.skipif(
"pytest.tiledb_vfs == 's3'", reason="Test not yet supported with S3"
)
@pytest.mark.filterwarnings(
# As of 0.17.0, a warning is emitted for the aarch64 conda builds with
# the message:
# <jemalloc>: MADV_DONTNEED does not work (memset will be used instead)
# <jemalloc>: (This is the expected behaviour if you are running under QEMU)
# This can be ignored as this is being run in a Docker image / QEMU and
# is therefore expected behavior
"ignore:This is the expected behaviour if you are running under QEMU"
)
def test_init_config(self):
self.assertEqual(
int(tiledb.default_ctx().config()["sm.io_concurrency_level"]),
init_test_wrapper(),
)
self.assertEqual(3, init_test_wrapper({"sm.io_concurrency_level": 3}))
@pytest.mark.skipif(
"pytest.tiledb_vfs == 's3'", reason="Test not yet supported with S3"
)
class TestConfig(DiskTestCase):
def test_config(self):
config = tiledb.Config()
config["sm.memory_budget"] = 103
assert repr(config) is not None
tiledb.Ctx(config)
def test_ctx_config(self):
ctx = tiledb.Ctx({"sm.memory_budget": 103})
config = ctx.config()
self.assertEqual(config["sm.memory_budget"], "103")
def test_vfs_config(self):
config = tiledb.Config()
config["vfs.min_parallel_size"] = 1
ctx = tiledb.Ctx()
self.assertEqual(ctx.config()["vfs.min_parallel_size"], "10485760")
vfs = tiledb.VFS(config, ctx=ctx)
self.assertEqual(vfs.config()["vfs.min_parallel_size"], "1")
def test_config_iter(self):
config = tiledb.Config()
k, v = [], []
for p in config.items():
k.append(p[0])
v.append(p[1])
self.assertTrue(len(k) > 0)
k, v = [], []
for p in config.items("vfs.s3."):
k.append(p[0])
v.append(p[1])
self.assertTrue(len(k) > 0)
def METHOD_NAME(self):
config = tiledb.Config()
config["sm.foo"] = "bar"
ctx = tiledb.Ctx(config)
self.assertEqual(ctx.config()["sm.foo"], "bar")
def test_config_unset(self):
config = tiledb.Config()
config["sm.memory_budget"] = 103
del config["sm.memory_budget"]
# check that config parameter is default
self.assertEqual(
config["sm.memory_budget"], tiledb.Config()["sm.memory_budget"]
)
def test_config_from_file(self):
# skip: because Config.load doesn't support VFS-supported URIs?
if pytest.tiledb_vfs == "s3":
pytest.skip(
"TODO need more plumbing to make pandas use TileDB VFS to read CSV files"
)
config_path = self.path("config")
with tiledb.FileIO(self.vfs, config_path, "wb") as fh:
fh.write("sm.memory_budget 100")
config = tiledb.Config.load(config_path)
self.assertEqual(config["sm.memory_budget"], "100")
def test_ctx_config_from_file(self):
config_path = self.path("config")
vfs = tiledb.VFS()
with tiledb.FileIO(vfs, config_path, "wb") as fh:
fh.write("sm.memory_budget 100")
ctx = tiledb.Ctx(config=tiledb.Config.load(config_path))
config = ctx.config()
self.assertEqual(config["sm.memory_budget"], "100")
def test_ctx_config_dict(self):
ctx = tiledb.Ctx(config={"sm.memory_budget": "100"})
config = ctx.config()
assert issubclass(type(config), tiledb.libtiledb.Config)
self.assertEqual(config["sm.memory_budget"], "100")
def test_config_repr_html(self):
config = tiledb.Config()
try:
assert xml.etree.ElementTree.fromstring(config._repr_html_()) is not None
except Exception:
pytest.fail(
f"Could not parse config._repr_html_(). Saw {config._repr_html_()}"
)
| null |
1,301 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkiot.endpoint import endpoint_data
class CreateOTADynamicUpgradeJobRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Iot', '2018-01-20', 'CreateOTADynamicUpgradeJob')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_DynamicMode(self):
return self.get_query_params().get('DynamicMode')
def METHOD_NAME(self,DynamicMode):
self.add_query_param('DynamicMode',DynamicMode)
def get_MultiModuleMode(self):
return self.get_query_params().get('MultiModuleMode')
def set_MultiModuleMode(self,MultiModuleMode):
self.add_query_param('MultiModuleMode',MultiModuleMode)
def get_RetryCount(self):
return self.get_query_params().get('RetryCount')
def set_RetryCount(self,RetryCount):
self.add_query_param('RetryCount',RetryCount)
def get_TimeoutInMinutes(self):
return self.get_query_params().get('TimeoutInMinutes')
def set_TimeoutInMinutes(self,TimeoutInMinutes):
self.add_query_param('TimeoutInMinutes',TimeoutInMinutes)
def get_NeedConfirm(self):
return self.get_query_params().get('NeedConfirm')
def set_NeedConfirm(self,NeedConfirm):
self.add_query_param('NeedConfirm',NeedConfirm)
def get_GroupType(self):
return self.get_query_params().get('GroupType')
def set_GroupType(self,GroupType):
self.add_query_param('GroupType',GroupType)
def get_NeedPush(self):
return self.get_query_params().get('NeedPush')
def set_NeedPush(self,NeedPush):
self.add_query_param('NeedPush',NeedPush)
def get_IotInstanceId(self):
return self.get_query_params().get('IotInstanceId')
def set_IotInstanceId(self,IotInstanceId):
self.add_query_param('IotInstanceId',IotInstanceId)
def get_DownloadProtocol(self):
return self.get_query_params().get('DownloadProtocol')
def set_DownloadProtocol(self,DownloadProtocol):
self.add_query_param('DownloadProtocol',DownloadProtocol)
def get_Tags(self):
return self.get_query_params().get('Tag')
def set_Tags(self, Tags):
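# Tag values are flattened into 1-indexed query parameters: Tag.1.Key / Tag.1.Value, Tag.2.Key / Tag.2.Value, ...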
for depth1 in range(len(Tags)):
if Tags[depth1].get('Value') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tags[depth1].get('Value'))
if Tags[depth1].get('Key') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tags[depth1].get('Key'))
def get_GroupId(self):
return self.get_query_params().get('GroupId')
def set_GroupId(self,GroupId):
self.add_query_param('GroupId',GroupId)
def get_FirmwareId(self):
return self.get_query_params().get('FirmwareId')
def set_FirmwareId(self,FirmwareId):
self.add_query_param('FirmwareId',FirmwareId)
def get_ProductKey(self):
return self.get_query_params().get('ProductKey')
def set_ProductKey(self,ProductKey):
self.add_query_param('ProductKey',ProductKey)
def get_RetryInterval(self):
return self.get_query_params().get('RetryInterval')
def set_RetryInterval(self,RetryInterval):
self.add_query_param('RetryInterval',RetryInterval)
def get_SrcVersions(self):
return self.get_query_params().get('SrcVersion')
def set_SrcVersions(self, SrcVersions):
for depth1 in range(len(SrcVersions)):
if SrcVersions[depth1] is not None:
self.add_query_param('SrcVersion.' + str(depth1 + 1) , SrcVersions[depth1])
def get_OverwriteMode(self):
return self.get_query_params().get('OverwriteMode')
def set_OverwriteMode(self,OverwriteMode):
self.add_query_param('OverwriteMode',OverwriteMode)
def get_MaximumPerMinute(self):
return self.get_query_params().get('MaximumPerMinute')
def set_MaximumPerMinute(self,MaximumPerMinute):
self.add_query_param('MaximumPerMinute',MaximumPerMinute)
| null |
1,302 |
from galaxy import model
from galaxy.app_unittest_utils.tools_support import UsesApp
from galaxy.tools.parameters import (
basic,
dataset_matcher,
)
from galaxy.util import (
bunch,
XML,
)
from galaxy.util.unittest import TestCase
from .test_data_parameters import MockHistoryDatasetAssociation
class MockTool:
def __init__(self, app):
self.app = app
self.tool_type = "default"
self.valid_input_states = model.Dataset.valid_input_states
class TestDatasetMatcher(TestCase, UsesApp):
def test_hda_mismatches(self):
# Datasets not visible are not "valid" for param.
self.mock_hda.visible = False
assert not self.test_context.hda_match(self.mock_hda)
# Datasets that don't match datatype are not valid.
self.mock_hda.visible = True
self.mock_hda.extension = "data"
self.mock_hda.conversion_destination = (False, None, None)
assert not self.test_context.hda_match(self.mock_hda)
def METHOD_NAME(self):
# Datasets that are visible and matching are valid
self.mock_hda.visible = True
self.mock_hda.extension = "txt"
hda_match = self.test_context.hda_match(self.mock_hda, check_implicit_conversions=False)
assert hda_match
# Match is not a conversion and so matching hda is the same hda
# supplied.
assert not hda_match.implicit_conversion
assert hda_match.hda == self.mock_hda
def test_valid_hda_implicit_converted(self):
# Find conversion returns an HDA to an already implicitly converted
# dataset.
self.mock_hda.extension = "data"
converted_hda = model.HistoryDatasetAssociation()
self.mock_hda.conversion_destination = (False, "tabular", converted_hda)
hda_match = self.test_context.hda_match(self.mock_hda)
assert hda_match
assert hda_match.implicit_conversion
assert hda_match.hda == converted_hda
assert hda_match.target_ext == "tabular"
def test_hda_match_implicit_can_convert(self):
# Find conversion returns a target extension to convert to, but not
# a previously implicitly converted dataset.
self.mock_hda.extension = "data"
self.mock_hda.conversion_destination = (False, "tabular", None)
hda_match = self.test_context.hda_match(self.mock_hda)
assert hda_match
assert hda_match.implicit_conversion
assert hda_match.hda == self.mock_hda
assert hda_match.target_ext == "tabular"
def test_hda_match_properly_skips_conversion(self):
self.mock_hda.extension = "data"
self.mock_hda.conversion_destination = (False, "tabular", bunch.Bunch())
hda_match = self.test_context.hda_match(self.mock_hda, check_implicit_conversions=False)
assert not hda_match
def test_data_destination_tools_require_public(self):
self.tool.tool_type = "data_destination"
# Public datasets okay and valid
self.app.security_agent.dataset_is_public = lambda dataset: True
hda_match = self.test_context.hda_match(self.mock_hda)
assert hda_match
# Non-public datasets not valid
self.app.security_agent.dataset_is_public = lambda dataset: False
hda_match = self.test_context.hda_match(self.mock_hda)
assert not hda_match
def test_filtered_hda_matched_key(self):
self.filtered_param = True
data1_val = model.HistoryDatasetAssociation()
data1_val.dbkey = "hg18"
self.other_values = {"data1": data1_val}
assert self.test_context.filter_values == {"hg18"}
# mock_hda is hg19, other is hg18 so should not be "valid hda"
hda_match = self.test_context.hda_match(self.mock_hda)
assert not hda_match
def test_filtered_hda_unmatched_key(self):
self.filtered_param = True
data1_val = model.HistoryDatasetAssociation()
data1_val.dbkey = "hg19"
self.other_values = {"data1": data1_val}
# Other param value and this dataset both hg19, should be valid
hda_match = self.test_context.hda_match(self.mock_hda)
assert hda_match
def test_metadata_filtered_hda_options_filter_attribute_matched_keys(self):
self.metadata_filtered_param = True
data1_val = model.HistoryDatasetAssociation()
self.other_values = {"data1": data1_val}
hda1 = MockHistoryDatasetAssociation()
hda1.metadata = MockMetadata()
hda1.metadata.foo = "bar"
hda2 = MockHistoryDatasetAssociation()
hda2.metadata = MockMetadata()
hda2.metadata.foo = "baz"
assert self.test_context.filter_values == {"baz", "bar"}
hda_match = self.test_context.hda_match(hda1)
assert hda_match
hda_match = self.test_context.hda_match(hda2)
assert hda_match
def test_metadata_filtered_hda_options_filter_attribute_unmatched_key(self):
self.metadata_filtered_param = True
data1_val = model.HistoryDatasetAssociation()
self.other_values = {"data1": data1_val}
hda = MockHistoryDatasetAssociation()
hda.metadata = MockMetadata()
hda.metadata.foo = "no-match"
assert self.test_context.filter_values == {"baz", "bar"}
hda_match = self.test_context.hda_match(hda)
assert not hda_match
def setUp(self):
self.setup_app()
self.mock_hda = MockHistoryDatasetAssociation()
self.tool = MockTool(self.app)
self.current_user_roles = []
self.other_values = {}
# Reset lazily generated stuff
self.filtered_param = False
self.metadata_filtered_param = False
self._test_context = None
self.param = None
@property
def test_context(self):
if self._test_context is None:
option_xml = ""
if self.filtered_param:
option_xml = """<options><filter type="data_meta" ref="data1" key="dbkey" /></options>"""
if self.metadata_filtered_param:
option_xml = """
<options options_filter_attribute="metadata.foo">
<filter type="add_value" value="bar" />
<filter type="add_value" value="baz" />
</options>"""
param_xml = XML(f"""<param name="data2" type="data" format="txt">{option_xml}</param>""")
self.param = basic.DataToolParameter(
self.tool,
param_xml,
)
trans = bunch.Bunch(
app=self.app,
get_current_user_roles=lambda: self.current_user_roles,
workflow_building_mode=True,
)
self._test_context = dataset_matcher.get_dataset_matcher_factory(trans).dataset_matcher(
param=self.param, other_values=self.other_values
)
return self._test_context
class MockMetadata:
def __init__(self):
self.foo = None
| null |
1,303 |
#!/usr/bin/env python
""" Checks all data resources give 200s
"""
import json
import os
import re
from unittest import TestCase, main
from g2p.app import APP
from g2p.log import LOGGER
from g2p.mappings.langs import LANGS_NETWORK
from g2p.tests.public import __file__ as PUB_FILE
PUB_DIR = os.path.dirname(PUB_FILE)
class ResourceIntegrationTest(TestCase):
"""
This tests that the api returns 200s for all basic
GET requests.
"""
def setUp(self):
# Test external hosts
self.client = APP.test_client
self.prefix = "/api/v1"
# routes
self.conversion_route = "/api/v1/g2p"
self.static_route = "/static/<path:filename>"
self.routes = [str(route) for route in APP.url_map.iter_rules()]
self.routes_no_args = [
route
for route in self.routes
if "<" not in route and route != self.conversion_route
]
self.routes_only_args = [
route
for route in self.routes
if "<" in route and route != self.static_route
]
# endpoints
self.rules_by_endpoint = APP.url_map._rules_by_endpoint
self.endpoints = [rt for rt in self.rules_by_endpoint.keys()]
# args
self.arg_match = re.compile(r"\<[a-z:]+\>")
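# matches Flask-style URL placeholders such as "<node>" so they can be substituted with concrete values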
self.args_to_check = "node"
def return_endpoint_arg(self, ep):
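# return the last dot-separated component of the endpoint name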
split = ep.split(".")
split_length = len(split)
return split[split_length - 1]
def METHOD_NAME(self, ep):
return str(self.rules_by_endpoint[ep][0])
def test_response_code(self):
"""
Ensure all routes return 200
"""
for rt in self.routes_no_args:
try:
r = self.client().get(rt)
self.assertEqual(r.status_code, 200)
LOGGER.debug("Route " + rt + " returned " + str(r.status_code))
except Exception:
LOGGER.error("Couldn't connect. Is flask running?")
def test_response_code_with_args(self):
"""
Ensure all args return 200
"""
for ep in self.routes_only_args:
for node in LANGS_NETWORK.nodes:
rt = re.sub(self.arg_match, node, ep)
try:
r = self.client().get(rt)
self.assertEqual(r.status_code, 200)
except Exception:
LOGGER.error("Couldn't connect. Is flask running?")
LOGGER.debug(
"Successfully tested "
+ str(len(LANGS_NETWORK.nodes))
+ " node resources at route "
+ ep
+ " ."
)
def test_g2p_conversion(self):
"""
Ensure conversion returns proper response
"""
params = {
"in-lang": "dan",
"out-lang": "eng-arpabet",
"text": "hej",
"debugger": True,
"index": True,
}
minimal_params = {
"in-lang": "dan",
"out-lang": "eng-arpabet",
"text": "hej",
"debugger": False,
"index": False,
}
bad_params = {"in-lang": "dan", "out-lang": "moh", "text": "hej"}
same_params = {"in-lang": "dan", "out-lang": "dan", "text": "hej"}
missing_params = {
"in-lang": "not-here",
"out-lang": "eng-arpabet",
"text": "hej",
}
self.maxDiff = None
response = self.client().get(self.conversion_route, query_string=params)
res_json = response.get_json()
self.assertEqual(response.status_code, 200)
with open(os.path.join(PUB_DIR, "sample_response.json")) as f:
data = json.load(f)
self.assertEqual(res_json, data)
# check minimal response
minimal_response = self.client().get(
self.conversion_route, query_string=minimal_params
)
data["debugger"] = False
data["index"] = False
self.assertEqual(minimal_response.status_code, 200)
self.assertEqual(minimal_response.get_json(), data)
with self.assertLogs(LOGGER, level="ERROR"):
bad_response = self.client().get(
self.conversion_route, query_string=bad_params
)
with self.assertLogs(LOGGER, level="ERROR"):
same_response = self.client().get(
self.conversion_route, query_string=same_params
)
self.assertEqual(bad_response.status_code, 400)
self.assertEqual(same_response.status_code, 400)
with self.assertLogs(LOGGER, level="ERROR"):
missing_response = self.client().get(
self.conversion_route, query_string=missing_params
)
self.assertEqual(missing_response.status_code, 404)
def test_g2p_conversion_with_tok(self):
params_with_tok = {
"in-lang": "fra",
"out-lang": "eng-arpabet",
"text": "ceci, celà",
"debugger": True,
"index": True,
"tokenize": True,
}
response = self.client().get(
self.conversion_route, query_string=params_with_tok
)
self.assertEqual(response.status_code, 200)
res_json_tok = response.get_json()
self.assertEqual(res_json_tok["debugger"][0][0][0]["input"], "ceci")
params_no_tok = {
"in-lang": "fra",
"out-lang": "eng-arpabet",
"text": "ceci, celà",
"debugger": True,
"index": True,
"tokenize": False,
}
response = self.client().get(self.conversion_route, query_string=params_no_tok)
self.assertEqual(response.status_code, 200)
res_json_no_tok = response.get_json()
self.assertNotEqual(res_json_tok, res_json_no_tok)
self.assertEqual(res_json_no_tok["debugger"][0][0][0]["input"], "ceci, celà")
self.assertNotEqual(res_json_tok["debugger"], res_json_no_tok["debugger"])
if __name__ == "__main__":
main()
| null |
1,304 |
import os
import signal
import sys
import click
from click.exceptions import Abort, ClickException
from .types import ExitCode
class InterruptAwareCommandMixin(click.BaseCommand):
"""
Replace the main() method to support proper exit-codes
for interruptions on Windows and POSIX platforms.
Using this, interrupting the command will let the shell
know that the execution is also interrupted instead of
continuing the shell/batch script.
"""
def main(self, *args, **kwargs):
try:
_interrupted = False
kwargs.pop("standalone_mode", None)
kwargs.pop("prog_name", None)
super().main(
*args,
standalone_mode=False,
prog_name="backend.ai",
**kwargs,
)
except KeyboardInterrupt:
# For interruptions outside the Click's exception handling block.
print("Interrupted!", end="", file=sys.stderr)
sys.stderr.flush()
_interrupted = True
except Abort as e:
# Click wraps unhandled KeyboardInterrupt with a plain
# sys.exit(1) call and prints "Aborted!" message
# (which would look non-sense to users).
# This is *NOT* what we want.
# Instead of relying on Click, mark the _interrupted
# flag to perform our own exit routines.
if isinstance(e.__context__, KeyboardInterrupt):
print("Interrupted!", end="", file=sys.stderr)
sys.stderr.flush()
_interrupted = True
else:
print("Aborted!", end="", file=sys.stderr)
sys.stderr.flush()
sys.exit(ExitCode.FAILURE)
except ClickException as e:
e.show()
sys.exit(e.exit_code)
finally:
if _interrupted:
# Override the exit code when it's interrupted,
# referring https://github.com/python/cpython/pull/11862
if sys.platform.startswith("win"):
# Use STATUS_CONTROL_C_EXIT to notify cmd.exe
# for interrupted exit
sys.exit(-1073741510)
else:
# Use the default signal handler to set the exit
# code properly for interruption.
signal.signal(signal.SIGINT, signal.SIG_DFL)
os.kill(os.getpid(), signal.SIGINT)
class AliasGroupMixin(click.Group):
"""
Enable command aliases.
ref) https://github.com/click-contrib/click-aliases
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._commands = {}
self._aliases = {}
def command(self, *args, **kwargs):
aliases = kwargs.pop("aliases", [])
decorator = super().command(*args, **kwargs)
if not aliases:
return decorator
def _decorator(f):
cmd = decorator(f)
if aliases:
self._commands[cmd.name] = aliases
for alias in aliases:
self._aliases[alias] = cmd.name
return cmd
return _decorator
def group(self, *args, **kwargs):
aliases = kwargs.pop("aliases", [])
# keep the same class type
kwargs["cls"] = type(self)
decorator = super().group(*args, **kwargs)
if not aliases:
return decorator
def _decorator(f):
cmd = decorator(f)
if aliases:
self._commands[cmd.name] = aliases
for alias in aliases:
self._aliases[alias] = cmd.name
return cmd
return _decorator
def get_command(self, ctx, cmd_name):
if cmd_name in self._aliases:
cmd_name = self._aliases[cmd_name]
command = super().get_command(ctx, cmd_name)
if command:
return command
def METHOD_NAME(self, ctx, formatter):
commands = []
for subcommand in self.list_commands(ctx):
cmd = self.get_command(ctx, subcommand)
# What is this, the tool lied about a command. Ignore it
if cmd is None:
continue
if cmd.hidden:
continue
if subcommand in self._commands:
aliases = ",".join(sorted(self._commands[subcommand]))
subcommand = "{0} ({1})".format(subcommand, aliases)
commands.append((subcommand, cmd))
# allow for 3 times the default spacing
if len(commands):
limit = formatter.width - 6 - max(len(cmd[0]) for cmd in commands)
rows = []
for subcommand, cmd in commands:
help = cmd.get_short_help_str(limit)
rows.append((subcommand, help))
if rows:
with formatter.section("Commands"):
formatter.write_dl(rows)
class ExtendedCommandGroup(InterruptAwareCommandMixin, AliasGroupMixin, click.Group):
pass
| null |
1,305 |
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2022-2023 Valory AG
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""Component registry helpers."""
import os
import shutil
import tempfile
from distutils.dir_util import copy_tree # pylint: disable=deprecated-module
from pathlib import Path
from shutil import copytree
from typing import Optional, cast
import click
from aea.cli.registry.settings import REGISTRY_LOCAL, REGISTRY_REMOTE, REMOTE_IPFS
from aea.cli.utils.click_utils import reraise_as_click_exception
from aea.cli.utils.config import (
get_default_remote_registry,
get_ipfs_node_multiaddr,
load_item_config,
)
from aea.cli.utils.context import Context
from aea.cli.utils.package_utils import (
try_get_item_source_path,
try_get_item_target_path,
)
from aea.configurations.constants import DEFAULT_README_FILE, SERVICE, SERVICES
from aea.configurations.data_types import PublicId
from aea.helpers.cid import to_v1
from autonomy.configurations.base import (
DEFAULT_SERVICE_CONFIG_FILE,
PACKAGE_TYPE_TO_CONFIG_CLASS,
)
try:
from aea_cli_ipfs.ipfs_utils import IPFSTool # type: ignore
IS_IPFS_PLUGIN_INSTALLED = True
except ImportError: # pragma: nocover
IS_IPFS_PLUGIN_INSTALLED = False
def fetch_service(
ctx: Context,
public_id: PublicId,
alias: Optional[str] = None,
) -> Path:
"""Fetch service."""
if ctx.registry_type == REGISTRY_REMOTE:
return fetch_service_remote(public_id, alias=alias)
if ctx.registry_type == REGISTRY_LOCAL:
return fetch_service_local(ctx, public_id, alias=alias)
return fetch_service_mixed(ctx, public_id, alias=alias)
def fetch_service_mixed(
ctx: Context,
public_id: PublicId,
alias: Optional[str] = None,
) -> Path:
"""Fetch service in mixed mode."""
try:
return fetch_service_local(ctx, public_id, alias=alias)
except Exception as e: # pylint: disable=broad-except
click.echo(
f"Fetch from local registry failed (reason={str(e)}), trying remote registry..."
)
return fetch_service_remote(public_id, alias=alias)
def fetch_service_remote(
public_id: PublicId,
alias: Optional[str] = None,
) -> Path:
"""Fetch service in remote mode."""
if get_default_remote_registry() == REMOTE_IPFS:
return fetch_service_ipfs(public_id, alias=alias)
raise Exception("HTTP registry not supported.") # pragma: nocover
def fetch_service_ipfs(
public_id: PublicId,
alias: Optional[str] = None,
) -> Path:
"""Fetch service from IPFS node."""
if not IS_IPFS_PLUGIN_INSTALLED:
raise RuntimeError("IPFS plugin not installed.") # pragma: no cover
with tempfile.TemporaryDirectory() as temp_dir:
ipfs_tool = IPFSTool(get_ipfs_node_multiaddr())
download_path = Path(ipfs_tool.download(public_id.hash, temp_dir))
package_path = Path.cwd() / (alias or download_path.name)
shutil.copytree(download_path, package_path)
if not Path(package_path, DEFAULT_SERVICE_CONFIG_FILE).exists():
raise click.ClickException(
"Downloaded packages is not a service package, "
"if you intend to download an agent please use `--agent` flag "
"or check the hash"
)
service_config = load_item_config(
SERVICE, package_path, PACKAGE_TYPE_TO_CONFIG_CLASS
)
click.echo(
f"Downloaded service package {service_config.public_id} @ {package_path}"
)
return package_path
def fetch_service_local(
ctx: Context,
public_id: PublicId,
alias: Optional[str] = None,
) -> Path:
"""Fetch service from local directory."""
with reraise_as_click_exception(ValueError):
registry_path = ctx.registry_path
source_path = try_get_item_source_path(
registry_path, public_id.author, SERVICES, public_id.name
)
target_path = Path(ctx.cwd, alias or public_id.name)
if target_path.exists():
raise click.ClickException(
f'Item "{target_path.name}" already exists in target folder "{target_path.parent}".'
)
copy_tree(source_path, str(target_path))
click.echo(f"Copied service package {public_id}")
return target_path
def publish_service_package(click_context: click.Context, registry: str) -> None:
"""Publish a service package."""
# TODO ensure we have error handling here.
service_config = load_item_config(
SERVICE, Path(click_context.obj.cwd), PACKAGE_TYPE_TO_CONFIG_CLASS
)
if registry == REGISTRY_REMOTE:
if get_default_remote_registry() == REMOTE_IPFS:
METHOD_NAME(service_config.public_id, Path(click_context.obj.cwd))
else:
raise Exception("HTTP registry not supported.") # pragma: no cover
else:
publish_service_local(
cast(
Context,
click_context.obj,
),
service_config.public_id,
)
def METHOD_NAME(public_id: PublicId, package_path: Path) -> None:
"""Publish a service package on the IPFS registry."""
if not IS_IPFS_PLUGIN_INSTALLED: # pragma: nocover
raise RuntimeError("IPFS plugin not installed.")
package_path = package_path.resolve()
with tempfile.TemporaryDirectory() as temp_dir:
temp_service_dir = Path(temp_dir, public_id.name)
temp_service_dir.mkdir()
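# only the service configuration and the README are copied, so only those two files end up in the published package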
shutil.copyfile(
package_path / DEFAULT_SERVICE_CONFIG_FILE,
temp_service_dir / DEFAULT_SERVICE_CONFIG_FILE,
)
shutil.copyfile(
package_path / DEFAULT_README_FILE,
temp_service_dir / DEFAULT_README_FILE,
)
ipfs_tool = IPFSTool(get_ipfs_node_multiaddr())
_, package_hash, _ = ipfs_tool.add(str(temp_service_dir.resolve()))
package_hash = to_v1(package_hash)
click.echo(
f'Service "{public_id.name}" successfully published on the IPFS registry.\n\tPublicId: {public_id}\n\tPackage hash: {package_hash}'
)
def publish_service_local(ctx: Context, public_id: PublicId) -> None:
"""Publish a service package on the local packages directory."""
with reraise_as_click_exception(ValueError):
registry_path = ctx.registry_path
target_dir = try_get_item_target_path(
registry_path,
public_id.author,
SERVICES,
public_id.name,
)
author_dir = Path(target_dir).parent
if not os.path.exists(author_dir):
os.makedirs(author_dir, exist_ok=True)
# TODO: also make services dir?
copytree(ctx.cwd, target_dir)
click.echo(
f'Service "{public_id.name}" successfully published on the local packages directory.'
)
| null |
1,306 |
__author__ = "Aleksandr Slepchenkov"
__email__ = "[email protected]"
from typing import (
Any,
Dict,
Iterable,
List,
Match,
Optional,
Pattern,
Sequence,
Tuple,
Type,
)
Tokens = List[Dict[str, Any]]
# There are too many levels of optional unions of lists of text in cell and align at lines 385 and 396 in mistune
def escape(text: str, quote: bool = ..., smart_amp: bool = ...) -> str: ...
class BlockGrammar:
def_links: Pattern[str]
def_footnotes: Pattern[str]
newline: Pattern[str]
block_code: Pattern[str]
fences: Pattern[str]
hrule: Pattern[str]
heading: Pattern[str]
lheading: Pattern[str]
block_quote: Pattern[str]
list_block: Pattern[str]
list_item: Pattern[str]
list_bullet: Pattern[str]
paragraph: Pattern[str]
block_html: Pattern[str]
table: Pattern[str]
nptable: Pattern[str]
text: Pattern[str]
class BlockLexer:
grammar_class: Type[BlockGrammar]
default_rules: List[str]
list_rules: Tuple[str]
footnote_rules: Tuple[str]
tokens: Tokens
def_links: Dict[str, Dict[str, str]]
def_footnotes: Dict[str, int]
rules = ... # type: BlockGrammar
def __init__(self, rules: Optional[BlockGrammar] = ..., **kwargs: Any) -> None: ...
def __call__(self, text: str, rules: Optional[Sequence[str]] = ...) -> Tokens: ...
def parse(self, text: str, rules: Optional[Sequence[str]] = ...) -> Tokens: ...
def parse_newline(self, m: Match[str]) -> None: ...
def parse_block_code(self, m: Match[str]) -> None: ...
def parse_fences(self, m: Match[str]) -> None: ...
def parse_heading(self, m: Match[str]) -> None: ...
def parse_lheading(self, m: Match[str]) -> None: ...
def parse_hrule(self, m: Match[str]) -> None: ...
def parse_list_block(self, m: Match[str]) -> None: ...
def parse_block_quote(self, m: Match[str]) -> None: ...
def parse_def_links(self, m: Match[str]) -> None: ...
def parse_def_footnotes(self, m: Match[str]) -> None: ...
def parse_table(self, m: Match[str]) -> None: ...
def parse_nptable(self, m: Match[str]) -> None: ...
def parse_block_html(self, m: Match[str]) -> None: ...
def parse_paragraph(self, m: Match[str]) -> None: ...
def parse_text(self, m: Match[str]) -> None: ...
class InlineGrammar:
escape: Pattern[str]
inline_html: Pattern[str]
autolink: Pattern[str]
link: Pattern[str]
reflink: Pattern[str]
nolink: Pattern[str]
url: Pattern[str]
double_emphasis: Pattern[str]
emphasis: Pattern[str]
code: Pattern[str]
linebreak: Pattern[str]
strikethrough: Pattern[str]
footnote: Pattern[str]
text: Pattern[str]
def hard_wrap(self) -> None: ...
class InlineLexer:
grammar_class: Type[InlineGrammar]
default_rules: List[str]
inline_html_rules: List[str]
renderer: Renderer
links: Dict[str, Dict[str, str]]
footnotes: Dict[str, int]
footnote_index: int
_in_link: bool
_in_footnote: bool
_parse_inline_html: bool
rules: InlineGrammar
def __init__(
self, renderer: Renderer, rules: Optional[InlineGrammar] = ..., **kwargs: Any
) -> None: ...
def __call__(self, text: str, rules: Optional[Sequence[str]] = ...) -> str: ...
def setup(
self,
links: Optional[Dict[str, Dict[str, str]]],
footnotes: Optional[Dict[str, int]],
) -> None: ...
line_match: Match[str]
line_started: bool
def output(self, text: str, rules: Optional[Sequence[str]] = ...) -> str: ...
def output_escape(self, m: Match[str]) -> str: ...
def output_autolink(self, m: Match[str]) -> str: ...
def output_url(self, m: Match[str]) -> str: ...
def output_inline_html(self, m: Match[str]) -> str: ...
def output_footnote(self, m: Match[str]) -> Optional[str]: ...
def output_link(self, m: Match[str]) -> str: ...
def output_reflink(self, m: Match[str]) -> Optional[str]: ...
def output_nolink(self, m: Match[str]) -> Optional[str]: ...
def output_double_emphasis(self, m: Match[str]) -> str: ...
def output_emphasis(self, m: Match[str]) -> str: ...
def output_code(self, m: Match[str]) -> str: ...
def output_linebreak(self, m: Match[str]) -> str: ...
def output_strikethrough(self, m: Match[str]) -> str: ...
def output_text(self, m: Match[str]) -> str: ...
class Renderer:
options: Dict[str, str]
def __init__(self, **kwargs: Any) -> None: ...
def METHOD_NAME(self) -> str: ...
def block_code(
self, code: str, lang: Any = ...
) -> str: ... # It seems that lang should be string, however other types are valid as well
def block_quote(self, text: str) -> str: ...
def block_html(self, html: str) -> str: ...
def header(self, text: str, level: int, raw: Optional[str] = ...) -> str: ...
def hrule(self) -> str: ...
def list(
self, body: Any, ordered: bool = ...
) -> str: ... # body - same reason as for lang above, and for other Any in this class
def list_item(self, text: Any) -> str: ...
def paragraph(self, text: str) -> str: ...
def table(self, header: Any, body: Any) -> str: ...
def table_row(self, content: Any) -> str: ...
def table_cell(self, content: Any, **flags: Dict[str, Any]) -> str: ...
def double_emphasis(self, text: Any) -> str: ...
def emphasis(self, text: Any) -> str: ...
def codespan(self, text: str) -> str: ...
def linebreak(self) -> str: ...
def strikethrough(self, text: Any) -> str: ...
def text(self, text: Any) -> str: ...
def escape(self, text: Any) -> str: ...
def autolink(self, link: Any, is_email: bool = ...) -> str: ...
def link(self, link: Any, title: Any, text: Any) -> str: ...
def image(self, src: Any, title: Any, text: Any) -> str: ...
def inline_html(self, html: Any) -> str: ...
def newline(self) -> str: ...
def footnote_ref(self, key: Any, index: int) -> str: ...
def footnote_item(self, key: Any, text: str) -> str: ...
def footnotes(self, text: Any) -> str: ...
class Markdown:
renderer = ... # type: Renderer
inline = ... # type: InlineLexer
block = ... # type: BlockLexer
footnotes = ... # type: List[Dict[str, Any]]
tokens = ... # type: Tokens
def __init__(
self,
renderer: Optional[Renderer] = ...,
inline: Optional[InlineLexer] = ...,
block: Optional[BlockLexer] = ...,
**kwargs: Any,
) -> None: ...
def __call__(self, text: str) -> str: ...
def render(self, text: str) -> str: ...
def parse(self, text: str) -> str: ...
token = ... # type: Dict[str, Any]
def pop(self) -> Optional[Dict[str, Any]]: ...
def peek(self) -> Optional[Dict[str, Any]]: ...
def output(self, text: str, rules: Optional[Sequence[str]] = ...) -> str: ...
def tok(self) -> str: ...
def tok_text(self) -> str: ...
def output_newline(self) -> str: ...
def output_hrule(self) -> str: ...
def output_heading(self) -> str: ...
def output_code(self) -> str: ...
def output_table(self) -> str: ...
def output_block_quote(self) -> str: ...
def output_list(self) -> str: ...
def output_list_item(self) -> str: ...
def output_loose_item(self) -> str: ...
def output_footnote(self) -> str: ...
def output_close_html(self) -> str: ...
def output_open_html(self) -> str: ...
def output_paragraph(self) -> str: ...
def output_text(self) -> str: ...
def markdown(text: str, escape: bool = ..., **kwargs: Any) -> str: ...
| null |
1,307 |
'''
Copyright (C) 2017-2023 Bryant Moscon - [email protected]
Please see the LICENSE file for the terms and conditions
associated with this software.
'''
import logging
from decimal import Decimal
from typing import Dict, Tuple
from yapic import json
from cryptofeed.connection import AsyncConnection, RestEndpoint, Routes, WebsocketEndpoint
from cryptofeed.defines import BID, ASK, BUY, PROBIT, L2_BOOK, SELL, TRADES
from cryptofeed.feed import Feed
from cryptofeed.symbols import Symbol
from cryptofeed.types import OrderBook, Trade
LOG = logging.getLogger('feedhandler')
class Probit(Feed):
id = PROBIT
websocket_endpoints = [WebsocketEndpoint('wss://api.probit.com/api/exchange/v1/ws')]
rest_endpoints = [RestEndpoint('https://api.probit.com', routes=Routes('/api/exchange/v1/market'))]
websocket_channels = {
L2_BOOK: 'order_books',
TRADES: 'recent_trades',
}
@classmethod
def _parse_symbol_data(cls, data: dict) -> Tuple[Dict, Dict]:
ret = {}
info = {'instrument_type': {}}
# doc: https://docs-en.probit.com/reference-link/market
for entry in data['data']:
if entry['closed']:
continue
s = Symbol(entry['base_currency_id'], entry['quote_currency_id'])
ret[s.normalized] = entry['id']
info['instrument_type'][s.normalized] = s.type
return ret, info
def __reset(self):
self._l2_book = {}
async def _trades(self, msg: dict, timestamp: float):
'''
{
"channel":"marketdata",
"market_id":"ETH-BTC",
"status":"ok","lag":0,
"recent_trades":[
{
"id":"ETH-BTC:4429182",
"price":"0.028229",
"quantity":"3.117",
"time":"2020-11-01T03:59:06.277Z",
"side":"buy","tick_direction":"down"
},{
"id":"ETH-BTC:4429183",
"price":"0.028227",
"quantity":"1.793",
"time":"2020-11-01T03:59:14.528Z",
"side":"buy",
"tick_direction":"down"
}
],"reset":true
}
{
"channel":"marketdata",
"market_id":"ETH-BTC",
"status":"ok","lag":0,
"recent_trades":[
{
"id":"ETH-BTC:4429282",
"price":"0.028235",
"quantity":"2.203",
"time":"2020-11-01T04:22:15.117Z",
"side":"buy",
"tick_direction":"down"
}
]
}
'''
pair = self.exchange_symbol_to_std_symbol(msg['market_id'])
for update in msg['recent_trades']:
t = Trade(
self.id,
pair,
BUY if update['side'] == 'buy' else SELL,
Decimal(update['quantity']),
Decimal(update['price']),
self.timestamp_normalize(update['time']),
id=update['id'],
raw=update
)
await self.callback(TRADES, t, timestamp)
async def _l2_update(self, msg: dict, timestamp: float):
'''
{
"channel":"marketdata",
"market_id":"ETH-BTC",
"status":"ok",
"lag":0,
"order_books":[
{
"side":"buy",
"price":"0.0165",
"quantity":"0.47"
},{
"side":"buy",
"price":"0",
"quantity":"14656.177"
},{
"side":"sell",
"price":"6400",
"quantity":"0.001"
}],
"reset":true
}
{
"channel":"marketdata",
"market_id":"ETH-BTC",
"status":"ok",
"lag":0,
"order_books":[
{
"side":"buy",
"price":"0.0281",
"quantity":"48.541"
},{
"side":"sell",
"price":"0.0283",
"quantity":"0"
}]
}
'''
pair = self.exchange_symbol_to_std_symbol(msg['market_id'])
is_snapshot = msg.get('reset', False)
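# messages flagged with "reset" carry a full order book snapshot; otherwise the entries are incremental deltas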
if is_snapshot:
self._l2_book[pair] = OrderBook(self.id, pair, max_depth=self.max_depth)
for entry in msg["order_books"]:
price = Decimal(entry['price'])
quantity = Decimal(entry['quantity'])
side = BID if entry['side'] == "buy" else ASK
self._l2_book[pair].book[side][price] = quantity
await self.book_callback(L2_BOOK, self._l2_book[pair], timestamp, raw=msg)
else:
delta = {BID: [], ASK: []}
for entry in msg["order_books"]:
price = Decimal(entry['price'])
quantity = Decimal(entry['quantity'])
side = BID if entry['side'] == "buy" else ASK
if quantity == 0:
if price in self._l2_book[pair].book[side]:
del self._l2_book[pair].book[side][price]
delta[side].append((price, 0))
else:
self._l2_book[pair].book[side][price] = quantity
delta[side].append((price, quantity))
await self.book_callback(L2_BOOK, self._l2_book[pair], timestamp, raw=msg, delta=delta)
async def METHOD_NAME(self, msg: str, conn, timestamp: float):
msg = json.loads(msg, parse_float=Decimal)
# Probit can send multiple types of updates in one message, so we avoid using elif
if 'recent_trades' in msg:
await self._trades(msg, timestamp)
if 'order_books' in msg:
await self._l2_update(msg, timestamp)
# Probit has a 'ticker' channel, but it provides OHLC/last data, not BBO prices.
async def subscribe(self, conn: AsyncConnection):
self.__reset()
if self.subscription:
for chan in self.subscription:
for pair in self.subscription[chan]:
await conn.write(json.dumps({"type": "subscribe",
"channel": "marketdata",
"filter": [chan],
"interval": 100,
"market_id": pair,
}))
| null |
1,308 |
# Copyright 2016 Pinterest, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
"""Collection of builds related views
"""
import json
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.template.loader import render_to_string
import common
from helpers import builds_helper, systems_helper, tags_helper, environs_helper, deploys_helper
import random
import logging
log = logging.getLogger(__name__)
def builds_landing(request):
return get_build_names(request)
def get_build_names(request):
index = int(request.GET.get('page_index', '1'))
size = int(request.GET.get('page_size', common.DEFAULT_BUILD_SIZE))
build_names = builds_helper.get_build_names(request, start=index, size=size)
return render(request, 'builds/build_names.html', {
'build_names': build_names,
"pageIndex": index,
"pageSize": common.DEFAULT_BUILD_SIZE,
"disablePrevious": index <= 1,
"disableNext": len(build_names) < common.DEFAULT_BUILD_SIZE,
})
def get_build(request, id):
info = builds_helper.get_build_and_tag(request, id)
tag = info.get("tag")
if tag:
tag["build"]=json.loads(tag["metaInfo"])
return render(request, 'builds/build_details.html', {
"build": info["build"],
"tag": tag
})
def list_builds(request, name):
index = int(request.GET.get('page_index', '1'))
size = int(request.GET.get('page_size', common.DEFAULT_BUILD_SIZE))
builds = builds_helper.get_builds_and_tags(request, name=name, pageIndex=index, pageSize=size)
return render(request, 'builds/builds.html', {
'build_name': name,
'builds': builds,
"pageIndex": index,
"pageSize": common.DEFAULT_BUILD_SIZE,
"disablePrevious": index <= 1,
"disableNext": len(builds) < common.DEFAULT_BUILD_SIZE,
})
def get_all_builds(request):
name = request.GET.get('name')
branch = request.GET.get('branch')
index = int(request.GET.get('page_index', '1'))
size = int(request.GET.get('page_size', common.DEFAULT_BUILD_SIZE))
builds = builds_helper.get_builds_and_tags(request, name=name, branch=branch, pageIndex=index,
pageSize=size)
deploy_state = None
current_build_id = request.GET.get('current_build_id', None)
override_policy = request.GET.get('override_policy')
deploy_id = request.GET.get('deploy_id')
current_build = None
scmType = ""
if current_build_id:
current_build = builds_helper.get_build_and_tag(request, current_build_id)
current_build = current_build.get('build')
scmType = current_build.get('type')
if deploy_id:
deploy_config = deploys_helper.get(request, deploy_id)
if deploy_config:
deploy_state = deploy_config.get('state', None)
scm_url = systems_helper.get_scm_url(request, scmType)
html = render_to_string('builds/pick_a_build.tmpl', {
"builds": builds,
"current_build": current_build,
"scm_url": scm_url,
"buildName": name,
"branch": branch,
"pageIndex": index,
"pageSize": common.DEFAULT_BUILD_SIZE,
"disablePrevious": index <= 1,
"disableNext": len(builds) < common.DEFAULT_BUILD_SIZE,
"overridePolicy": override_policy,
"deployState": deploy_state,
})
return HttpResponse(html)
# currently we only support search by git commit or SHA, 7 characters or longer
def search_commit(request, commit):
builds = builds_helper.get_builds_and_tags(request, commit=commit)
return render(request, 'builds/builds_by_commit.html', {
'commit': commit,
'builds': builds,
})
def list_build_branches(request, name):
branches = builds_helper.get_branches(request, name=name)
return HttpResponse(json.dumps(branches), content_type="application/json")
def METHOD_NAME(request):
startSha = request.GET.get('start_sha')
endSha = request.GET.get('end_sha')
repo = request.GET.get('repo')
scm = request.GET.get('scm')
commits, truncated, new_start_sha = common.get_commits_batch(request, scm, repo,
startSha, endSha,
keep_first=False)
show_checkbox_str = request.GET.get('show_checkbox', 'False')
show_checkbox = show_checkbox_str.lower() == 'true'
pagination_id = random.randint(0, 1000000)
rows = render_to_string('builds/commit_rows.tmpl', {
"commits": commits,
"show_checkbox": show_checkbox,
"pagination_id": pagination_id
})
return HttpResponse(json.dumps({'rows': rows, 'new_start_sha': new_start_sha,
'truncated': truncated}),
content_type="application/json")
def compare_commits(request):
startSha = request.GET.get('start_sha')
endSha = request.GET.get('end_sha')
repo = request.GET.get('repo')
scm = request.GET.get('scm')
commits, truncated, new_start_sha = common.get_commits_batch(request, scm, repo,
startSha, endSha,
keep_first=True)
html = render_to_string('builds/commits.tmpl', {
"commits": commits,
"start_sha": new_start_sha,
"end_sha": endSha,
"repo": repo,
"scm": scm,
"truncated": truncated,
"show_checkbox": False,
})
return HttpResponse(html)
def compare_commits_datatables(request):
startSha = request.GET.get('start_sha')
endSha = request.GET.get('end_sha')
repo = request.GET.get('repo')
scm = request.GET.get('scm')
commits, truncated, new_start_sha = common.get_commits_batch(request, scm, repo,
startSha, endSha,
size=2000,
keep_first=True)
html = render_to_string('builds/show_commits.tmpl', {
"commits": commits,
"start_sha": new_start_sha,
"end_sha": endSha,
"repo": repo,
"scm": scm,
"truncated": truncated,
"show_checkbox": False,
})
return HttpResponse(html)
def tag_build(request, id):
if request.method == "POST":
build_info = builds_helper.get_build_and_tag(request, id)
current_tag = build_info.get("tag")
if current_tag:
tagged_build = json.loads(current_tag["metaInfo"])
if tagged_build["id"] == id:
log.info("There is already a tag associated with the build. Remove it")
builds_helper.del_build_tag(request, current_tag["id"])
tag = {"targetId":id, "targetType":"Build", "comments":request.POST["comments"]}
value = request.POST["tag_value"]
if value.lower() == "good":
tag["value"] = tags_helper.TagValue.GOOD_BUILD
elif value.lower()=="bad":
tag["value"] = tags_helper.TagValue.BAD_BUILD
else:
return HttpResponse(status=400)
builds_helper.set_build_tag(request, tag)
return redirect("/builds/{0}/".format(id))
else:
return HttpResponse(status=405)
| null |
1,309 |
# Copyright 2020 ACSONE SA/NV
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo.exceptions import UserError
from odoo.osv import expression
from odoo.tools.translate import _
from odoo.addons.base_rest.components.service import to_int
from odoo.addons.component.core import Component
class MembershipService(Component):
_inherit = "base.shopinvader.service"
_name = "shopinvader.membership.service"
_usage = "membership"
_expose_model = "membership.membership_line"
_description = "Service providing a method to access membership lines"
def _get_base_search_domain(self):
"""
This method must provide a domain used to retrieve the requested
membership lines.
This domain MUST TAKE CARE of restricting the access to the membership
lines visible for the current customer
:return: Odoo domain
"""
# The partner must be set and not be the anonymous one
if not self._is_logged_in():
return expression.FALSE_DOMAIN
# here we only allow access to membership lines linked to the
# current customer
return expression.normalize_domain([("partner", "=", self.partner.id)])
def search(self, **params):
"""
Get every membership lines related to logged user
:param params: dict/json
:return: dict
"""
return self._paginate_search(**params)
def subscribe(self, _id):
"""
Subscribe to a membership product with logged user
:param _id: id of product.product
:return: dict with invoice_id
"""
if not self._is_logged_in():
raise UserError(_("A user should be logged"))
membership_product = self.env["product.product"].search(
[("id", "=", _id), ("membership", "=", True)]
)
if not membership_product:
raise UserError(_("No membership product found with id %s") % _id)
wizard = self.env["membership.invoice"].create(
{"product_id": _id, "member_price": membership_product.list_price}
)
invoices_views_dict = wizard.with_context(
active_ids=self.partner.ids
).membership_invoice()
return {"invoice_id": invoices_views_dict.get("domain")[0][2][0]}
def _validator_subscribe(self):
"""
Validator for the subscribe
:return: dict
"""
return {"membership_product_id": {"type": "integer"}}
def _validator_return_subscribe(self):
"""
Output validator for the subscribe
:return: dict
"""
return {"invoice_id": {"type": "integer"}}
def _validator_search(self):
"""
Validator for the search
:return: dict
"""
schema = {
"per_page": {
"coerce": to_int,
"nullable": True,
"type": "integer",
},
"page": {"coerce": to_int, "nullable": True, "type": "integer"},
}
return schema
def _validator_return_search(self):
"""
Output validator for the search
:return: dict
"""
membership_line_schema = {
"membership_line_id": {"type": "integer"},
"date": {"type": "string", "nullable": True},
"date_from": {"type": "string", "nullable": True},
"date_to": {"type": "string", "nullable": True},
"date_cancel": {"type": "string", "nullable": True},
"membership_id": {
"type": "dict",
"nullable": True,
"schema": {
"id": {"type": "integer"},
"name": {"type": "string"},
},
},
"member_price": {"type": "float"},
"state": {
"type": "dict",
"nullable": True,
"schema": {
"value": {"type": "string"},
"label": {"type": "string"},
},
},
}
schema = {
"size": {"type": "integer"},
"data": {
"type": "list",
"schema": {"type": "dict", "schema": membership_line_schema},
},
}
return schema
def _get_parser_membership_line(self):
"""
Get the parser of membership.membership_line
:return: list
"""
to_parse = [
"id:membership_line_id",
"date",
"date_from",
"date_to",
"date_cancel",
("membership_id:membership_id", ("id", "name")),
"member_price",
]
return to_parse
def _get_selection_label(self, membership_line, field):
"""
Get the translated label of the membership line selection field
:param membership_line: membership.membership_line recordset
:param field: str
:return: str
"""
if field not in membership_line._fields:
return ""
# convert_to_export(...) give the label of the selection (translated).
return membership_line._fields.get(field).convert_to_export(
membership_line[field], membership_line
)
def _to_json_membership_line(self, membership_line):
membership_line.ensure_one()
parser = self._get_parser_membership_line()
values = membership_line.jsonify(parser)[0]
values.update(
{
"state": {
"value": membership_line.state,
"label": self._get_selection_label(membership_line, "state"),
}
}
)
return values
def METHOD_NAME(self, membership_lines):
res = []
for membership_line in membership_lines:
res.append(self._to_json_membership_line(membership_line))
return res
| null |
1,310 |
#
# Containerlab provider module
#
import subprocess
import typing
import shutil
from box import Box
from . import _Provider,get_forwarded_ports
from ..utils import log
from ..data import filemaps
from ..cli import is_dry_run,external_commands
def list_bridges( topology: Box ) -> typing.Set[str]:
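# a bridge is only needed for multi-access links (node_count != 2) that do not map to an existing external bridge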
return { l.bridge for l in topology.links if l.bridge and l.node_count != 2 and 'external_bridge' not in l.clab }
def use_ovs_bridge( topology: Box ) -> bool:
return topology.defaults.providers.clab.bridge_type == "ovs-bridge"
def create_linux_bridge( brname: str ) -> bool:
if external_commands.run_command(
['brctl','show',brname],check_result=True,ignore_errors=True) and not is_dry_run():
log.print_verbose(f'Linux bridge {brname} already exists, skipping')
return True
status = external_commands.run_command(
['sudo','ip','link','add','name',brname,'type','bridge'],check_result=True,return_stdout=True)
if status is False:
return False
log.print_verbose( f"Created Linux bridge '{brname}': {status}" )
status = external_commands.run_command(
['sudo','ip','link','set','dev',brname,'up'],check_result=True,return_stdout=True)
if status is False:
return False
log.print_verbose( f"Enable Linux bridge '{brname}': {status}" )
status = external_commands.run_command(
['sudo','sh','-c',f'echo 65528 >/sys/class/net/{brname}/bridge/group_fwd_mask'],
check_result=True,
return_stdout=True)
if status is False:
return False
log.print_verbose( f"Enable LLDP,LACP,802.1X forwarding on Linux bridge '{brname}': {status}" )
return True
def destroy_linux_bridge( brname: str ) -> bool:
status = external_commands.run_command(
['sudo','ip','link','del','dev',brname],check_result=True,return_stdout=True)
if status is False:
return False
log.print_verbose( f"Delete Linux bridge '{brname}': {status}" )
return True
def create_ovs_bridge( brname: str ) -> bool:
status = external_commands.run_command(
['sudo','ovs-vsctl','add-br',brname],check_result=True,return_stdout=True)
if status is False:
return False
log.print_verbose( f"Create OVS bridge '{brname}': {status}" )
return True
def destroy_ovs_bridge( brname: str ) -> bool:
status = external_commands.run_command(
['sudo','ovs-vsctl','del-br',brname],check_result=True,return_stdout=True)
if status is False:
return False
log.print_verbose( f"Delete OVS bridge '{brname}': {status}" )
return True
GENERATED_CONFIG_PATH = "clab_files"
def METHOD_NAME(node: Box, fplist: list) -> None:
if not fplist:
return
node.clab.ports = node.clab.ports or [] # Make sure the list of forwarded ports is a list
for port_map in fplist: # Iterate over forwarded port mappings
port_map_string = f'{port_map[0]}:{port_map[1]}' # Build the containerlab-compatible map entry
if not port_map_string in node.clab.ports: # ... and add it to the list of forwarded ports
node.clab.ports.append(port_map_string) # ... if the user didn't do it manually
'''
normalize_clab_filemaps: convert clab templates and file binds into host:target lists
'''
def normalize_clab_filemaps(node: Box) -> None:
for undot_key in ['clab.binds','clab.config_templates']:
if not undot_key in node:
continue
filemaps.normalize_file_mapping(node,f'nodes.{node.name}',undot_key,'clab')
class Containerlab(_Provider):
def augment_node_data(self, node: Box, topology: Box) -> None:
node.hostname = "clab-%s-%s" % (topology.name,node.name)
normalize_clab_filemaps(node)
self.create_extra_files_mappings(node,topology)
node_fp = get_forwarded_ports(node,topology)
if node_fp:
METHOD_NAME(node,node_fp)
def post_configuration_create(self, topology: Box) -> None:
for n in topology.nodes.values():
if n.get('clab.binds',None):
self.create_extra_files(n,topology)
def pre_start_lab(self, topology: Box) -> None:
log.print_verbose('pre-start hook for Containerlab - create any bridges')
for brname in list_bridges(topology):
if use_ovs_bridge(topology):
create_ovs_bridge(brname)
else:
create_linux_bridge(brname)
def post_stop_lab(self, topology: Box) -> None:
log.print_verbose('post-stop hook for Containerlab, cleaning up any bridges')
for brname in list_bridges(topology):
if use_ovs_bridge(topology):
destroy_ovs_bridge(brname)
else:
destroy_linux_bridge(brname)
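# Usage sketch (illustrative; the topology below is a tiny hand-built example rather than a
# real lab topology): only multi-access links that do not declare an external bridge need a
# bridge created by this provider.
if __name__ == '__main__':
  from box import Box
  _demo = Box({
    'links': [
      { 'bridge': 'br_1', 'node_count': 3, 'clab': {} },
      { 'bridge': 'br_2', 'node_count': 2, 'clab': {} },
      { 'bridge': 'br_3', 'node_count': 3, 'clab': { 'external_bridge': True } } ] },
    default_box=True)
  print(list_bridges(_demo))  # {'br_1'}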
| null |
1,311 |
# Copyright (c) ZenML GmbH 2022. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Base class for ZenML annotator stack components."""
from abc import ABC, abstractmethod
from typing import Any, ClassVar, List, Optional, Tuple, Type, cast
from zenml.enums import StackComponentType
from zenml.stack import Flavor, StackComponent
from zenml.stack.stack_component import StackComponentConfig
class BaseAnnotatorConfig(StackComponentConfig):
"""Base config for annotators.
Attributes:
notebook_only: if the annotator can only be used in a notebook.
"""
notebook_only: ClassVar[bool] = False
class BaseAnnotator(StackComponent, ABC):
"""Base class for all ZenML annotators."""
@property
def config(self) -> BaseAnnotatorConfig:
"""Returns the `BaseAnnotatorConfig` config.
Returns:
The configuration.
"""
return cast(BaseAnnotatorConfig, self._config)
@abstractmethod
def get_url(self) -> str:
"""Gets the URL of the annotation interface.
Returns:
The URL of the annotation interface.
"""
@abstractmethod
def get_url_for_dataset(self, dataset_name: str) -> str:
"""Gets the URL of the annotation interface for a specific dataset.
Args:
dataset_name: name of the dataset.
Returns:
The URL of the dataset annotation interface.
"""
@abstractmethod
def METHOD_NAME(self) -> List[Any]:
"""Gets the datasets currently available for annotation.
Returns:
The datasets currently available for annotation.
"""
@abstractmethod
def get_dataset_names(self) -> List[str]:
"""Gets the names of the datasets currently available for annotation.
Returns:
The names of the datasets currently available for annotation.
"""
@abstractmethod
def get_dataset_stats(self, dataset_name: str) -> Tuple[int, int]:
"""Gets the statistics of a dataset.
Args:
dataset_name: name of the dataset.
Returns:
A tuple containing (labeled_task_count, unlabeled_task_count) for
the dataset.
"""
@abstractmethod
def launch(self, url: Optional[str]) -> None:
"""Launches the annotation interface.
Args:
url: The URL of the annotation interface.
"""
@abstractmethod
def add_dataset(self, **kwargs: Any) -> Any:
"""Registers a dataset for annotation.
Args:
**kwargs: keyword arguments.
Returns:
The dataset or confirmation object on adding the dataset.
"""
@abstractmethod
def get_dataset(self, **kwargs: Any) -> Any:
"""Gets the dataset with the given name.
Args:
**kwargs: keyword arguments.
Returns:
The dataset with the given name.
"""
@abstractmethod
def delete_dataset(self, **kwargs: Any) -> None:
"""Deletes a dataset.
Args:
**kwargs: keyword arguments.
"""
@abstractmethod
def get_labeled_data(self, **kwargs: Any) -> Any:
"""Gets the labeled data for the given dataset.
Args:
**kwargs: keyword arguments.
Returns:
The labeled data for the given dataset.
"""
@abstractmethod
    def get_unlabeled_data(self, **kwargs: Any) -> Any:
"""Gets the unlabeled data for the given dataset.
Args:
**kwargs: Additional keyword arguments to pass to the Label Studio client.
Returns:
The unlabeled data for the given dataset.
"""
class BaseAnnotatorFlavor(Flavor):
"""Base class for annotator flavors."""
@property
def type(self) -> StackComponentType:
"""Returns the flavor type.
Returns:
The flavor type.
"""
return StackComponentType.ANNOTATOR
@property
def config_class(self) -> Type[BaseAnnotatorConfig]:
"""Config class for this flavor.
Returns:
The config class.
"""
return BaseAnnotatorConfig
@property
@abstractmethod
def implementation_class(self) -> Type[BaseAnnotator]:
"""Implementation class.
Returns:
The implementation class.
"""
return BaseAnnotator
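# Minimal flavor sketch (illustrative; the name and class below are hypothetical and not part
# of ZenML): a concrete flavor typically only provides its name and points
# `implementation_class` at a concrete `BaseAnnotator` subclass.
class ExampleAnnotatorFlavor(BaseAnnotatorFlavor):
    """Hypothetical flavor illustrating the contract defined above."""
    @property
    def name(self) -> str:
        """Name of the flavor.
        Returns:
            The flavor name.
        """
        return "example"
    @property
    def implementation_class(self) -> Type[BaseAnnotator]:
        """Implementation class.
        Returns:
            The implementation class.
        """
        return BaseAnnotator  # a real flavor returns its concrete annotator class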
| null |
1,312 |
# coding=utf-8
# Copyright 2023 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ResNet50 model with Monte Carlo dropout."""
import string
import tensorflow as tf
# Use batch normalization defaults from Pytorch.
BATCH_NORM_DECAY = 0.9
BATCH_NORM_EPSILON = 1e-5
def apply_dropout(inputs, dropout_rate, filterwise_dropout):
"""Apply a dropout layer to the inputs."""
noise_shape = None
if filterwise_dropout:
noise_shape = [inputs.shape[0], 1, 1, inputs.shape[3]]
return tf.keras.layers.Dropout(
dropout_rate, noise_shape=noise_shape)(
inputs, training=True)
def bottleneck_block(inputs, filters, stage, block, strides, dropout_rate,
filterwise_dropout):
"""Residual block with 1x1 -> 3x3 -> 1x1 convs in main path.
Note that strides appear in the second conv (3x3) rather than the first (1x1).
This is also known as "ResNet v1.5" as it differs from He et al. (2015)
(http://torch.ch/blog/2016/02/04/resnets.html).
Dropout is applied post-activation to every batch-normalized conv layer.
Args:
inputs: tf.Tensor.
filters: list of integers, the filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
strides: Strides for the second conv layer in the block.
dropout_rate: Dropout rate.
filterwise_dropout: Dropout whole convolutional filters instead of
individual values in the feature map.
Returns:
tf.Tensor.
"""
filters1, filters2, filters3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = tf.keras.layers.Conv2D(
filters1,
kernel_size=1,
use_bias=False,
kernel_initializer='he_normal',
name=conv_name_base + '2a')(inputs)
x = tf.keras.layers.BatchNormalization(
momentum=BATCH_NORM_DECAY,
epsilon=BATCH_NORM_EPSILON,
name=bn_name_base + '2a')(x)
x = tf.keras.layers.Activation('relu')(x)
x = apply_dropout(x, dropout_rate, filterwise_dropout)
x = tf.keras.layers.Conv2D(
filters2,
kernel_size=3,
strides=strides,
padding='same',
use_bias=False,
kernel_initializer='he_normal',
name=conv_name_base + '2b')(x)
x = tf.keras.layers.BatchNormalization(
momentum=BATCH_NORM_DECAY,
epsilon=BATCH_NORM_EPSILON,
name=bn_name_base + '2b')(x)
x = tf.keras.layers.Activation('relu')(x)
x = apply_dropout(x, dropout_rate, filterwise_dropout)
x = tf.keras.layers.Conv2D(
filters3,
kernel_size=1,
use_bias=False,
kernel_initializer='he_normal',
name=conv_name_base + '2c')(x)
x = tf.keras.layers.BatchNormalization(
momentum=BATCH_NORM_DECAY,
epsilon=BATCH_NORM_EPSILON,
name=bn_name_base + '2c')(x)
shortcut = inputs
if not x.shape.is_compatible_with(shortcut.shape):
shortcut = tf.keras.layers.Conv2D(
filters3,
kernel_size=1,
use_bias=False,
strides=strides,
kernel_initializer='he_normal',
name=conv_name_base + '1')(shortcut)
shortcut = tf.keras.layers.BatchNormalization(
momentum=BATCH_NORM_DECAY,
epsilon=BATCH_NORM_EPSILON,
name=bn_name_base + '1')(shortcut)
shortcut = apply_dropout(shortcut, dropout_rate, filterwise_dropout)
x = tf.keras.layers.add([x, shortcut])
x = tf.keras.layers.Activation('relu')(x)
return x
def group(inputs, filters, num_blocks, stage, strides, dropout_rate,
filterwise_dropout):
"""Group of residual blocks."""
blocks = string.ascii_lowercase
x = bottleneck_block(
inputs,
filters,
stage,
block=blocks[0],
strides=strides,
dropout_rate=dropout_rate,
filterwise_dropout=filterwise_dropout)
for i in range(num_blocks - 1):
x = bottleneck_block(
x,
filters,
stage,
block=blocks[i + 1],
strides=1,
dropout_rate=dropout_rate,
filterwise_dropout=filterwise_dropout)
return x
def METHOD_NAME(input_shape,
num_classes,
dropout_rate,
filterwise_dropout):
"""Builds ResNet50.
Using strided conv, pooling, four groups of residual blocks, and pooling, the
network maps spatial features of size 224x224 -> 112x112 -> 56x56 -> 28x28 ->
14x14 -> 7x7 (Table 1 of He et al. (2015)).
Args:
input_shape: Shape tuple of input excluding batch dimension.
num_classes: Number of output classes.
dropout_rate: Dropout rate.
filterwise_dropout: Dropout whole convolutional filters instead of
individual values in the feature map.
Returns:
tf.keras.Model.
"""
inputs = tf.keras.layers.Input(shape=input_shape)
x = tf.keras.layers.ZeroPadding2D(padding=3, name='conv1_pad')(inputs)
x = tf.keras.layers.Conv2D(
64,
kernel_size=7,
strides=2,
padding='valid',
use_bias=False,
kernel_initializer='he_normal',
name='conv1')(x)
x = tf.keras.layers.BatchNormalization(
momentum=BATCH_NORM_DECAY,
epsilon=BATCH_NORM_EPSILON,
name='bn_conv1')(x)
x = tf.keras.layers.Activation('relu')(x)
x = apply_dropout(x, dropout_rate, filterwise_dropout)
x = tf.keras.layers.MaxPooling2D(3, strides=2, padding='same')(x)
x = group(x, [64, 64, 256], stage=2, num_blocks=3, strides=1,
dropout_rate=dropout_rate, filterwise_dropout=filterwise_dropout)
x = group(x, [128, 128, 512], stage=3, num_blocks=4, strides=2,
dropout_rate=dropout_rate, filterwise_dropout=filterwise_dropout)
x = group(x, [256, 256, 1024], stage=4, num_blocks=6, strides=2,
dropout_rate=dropout_rate, filterwise_dropout=filterwise_dropout)
x = group(x, [512, 512, 2048], stage=5, num_blocks=3, strides=2,
dropout_rate=dropout_rate, filterwise_dropout=filterwise_dropout)
x = tf.keras.layers.GlobalAveragePooling2D(name='avg_pool')(x)
x = tf.keras.layers.Dense(
num_classes,
activation=None,
kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.01),
name='fc1000')(x)
return tf.keras.Model(inputs=inputs, outputs=x, name='resnet50')
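# Monte Carlo dropout sketch (illustrative; shapes, class count and sample count are arbitrary):
# because every Dropout layer above is built with training=True, repeated forward passes through
# the builder defined above stay stochastic at inference time and can be averaged.
if __name__ == '__main__':
  model = METHOD_NAME(input_shape=(224, 224, 3),
                      num_classes=10,
                      dropout_rate=0.1,
                      filterwise_dropout=False)
  images = tf.random.uniform((2, 224, 224, 3))
  mc_logits = tf.stack([model(images) for _ in range(5)], axis=0)  # (5, 2, 10)
  mean_logits = tf.reduce_mean(mc_logits, axis=0)  # MC-dropout estimate, shape (2, 10)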
| null |
1,313 |
# OPENCORE - ADD
import traceback
from flask import session as login_session
from flask import request, redirect, url_for, flash
from shared.helpers import sessionMaker
from shared.database import hashing_functions
import sys, os
from shared.auth.KeycloakDiffgramClient import KeycloakDiffgramClient
from shared.auth.OAuth2Provider import OAuth2Provider
from shared.settings import settings
import base64
from shared.shared_logger import get_shared_logger
import ast
import zlib
import bz2
import gzip
logger = get_shared_logger()
def set_jwt_in_session(token_data: dict):
"""
Sets the JWT data in the client cookie session.
:param token_data:
:return:
"""
oidc = OAuth2Provider()
oidc_client = oidc.get_client()
id_token = oidc_client.get_id_token_from_jwt(jwt_data = token_data)
refresh_token = oidc_client.get_refresh_token_from_jwt(jwt_data = token_data)
access_token = oidc_client.get_access_token_from_jwt(jwt_data = token_data)
str_id_comp = gzip.compress(id_token.encode())
str_refresh_comp = gzip.compress(refresh_token.encode())
str_access_comp = gzip.compress(access_token.encode())
logger.info(f'Access Token: {access_token}')
logger.info(f'Id Token: {id_token}')
logger.info(f'Refresh Token: {refresh_token}')
logger.info(f'ID Token Original size: {sys.getsizeof(id_token)} - Compressed Size: {sys.getsizeof(str_id_comp)}')
logger.info(
f'Access_token Token Original size: {sys.getsizeof(access_token)} - Compressed Size: {sys.getsizeof(str_access_comp)}')
logger.info(
f'Refresh Token Original size: {sys.getsizeof(refresh_token)} - Compressed Size: {sys.getsizeof(str_refresh_comp)}')
login_session.clear()
login_session['refresh_token'] = str_refresh_comp
# login_session['access_token'] = str_access_comp
login_session['id_token'] = str_id_comp
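# Storage sketch (illustrative): tokens live in the cookie session as gzip-compressed bytes and
# are decoded back to the original JWT string on read, e.g.
#   stored = gzip.compress(id_token.encode())
#   gzip.decompress(stored).decode() == id_token   # True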
def get_decoded_refresh_token_from_session() -> str or None:
"""
Gets the JWT from the client cookie.
:return: String representing the refresh token
"""
jwt_token = login_session.get('refresh_token')
    if isinstance(jwt_token, str):
return jwt_token
if jwt_token is None:
return None
token_string = gzip.decompress(jwt_token).decode()
return token_string
def get_decoded_id_token_from_session() -> str or None:
"""
Gets the JWT from the client cookie.
:return: String representing the ID token
"""
oidc = OAuth2Provider()
oidc_client = oidc.get_client()
id_token = login_session.get('id_token')
if id_token is None:
return None
    if isinstance(id_token, str):
token_string = id_token
expired = oidc_client.id_token_has_expired(id_token = token_string)
if expired:
token_string = METHOD_NAME()
return token_string
# Case of compressed token (type is bytes)
token_string = gzip.decompress(id_token).decode()
expired = oidc_client.id_token_has_expired(id_token = token_string)
if expired:
token_string = METHOD_NAME()
return token_string
def get_decoded_access_token_from_session() -> str or None:
"""
Gets the JWT from the client cookie.
:return: String representing the ID token
"""
oidc = OAuth2Provider()
oidc_client = oidc.get_client()
access_token = login_session.get('access_token')
    if isinstance(access_token, str):
return access_token
if access_token is None:
return None
token_string = gzip.decompress(access_token).decode()
expired = oidc_client.id_token_has_expired(id_token = token_string)
if expired:
token_string = METHOD_NAME()
return token_string
def METHOD_NAME() -> str or None:
try:
oidc = OAuth2Provider()
oidc_client = oidc.get_client()
refresh_token = get_decoded_refresh_token_from_session()
new_token = oidc_client.refresh_token(refresh_token)
new_refresh_token = oidc_client.get_refresh_token_from_jwt(jwt_data = new_token)
new_id_token = oidc_client.get_id_token_from_jwt(jwt_data = new_token)
new_access_token = oidc_client.get_access_token_from_jwt(jwt_data = new_token)
if new_refresh_token is not None:
refresh_comp = gzip.compress(new_refresh_token.encode())
login_session['refresh_token'] = refresh_comp
if new_id_token is not None:
id_comp = gzip.compress(new_id_token.encode())
login_session['id_token'] = id_comp
# if new_access_token is not None:
# login_session['access_token'] = new_access_token
return new_id_token
except:
msg = traceback.format_exc()
logger.warning(f'Refresh token failed {msg}')
return None
def LoggedIn():
if settings.USE_OAUTH2:
try:
id_token = get_decoded_id_token_from_session()
# access_token = get_decoded_access_token_from_session()
if not id_token:
return False
return True
except Exception as e:
err_data = traceback.format_exc()
logger.error(err_data)
return False
else:
if login_session.get('user_id', None) is not None:
out = hashing_functions.check_secure_val(login_session['user_id'])
if out is not None:
return True
else:
return False
else:
return False
def get_user_from_oauth2(session):
from shared.database.user import User
oauth2 = OAuth2Provider()
oauth2_client = oauth2.get_client()
# access_token = get_decoded_access_token_from_session()
id_token = get_decoded_id_token_from_session()
if id_token is None:
return None
decoded_token = oauth2_client.get_decoded_jwt_token(id_token = id_token)
if not decoded_token:
return None
diffgram_user = User.get_user_by_oauth2_id(session = session,
oidc_id = decoded_token.get('sub'))
if not diffgram_user:
return None
return diffgram_user.id
def getUserID(session):
if settings.USE_OAUTH2:
return get_user_from_oauth2(session = session)
else:
if login_session.get('user_id', None) is not None:
out = hashing_functions.check_secure_val(login_session['user_id'])
if out is not None:
return out
return None
def defaultRedirect():
return redirect('/user/login')
def setSecureCookie(user_db):
cookie_hash = hashing_functions.make_secure_val(user_db.id)
login_session['user_id'] = cookie_hash
def get_session_string():
if settings.USE_OAUTH2:
token = get_decoded_id_token_from_session()
return token
else:
return login_session.get('user_id')
def get_current_version(session):
user = session.query(User).filter_by(id = getUserID(session = session)).first()
project = session.query(Project).filter_by(id = user.project_id_current).first()
version = session.query(Version).filter_by(id = project.version_id_current).first()
return version
def get_ml_settings(session, version):
machine_learning_settings = session.query(Machine_learning_settings).filter_by(
id = version.machine_learning_settings_id).first()
return machine_learning_settings
def get_gcs_service_account(gcs):
path = settings.SERVICE_ACCOUNT_FULL_PATH
return gcs.from_service_account_json(path)
| null |
1,314 |
"""Handle omega (specimen rotation) metadata
* OmegaWedges class specifies omega metadata in wedges
"""
import numpy as np
from .baseclass import ImageSeries
OMEGA_KEY = 'omega'
class OmegaImageSeries(ImageSeries):
"""ImageSeries with omega metadata"""
DFLT_TOL = 1.0e-6
TAU = 360
def __init__(self, ims):
"""This class is initialized with an existing imageseries"""
# check for omega metadata
if OMEGA_KEY in ims.metadata:
self._omega = ims.metadata[OMEGA_KEY]
if len(ims) != self._omega.shape[0]:
msg = 'omega array mismatch: array has %s frames, expecting %s'
msg = msg % (self._omega.shape[0], len(ims))
raise OmegaSeriesError(msg)
else:
raise OmegaSeriesError('Imageseries has no omega metadata')
super(OmegaImageSeries, self).__init__(ims)
self._make_wedges()
def _make_wedges(self, tol=DFLT_TOL):
nf = len(self)
om = self.omega
# find the frames where the wedges break
starts = [0]
delta = om[0, 1] - om[0, 0]
omlast = om[0, 1]
for f in range(1, nf):
if delta <= 0:
raise OmegaSeriesError('omega array must be increasing')
# check whether delta changes or ranges not contiguous
d = om[f,1] - om[f,0]
if (np.abs(d - delta) > tol) or (np.abs(om[f,0] - omlast) > tol):
starts.append(f)
delta = d
omlast = om[f, 1]
starts.append(nf)
nw = len(starts) - 1
nf0 = 0
self._wedge_om = np.zeros((nw, 3))
self._wedge_f = np.zeros((nw, 2), dtype=int)
self._omegawedges = OmegaWedges(nf)
for s in range(nw):
ostart = om[starts[s], 0]
ostop = om[starts[s + 1] - 1, 1]
steps = starts[s+1] - starts[s]
self._omegawedges.METHOD_NAME(ostart, ostop, steps)
#
delta = (ostop - ostart)/steps
self._wedge_om[s, :] = (ostart, ostop, delta)
self._wedge_f[s, 0] = nf0
self._wedge_f[s, 1] = steps
nf0 += steps
assert(nf0 == nf)
@property
def omega(self):
"""return omega range array (nframes, 2)"""
return self._omega
@property
def omegawedges(self):
return self._omegawedges
@property
def nwedges(self):
return self.omegawedges.nwedges
def wedge(self, i):
"""return i'th wedge as a dictionary"""
d = self.omegawedges.wedges[i]
delta = (d['ostop'] - d['ostart'])/d['nsteps']
d.update(delta=delta)
return d
def omega_to_frame(self, om):
"""Return frame and wedge which includes given omega, -1 if not found"""
f = -1
w = -1
for i in range(len(self._wedge_om)):
omin = self._wedge_om[i, 0]
omax = self._wedge_om[i, 1]
omcheck = omin + np.mod(om - omin, self.TAU)
if omcheck < omax:
odel = self._wedge_om[i, 2]
f = self._wedge_f[i,0] + int(np.floor((omcheck - omin)/odel))
w = i
break
return f, w
def omegarange_to_frames(self, omin, omax):
"""Return list of frames for range of omegas"""
noframes = ()
f0, w0 = self.omega_to_frame(omin)
if w0 < 0:
return noframes
f1, w1 = self.omega_to_frame(omax)
if w1 < 0:
return noframes
# if same wedge, require frames be increasing
if (w0 == w1) and (f1 > f0):
return list(range(f0, f1+1))
# case: adjacent wedges with 2pi jump in omega
w0max = self._wedge_om[w0, 1]
w1min = self._wedge_om[w1, 0]
if np.mod(np.abs(w1min - w0max), self.TAU) < self.DFLT_TOL:
r0 = list(range(f0, self._wedge_f[w0, 0] + self._wedge_f[w0, 1]))
r1 = list(range(self._wedge_f[w1, 0], f1 + 1))
return r0 + r1
return noframes
class OmegaWedges(object):
"""piecewise linear omega ranges"""
def __init__(self, nframes):
"""Constructor for OmegaWedge"""
self.nframes = nframes
self._wedges = []
#
# ============================== API
#
@property
def omegas(self):
"""n x 2 array of omega values, one per frame"""
if self.nframes != self.wframes:
msg = "number of frames (%s) does not match "\
"number of wedge frames (%s)" %(self.nframes, self.wframes)
raise OmegaSeriesError(msg)
oa = np.zeros((self.nframes, 2))
wstart = 0
for w in self.wedges:
ns = w['nsteps']
wr = list(range(wstart, wstart + ns))
wa0 = np.linspace(w['ostart'], w['ostop'], ns + 1)
oa[wr, 0] = wa0[:-1]
oa[wr, 1] = wa0[1:]
wstart += ns
return oa
@property
def nwedges(self):
"""number of wedges"""
return len(self._wedges)
@property
def wedges(self):
"""list of wedges (dictionaries)"""
return self._wedges
def METHOD_NAME(self, ostart, ostop, nsteps, loc=None):
"""add wedge to list"""
d = dict(ostart=ostart, ostop=ostop, nsteps=nsteps)
if loc is None:
loc = self.nwedges
self.wedges.insert(loc, d)
def delwedge(self, i):
"""delete wedge number i"""
self.wedges.pop(i)
@property
def wframes(self):
"""number of frames in wedges"""
wf = [w['nsteps'] for w in self.wedges]
return int(np.sum(wf))
def save_omegas(self, fname):
"""save omegas to text file"""
np.save(fname, self.omegas)
pass # end class
class OmegaSeriesError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
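# Bookkeeping sketch (illustrative): two contiguous wedges covering 0 to 180 degrees in six
# frames, added through the wedge-adding method above, yield a (6, 2) omega array with
# 30-degree frames.
if __name__ == '__main__':
    _ow = OmegaWedges(6)
    _ow.METHOD_NAME(0.0, 90.0, 3)
    _ow.METHOD_NAME(90.0, 180.0, 3)
    assert _ow.nwedges == 2 and _ow.wframes == 6
    assert _ow.omegas.shape == (6, 2)  # rows: [0, 30], [30, 60], ..., [150, 180]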
| null |
1,315 |
import os
import pytest
from ..utils import assert_content
from pyfs import head, cp
from pyio import write, write_regions
def test_cp_file_from_local_folder_to_mount_folder(size, local_file, mount_file, source_path):
"""TC-PIPE-FUSE-50"""
head(source_path, size=size, write_to=local_file)
cp(local_file, mount_file)
assert_content(local_file, mount_file)
def test_append_to_file_end(local_file, mount_file, source_path):
"""TC-PIPE-FUSE-51"""
head(source_path, append_to=local_file)
head(source_path, append_to=mount_file)
assert_content(local_file, mount_file)
def test_override_file_tail(size, local_file, mount_file):
"""TC-PIPE-FUSE-52"""
if size < 10:
pytest.skip()
actual_size = os.path.getsize(local_file)
write(local_file, offset=actual_size - 10, amount=10)
write(mount_file, offset=actual_size - 10, amount=10)
assert_content(local_file, mount_file)
def test_override_file_head(size, local_file, mount_file):
"""TC-PIPE-FUSE-53"""
if size < 10:
pytest.skip()
write(local_file, offset=0, amount=10)
write(mount_file, offset=0, amount=10)
assert_content(local_file, mount_file)
def test_write_to_position_that_is_bigger_than_file_length(local_file, mount_file):
"""TC-PIPE-FUSE-54"""
actual_size = os.path.getsize(local_file)
write(local_file, offset=actual_size + 10, amount=10)
write(mount_file, offset=actual_size + 10, amount=10)
assert_content(local_file, mount_file)
def test_write_region_that_exceeds_file_length(size, local_file, mount_file):
"""TC-PIPE-FUSE-55"""
if size < 5:
pytest.skip()
actual_size = os.path.getsize(local_file)
write(local_file, offset=actual_size - 5, amount=10)
write(mount_file, offset=actual_size - 5, amount=10)
assert_content(local_file, mount_file)
def test_write_region_in_first_chunk(size, local_file, mount_file):
"""TC-PIPE-FUSE-56"""
if size < 20:
pytest.skip()
write(local_file, offset=10, amount=10)
write(mount_file, offset=10, amount=10)
assert_content(local_file, mount_file)
def test_write_region_in_single_chunk(size, chunk_size, local_file, mount_file):
"""TC-PIPE-FUSE-57"""
if size < chunk_size + 20:
pytest.skip()
write(local_file, offset=chunk_size + 10, amount=10)
write(mount_file, offset=chunk_size + 10, amount=10)
assert_content(local_file, mount_file)
def test_write_region_matching_single_chunk(size, chunk_size, local_file, mount_file):
"""TC-PIPE-FUSE-58"""
if size < chunk_size:
pytest.skip()
write(local_file, offset=0, amount=chunk_size)
write(mount_file, offset=0, amount=chunk_size)
assert_content(local_file, mount_file)
def test_write_region_between_two_chunks(size, chunk_size, local_file, mount_file):
"""TC-PIPE-FUSE-59"""
if size < chunk_size + 5:
pytest.skip()
write(local_file, offset=chunk_size - 5, amount=10)
write(mount_file, offset=chunk_size - 5, amount=10)
assert_content(local_file, mount_file)
def test_write_two_regions_in_single_chunk(size, chunk_size, local_file, mount_file):
"""TC-PIPE-FUSE-60"""
if size < chunk_size + 110:
pytest.skip()
write_regions(local_file, {'offset': chunk_size + 10, 'amount': 10}, {'offset': chunk_size + 100, 'amount': 10})
write_regions(mount_file, {'offset': chunk_size + 10, 'amount': 10}, {'offset': chunk_size + 100, 'amount': 10})
assert_content(local_file, mount_file)
def test_write_two_regions_in_two_adjacent_chunks(size, chunk_size, local_file, mount_file):
"""TC-PIPE-FUSE-61"""
if size < chunk_size + 20:
pytest.skip()
write_regions(local_file, {'offset': 10, 'amount': 10}, {'offset': chunk_size + 10, 'amount': 10})
write_regions(mount_file, {'offset': 10, 'amount': 10}, {'offset': chunk_size + 10, 'amount': 10})
assert_content(local_file, mount_file)
def METHOD_NAME(size, chunk_size, local_file, mount_file):
"""TC-PIPE-FUSE-62"""
if size < chunk_size * 2 + 20:
pytest.skip()
write_regions(local_file, {'offset': 10, 'amount': 10}, {'offset': chunk_size * 2 + 10, 'amount': 10})
write_regions(mount_file, {'offset': 10, 'amount': 10}, {'offset': chunk_size * 2 + 10, 'amount': 10})
assert_content(local_file, mount_file)
def test_write_two_regions_between_three_chunks(size, chunk_size, local_file, mount_file):
"""TC-PIPE-FUSE-63"""
if size < chunk_size * 2 + 5:
pytest.skip()
write_regions(local_file, {'offset': chunk_size - 5, 'amount': 10}, {'offset': chunk_size * 2 - 5, 'amount': 10})
write_regions(mount_file, {'offset': chunk_size - 5, 'amount': 10}, {'offset': chunk_size * 2 - 5, 'amount': 10})
assert_content(local_file, mount_file)
def test_write_two_regions_between_four_chunks(size, chunk_size, local_file, mount_file):
"""TC-PIPE-FUSE-64"""
if size < chunk_size * 3 + 5:
pytest.skip()
write_regions(local_file, {'offset': chunk_size - 5, 'amount': 10}, {'offset': chunk_size * 3 - 5, 'amount': 10})
write_regions(mount_file, {'offset': chunk_size - 5, 'amount': 10}, {'offset': chunk_size * 3 - 5, 'amount': 10})
assert_content(local_file, mount_file)
def test_write_two_regions_with_one_of_them_exceeding_file_length(size, chunk_size, local_file, mount_file):
"""TC-PIPE-FUSE-65"""
if size < 5:
pytest.skip()
actual_size = os.path.getsize(local_file)
write_regions(local_file, {'offset': 10, 'amount': 10}, {'offset': actual_size - 5, 'amount': 10})
write_regions(mount_file, {'offset': 10, 'amount': 10}, {'offset': actual_size - 5, 'amount': 10})
assert_content(local_file, mount_file)
def test_write_two_regions_with_one_of_them_starting_from_position_that_is_bigger_than_file_length(size, chunk_size, local_file, mount_file):
"""TC-PIPE-FUSE-66"""
if size < 5:
pytest.skip()
actual_size = os.path.getsize(local_file)
write_regions(local_file, {'offset': 10, 'amount': 10}, {'offset': actual_size + 5, 'amount': 10})
write_regions(mount_file, {'offset': 10, 'amount': 10}, {'offset': actual_size + 5, 'amount': 10})
assert_content(local_file, mount_file)
def test_write_two_regions_starting_from_position_that_is_bigger_than_file_length(chunk_size, local_file, mount_file):
"""TC-PIPE-FUSE-67"""
actual_size = os.path.getsize(local_file)
write_regions(local_file, {'offset': actual_size + 5, 'amount': 10}, {'offset': actual_size + 20, 'amount': 10})
write_regions(mount_file, {'offset': actual_size + 5, 'amount': 10}, {'offset': actual_size + 20, 'amount': 10})
assert_content(local_file, mount_file)
def test_write_two_overlapping_regions(size, chunk_size, local_file, mount_file):
"""TC-PIPE-FUSE-68"""
if size < 25:
pytest.skip()
write_regions(local_file, {'offset': 10, 'amount': 10}, {'offset': 15, 'amount': 10})
write_regions(mount_file, {'offset': 10, 'amount': 10}, {'offset': 15, 'amount': 10})
assert_content(local_file, mount_file)
def test_write_region_to_an_already_written_chunk(size, chunk_size, local_file, mount_file):
"""TC-PIPE-FUSE-69"""
if size < chunk_size + 10:
pytest.skip()
write_regions(local_file, {'offset': 0, 'amount': chunk_size}, {'offset': 10, 'amount': chunk_size})
write_regions(mount_file, {'offset': 0, 'amount': chunk_size}, {'offset': 10, 'amount': chunk_size})
assert_content(local_file, mount_file)
| null |
1,316 |
################################################################################
# Creme is a free/open-source Customer Relationship Management software
# Copyright (C) 2009-2022 Hybird
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
################################################################################
from __future__ import annotations
import logging
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from ..models import CremeEntity
from ..utils.dates import dt_from_ISO8601, dt_to_ISO8601
logger = logging.getLogger(__name__)
class LastViewedItem:
def __init__(self, request, entity: CremeEntity):
self.pk = entity.pk
self.ctype_id = entity.entity_type_id
self.url = entity.get_absolute_url()
self.METHOD_NAME(entity)
self.__add(request)
def __repr__(self):
return self.name
def __eq__(self, other):
return self.pk == other.pk
def as_dict(self) -> dict:
return {
'pk': self.pk,
'ctype_id': self.ctype_id,
'url': self.url,
'name': self.name,
'modified': dt_to_ISO8601(self.modified),
}
@property
def ctype(self):
return ContentType.objects.get_for_id(self.ctype_id)
@classmethod
def from_dict(cls, data: dict):
instance = object.__new__(cls)
for attr in ('pk', 'url', 'name'):
setattr(instance, attr, data[attr])
instance.ctype_id = data['ctype_id']
instance.modified = dt_from_ISO8601(data['modified'])
return instance
def METHOD_NAME(self, entity: CremeEntity) -> None:
self.name = str(entity)
self.modified = entity.modified
def __add(self, request) -> None:
logger.debug('LastViewedItem.add: %s', self)
session = request.session
last_viewed_items = self._deserialize_all(session)
if last_viewed_items and last_viewed_items[0] == self:
return
try:
last_viewed_items.remove(self)
except ValueError:
logger.debug('%s not in last_viewed', self)
last_viewed_items.insert(0, self)
del last_viewed_items[settings.MAX_LAST_ITEMS:]
self._serialize_all(session, last_viewed_items)
@classmethod
def _deserialize_all(cls, session) -> list[LastViewedItem]:
from_dict = cls.from_dict
return [from_dict(data) for data in session.get('last_viewed_items', ())]
@staticmethod
def _serialize_all(session, items) -> None:
session['last_viewed_items'] = [item.as_dict() for item in items]
# TODO: use the future entity representation table
@classmethod
def get_all(cls, request) -> list[LastViewedItem]:
items = []
session = request.session
old_items = cls._deserialize_all(session)
if old_items:
MAX_LAST_ITEMS = settings.MAX_LAST_ITEMS
updated = False
if len(old_items) > MAX_LAST_ITEMS:
# The 'settings' value has changed since the list has been stored
updated = True
del old_items[MAX_LAST_ITEMS:]
entities = CremeEntity.objects.filter(
is_deleted=False,
).in_bulk([item.pk for item in old_items])
# If any entity has been deleted -> must update
updated |= (len(old_items) != len(entities))
for item in old_items:
entity = entities.get(item.pk)
if entity:
if entity.modified > item.modified:
updated = True
# TODO: use CremeEntity.populate_real_entities() or ctype_id
item.METHOD_NAME(entity.get_real_entity())
items.append(item)
if updated:
cls._serialize_all(session, items)
return items
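# Round-trip sketch (illustrative): the session stores plain dicts, so an item can be rebuilt
# without touching the database, e.g.
#   data = item.as_dict()                    # {'pk': ..., 'ctype_id': ..., 'url': ..., 'name': ..., 'modified': '<ISO-8601>'}
#   LastViewedItem.from_dict(data) == item   # True -- equality only compares primary keys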
| null |
1,317 |
"""Tests for anomaly classification with OTX CLI"""
# Copyright (C) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
import os
import pytest
from otx.cli.registry import Registry
from tests.test_suite.e2e_test_system import e2e_pytest_component
from tests.test_suite.run_test_command import (
nncf_eval_openvino_testing,
nncf_eval_testing,
nncf_export_testing,
nncf_optimize_testing,
nncf_validate_fq_testing,
otx_demo_deployment_testing,
otx_demo_openvino_testing,
otx_demo_testing,
otx_deploy_openvino_testing,
otx_eval_deployment_testing,
otx_eval_openvino_testing,
otx_eval_testing,
otx_export_testing,
otx_train_testing,
ptq_eval_testing,
ptq_optimize_testing,
ptq_validate_fq_testing,
)
args = {
"--train-data-roots": "tests/assets/anomaly/hazelnut/train",
"--val-data-roots": "tests/assets/anomaly/hazelnut/test",
"--test-data-roots": "tests/assets/anomaly/hazelnut/test",
"--input": "tests/assets/anomaly/hazelnut/test/colour",
"train_params": [],
}
otx_dir = os.getcwd()
templates = Registry("src/otx/algorithms").filter(task_type="ANOMALY_CLASSIFICATION").templates
templates_ids = [template.model_template_id for template in templates]
class TestToolsAnomalyClassification:
@e2e_pytest_component
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def test_otx_train(self, template, tmp_dir_path):
otx_train_testing(template, tmp_dir_path, otx_dir, args, deterministic=True)
@e2e_pytest_component
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def test_otx_export(self, template, tmp_dir_path):
otx_export_testing(template, tmp_dir_path, check_ir_meta=True)
@e2e_pytest_component
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def test_otx_eval(self, template, tmp_dir_path):
otx_eval_testing(template, tmp_dir_path, otx_dir, args)
@e2e_pytest_component
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def test_otx_eval_openvino(self, template, tmp_dir_path):
otx_eval_openvino_testing(template, tmp_dir_path, otx_dir, args, threshold=0.2)
@e2e_pytest_component
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def test_otx_demo(self, template, tmp_dir_path):
otx_demo_testing(template, tmp_dir_path, otx_dir, args)
@e2e_pytest_component
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def test_otx_demo_openvino(self, template, tmp_dir_path):
otx_demo_openvino_testing(template, tmp_dir_path, otx_dir, args)
@e2e_pytest_component
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def test_otx_deploy_openvino(self, template, tmp_dir_path):
otx_deploy_openvino_testing(template, tmp_dir_path, otx_dir, args)
@e2e_pytest_component
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def test_otx_eval_deployment(self, template, tmp_dir_path):
otx_eval_deployment_testing(template, tmp_dir_path, otx_dir, args, threshold=0.0)
@e2e_pytest_component
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def METHOD_NAME(self, template, tmp_dir_path):
otx_demo_deployment_testing(template, tmp_dir_path, otx_dir, args)
@e2e_pytest_component
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def test_nncf_optimize(self, template, tmp_dir_path):
if template.entrypoints.nncf is None:
pytest.skip("nncf entrypoint is none")
nncf_optimize_testing(template, tmp_dir_path, otx_dir, args)
@e2e_pytest_component
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def test_nncf_export(self, template, tmp_dir_path):
if template.entrypoints.nncf is None:
pytest.skip("nncf entrypoint is none")
nncf_export_testing(template, tmp_dir_path)
@e2e_pytest_component
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def test_nncf_validate_fq(self, template, tmp_dir_path):
if template.entrypoints.nncf is None:
pytest.skip("nncf entrypoint is none")
nncf_validate_fq_testing(template, tmp_dir_path, otx_dir, "anomaly", type(self).__name__)
@e2e_pytest_component
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def test_nncf_eval(self, template, tmp_dir_path):
if template.entrypoints.nncf is None:
pytest.skip("nncf entrypoint is none")
nncf_eval_testing(template, tmp_dir_path, otx_dir, args, threshold=0.01)
@e2e_pytest_component
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def test_nncf_eval_openvino(self, template, tmp_dir_path):
if template.entrypoints.nncf is None:
pytest.skip("nncf entrypoint is none")
nncf_eval_openvino_testing(template, tmp_dir_path, otx_dir, args)
@e2e_pytest_component
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def test_ptq_optimize(self, template, tmp_dir_path):
ptq_optimize_testing(template, tmp_dir_path, otx_dir, args)
@e2e_pytest_component
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def test_ptq_validate_fq(self, template, tmp_dir_path):
ptq_validate_fq_testing(template, tmp_dir_path, otx_dir, "anomaly", type(self).__name__)
@e2e_pytest_component
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def test_ptq_eval(self, template, tmp_dir_path):
ptq_eval_testing(template, tmp_dir_path, otx_dir, args)
| null |
1,318 |
import sys
import os
import glob
import gzip
import contextlib
import inspect
import subprocess
import tempfile
import pysam
WORKDIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
"pysam_test_work"))
BAM_DATADIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
"pysam_data"))
TABIX_DATADIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
"tabix_data"))
CBCF_DATADIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
"cbcf_data"))
LINKDIR = os.path.abspath(os.path.join(
os.path.dirname(__file__), "..", "linker_tests"))
TESTS_TEMPDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "tmp"))
IS_PYTHON3 = sys.version_info[0] >= 3
if IS_PYTHON3:
from itertools import zip_longest
from urllib.request import urlopen
else:
from itertools import izip as zip_longest
from urllib2 import urlopen
if IS_PYTHON3:
def force_str(s):
try:
return s.decode('ascii')
except AttributeError:
return s
def force_bytes(s):
try:
return s.encode('ascii')
except AttributeError:
return s
else:
def force_str(s):
return s
def force_bytes(s):
return s
def openfile(fn):
if fn.endswith(".gz"):
try:
return gzip.open(fn, "rt", encoding="utf-8")
except TypeError:
return gzip.open(fn, "r")
else:
return open(fn)
def checkBinaryEqual(filename1, filename2):
'''return true if the two files are binary equal.
'''
if os.path.getsize(filename1) != os.path.getsize(filename2):
return False
infile1 = open(filename1, "rb")
infile2 = open(filename2, "rb")
def METHOD_NAME(infile):
while 1:
c = infile.read(1)
if c == b"":
break
yield c
found = False
for c1, c2 in zip_longest(METHOD_NAME(infile1), METHOD_NAME(infile2)):
if c1 != c2:
break
else:
found = True
infile1.close()
infile2.close()
return found
def checkGZBinaryEqual(filename1, filename2):
'''return true if the decompressed contents of the two files
are binary equal.
'''
with gzip.open(filename1, "rb") as infile1:
d1 = infile1.read()
with gzip.open(filename2, "rb") as infile2:
d2 = infile2.read()
if d1 == d2:
return True
return False
def check_samtools_view_equal(
filename1, filename2,
without_header=False):
'''return true if the two files are equal in their
content through samtools view.
'''
# strip MD and NM tags, as not preserved in CRAM files
args = ["-x", "MD", "-x", "NM"]
if not without_header:
args.append("-h")
lines1 = pysam.samtools.view(*(args + [filename1]))
lines2 = pysam.samtools.view(*(args + [filename2]))
if len(lines1) != len(lines2):
return False
if lines1 != lines2:
# line by line comparison
# sort each line, as tags get rearranged between
# BAM/CRAM
for n, pair in enumerate(zip(lines1, lines2)):
l1, l2 = pair
l1 = sorted(l1[:-1].split("\t"))
l2 = sorted(l2[:-1].split("\t"))
if l1 != l2:
print("mismatch in line %i" % n)
print(l1)
print(l2)
return False
else:
return False
return True
def check_url(url):
'''return True if URL is available.
A URL might not be available if it is the wrong URL
or there is no connection to the URL.
'''
try:
urlopen(url, timeout=1)
return True
except:
return False
def checkFieldEqual(cls, read1, read2, exclude=[]):
'''check if two reads are equal by comparing each field.'''
# add the . for refactoring purposes.
for x in (".query_name",
".query_sequence",
".flag",
".reference_id",
".reference_start",
".mapping_quality",
".cigartuples",
".next_reference_id",
".next_reference_start",
".template_length",
".query_length",
".query_qualities",
".bin",
".is_paired", ".is_proper_pair",
".is_unmapped", ".is_mapped",
".mate_is_unmapped", ".mate_is_mapped",
".is_reverse", ".is_forward",
".mate_is_reverse", ".mate_is_forward",
".is_read1", ".is_read2",
".is_secondary", ".is_qcfail",
".is_duplicate"):
n = x[1:]
if n in exclude:
continue
cls.assertEqual(getattr(read1, n), getattr(read2, n),
"attribute mismatch for %s: %s != %s" %
(n, getattr(read1, n), getattr(read2, n)))
def check_lines_equal(cls, a, b, sort=False, filter_f=None, msg=None):
"""check if contents of two files are equal comparing line-wise.
sort: bool
sort contents of both files before comparing.
filter_f:
        remove lines in both a and b where the expression is True
"""
with openfile(a) as inf:
aa = inf.readlines()
with openfile(b) as inf:
bb = inf.readlines()
if filter_f is not None:
aa = [x for x in aa if not filter_f(x)]
bb = [x for x in bb if not filter_f(x)]
if sort:
cls.assertEqual(sorted(aa), sorted(bb), msg)
else:
cls.assertEqual(aa, bb, msg)
def get_temp_filename(suffix=""):
caller_name = inspect.getouterframes(inspect.currentframe(), 2)[1][3]
try:
os.makedirs(TESTS_TEMPDIR)
except OSError:
pass
f = tempfile.NamedTemporaryFile(
prefix="pysamtests_tmp_{}_".format(caller_name),
suffix=suffix,
delete=False,
dir=TESTS_TEMPDIR)
f.close()
return f.name
@contextlib.contextmanager
def get_temp_context(suffix="", keep=False):
caller_name = inspect.getouterframes(inspect.currentframe(), 3)[1][3]
try:
os.makedirs(TESTS_TEMPDIR)
except OSError:
pass
f = tempfile.NamedTemporaryFile(
prefix="pysamtests_tmp_{}_".format(caller_name),
suffix=suffix,
delete=False,
dir=TESTS_TEMPDIR)
f.close()
yield f.name
if not keep:
# clear up any indices as well
for f in glob.glob(f.name + "*"):
os.unlink(f)
def make_data_files(directory):
what = None
try:
if not os.path.exists(os.path.join(directory, "all.stamp")):
subprocess.check_output(["make", "-C", directory], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
what = "Making test data in '%s' failed:\n%s" % (directory, force_str(e.output))
if what is not None:
raise RuntimeError(what)
def load_and_convert(filename, encode=True):
'''load data from filename and convert all fields to string.
Filename can be either plain or compressed (ending in .gz).
'''
data = []
if filename.endswith(".gz"):
with gzip.open(filename) as inf:
for line in inf:
line = line.decode("ascii")
if line.startswith("#"):
continue
d = line.strip().split("\t")
data.append(d)
else:
with open(filename) as f:
for line in f:
if line.startswith("#"):
continue
d = line.strip().split("\t")
data.append(d)
return data
def flatten_nested_list(l):
return [i for ll in l for i in ll]
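# Usage sketch (illustrative): get_temp_context() hands out a scratch filename under the
# tests/tmp directory and removes it, together with any index files, when the block exits.
if __name__ == "__main__":
    with get_temp_context(".txt") as scratch:
        with open(scratch, "w") as outf:
            outf.write("scratch data")
    assert not os.path.exists(scratch)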
| null |
1,319 |
# Copyright (C) 2021-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
from typing import Dict
import pytest
import torch
import torch.nn as nn
from otx.algorithms.segmentation.adapters.mmseg import SelfSLMLP
from tests.test_suite.e2e_test_system import e2e_pytest_unit
class TestSelfSLMLP:
@e2e_pytest_unit
@pytest.mark.parametrize("use_conv", [True, False])
@pytest.mark.parametrize("with_avg_pool", [True, False])
def test_init(self, use_conv: bool, with_avg_pool: bool):
"""Test __init__ function."""
selfslmlp = SelfSLMLP(
in_channels=2, hid_channels=2, out_channels=2, use_conv=use_conv, with_avg_pool=with_avg_pool
)
if with_avg_pool:
assert isinstance(selfslmlp.avgpool, nn.AdaptiveAvgPool2d)
if use_conv:
assert isinstance(selfslmlp.mlp[0], nn.Conv2d)
assert isinstance(selfslmlp.mlp[3], nn.Conv2d)
else:
assert isinstance(selfslmlp.mlp[0], nn.Linear)
assert isinstance(selfslmlp.mlp[3], nn.Linear)
@e2e_pytest_unit
@pytest.mark.parametrize("init_linear", ["normal", "kaiming"])
@pytest.mark.parametrize("std", [0.01, 1.0, 10.0])
@pytest.mark.parametrize("bias", [0.1, 0.0, 1.0])
def test_init_weights(self, init_linear: str, std: float, bias: float):
"""Test init_weights function.
Check if weights of nn.Linear was changed except for biases.
BatchNorm weights are already set to 1, so it isn't required to be checked.
"""
def gather_weight_mean_std(modules):
mean = []
std = []
for module in modules:
if isinstance(module, nn.Linear):
mean.append(module.weight.mean())
std.append(module.weight.std())
return mean, std
selfslmlp = SelfSLMLP(in_channels=2, hid_channels=2, out_channels=2, use_conv=False, with_avg_pool=True)
orig_mean, orig_std = gather_weight_mean_std(selfslmlp.modules())
selfslmlp.init_weights(init_linear, std, bias)
updated_mean, updated_std = gather_weight_mean_std(selfslmlp.modules())
for origs, updateds in zip([orig_mean, orig_std], [updated_mean, updated_std]):
for orig, updated in zip(origs, updateds):
assert orig != updated
@e2e_pytest_unit
@pytest.mark.parametrize("init_linear", ["undefined"])
def test_init_weights_undefined_initialization(self, init_linear: str):
"""Test init_weights function when undefined initialization is given."""
selfslmlp = SelfSLMLP(in_channels=2, hid_channels=2, out_channels=2, use_conv=False, with_avg_pool=True)
with pytest.raises(ValueError):
selfslmlp.init_weights(init_linear)
@e2e_pytest_unit
@pytest.mark.parametrize(
"inputs,norm_cfg,use_conv,with_avg_pool,expected",
[
(torch.rand((2, 2)), dict(type="BN1d"), False, False, torch.Size([2, 2])),
(torch.rand((2, 2, 2, 2)), dict(type="BN1d"), False, True, torch.Size([2, 2])),
(torch.rand((2, 2, 2, 2)), dict(type="BN2d"), True, False, torch.Size([2, 2, 2, 2])),
(torch.rand((2, 2, 2, 2)), dict(type="BN2d"), True, True, torch.Size([2, 2, 1, 1])),
],
)
def test_forward_tensor(
self, inputs: torch.Tensor, norm_cfg: Dict, use_conv: bool, with_avg_pool: bool, expected: torch.Size
):
"""Test forward function for tensor."""
selfslmlp = SelfSLMLP(
in_channels=2,
hid_channels=2,
out_channels=2,
norm_cfg=norm_cfg,
use_conv=use_conv,
with_avg_pool=with_avg_pool,
)
results = selfslmlp(inputs)
assert results.shape == expected
@e2e_pytest_unit
@pytest.mark.parametrize(
"inputs,norm_cfg,use_conv,with_avg_pool,expected",
[
([torch.rand((2, 2)), torch.rand((2, 2))], dict(type="BN1d"), False, False, torch.Size([2, 2])),
([torch.rand((2, 2, 2, 2)), torch.rand((2, 2, 2, 2))], dict(type="BN1d"), False, True, torch.Size([2, 2])),
(
[torch.rand((2, 2, 2, 2)), torch.rand((2, 2, 2, 2))],
dict(type="BN2d"),
True,
False,
torch.Size([2, 2, 2, 2]),
),
(
[torch.rand((2, 2, 2, 2)), torch.rand((2, 2, 2, 2))],
dict(type="BN2d"),
True,
True,
torch.Size([2, 2, 1, 1]),
),
],
)
def METHOD_NAME(
self, inputs: torch.Tensor, norm_cfg: Dict, use_conv: bool, with_avg_pool: bool, expected: torch.Size
):
"""Test forward function for list or tuple."""
selfslmlp = SelfSLMLP(
in_channels=2,
hid_channels=2,
out_channels=2,
norm_cfg=norm_cfg,
use_conv=use_conv,
with_avg_pool=with_avg_pool,
)
results = selfslmlp(inputs)
assert results.shape == expected
@e2e_pytest_unit
@pytest.mark.parametrize("inputs", ["unsupported", 1])
def test_forward_unsupported_format(self, inputs: str):
"""Test forward function for unsupported format."""
selfslmlp = SelfSLMLP(
in_channels=2,
hid_channels=2,
out_channels=2,
)
with pytest.raises(TypeError):
selfslmlp(inputs)
| null |
1,320 |
from typing import Any, List
import numpy as np
import requests
from fastapi import (
APIRouter,
Body,
Depends,
File,
Form,
HTTPException,
UploadFile,
)
from sqlalchemy.ext.asyncio.session import AsyncSession
from starlette.responses import JSONResponse
from src import crud
from src.core.config import settings
from src.db import models
from src.endpoints import deps
from src.schemas.msg import Msg
from src.schemas.r5 import (
R5ProjectCreateDTO,
R5ProjectInDB,
R5ProjectUpdateDTO,
R5RegionCreateDTO,
R5RegionInDB,
request_examples,
)
router = APIRouter()
# ----------------------ACTIVITY ENDPOINTS------------------------
# ----------------------------------------------------------------
headers = {}
if settings.R5_AUTHORIZATION:
headers["Authorization"] = settings.R5_AUTHORIZATION
@router.get("/activity")
async def get_activity(
current_user: models.User = Depends(deps.get_current_active_superuser),
):
"""
Get all activities.
"""
    response = requests.get(settings.R5_API_URL + "/activity", headers=headers)
return response.json()
# ------------------------REGION ENDPOINTS------------------------
# ----------------------------------------------------------------
@router.get("/region", response_model=List[R5RegionInDB])
async def get_regions(
*,
db: AsyncSession = Depends(deps.get_r5_mongo_db),
current_user: models.User = Depends(deps.get_current_active_superuser),
) -> Any:
"""
Get all regions.
"""
regions = await crud.r5.get_all_regions(db)
return regions
@router.get("/region/{region_id}", response_model=R5RegionInDB)
async def get_region(
*,
region_id: str,
db: AsyncSession = Depends(deps.get_r5_mongo_db),
current_user: models.User = Depends(deps.get_current_active_superuser),
) -> Any:
"""
Get region.
"""
region = await crud.r5.get_region(db, region_id)
return region
@router.get("/region/{region_id}/project", response_model=List[R5ProjectInDB])
async def get_projects_for_region(
*,
region_id: str,
db: AsyncSession = Depends(deps.get_r5_mongo_db),
current_user: models.User = Depends(deps.get_current_active_superuser),
) -> Any:
"""
Get all projects.
"""
projects = await crud.r5.get_projects_for_region(db, region_id)
return projects
@router.post("/region", response_model=R5RegionInDB)
async def region_create(
*,
db: AsyncSession = Depends(deps.get_r5_mongo_db),
region_in: R5RegionCreateDTO = Body(...,
example=request_examples["region"]["create"]),
current_user: models.User = Depends(deps.get_current_active_superuser),
) -> Any:
"""
Create new region.
"""
region = await crud.r5.create_region(db=db, region_in=region_in)
return region
# TODO: Add region update
@router.delete("/region/{region_id}", response_model=Msg)
async def region_delete(
*,
db: AsyncSession = Depends(deps.get_r5_mongo_db),
region_id: str,
current_user: models.User = Depends(deps.get_current_active_superuser),
) -> Any:
"""
Delete region.
"""
region = await crud.r5.get_region(db=db, region_id=region_id)
if not region:
raise HTTPException(status_code=400, detail="The region doesn't exist")
result = await crud.r5.delete_region(db=db, region_id=region_id)
return result
# -----------------------PROJECT ENDPOINTS------------------------
# ----------------------------------------------------------------
@router.get("/project", response_model=List[R5ProjectInDB])
async def get_projects(
*,
db: AsyncSession = Depends(deps.get_r5_mongo_db),
current_user: models.User = Depends(deps.get_current_active_superuser),
) -> Any:
"""
Get all projects.
"""
projects = await crud.r5.get_all_projects(db)
return projects
@router.get("/project/{project_id}", response_model=R5ProjectInDB)
async def get_project(
*,
project_id: str,
db: AsyncSession = Depends(deps.get_r5_mongo_db),
current_user: models.User = Depends(deps.get_current_active_superuser),
) -> Any:
"""
Get project.
"""
project = await crud.r5.get_project(db, project_id)
return project
@router.post("/project", response_model=R5ProjectInDB)
async def project_create(
*,
db: AsyncSession = Depends(deps.get_r5_mongo_db),
project_in: R5ProjectCreateDTO = Body(...,
example=request_examples["project"]["create"]),
current_user: models.User = Depends(deps.get_current_active_superuser),
) -> Any:
"""
Create new project.
"""
project = await crud.r5.create_project(db=db, project_in=project_in)
return project
@router.put("/project", response_model=Msg)
async def project_update(
*,
db: AsyncSession = Depends(deps.get_r5_mongo_db),
project_in: R5ProjectUpdateDTO = Body(...,
example=request_examples["project"]["update"]),
current_user: models.User = Depends(deps.get_current_active_superuser),
) -> Any:
"""
Update project.
"""
project = await crud.r5.update_project(db=db, project_in=project_in)
return project
@router.delete("/project/{project_id}", response_model=Msg)
async def project_delete(
*,
db: AsyncSession = Depends(deps.get_r5_mongo_db),
project_id: str,
current_user: models.User = Depends(deps.get_current_active_superuser),
) -> Any:
"""
Delete project.
"""
result = await crud.r5.delete_project(db=db, project_id=project_id)
return result
# ------------------------BUNDLE ENDPOINTS------------------------
# ----------------------------------------------------------------
@router.get("/bundle", response_class=JSONResponse)
async def get_bundles(
current_user: models.User = Depends(deps.get_current_active_superuser),
) -> Any:
"""
Get all bundles.
"""
result = requests.get(settings.R5_API_URL + "/bundle", headers=headers)
return result.json()
@router.get("/bundle/{bundle_id}", response_class=JSONResponse)
async def get_bundle(
*,
bundle_id: str,
current_user: models.User = Depends(deps.get_current_active_superuser),
) -> Any:
"""
Get bundle.
"""
result = requests.get(settings.R5_API_URL + "/bundle/" + bundle_id, headers=headers)
return result.json()
@router.post("/bundle", response_class=JSONResponse)
async def create_bundle(
*,
bundle_name: str = Form(...),
osm: UploadFile = File(...),
feed_group: UploadFile = File(...),
region_id: str = Form(...),
current_user: models.User = Depends(deps.get_current_active_superuser),
) -> Any:
"""
Create new bundle.
"""
response = requests.post(
settings.R5_API_URL + "/bundle",
files={
"bundleName": bundle_name,
"osm": osm.file,
"feedGroup": feed_group.file,
"regionId": region_id,
},
headers=headers
)
return response.json()
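# Client-side sketch (illustrative; the host, route prefix, token and file names are hypothetical):
# the endpoint above expects two plain form fields plus two multipart file parts, e.g.
#   requests.post(
#       "http://localhost:5000/api/v1/r5/bundle",
#       data={"bundle_name": "demo", "region_id": "<region id>"},
#       files={"osm": open("demo.osm.pbf", "rb"), "feed_group": open("gtfs.zip", "rb")},
#       headers={"Authorization": "Bearer <superuser token>"},  # requires an active superuser
#   )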
@router.delete("/bundle/{bundle_id}", response_class=JSONResponse)
async def METHOD_NAME(
*,
bundle_id: str,
current_user: models.User = Depends(deps.get_current_active_superuser),
) -> Any:
"""
Delete bundle.
"""
response = requests.delete(settings.R5_API_URL +
"/bundle/" + bundle_id, headers=headers)
return response.json()
| null |
1,321 |
# -*- coding: utf-8 -*-
"""
Unit tests for running web2py
"""
from __future__ import print_function
import sys
import os
import unittest
import subprocess
import time
import shutil
from gluon.contrib.webclient import WebClient
from gluon._compat import urllib2, PY2
from gluon.fileutils import create_app
test_app_name = '_test_web'
webserverprocess = None
def startwebserver():
global webserverprocess
    path = os.path.dirname(os.path.abspath(__file__))
if not os.path.isfile(os.path.join(path, 'web2py.py')):
i = 0
while i < 10:
i += 1
if os.path.exists(os.path.join(path, 'web2py.py')):
break
path = os.path.abspath(os.path.join(path, '..'))
web2py_exec = os.path.join(path, 'web2py.py')
webserverprocess = subprocess.Popen([sys.executable, web2py_exec, '-a', 'testpass'])
print('Sleeping before web2py starts...')
for a in range(1, 11):
time.sleep(1)
print("%d..." % a)
        try:
            c = WebClient('http://127.0.0.1:8000/')
            c.get(test_app_name)
            break
        except:
            continue
print('')
def stopwebserver():
global webserverprocess
print('Killing webserver')
webserverprocess.terminate()
class Cookie(unittest.TestCase):
def testParseMultipleEquals(self):
""" Test for issue #1500.
Ensure that a cookie containing one or more '=' is correctly parsed
"""
client = WebClient()
client.headers['set-cookie'] = "key = value with one =;"
client._parse_headers_in_cookies()
self.assertIn("key", client.cookies)
self.assertEqual(client.cookies['key'], "value with one =")
client.headers['set-cookie'] = "key = value with one = and another one =;"
        client._parse_headers_in_cookies()
self.assertIn("key", client.cookies)
self.assertEqual(client.cookies['key'], "value with one = and another one =")
class LiveTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
appdir = os.path.join('applications', test_app_name)
if not os.path.exists(appdir):
os.mkdir(appdir)
create_app(appdir)
startwebserver()
@classmethod
def tearDownClass(cls):
stopwebserver()
appdir = os.path.join('applications', test_app_name)
if os.path.exists(appdir):
shutil.rmtree(appdir)
@unittest.skipIf("datastore" in os.getenv("DB", ""), "TODO: setup web test for app engine")
class TestWeb(LiveTest):
def testRegisterAndLogin(self):
client = WebClient("http://127.0.0.1:8000/%s/default/" % test_app_name)
client.get('index')
# register
data = dict(first_name='Homer',
last_name='Simpson',
email='[email protected]',
password='test',
password_two='test',
_formname='register')
client.post('user/register', data=data)
# logout
client.get('user/logout')
# login again
data = dict(email='[email protected]',
password='test',
_formname='login')
client.post('user/login', data=data)
self.assertIn('Homer', client.text)
# check registration and login were successful
client.get('index')
self.assertIn('Homer', client.text)
client = WebClient('http://127.0.0.1:8000/admin/default/')
client.post('index', data=dict(password='testpass'))
client.get('site')
client.get('design/' + test_app_name)
def METHOD_NAME(self):
s = WebClient("http://127.0.0.1:8000/%s/" % test_app_name)
s.get('static/js/web2py.js')
self.assertNotIn('expires', s.headers)
self.assertFalse(s.headers['cache-control'].startswith('max-age'))
text = s.text
s.get('static/_1.2.3/js/web2py.js')
self.assertEqual(text, s.text)
self.assertIn('expires', s.headers)
self.assertTrue(s.headers['cache-control'].startswith('max-age'))
@unittest.skipIf(not(PY2), 'skip PY3 testSoap')
def testSoap(self):
# test soap server implementation
from gluon.contrib.pysimplesoap.client import SoapClient, SoapFault
url = 'http://127.0.0.1:8000/examples/soap_examples/call/soap?WSDL'
client = SoapClient(wsdl=url)
ret = client.SubIntegers(a=3, b=2)
# check that the value returned is ok
self.assertIn('SubResult', ret)
self.assertEqual(ret['SubResult'], 1)
try:
ret = client.Division(a=3, b=0)
except SoapFault as sf:
# verify the exception value is ok
# self.assertEqual(sf.faultstring, "float division by zero") # true only in 2.7
self.assertEqual(sf.faultcode, "Server.ZeroDivisionError")
# store sent and received xml for low level test
xml_request = client.xml_request
xml_response = client.xml_response
# do a low level raw soap request (using
s = WebClient('http://127.0.0.1:8000/')
try:
s.post('examples/soap_examples/call/soap', data=xml_request, method="POST")
except urllib2.HTTPError as e:
self.assertEqual(e.msg, 'INTERNAL SERVER ERROR')
# check internal server error returned (issue 153)
self.assertEqual(s.status, 500)
self.assertEqual(s.text, xml_response)
| null |
1,322 |
# Copyright (C) 2018-2023 The NeoVintageous Team (NeoVintageous).
#
# This file is part of NeoVintageous.
#
# NeoVintageous is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# NeoVintageous is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NeoVintageous. If not, see <https://www.gnu.org/licenses/>.
import unittest
from NeoVintageous.nv.vi.cmd_base import ViCommandDefBase
from NeoVintageous.nv.vi.cmd_base import CommandNotFound
class TestViCommandDefBase(unittest.TestCase):
def setUp(self):
super().setUp()
self.command = ViCommandDefBase()
def METHOD_NAME(self):
self.assertEqual('', self.command.inp)
def test_set_input(self):
self.command.inp = 'x'
self.assertEqual('x', self.command.inp)
def test_reset_input(self):
self.command.inp = 'x'
self.command.reset()
self.assertEqual('', self.command.inp)
def test_accept_input_defaults_to_false(self):
self.assertFalse(self.command.accept_input)
def test_input_parser_defaults_to_none(self):
self.assertIsNone(self.command.input_parser)
def test_translate_default_raises_not_implemented(self):
with self.assertRaisesRegex(NotImplementedError, 'ViCommandDefBase must implement translate()'):
self.command.translate('state stub')
def test_accept_default_raises_not_implemented(self):
with self.assertRaisesRegex(NotImplementedError, 'ViCommandDefBase must implement accept()'):
self.command.accept('state stub')
def test_serialize_default(self):
self.assertEqual({'name': 'ViCommandDefBase', 'data': {'_inp': ''}}, self.command.serialize())
def test_serialize_input(self):
self.command.inp = 'ab'
self.assertEqual({'name': 'ViCommandDefBase', 'data': {'_inp': 'ab'}}, self.command.serialize())
def test_serialize_uses_a_serializable_whitelist(self):
self.command.inp = 'ab'
self.command.foo = 'bar'
self.command.fizz = 'buzz'
self.assertEqual({'name': 'ViCommandDefBase', 'data': {'_inp': 'ab'}}, self.command.serialize())
def test_from_json(self):
command = self.command.from_json({'foo': 'bar', '_inp': 'xyz'})
self.assertEqual('bar', command.foo)
self.assertEqual('xyz', command.inp)
self.assertEqual({'name': 'ViCommandDefBase', 'data': {'_inp': 'xyz'}}, command.serialize())
def test__str__(self):
self.assertEqual('<ViCommandDefBase>', str(self.command))
self.command.command = 'fizz'
self.assertEqual('<ViCommandDefBase>', str(self.command))
class ViCommandDefBaseImplementation(ViCommandDefBase):
pass
class TestViCommandDefBaseTestImplementation(unittest.TestCase):
def setUp(self):
super().setUp()
self.command = ViCommandDefBaseImplementation()
def test__str__(self):
self.assertEqual('<ViCommandDefBaseImplementation>', str(self.command))
self.command.command = 'buzz'
self.assertEqual('<ViCommandDefBaseImplementation>', str(self.command))
def test_serialize_input(self):
self.command.inp = 'ab'
self.assertEqual({'name': 'ViCommandDefBaseImplementation', 'data': {'_inp': 'ab'}}, self.command.serialize())
class TestCommandNotFound(unittest.TestCase):
def setUp(self):
super().setUp()
self.command = CommandNotFound()
def test_translate_raises_exception(self):
with self.assertRaisesRegex(TypeError, 'CommandNotFound should not be used as a runnable command'):
self.command.translate()
| null |
1,323 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkvcs.endpoint import endpoint_data
class UpdateMonitorRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Vcs', '2020-05-15', 'UpdateMonitor','vcs')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def METHOD_NAME(self):
return self.get_body_params().get('CorpId')
def set_CorpId(self,CorpId):
self.add_body_params('CorpId', CorpId)
def get_Description(self):
return self.get_body_params().get('Description')
def set_Description(self,Description):
self.add_body_params('Description', Description)
def get_RuleName(self):
return self.get_body_params().get('RuleName')
def set_RuleName(self,RuleName):
self.add_body_params('RuleName', RuleName)
def get_PicOperateType(self):
return self.get_body_params().get('PicOperateType')
def set_PicOperateType(self,PicOperateType):
self.add_body_params('PicOperateType', PicOperateType)
def get_AttributeName(self):
return self.get_body_params().get('AttributeName')
def set_AttributeName(self,AttributeName):
self.add_body_params('AttributeName', AttributeName)
def get_AttributeOperateType(self):
return self.get_body_params().get('AttributeOperateType')
def set_AttributeOperateType(self,AttributeOperateType):
self.add_body_params('AttributeOperateType', AttributeOperateType)
def get_RuleExpression(self):
return self.get_body_params().get('RuleExpression')
def set_RuleExpression(self,RuleExpression):
self.add_body_params('RuleExpression', RuleExpression)
def get_NotifierTimeOut(self):
return self.get_body_params().get('NotifierTimeOut')
def set_NotifierTimeOut(self,NotifierTimeOut):
self.add_body_params('NotifierTimeOut', NotifierTimeOut)
def get_TaskId(self):
return self.get_body_params().get('TaskId')
def set_TaskId(self,TaskId):
self.add_body_params('TaskId', TaskId)
def get_DeviceOperateType(self):
return self.get_body_params().get('DeviceOperateType')
def set_DeviceOperateType(self,DeviceOperateType):
self.add_body_params('DeviceOperateType', DeviceOperateType)
def get_PicList(self):
return self.get_body_params().get('PicList')
def set_PicList(self,PicList):
self.add_body_params('PicList', PicList)
def get_AttributeValueList(self):
return self.get_body_params().get('AttributeValueList')
def set_AttributeValueList(self,AttributeValueList):
self.add_body_params('AttributeValueList', AttributeValueList)
def get_NotifierAppSecret(self):
return self.get_body_params().get('NotifierAppSecret')
def set_NotifierAppSecret(self,NotifierAppSecret):
self.add_body_params('NotifierAppSecret', NotifierAppSecret)
def get_NotifierExtendValues(self):
return self.get_body_params().get('NotifierExtendValues')
def set_NotifierExtendValues(self,NotifierExtendValues):
self.add_body_params('NotifierExtendValues', NotifierExtendValues)
def get_DeviceList(self):
return self.get_body_params().get('DeviceList')
def set_DeviceList(self,DeviceList):
self.add_body_params('DeviceList', DeviceList)
def get_NotifierUrl(self):
return self.get_body_params().get('NotifierUrl')
def set_NotifierUrl(self,NotifierUrl):
self.add_body_params('NotifierUrl', NotifierUrl)
def get_NotifierType(self):
return self.get_body_params().get('NotifierType')
def set_NotifierType(self,NotifierType):
self.add_body_params('NotifierType', NotifierType)
def get_AlgorithmVendor(self):
return self.get_body_params().get('AlgorithmVendor')
def set_AlgorithmVendor(self,AlgorithmVendor):
        self.add_body_params('AlgorithmVendor', AlgorithmVendor)
| null |
1,324 |
import asyncio
import logging
import math
import sys
import time
import unittest
from typing import Dict, Optional, List
from hummingbot.connector.exchange.bitfinex.bitfinex_order_book_tracker import BitfinexOrderBookTracker
from hummingbot.core.data_type.common import TradeType
from hummingbot.core.data_type.order_book import OrderBook
from hummingbot.core.event.event_logger import EventLogger
from hummingbot.core.event.events import OrderBookEvent, OrderBookTradeEvent
from hummingbot.core.utils.async_utils import safe_ensure_future
class BitfinexOrderBookTrackerUnitTest(unittest.TestCase):
order_book_tracker: Optional[BitfinexOrderBookTracker] = None
events: List[OrderBookEvent] = [
OrderBookEvent.TradeEvent
]
trading_pairs: List[str] = [
"BTC-USD",
]
integrity_test_max_volume = 5 # Max volume in asks and bids for the book to be ready for tests
daily_volume = 2500 # Approximate total daily volume in BTC for this exchange for sanity test
book_enties = 5 # Number of asks and bids (each) for the book to be ready for tests
@classmethod
def setUpClass(cls):
cls.ev_loop: asyncio.BaseEventLoop = asyncio.get_event_loop()
cls.order_book_tracker: BitfinexOrderBookTracker = BitfinexOrderBookTracker(trading_pairs=cls.trading_pairs)
cls.order_book_tracker_task: asyncio.Task = safe_ensure_future(cls.order_book_tracker.start())
cls.ev_loop.run_until_complete(cls.wait_til_tracker_ready())
@classmethod
async def wait_til_tracker_ready(cls):
'''
Wait until the order book under test fills as needed
'''
print("Waiting for order book to fill...")
while True:
book_present = cls.trading_pairs[0] in cls.order_book_tracker.order_books
enough_asks = False
enough_bids = False
enough_ask_rows = False
enough_bid_rows = False
if book_present:
ask_volume = sum(i.amount for i in cls.order_book_tracker.order_books[cls.trading_pairs[0]].ask_entries())
ask_count = sum(1 for i in cls.order_book_tracker.order_books[cls.trading_pairs[0]].ask_entries())
bid_volume = sum(i.amount for i in cls.order_book_tracker.order_books[cls.trading_pairs[0]].bid_entries())
bid_count = sum(1 for i in cls.order_book_tracker.order_books[cls.trading_pairs[0]].bid_entries())
enough_asks = ask_volume >= cls.integrity_test_max_volume
enough_bids = bid_volume >= cls.integrity_test_max_volume
enough_ask_rows = ask_count >= cls.book_enties
enough_bid_rows = bid_count >= cls.book_enties
print("Bid volume in book: %f (in %d bids), ask volume in book: %f (in %d asks)" % (bid_volume, bid_count, ask_volume, ask_count))
if book_present and enough_asks and enough_bids and enough_ask_rows and enough_bid_rows:
print("Initialized real-time order books.")
return
await asyncio.sleep(1)
async def run_parallel_async(self, *tasks, timeout=None):
future: asyncio.Future = asyncio.ensure_future(asyncio.gather(*tasks))
timer = 0
while not future.done():
if timeout and timer > timeout:
raise Exception("Timeout running parallel async tasks in tests")
timer += 1
now = time.time()
_next_iteration = now // 1.0 + 1 # noqa: F841
await asyncio.sleep(1.0)
return future.result()
def run_parallel(self, *tasks):
return self.ev_loop.run_until_complete(self.run_parallel_async(*tasks))
def setUp(self):
self.event_logger = EventLogger()
for event_tag in self.events:
for trading_pair, order_book in self.order_book_tracker.order_books.items():
order_book.add_listener(event_tag, self.event_logger)
def METHOD_NAME(self):
"""2
Tests if the order book tracker is able to retrieve order book trade message from exchange
and emit order book trade events after correctly parsing the trade messages
"""
self.run_parallel(self.event_logger.wait_for(OrderBookTradeEvent))
for ob_trade_event in self.event_logger.event_log:
self.assertTrue(type(ob_trade_event) == OrderBookTradeEvent)
self.assertTrue(ob_trade_event.trading_pair in self.trading_pairs)
self.assertTrue(type(ob_trade_event.timestamp) in [float, int])
self.assertTrue(type(ob_trade_event.amount) == float)
self.assertTrue(type(ob_trade_event.price) == float)
self.assertTrue(type(ob_trade_event.type) == TradeType)
            # Timestamps are expected to be in epoch seconds (10 digits)
self.assertTrue(math.ceil(math.log10(ob_trade_event.timestamp)) == 10)
self.assertTrue(ob_trade_event.amount > 0)
self.assertTrue(ob_trade_event.price > 0)
def test_tracker_integrity(self):
order_books: Dict[str, OrderBook] = self.order_book_tracker.order_books
sut_book: OrderBook = order_books[self.trading_pairs[0]]
# # 1 - test that best bid is less than best ask
# self.assertGreater(sut_book.get_price(False), sut_book.get_price(True))
        # 2 - test that price to buy integrity_test_max_volume BTC is greater than or equal to best ask
self.assertGreaterEqual(sut_book.get_price_for_volume(True, self.integrity_test_max_volume).result_price,
sut_book.get_price(True))
        # 3 - test that price to sell integrity_test_max_volume BTC is less than or equal to best bid
self.assertLessEqual(sut_book.get_price_for_volume(False, self.integrity_test_max_volume).result_price,
sut_book.get_price(False))
# 4 - test that all bids in order book are sorted by price in descending order
previous_price = sys.float_info.max
for bid_row in sut_book.bid_entries():
self.assertTrue(previous_price >= bid_row.price)
previous_price = bid_row.price
# 5 - test that all asks in order book are sorted by price in ascending order
previous_price = 0
for ask_row in sut_book.ask_entries():
self.assertTrue(previous_price <= ask_row.price)
previous_price = ask_row.price
# 6 - test that total volume in first orders in book is less than 10 times
# daily traded volumes for this exchange
total_volume = 0
count = 0
for bid_row in sut_book.bid_entries():
total_volume += bid_row.amount
count += 1
if count > self.book_enties:
break
count = 0
for ask_row in sut_book.ask_entries():
total_volume += ask_row.amount
count += 1
if count > self.book_enties:
break
self.assertLessEqual(total_volume, 10 * self.daily_volume)
def main():
logging.basicConfig(level=logging.INFO)
unittest.main()
if __name__ == "__main__":
main()
| null |
1,325 |
import json
import re
from pathlib import Path
from typing import Any, Dict
import voluptuous.validators as validators
from click.testing import CliRunner, Result
from pytest_voluptuous import S
from ggshield.__main__ import cli
from tests.conftest import (
_IAC_MULTIPLE_VULNERABILITIES,
_IAC_NO_VULNERABILITIES,
_IAC_SINGLE_VULNERABILITY,
)
from tests.unit.conftest import my_vcr
INCIDENT_SCHEMA = validators.Schema(
{
"policy": str,
"policy_id": validators.Match(r"^GG_IAC_\d{4}$"),
"line_end": int,
"line_start": int,
"description": str,
"documentation_url": validators.All(str, validators.Match(r"^https://")),
"component": str,
"severity": validators.Any("LOW", "MEDIUM", "HIGH", "CRITICAL"),
}
)
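# Illustrative incident (values are made up, not taken from a real scan) that would
# satisfy INCIDENT_SCHEMA above:
#     {
#         "policy": "Unrestricted ingress traffic",
#         "policy_id": "GG_IAC_0001",
#         "line_start": 1,
#         "line_end": 4,
#         "description": "Security group allows ingress from 0.0.0.0/0",
#         "documentation_url": "https://docs.example.com/iac/GG_IAC_0001",
#         "component": "aws_security_group.example",
#         "severity": "HIGH",
#     }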
@my_vcr.use_cassette("test_iac_scan_single_vulnerability")
def test_display_single_vulnerabilities(cli_fs_runner: CliRunner):
Path("tmp/").mkdir(exist_ok=True)
Path("tmp/iac_file_single_vulnerability.tf").write_text(_IAC_SINGLE_VULNERABILITY)
result = cli_fs_runner.invoke(
cli,
[
"iac",
"scan",
"all",
"--json",
"tmp",
],
)
json_result = load_json(result)
assert_iac_version_displayed(json_result, 1)
assert_file_single_vulnerability_displayed(json_result)
@my_vcr.use_cassette("test_iac_scan_multiple_vulnerabilities")
def test_display_multiple_vulnerabilities(cli_fs_runner: CliRunner):
Path("tmp/").mkdir(exist_ok=True)
Path("tmp/iac_file_multiple_vulnerabilities.tf").write_text(
_IAC_MULTIPLE_VULNERABILITIES
)
result = cli_fs_runner.invoke(
cli,
[
"iac",
"scan",
"all",
"--json",
"tmp",
],
)
json_result = load_json(result)
assert_iac_version_displayed(json_result, 2)
assert_file_multiple_vulnerabilities_displayed(json_result)
@my_vcr.use_cassette("test_iac_scan_no_vulnerabilities")
def METHOD_NAME(cli_fs_runner: CliRunner):
Path("tmp/").mkdir(exist_ok=True)
Path("tmp/iac_file_no_vulnerabilities.tf").write_text(_IAC_NO_VULNERABILITIES)
result = cli_fs_runner.invoke(
cli,
[
"iac",
"scan",
"all",
"--json",
"tmp",
],
)
json_result = load_json(result)
assert_iac_version_displayed(json_result, 0)
assert len(json_result["entities_with_incidents"]) == 0
@my_vcr.use_cassette("test_iac_scan_multiple_files")
def test_display_multiple_files(cli_fs_runner: CliRunner):
Path("tmp/").mkdir(exist_ok=True)
Path("tmp/iac_file_single_vulnerability.tf").write_text(_IAC_SINGLE_VULNERABILITY)
Path("tmp/iac_file_multiple_vulnerabilities.tf").write_text(
_IAC_MULTIPLE_VULNERABILITIES
)
Path("tmp/iac_file_no_vulnerabilities.tf").write_text(_IAC_NO_VULNERABILITIES)
result = cli_fs_runner.invoke(
cli,
[
"iac",
"scan",
"all",
"--json",
"tmp",
],
)
json_result = load_json(result)
assert_iac_version_displayed(json_result, 3)
assert_file_single_vulnerability_displayed(json_result)
assert_file_multiple_vulnerabilities_displayed(json_result)
def load_json(result: Result) -> Dict[str, Any]:
return json.loads(result.stdout)
def assert_iac_version_displayed(json_result: Dict[str, Any], total_incidents: int):
assert re.match(r"\d\.\d{1,3}\.\d", json_result["iac_engine_version"])
assert json_result["type"] == "path_scan"
assert json_result["total_incidents"] == total_incidents
def assert_file_single_vulnerability_displayed(json_result: Dict[str, Any]):
file_result = [
file_result
for file_result in json_result["entities_with_incidents"]
if file_result["filename"] == "iac_file_single_vulnerability.tf"
]
assert len(file_result) == 1
assert (
S(
{
"filename": str,
"incidents": validators.All(
[INCIDENT_SCHEMA], validators.Length(min=1, max=1)
),
"total_incidents": 1,
}
)
== file_result[0]
)
assert file_result[0]["incidents"][0]["policy_id"] == "GG_IAC_0001"
def assert_file_multiple_vulnerabilities_displayed(json_result: Dict[str, Any]):
file_result = [
file_result
for file_result in json_result["entities_with_incidents"]
if file_result["filename"] == "iac_file_multiple_vulnerabilities.tf"
]
assert len(file_result) == 1
assert (
S(
{
"filename": str,
"incidents": validators.All(
[INCIDENT_SCHEMA], validators.Length(min=2, max=2)
),
"total_incidents": 2,
}
)
== file_result[0]
)
assert {incident["policy_id"] for incident in file_result[0]["incidents"]} == {
"GG_IAC_0002",
"GG_IAC_0003",
}
| null |
1,326 |
'''
Copyright (C) 2019 Bryant Moscon - [email protected]
Please see the LICENSE file for the terms and conditions
associated with this software.
'''
from cryptofeed.symbols import Symbol
import logging
from decimal import Decimal
from typing import Dict, Tuple
from yapic import json
from cryptofeed.connection import AsyncConnection, RestEndpoint, Routes, WebsocketEndpoint
from cryptofeed.defines import BID, ASK, BUY
from cryptofeed.defines import EXX as EXX_id
from cryptofeed.defines import L2_BOOK, SELL, TRADES
from cryptofeed.feed import Feed
from cryptofeed.types import OrderBook, Trade
LOG = logging.getLogger('feedhandler')
class EXX(Feed):
id = EXX_id
websocket_endpoints = [WebsocketEndpoint('wss://ws.exx.com/websocket')]
rest_endpoints = [RestEndpoint('https://api.exx.com', routes=Routes('/data/v1/tickers'))]
websocket_channels = {
L2_BOOK: 'ENTRUST_ADD',
TRADES: 'TRADE',
}
@classmethod
def _parse_symbol_data(cls, data: dict) -> Tuple[Dict, Dict]:
ret = {}
info = {'instrument_type': {}}
exchange = [key.upper() for key in data.keys()]
for sym in exchange:
b, q = sym.split("_")
s = Symbol(b, q)
ret[s.normalized] = sym
info['instrument_type'][s.normalized] = s.type
return ret, info
def __reset(self):
self._l2_book = {}
async def _book_update(self, msg: dict, timestamp: float):
"""
Snapshot:
[
[
'AE',
'1',
'BTC_USDT',
'1547941504',
{
'asks':[
[
'25000.00000000',
'0.02000000'
],
[
'19745.83000000',
'0.00200000'
],
[
'19698.96000000',
'0.00100000'
],
...
]
},
{
'bids':[
[
'3662.83040000',
'0.00100000'
],
[
'3662.77540000',
'0.01000000'
],
[
'3662.59900000',
'0.10300000'
],
...
]
}
]
]
Update:
['E', '1', '1547942636', 'BTC_USDT', 'ASK', '3674.91740000', '0.02600000']
"""
delta = {BID: [], ASK: []}
if msg[0] == 'AE':
# snapshot
delta = None
pair = self.exchange_symbol_to_std_symbol(msg[2])
ts = msg[3]
asks = msg[4]['asks'] if 'asks' in msg[4] else msg[5]['asks']
bids = msg[5]['bids'] if 'bids' in msg[5] else msg[4]['bids']
self._l2_book[pair] = OrderBook(self.id, pair, max_depth=self.max_depth)
self._l2_book[pair].book.bids = {Decimal(price): Decimal(amount) for price, amount in bids}
self._l2_book[pair].book.asks = {Decimal(price): Decimal(amount) for price, amount in asks}
else:
# Update
ts = msg[2]
pair = self.exchange_symbol_to_std_symbol(msg[3])
side = ASK if msg[4] == 'ASK' else BID
price = Decimal(msg[5])
amount = Decimal(msg[6])
if amount == 0:
if price in self._l2_book[pair].book[side]:
                    del self._l2_book[pair].book[side][price]
delta[side].append((price, 0))
else:
self._l2_book[pair].book[side][price] = amount
delta[side].append((price, amount))
await self.book_callback(L2_BOOK, self._l2_book[pair], timestamp, timestamp=ts, raw=msg, delta=delta)
async def METHOD_NAME(self, msg: dict, timestamp: float):
"""
Trade message
['T', '1', '1547947390', 'BTC_USDT', 'bid', '3683.74440000', '0.082', '33732290']
"""
pair = self.exchange_symbol_to_std_symbol(msg[3])
t = Trade(
self.id,
pair,
BUY if msg[4] == 'bid' else SELL,
Decimal(msg[6]),
Decimal(msg[5]),
float(msg[2]),
id=msg[7],
raw=msg
)
await self.callback(TRADES, t, timestamp)
async def message_handler(self, msg: str, conn, timestamp: float):
msg = json.loads(msg, parse_float=Decimal)
if isinstance(msg[0], list):
msg = msg[0]
if msg[0] == 'E' or msg[0] == 'AE':
await self._book_update(msg, timestamp)
elif msg[0] == 'T':
await self.METHOD_NAME(msg, timestamp)
else:
LOG.warning("%s: Invalid message type %s", self.id, msg)
async def subscribe(self, conn: AsyncConnection):
self.__reset()
for chan in self.subscription:
for pair in self.subscription[chan]:
await conn.write(json.dumps({"dataType": f"1_{chan}_{pair}",
"dataSize": 50,
"action": "ADD"
}))
| null |
1,327 |
"""Utility functions for use with File and Block commands."""
# :license: MIT, see LICENSE for more details.
from SoftLayer.CLI import columns as column_helper
from SoftLayer.CLI import formatting
DEFAULT_NOTES_SIZE = 20
def METHOD_NAME(volumes, env):
"""Reduces all long notes found in the volumes list just if the format output is different from a JSON format.
:param list volumes: An list of storage volumes
:param env :A environment console.
"""
if env.format_output_is_json():
return
for volume in volumes:
if len(volume.get('notes', '')) > DEFAULT_NOTES_SIZE:
shortened_notes = volume['notes'][:DEFAULT_NOTES_SIZE]
volume['notes'] = shortened_notes
def build_output_table(env, volumes, columns, sortby):
"""Builds a formatting table for a list of volumes.
:param env :A Environment console.
:param list volumes: An list of storage volumes
:param columns :A ColumnFormatter for column names
:param str sortby :A string to sort by.
"""
table = formatting.Table(columns.columns)
if sortby in table.columns:
table.sortby = sortby
METHOD_NAME(volumes, env)
for volume in volumes:
table.add_row([value or formatting.blank()
for value in columns.row(volume)])
return table
def _format_name(obj):
if obj['type'] == 'VIRTUAL':
return f"{obj['hostname']}.{obj['domain']}"
elif obj['type'] == 'HARDWARE':
return f"{obj['hostname']}.{obj['domain']}"
elif obj['type'] == 'SUBNET':
name = f"{obj['networkIdentifier']}/{obj['cidr']}"
if 'note' in obj.keys():
name = f"{name} ({obj['note']})"
return name
elif obj['type'] == 'IP':
name = obj['ipAddress']
if 'note' in obj.keys():
name = f"{name} ({obj['note']})"
return name
else:
raise ValueError('Unknown type %s' % obj['type'])
COLUMNS = [
column_helper.Column('id', ('id',)),
column_helper.Column('name', _format_name, """
allowedVirtualGuests[hostname,domain],
allowedHardware[hostname,domain],
allowedSubnets[networkIdentifier,cidr,note],
allowedIpAddresses[ipAddress,note],
"""),
column_helper.Column('type', ('type',)),
column_helper.Column(
'private_ip_address',
('primaryBackendIpAddress',),
"""
allowedVirtualGuests.primaryBackendIpAddress
allowedHardware.primaryBackendIpAddress
allowedSubnets.primaryBackendIpAddress
allowedIpAddresses.primaryBackendIpAddress
"""),
column_helper.Column(
'source_subnet',
('allowedHost', 'sourceSubnet',),
"""
allowedVirtualGuests.allowedHost.sourceSubnet
allowedHardware.allowedHost.sourceSubnet
allowedSubnets.allowedHost.sourceSubnet
allowedIpAddresses.allowedHost.sourceSubnet
"""),
column_helper.Column(
'host_iqn',
('allowedHost', 'name',),
"""
allowedVirtualGuests.allowedHost.name
allowedHardware.allowedHost.name
allowedSubnets.allowedHost.name
allowedIpAddresses.allowedHost.name
"""),
column_helper.Column(
'username',
('allowedHost', 'credential', 'username',),
"""
allowedVirtualGuests.allowedHost.credential.username
allowedHardware.allowedHost.credential.username
allowedSubnets.allowedHost.credential.username
allowedIpAddresses.allowedHost.credential.username
"""),
column_helper.Column(
'password',
('allowedHost', 'credential', 'password',),
"""
allowedVirtualGuests.allowedHost.credential.password
allowedHardware.allowedHost.credential.password
allowedSubnets.allowedHost.credential.password
allowedIpAddresses.allowedHost.credential.password
"""),
column_helper.Column(
'allowed_host_id',
('allowedHost', 'id',),
"""
allowedVirtualGuests.allowedHost.id
allowedHardware.allowedHost.id
allowedSubnets.allowedHost.id
allowedIpAddresses.allowedHost.id
"""),
]
DEFAULT_COLUMNS = [
'id',
'name',
'type',
'private_ip_address',
'source_subnet',
'host_iqn',
'username',
'password',
'allowed_host_id',
]
REPLICATION_PARTNER_COLUMNS = [
column_helper.Column('ID', ('id',)),
column_helper.Column('Username', ('username',), mask="username"),
column_helper.Column('Account ID', ('accountId',), mask="accountId"),
column_helper.Column('Capacity (GB)', ('capacityGb',), mask="capacityGb"),
column_helper.Column('Hardware ID', ('hardwareId',), mask="hardwareId"),
column_helper.Column('Guest ID', ('guestId',), mask="guestId"),
column_helper.Column('Host ID', ('hostId',), mask="hostId"),
]
REPLICATION_PARTNER_DEFAULT = [
'ID',
'Username',
'Account ID',
'Capacity (GB)',
'Hardware ID',
'Guest ID',
'Host ID'
]
| null |
1,328 |
#!/usr/bin/env python3
import argparse
from collections import OrderedDict
import json
import matplotlib
import os
# To prevent _tkinter.TclError: https://stackoverflow.com/a/37605654
matplotlib.use("Agg")
import matplotlib.pyplot as plt # noqa: E402
# This script compares different multi-threaded benchmark results.
# It takes the paths of multiple result folders (as created by benchmark_multithreaded.py),
# and creates a performance and scale-up comparison graph, per default as pdf,
# optionally as png via --format 'png'.
#
# Example usage:
# python3 ./scripts/compare_benchmarks_multithreaded.py path/to/result_folder_1 path/to/result_folder_2
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"results", nargs="+", help="Paths to individual result folders created by benchmark_multithreaded.py"
)
parser.add_argument("-f", "--format", choices=["pdf", "png"], type=str.lower, default="pdf")
return parser
def is_square(n):
return n**0.5 == int(n**0.5)
def get_subplot_row_and_column_count(num_plots):
while not is_square(num_plots):
num_plots += 1
return num_plots**0.5
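# Worked example (illustrative): with 7 subplots the count is padded to the next
# perfect square, 9, and the function returns 3.0 rows/columns for the grid.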
def plot_performance(flipped_results, n_rows_cols, numa_borders, max_cores):
fig = plt.figure(figsize=(n_rows_cols * 3, n_rows_cols * 2))
plot_pos = 0
legend = OrderedDict()
for tpch, multiple_results in flipped_results.items():
plot_pos += 1
ax = fig.add_subplot(n_rows_cols, n_rows_cols, plot_pos)
ax.set_title(tpch)
for numa_border in numa_borders:
ax.axvline(numa_border, color="gray", linestyle="dashed", linewidth=1.0)
for label, one_result in multiple_results.items():
multithreaded_plot = ax.METHOD_NAME(one_result["cores"], one_result["items_per_second"], label=label, marker=".")
if label not in legend:
legend[label] = multithreaded_plot[0]
if "singlethreaded" in one_result:
label_single = label + " single-threaded"
singlethreaded_plot = ax.axhline(
one_result["singlethreaded"],
color=multithreaded_plot[0].get_color(),
linestyle="dashed",
linewidth=1.0,
label=label_single,
)
if label_single not in legend:
legend[label_single] = singlethreaded_plot
        # The throughput can be from 0 queries/s to an arbitrarily large number, so no upper limit for the y-axis
ax.set_ylim(ymin=0)
ax.set_xlim(xmin=0, xmax=max_cores)
# Create one legend for the whole plot
plt.figlegend((line_type for line_type in legend.values()), (label for label in legend.keys()), "lower right")
    # This should prevent the axes of different plots from overlapping etc
plt.tight_layout()
# Add big axis descriptions for all plots
axis_description = fig.add_subplot(111, frameon=False)
plt.tick_params(labelcolor="none", top=False, bottom=False, left=False, right=False)
axis_description.set_xlabel("Utilized cores", labelpad=10)
axis_description.set_ylabel("Throughput (queries / s)", labelpad=20)
result_plot_file = os.path.join(os.getcwd(), "benchmark_comparison_performance." + args.format)
plt.savefig(result_plot_file, bbox_inches="tight")
print("Plot saved as: " + result_plot_file)
def plot_scaleup(flipped_results, n_rows_cols, numa_borders, max_cores):
fig = plt.figure(figsize=(n_rows_cols * 3, n_rows_cols * 2))
plot_pos = 0
legend = OrderedDict()
for tpch, multiple_results in flipped_results.items():
plot_pos += 1
ax = fig.add_subplot(n_rows_cols, n_rows_cols, plot_pos)
ax.set_title(tpch)
for numa_border in numa_borders:
ax.axvline(numa_border, color="gray", linestyle="dashed", linewidth=1.0)
for label, one_result in multiple_results.items():
ips, cores = one_result["items_per_second"], one_result["cores"]
# Throughput per number of cores, relative to single-threaded performance
scaleup = [y / (x * one_result["singlethreaded"]) for y, x in zip(ips, cores)]
multithreaded_plot = ax.METHOD_NAME(cores, scaleup, label=label, marker=".")
if label not in legend:
legend[label] = multithreaded_plot[0]
ax.set_ylim(ymin=0.0, ymax=1.0)
ax.set_xlim(xmin=0, xmax=max_cores)
# Create one legend for the whole plot
plt.figlegend((line_type for line_type in legend.values()), (label for label in legend.keys()), "lower right")
    # This should prevent the axes of different plots from overlapping etc
plt.tight_layout()
# Add big axis descriptions for all plots
axis_description = fig.add_subplot(111, frameon=False)
plt.tick_params(labelcolor="none", top=False, bottom=False, left=False, right=False)
axis_description.set_xlabel("Utilized cores", labelpad=10)
axis_description.set_ylabel(
"Throughput (queries / s) per core\n(relative to single-threaded without scheduler)", labelpad=20
)
result_plot_file = os.path.join(os.getcwd(), "benchmark_comparison_scaleup." + args.format)
plt.savefig(result_plot_file, bbox_inches="tight")
print("Plot saved as: " + result_plot_file)
def METHOD_NAME(args):
max_cores = 0
results = {}
for result_dir in args.results:
# The label that will appear in the plot. It consists of the --result-name
# that was passed to benchmark_multithreaded.py
label = result_dir.rstrip("/").split("/")[-1]
results[label] = {}
one_result = results[label]
for _, _, files in os.walk(result_dir):
json_files = [f for f in files if f.split(".")[-1] == "json"]
# Add the results in sorted order (low core count -> high core count) for plotting later
# The lambda extracts the number of cores from the filename
for file in sorted(json_files, key=lambda filename: int(filename.split("-")[0])):
with open(os.path.join(result_dir, file), "r") as json_file:
json_data = json.load(json_file)
cores = json_data["context"]["cores"]
if cores > max_cores:
max_cores = cores
utilized_cores_per_numa_node = json_data["context"]["utilized_cores_per_numa_node"]
for benchmark in json_data["benchmarks"]:
name = benchmark["name"]
items_per_second = benchmark["items_per_second"]
if name not in one_result:
one_result[name] = {"cores": [], "items_per_second": []}
if cores == 0:
one_result[name]["singlethreaded"] = items_per_second
else:
one_result[name]["cores"].append(cores)
one_result[name]["items_per_second"].append(items_per_second)
numa_borders = [sum(utilized_cores_per_numa_node[:x]) for x in range(1, len(utilized_cores_per_numa_node))]
# Transform the results for plotting, since we create one subplot per TPC-H query
flipped_results = {}
for label, one_result in results.items():
for tpch, data in one_result.items():
if tpch not in flipped_results:
flipped_results[tpch] = {}
flipped_results[tpch][label] = data
n_rows_cols = get_subplot_row_and_column_count(len(flipped_results))
plot_performance(flipped_results, n_rows_cols, numa_borders, max_cores)
plot_scaleup(flipped_results, n_rows_cols, numa_borders, max_cores)
if __name__ == "__main__":
parser = get_parser()
args = parser.parse_args()
METHOD_NAME(args)
| null |
1,329 |
#!/opt/local/bin/python3.2
#!/usr/bin/python
"""
Will find a svn revision that is the last to give a specified output
Command line:
[-f first_revision] [-l last_revision] [-d regex] executable arg1 ...
default first_revision is 1500
default last_revision is the current one
default regex is "Refutation found."
Looks for the latest revision for which the output of
executable arg1 ...
contains line that matches regex.
"""
import sys
import platform
import subprocess
import re
import time
import tempfile
import os
import math
DEVNULL = open('/dev/null', 'w')
revisionRE = re.compile("^Revision: ([0-9]+)$")
def getCmdResult(cmdLine):
"""return output of a command in a string"""
res = None
try:
resBytes = subprocess.check_output(cmdLine, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
resBytes = e.output
return str(resBytes,encoding="ascii")
def execCmd(cmdLine, report):
if report:
cmdStr = " ".join(cmdLine)
print(cmdStr, end="... ")
sys.stdout.flush()
res = subprocess.call(cmdLine, stderr=DEVNULL, stdout=DEVNULL)
#res = subprocess.call(cmdLine)
if report:
print("done")
return res==0
def METHOD_NAME(linesStr, regex, fullMatch):
lines = linesStr.split("\n")
for line in lines:
mo = None
if fullMatch:
mo = regex.match(line)
else:
mo = regex.search(line)
if mo:
return mo
return None
class Failure(Exception):
def __init__(self, msg):
self.msg = msg
def getCurrentRevision():
infoOut = getCmdResult(["svn", "info"])
revMO = METHOD_NAME(infoOut, revisionRE, True)
if revMO==None:
raise Failure("SVN repository not found")
revStr = revMO.group(1)
return int(revStr)
vampCmdLine = None
buildTgt = "vampire"
desiredRE = re.compile("Refutation found.")
#firstRevision = 1400
firstRevision = 1500
lastRevision = None
def readVampCmdLine(args):
global vampCmdLine
global buildTgt
vampCmdLine = args
execFile = vampCmdLine[0]
absExec = os.path.abspath(execFile)
repositoryPath,fname = os.path.split(absExec)
buildTgt = fname
relExec = "./"+fname
vampCmdLine[0] = relExec
print("repository path: ", repositoryPath)
os.chdir(repositoryPath)
def readArgs(args):
global vampCmdLine
global desiredRE
global firstRevision
global lastRevision
while True:
if args[0]=="-f":
firstRevision = int(args[1])
args = args[2:]
elif args[0]=="-l":
lastRevision = int(args[1])
args = args[2:]
elif args[0]=="-d":
desiredRE = re.compile(args[1])
args = args[2:]
else:
break
readVampCmdLine(args)
if lastRevision==None:
lastRevision = getCurrentRevision()
def switchToRevision(revNum):
global buildTgt
if not execCmd(["svn","update","-r",str(revNum)], True):
raise Failure("failed: svn update")
if not execCmd(["make","depend"], True):
raise Failure("failed: svn update")
if not execCmd(["make","-j","2",buildTgt], True):
raise Failure("failed: make %s" % buildTgt)
def checkSuccess():
global vampCmdLine
global desiredRE
vampOut = getCmdResult(vampCmdLine)
print(vampOut)
mo = METHOD_NAME(vampOut, desiredRE, False)
return mo!=None
readArgs(sys.argv[1:])
print('Looking for regex "%s" in outputs of %s between revisions %d and %d' %
(desiredRE.pattern, buildTgt, firstRevision, lastRevision))
switchToRevision(lastRevision)
if checkSuccess():
print ("The final revision %s succeeded" % lastRevision)
sys.exit(0)
switchToRevision(firstRevision)
if not checkSuccess():
print ("The fist revision %s did not succeed" % firstRevision)
sys.exit(1)
minRev = firstRevision
maxRev = lastRevision-1
while minRev!=maxRev:
assert minRev<maxRev
mid = (minRev+maxRev+1)//2
assert mid<=maxRev
assert mid>minRev
switchToRevision(mid)
if checkSuccess():
minRev = mid
else:
maxRev = mid-1
assert minRev==maxRev
resultRev = minRev
if getCurrentRevision()!=resultRev:
switchToRevision(resultRev)
print('The last revision where regex "%s" is in outputs of %s is %d' %
(desiredRE.pattern, buildTgt, resultRev))
| null |
1,330 |
# coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://github.com/openapi-json-schema-tools/openapi-json-schema-generator
"""
from __future__ import annotations
from petstore_api.shared_imports.schema_imports import * # pyright: ignore [reportWildcardImportFromLibrary]
Items: typing_extensions.TypeAlias = schemas.BinarySchema
class FilesTuple(
typing.Tuple[
typing.Union[bytes, schemas.FileIO],
...
]
):
def __new__(cls, arg: typing.Union[FilesTupleInput, FilesTuple], configuration: typing.Optional[schema_configuration.SchemaConfiguration] = None):
return Files.validate(arg, configuration=configuration)
FilesTupleInput = typing.Union[
typing.List[
typing.Union[
bytes,
io.FileIO,
io.BufferedReader,
schemas.FileIO
],
],
typing.Tuple[
typing.Union[
bytes,
io.FileIO,
io.BufferedReader,
schemas.FileIO
],
...
]
]
@dataclasses.dataclass(frozen=True)
class Files(
schemas.Schema[schemas.immutabledict, FilesTuple]
):
types: typing.FrozenSet[typing.Type] = frozenset({tuple})
items: typing.Type[Items] = dataclasses.field(default_factory=lambda: Items) # type: ignore
type_to_output_cls: typing.Mapping[
typing.Type,
typing.Type
] = dataclasses.field(
default_factory=lambda: {
tuple: FilesTuple
}
)
@classmethod
def validate(
cls,
arg: typing.Union[
FilesTupleInput,
FilesTuple,
],
configuration: typing.Optional[schema_configuration.SchemaConfiguration] = None
) -> FilesTuple:
return super().validate_base(
arg,
configuration=configuration,
)
Properties = typing.TypedDict(
'Properties',
{
"files": typing.Type[Files],
}
)
class SchemaDict(schemas.immutabledict[str, typing.Tuple[schemas.OUTPUT_BASE_TYPES]]):
__required_keys__: typing.FrozenSet[str] = frozenset({
})
__optional_keys__: typing.FrozenSet[str] = frozenset({
"files",
})
def __new__(
cls,
*,
files: typing.Union[
FilesTupleInput,
FilesTuple,
schemas.Unset
] = schemas.unset,
configuration_: typing.Optional[schema_configuration.SchemaConfiguration] = None,
**kwargs: schemas.INPUT_TYPES_ALL,
):
arg_: typing.Dict[str, typing.Any] = {}
for key, val in (
("files", files),
):
if isinstance(val, schemas.Unset):
continue
arg_[key] = val
arg_.update(kwargs)
used_arg_ = typing.cast(SchemaDictInput, arg_)
return Schema.validate(used_arg_, configuration=configuration_)
@staticmethod
def from_dict_(
arg: typing.Union[
SchemaDictInput,
SchemaDict
],
configuration: typing.Optional[schema_configuration.SchemaConfiguration] = None
) -> SchemaDict:
return Schema.validate(arg, configuration=configuration)
@property
def files(self) -> typing.Union[FilesTuple, schemas.Unset]:
val = self.get("files", schemas.unset)
if isinstance(val, schemas.Unset):
return val
return typing.cast(
FilesTuple,
val
)
def METHOD_NAME(self, name: str) -> typing.Union[schemas.OUTPUT_BASE_TYPES, schemas.Unset]:
schemas.raise_if_key_known(name, self.__required_keys__, self.__optional_keys__)
return self.get(name, schemas.unset)
SchemaDictInput = typing.Mapping[str, schemas.INPUT_TYPES_ALL]
@dataclasses.dataclass(frozen=True)
class Schema(
schemas.Schema[SchemaDict, tuple]
):
types: typing.FrozenSet[typing.Type] = frozenset({schemas.immutabledict})
properties: Properties = dataclasses.field(default_factory=lambda: schemas.typed_dict_to_instance(Properties)) # type: ignore
type_to_output_cls: typing.Mapping[
typing.Type,
typing.Type
] = dataclasses.field(
default_factory=lambda: {
schemas.immutabledict: SchemaDict
}
)
@classmethod
def validate(
cls,
arg: typing.Union[
SchemaDictInput,
SchemaDict,
],
configuration: typing.Optional[schema_configuration.SchemaConfiguration] = None
) -> SchemaDict:
return super().validate_base(
arg,
configuration=configuration,
)
| null |
1,331 |
# Copyright (C) 2018-2023 The NeoVintageous Team (NeoVintageous).
#
# This file is part of NeoVintageous.
#
# NeoVintageous is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# NeoVintageous is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NeoVintageous. If not, see <https://www.gnu.org/licenses/>.
from sublime import Region
from NeoVintageous.nv.options import get_option
# DEPRECATED Use view_find_in_range()
def find_in_range(view, term: str, start: int, end: int, flags: int = 0):
found = view.find(term, start, flags)
if found and found.b <= end:
return found
# DEPRECATED Use view_find_all()
def find_all_in_range(view, term: str, start: int, end: int, flags: int = 0) -> list:
matches = [] # type: list
while True:
m = find_in_range(view, term, start, end, flags)
if m == Region(-1, -1):
return matches
if not m:
return matches
if m.end() > end or m.begin() < start:
return matches
matches.append(m)
start = m.end()
def find_wrapping(view, term: str, start: int, end: int, flags: int = 0, times: int = 1):
try:
current_sel = view.sel()[0]
except IndexError:
return
for x in range(times):
match = find_in_range(view, term, start, end, flags)
# make sure we wrap around the end of the buffer
if not match:
if not get_option(view, 'wrapscan'):
return
start = 0
# Extend the end of search to the end of current word, because
# otherwise the current word would be excluded and not found.
# See https://github.com/NeoVintageous/NeoVintageous/issues/223.
end = view.word(current_sel.a).b
match = find_in_range(view, term, start, end, flags)
if not match:
return
start = match.b
return match
def reverse_find_wrapping(view, term: str, start: int, end: int, flags: int = 0, times: int = 1):
try:
current_sel = view.sel()[0]
except IndexError:
return
# Search wrapping around the end of the buffer.
for x in range(times):
match = reverse_search(view, term, start, end, flags)
# Start searching in the lower half of the buffer if we aren't doing it yet.
if not match and start <= current_sel.b:
if not get_option(view, 'wrapscan'):
return
# Extend the start of search to start of current word, because
# otherwise the current word would be excluded and not found.
# See https://github.com/NeoVintageous/NeoVintageous/issues/223.
start = view.word(current_sel.b).a
end = view.size()
match = reverse_search(view, term, start, end, flags)
if not match:
return
# No luck in the whole buffer.
elif not match:
return
end = match.a
return match
def METHOD_NAME(view, term: str, start: int, end: int, flags: int = 0):
found = find_in_range(view, term, start, end, flags)
last_found = found
while found:
found = find_in_range(view, term, found.b, end, flags)
if not found or found.b > end:
break
last_found = found if found else last_found
return last_found
# The @start position is linewise.
#
# The @end position is NOT linewise.
#
# For a characterwise reverse search use reverse_search_by_pt().
#
# TODO REVIEW The current implementation of the @end position is technically
# not linewise. The start position *is* linewise. I don't know if this is
# causing bugs or if internals depend on this functionality, so "fixing it" and
# making it a true linewise search may break things in unexpected ways. It needs
# reviewing.
#
# The @start position is where the search ends.
#
# The @end position is where the search starts.
#
# TODO REVIEW The @end and @start position seem to be inverted i.e. the @start
# position should be the point where the search starts and the @end position
# should be where it ends oppose to the current behaviour.
#
# TODO should work the same as view.find() and return Region(-1, -1), rather than None, when not found
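# Illustrative call (hypothetical values): reverse_search(view, 'foo', 0, pt) scans
# the range [0, pt] and returns the match closest to @end (pt), i.e. the search
# effectively runs backwards from @end towards @start.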
def reverse_search(view, term: str, start: int, end: int, flags: int = 0):
if start < 0 or end > view.size():
return None
lo_line = view.full_line(start)
hi_line = view.full_line(end)
while True:
low_row, hi_row = view.rowcol(lo_line.a)[0], view.rowcol(hi_line.a)[0]
middle_row = (low_row + hi_row) // 2
middle_line = view.full_line(view.text_point(middle_row, 0))
lo_region = Region(lo_line.a, middle_line.b)
hi_region = Region(middle_line.b, min(hi_line.b, end))
if find_in_range(view, term, hi_region.a, hi_region.b, flags):
lo_line = view.full_line(middle_line.b)
elif find_in_range(view, term, lo_region.a, lo_region.b, flags):
hi_line = view.full_line(middle_line.a)
else:
return None
if lo_line == hi_line:
# we found the line we were looking for, now extract the match.
return METHOD_NAME(view, term, hi_line.a, min(hi_line.b, end), flags)
def reverse_search_by_pt(view, term: str, start: int, end: int, flags: int = 0):
if start < 0 or end > view.size():
return None
lo_line = view.full_line(start)
hi_line = view.full_line(end)
while True:
low_row, hi_row = view.rowcol(lo_line.a)[0], view.rowcol(hi_line.a)[0]
middle_row = (low_row + hi_row) // 2
middle_line = view.full_line(view.text_point(middle_row, 0))
lo_region = Region(lo_line.a, middle_line.b)
hi_region = Region(middle_line.b, min(hi_line.b, end))
if find_in_range(view, term, hi_region.a, hi_region.b, flags):
lo_line = view.full_line(middle_line.b)
elif find_in_range(view, term, lo_region.a, lo_region.b, flags):
hi_line = view.full_line(middle_line.a)
else:
return None
if lo_line == hi_line:
# we found the line we were looking for, now extract the match.
return METHOD_NAME(view, term, max(hi_line.a, start), min(hi_line.b, end), flags)
| null |
1,332 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecs.endpoint import endpoint_data
class CreateImageRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'CreateImage','ecs')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_DiskDeviceMappings(self): # RepeatList
return self.get_query_params().get('DiskDeviceMapping')
def set_DiskDeviceMappings(self, DiskDeviceMapping): # RepeatList
for depth1 in range(len(DiskDeviceMapping)):
if DiskDeviceMapping[depth1].get('SnapshotId') is not None:
self.add_query_param('DiskDeviceMapping.' + str(depth1 + 1) + '.SnapshotId', DiskDeviceMapping[depth1].get('SnapshotId'))
if DiskDeviceMapping[depth1].get('Size') is not None:
self.add_query_param('DiskDeviceMapping.' + str(depth1 + 1) + '.Size', DiskDeviceMapping[depth1].get('Size'))
if DiskDeviceMapping[depth1].get('DiskType') is not None:
self.add_query_param('DiskDeviceMapping.' + str(depth1 + 1) + '.DiskType', DiskDeviceMapping[depth1].get('DiskType'))
if DiskDeviceMapping[depth1].get('Device') is not None:
self.add_query_param('DiskDeviceMapping.' + str(depth1 + 1) + '.Device', DiskDeviceMapping[depth1].get('Device'))
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_SnapshotId(self): # String
return self.get_query_params().get('SnapshotId')
def set_SnapshotId(self, SnapshotId): # String
self.add_query_param('SnapshotId', SnapshotId)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_Description(self): # String
return self.get_query_params().get('Description')
def set_Description(self, Description): # String
self.add_query_param('Description', Description)
def get_Platform(self): # String
return self.get_query_params().get('Platform')
def set_Platform(self, Platform): # String
self.add_query_param('Platform', Platform)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_BootMode(self): # String
return self.get_query_params().get('BootMode')
def METHOD_NAME(self, BootMode): # String
self.add_query_param('BootMode', BootMode)
def get_ImageName(self): # String
return self.get_query_params().get('ImageName')
def set_ImageName(self, ImageName): # String
self.add_query_param('ImageName', ImageName)
def get_Tags(self): # RepeatList
return self.get_query_params().get('Tag')
def set_Tags(self, Tag): # RepeatList
for depth1 in range(len(Tag)):
if Tag[depth1].get('Value') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value'))
if Tag[depth1].get('Key') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key'))
def get_Architecture(self): # String
return self.get_query_params().get('Architecture')
def set_Architecture(self, Architecture): # String
self.add_query_param('Architecture', Architecture)
def get_DetectionStrategy(self): # String
return self.get_query_params().get('DetectionStrategy')
def set_DetectionStrategy(self, DetectionStrategy): # String
self.add_query_param('DetectionStrategy', DetectionStrategy)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_InstanceId(self): # String
return self.get_query_params().get('InstanceId')
def set_InstanceId(self, InstanceId): # String
self.add_query_param('InstanceId', InstanceId)
def get_ImageFamily(self): # String
return self.get_query_params().get('ImageFamily')
def set_ImageFamily(self, ImageFamily): # String
self.add_query_param('ImageFamily', ImageFamily)
def get_ImageVersion(self): # String
return self.get_query_params().get('ImageVersion')
def set_ImageVersion(self, ImageVersion): # String
self.add_query_param('ImageVersion', ImageVersion)
| null |
1,333 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# A simple broker for simulating broker clients. Shamelessly stolen from the
# proton python examples. Accepts incoming links and forwards messages based
# on target address.
import collections
from threading import Thread
import uuid
from proton import Endpoint
from proton.handlers import MessagingHandler
from proton.reactor import Container
from system_test import TIMEOUT, Logger
class FakeBroker(MessagingHandler):
"""
A fake broker-like service that listens for client connections
"""
class _Queue:
def __init__(self, name, logger, dynamic=False):
self.dynamic = dynamic
self.queue = collections.deque()
self.consumers = []
self.logger = logger
self.name = name
self.sent = 0
self.recv = 0
def subscribe(self, consumer):
self.consumers.append(consumer)
def unsubscribe(self, consumer):
if consumer in self.consumers:
self.consumers.remove(consumer)
return len(self.consumers) == 0 and (self.dynamic or len(self.queue) == 0)
def publish(self, message):
self.recv += 1
self.logger.log("Received message %d" % self.recv)
self.queue.append(message)
return self.dispatch()
def dispatch(self, consumer=None):
if consumer:
c = [consumer]
else:
c = self.consumers
count = 0
while True:
rc = self._deliver_to(c)
count += rc
if rc == 0:
break
return count
def _deliver_to(self, consumers):
try:
result = 0
for c in consumers:
if c.credit:
c.send(self.queue.popleft())
result += 1
self.sent += 1
self.logger.log("Sent message %d" % self.sent)
return result
except IndexError: # no more messages
return 0
def __init__(self, url, container_id=None, **handler_kwargs):
super(FakeBroker, self).__init__(**handler_kwargs)
self.url = url
self.queues = {}
self.acceptor = None
self.in_count = 0
self.out_count = 0
self.link_errors = 0
self._connections = []
self._error = None
self._container = Container(self)
self._container.container_id = container_id or 'FakeBroker'
self._logger = Logger(title=self._container.container_id)
self._thread = Thread(target=self._main)
self._thread.daemon = True
self._stop_thread = False
self._thread.start()
def _main(self):
self._container.timeout = 1.0
self._container.start()
self._logger.log("Starting reactor thread")
while self._container.process():
if self._stop_thread:
if self.acceptor:
self.acceptor.close()
self.acceptor = None
for c in self._connections:
c.close()
self._connections = []
self._logger.log("reactor thread done")
def join(self):
self._stop_thread = True
self._container.wakeup()
self._thread.join(timeout=TIMEOUT)
self._logger.log("thread done")
if self._thread.is_alive():
raise Exception("FakeBroker did not exit")
if self._error:
raise Exception(self._error)
def on_start(self, event):
self.acceptor = event.container.listen(self.url)
def _queue(self, address):
if address not in self.queues:
self.queues[address] = self._Queue(address, self._logger)
return self.queues[address]
def on_link_opening(self, event):
if event.link.is_sender:
if event.link.remote_source.dynamic:
address = str(uuid.uuid4())
event.link.source.address = address
q = self._Queue(address, self._logger, True)
self.queues[address] = q
q.subscribe(event.link)
self._logger.log("dynamic sending link opened %s" % address)
elif event.link.remote_source.address:
event.link.source.address = event.link.remote_source.address
self._queue(event.link.source.address).subscribe(event.link)
self._logger.log("sending link opened %s" % event.link.source.address)
elif event.link.remote_target.address:
event.link.target.address = event.link.remote_target.address
self._logger.log("receiving link opened %s" % event.link.target.address)
def _unsubscribe(self, link):
if link.source.address in self.queues and self.queues[link.source.address].unsubscribe(link):
del self.queues[link.source.address]
def METHOD_NAME(self, event):
self._logger.log("link error")
self.link_errors += 1
self.on_link_closing(event)
def on_link_closing(self, event):
self._logger.log("link closing")
if event.link.is_sender:
self._unsubscribe(event.link)
def on_connection_opening(self, event):
pn_conn = event.connection
pn_conn.container = self._container.container_id
def on_connection_opened(self, event):
self._logger.log("connection opened")
self._connections.append(event.connection)
def on_connection_closing(self, event):
self.remove_stale_consumers(event.connection)
def on_connection_closed(self, event):
self._logger.log("connection closed")
try:
self._connections.remove(event.connection)
except ValueError:
pass
def on_disconnected(self, event):
self.remove_stale_consumers(event.connection)
def remove_stale_consumers(self, connection):
link = connection.link_head(Endpoint.REMOTE_ACTIVE)
while link:
if link.is_sender:
self._unsubscribe(link)
link = link.next(Endpoint.REMOTE_ACTIVE)
def on_sendable(self, event):
self.out_count += self._queue(event.link.source.address).dispatch(event.link)
def on_message(self, event):
self.in_count += 1
self.out_count += self._queue(event.link.target.address).publish(event.message)
def dump_log(self):
self._logger.dump()
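# Usage sketch (added note; the listen address below is only an example):
#
#   broker = FakeBroker('127.0.0.1:25672')   # constructor starts the reactor thread
#   ... run the client under test against that address ...
#   broker.join()                             # stops the thread; raises if it hung or errored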
class FakeService(FakeBroker):
"""
Like a broker, but proactively connects to the message bus
Useful for testing link routes
"""
def __init__(self, url, container_id=None, **handler_kwargs):
super(FakeService, self).__init__(url, container_id, **handler_kwargs)
def on_start(self, event):
event.container.connect(url=self.url)
| null |
1,334 |
from unittest import TestCase
import service
from simulation.avatar.avatar_manager import AvatarManager
from simulation.game_state import GameState
from simulation.interactables.score_location import ScoreLocation
from simulation.location import Location
from simulation.world_map import WorldMap
from .test_simulation.dummy_avatar import MoveEastDummy
from .test_simulation.maps import MockCell, MockPickup
from .test_simulation.mock_game_state import MockGameState
import pytest
@pytest.fixture(scope="module")
def avatar_manager():
class DummyAvatarManager(AvatarManager):
avatars = [MoveEastDummy(1, Location(0, -1))]
return DummyAvatarManager()
@pytest.fixture(scope="module")
def world_state_json(avatar_manager):
CELLS = [
[
{
"interactable": MockPickup("b"),
"avatar": avatar_manager.avatars[0],
},
{},
{},
],
[{}, {"habitable": False}, {"interactable": MockPickup("a")}],
]
grid = {
Location(x, y - 1): MockCell(Location(x, y - 1), **CELLS[x][y])
for y in range(3)
for x in range(2)
}
grid[Location(0, 1)].interactable = ScoreLocation(grid[Location(0, 1)])
test_game_state = GameState(WorldMap(grid, {}), avatar_manager)
return test_game_state.serialize()
def METHOD_NAME(world_state_json):
"""
Ensures the "players" element of the get_game_state() JSON returns the correct information for the dummy
avatar provided into the world.
NOTE: Orientation (and others) may be hard coded. This test WILL and SHOULD fail if the functionality is added.
"""
player_list = world_state_json["players"]
assert len(player_list) == 1
details = player_list[0]
assert details["id"] == 1
assert details["location"]["x"] == 0
assert details["location"]["y"] == -1
assert details["orientation"] == "north"
def test_correct_json_score_locations(world_state_json):
"""
    Ensures the correct score location in the "score_locations" element is returned by the JSON.
"""
interactable_list = world_state_json["interactables"]
for interactable in interactable_list:
if "ScoreLocation" in interactable:
assert interactable["location"]["x"] == 0
assert interactable["location"]["y"] == 1
def test_correct_json_north_east_corner(world_state_json):
"""
Top right corner of the map must be correct to determine the map size.
"""
north_east_corner = world_state_json["northEastCorner"]
assert north_east_corner["x"] == 1
assert north_east_corner["y"] == 1
def test_correct_json_south_west_corner(world_state_json):
"""
Bottom left corner of the map must be correct to determine the map size.
"""
south_west_corner = world_state_json["southWestCorner"]
assert south_west_corner["x"] == 0
assert south_west_corner["y"] == -1
def test_correct_json_era(world_state_json):
"""
Ensure that the era (for the assets in the frontend) is correct.
NOTE: This is hard coded right now to "future". This test should fail when this functionality is added.
"""
era = world_state_json["era"]
assert era == "future"
def test_correct_json_world_interactables_returned_is_correct_amount(world_state_json):
"""
The JSON returns the correct amount of pickups.
"""
interactable_list = world_state_json["interactables"]
assert len(interactable_list) == 3
def test_correct_json_world_obstacles(world_state_json):
"""
JSON generated must return correct location, width, height, type and orientation about obstacles.
NOTE: Obstacles are highly hard coded right now. Only location changes. If any functionality is added, this test
WILL and SHOULD fail.
"""
obstacle_list = world_state_json["obstacles"]
assert len(obstacle_list) == 1
assert obstacle_list[0]["location"]["x"] == 1
assert obstacle_list[0]["location"]["y"] == 0
assert obstacle_list[0]["texture"] == 1
| null |
1,335 |
# python imports
import os
import sys
import numpy as np
from argparse import ArgumentParser
import nibabel as nib
from scipy.ndimage import binary_dilation
from scipy.ndimage.morphology import distance_transform_edt
# add main folder to python path and import SynthSR packages
code_home = os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0])))
print(code_home)
sys.path.append(code_home)
# ================================================================================================
# Main Entrypoint
# ================================================================================================
def main():
# parse arguments
parser = ArgumentParser()
parser.add_argument("input", type=str,
help="SynthSR input")
parser.add_argument("ribbon", type=str,
help="Ribbon file")
parser.add_argument("fake", type=str,
help="fake image file")
parser.add_argument("output", type=str,
help="output")
args = vars(parser.parse_args())
print('Reading inputs')
im, aff, hdr = load_volume(args['input'], im_only=False, dtype='float')
imR, affR, hdrR = load_volume(args['ribbon'], im_only=False, dtype='float')
imF, affF, hdrF = load_volume(args['fake'], im_only=False, dtype='float')
    if np.max(np.abs(aff - affR)) > 1e-3:
        raise Exception('ribbon header differs from input header')
    if np.max(np.abs(aff - affF)) > 1e-3:
        raise Exception('fake image header differs from input header')
print('Processing')
M = ( (imR==3) | (imR==42) )
D = distance_transform_edt(M==0)
D = D - 1.0
D[D<0] = 0
Prib = np.exp(-1.0*D)
Pfake = (1.0 - Prib)
I = (imF * Prib) + (im * Pfake)
print('Writing to disk')
METHOD_NAME(I, aff, None, args['output'])
print('Done!')
# ================================================================================================
# Auxiliary functions
# ================================================================================================
def METHOD_NAME(volume, aff, header, path, res=None, dtype=None, n_dims=3):
mkdir(os.path.dirname(path))
if '.npz' in path:
np.savez_compressed(path, vol_data=volume)
else:
if header is None:
header = nib.Nifti1Header()
if isinstance(aff, str):
if aff == 'FS':
aff = np.array([[-1, 0, 0, 0], [0, 0, 1, 0], [0, -1, 0, 0], [0, 0, 0, 1]])
elif aff is None:
aff = np.eye(4)
nifty = nib.Nifti1Image(volume, aff, header)
if dtype is not None:
if 'int' in dtype:
volume = np.round(volume)
volume = volume.astype(dtype=dtype)
nifty.set_data_dtype(dtype)
if res is not None:
if n_dims is None:
n_dims, _ = get_dims(volume.shape)
res = reformat_to_list(res, length=n_dims, dtype=None)
nifty.header.set_zooms(res)
nib.save(nifty, path)
def mkdir(path_dir):
"""Recursively creates the current dir as well as its parent folders if they do not already exist."""
if path_dir[-1] == '/':
path_dir = path_dir[:-1]
if not os.path.isdir(path_dir):
list_dir_to_create = [path_dir]
while not os.path.isdir(os.path.dirname(list_dir_to_create[-1])):
list_dir_to_create.append(os.path.dirname(list_dir_to_create[-1]))
for dir_to_create in reversed(list_dir_to_create):
os.mkdir(dir_to_create)
def build_binary_structure(connectivity, n_dims, shape=None):
"""Return a dilation/erosion element with provided connectivity"""
if shape is None:
shape = [connectivity * 2 + 1] * n_dims
else:
shape = reformat_to_list(shape, length=n_dims)
dist = np.ones(shape)
center = tuple([tuple([int(s / 2)]) for s in shape])
dist[center] = 0
dist = distance_transform_edt(dist)
struct = (dist <= connectivity) * 1
return struct
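# Illustrative note (added): build_binary_structure(1, 2) returns the 3x3
# cross-shaped element
#   [[0, 1, 0],
#    [1, 1, 1],
#    [0, 1, 0]]
# i.e. every position within Euclidean distance 1 of the center.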
def load_volume(path_volume, im_only=True, squeeze=True, dtype=None, aff_ref=None):
assert path_volume.endswith(('.nii', '.nii.gz', '.mgz', '.npz')), 'Unknown data file: %s' % path_volume
if path_volume.endswith(('.nii', '.nii.gz', '.mgz')):
x = nib.load(path_volume)
if squeeze:
volume = np.squeeze(x.get_fdata())
else:
volume = x.get_fdata()
aff = x.affine
header = x.header
else: # npz
volume = np.load(path_volume)['vol_data']
if squeeze:
volume = np.squeeze(volume)
aff = np.eye(4)
header = nib.Nifti1Header()
if dtype is not None:
if 'int' in dtype:
volume = np.round(volume)
volume = volume.astype(dtype=dtype)
# align image to reference affine matrix
if aff_ref is not None:
n_dims, _ = get_dims(list(volume.shape), max_channels=10)
volume, aff = align_volume_to_ref(volume, aff, aff_ref=aff_ref, return_aff=True, n_dims=n_dims)
if im_only:
return volume
else:
return volume, aff, header
# execute script
if __name__ == '__main__':
main()
| null |
1,336 |
# coding=utf-8
# Copyright 2023 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""mt_opt dataset."""
from __future__ import annotations
import os
from typing import Any, Dict, Generator, Tuple
import numpy as np
from tensorflow_datasets.core.utils.lazy_imports_utils import tensorflow as tf
import tensorflow_datasets.public_api as tfds
_DESCRIPTION = """
Datasets for the [MT-Opt paper](https://arxiv.org/abs/2104.08212).
"""
_CITATION = """
@misc{kalashnikov2021mtopt,
title={MT-Opt: Continuous Multi-Task Robotic Reinforcement Learning at Scale},
author={Dmitry Kalashnikov and Jacob Varley and Yevgen Chebotar and Benjamin Swanson and Rico Jonschkowski and Chelsea Finn and Sergey Levine and Karol Hausman},
year={2021},
eprint={2104.08212},
archivePrefix={arXiv},
primaryClass={cs.RO}
}
"""
_BUILDER_CONFIGS = [
tfds.core.BuilderConfig(
name='rlds',
description=(
            'This dataset contains task episodes collected across a fleet of'
            ' real robots. It follows the [RLDS'
            ' format](https://github.com/google-research/rlds) to represent'
' steps and episodes.'
),
),
tfds.core.BuilderConfig(
name='sd',
description=(
'The success detectors dataset that contains human curated'
' definitions of tasks completion.'
),
),
]
def METHOD_NAME():
return tfds.features.FeaturesDict({
'action': tfds.features.FeaturesDict({
'close_gripper': np.bool_,
'open_gripper': np.bool_,
'target_pose': tfds.features.Tensor(
shape=(7,), dtype=np.float32, encoding=tfds.features.Encoding.ZLIB
),
'terminate': np.bool_,
}),
'is_first': np.bool_,
'is_last': np.bool_,
'is_terminal': np.bool_,
'observation': tfds.features.FeaturesDict({
'gripper_closed': np.bool_,
'height_to_bottom': np.float32,
'image': tfds.features.Image(shape=(512, 640, 3), dtype=np.uint8),
'state_dense': tfds.features.Tensor(
shape=(7,), dtype=np.float32, encoding=tfds.features.Encoding.ZLIB
),
}),
})
def _name_to_features(config_name: str):
if config_name == 'rlds':
return tfds.features.FeaturesDict({
'episode_id': np.str_,
'skill': np.uint8,
'steps': tfds.features.Dataset(METHOD_NAME()),
'task_code': np.str_,
})
return tfds.features.FeaturesDict({
'image_0': tfds.features.Image(shape=(512, 640, 3), dtype=np.uint8),
'image_1': tfds.features.Image(shape=(480, 640, 3), dtype=np.uint8),
'image_2': tfds.features.Image(shape=(480, 640, 3), dtype=np.uint8),
'success': np.bool_,
'task_code': np.str_,
})
# To encode, we use sequence instead of nested dataset. Otherwise, Beam has
# issues calculating the size of the yielded examples (b/219881125)
def _name_to_features_encode(config_name: str):
if config_name == 'rlds':
return tfds.features.FeaturesDict({
'episode_id': np.str_,
'skill': np.uint8,
'steps': tfds.features.Sequence(METHOD_NAME()),
'task_code': np.str_,
})
return tfds.features.FeaturesDict({
'image_0': tfds.features.Image(shape=(512, 640, 3), dtype=np.uint8),
'image_1': tfds.features.Image(shape=(480, 640, 3), dtype=np.uint8),
'image_2': tfds.features.Image(shape=(480, 640, 3), dtype=np.uint8),
'success': np.bool_,
'task_code': np.str_,
})
_NAME_TO_SPLITS = {
'sd': {
'train': 1024,
'test': 256,
},
'rlds': {
'train': 2048,
},
}
def _filename(prefix: str, num_shards: int, shard_id: int):
return os.fspath(
tfds.core.Path(f'{prefix}-{shard_id:05d}-of-{num_shards:05d}')
)
def _get_files(prefix: str, ds_name: str, split: str, num_shards: int):
prefix = f'{prefix}/mt_opt_{ds_name}/1.0.0/mt_opt_{ds_name}-{split}.tfrecord'
return [_filename(prefix, num_shards, i) for i in range(num_shards)]
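# Illustrative example (added): with the prefix defined on the builder below,
# _get_files('gs://gresearch/robotics', 'rlds', 'train', 2048)[0] evaluates to
# 'gs://gresearch/robotics/mt_opt_rlds/1.0.0/mt_opt_rlds-train.tfrecord-00000-of-02048'.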
class MtOpt(tfds.core.GeneratorBasedBuilder):
"""DatasetBuilder for mt_opt datasets."""
VERSION = tfds.core.Version('1.0.0')
RELEASE_NOTES = {
'1.0.0': 'Initial release.',
}
BUILDER_CONFIGS = _BUILDER_CONFIGS
_INPUT_FILE_PREFIX = 'gs://gresearch/robotics/'
def _info(self) -> tfds.core.DatasetInfo:
"""Returns the dataset metadata."""
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=_name_to_features(self.builder_config.name),
supervised_keys=None,
homepage='https://karolhausman.github.io/mt-opt/',
citation=_CITATION,
)
def _split_generators(self, dl_manager: tfds.download.DownloadManager):
"""Returns SplitGenerators."""
ds_name = self.builder_config.name
splits = {}
for split, shards in _NAME_TO_SPLITS[ds_name].items():
paths = {
'file_paths': _get_files(
self._INPUT_FILE_PREFIX, ds_name, split, shards
)
}
splits[split] = self._generate_examples(paths)
return splits
def _generate_examples_one_file(
self, path
) -> Generator[Tuple[str, Dict[str, Any]], None, None]:
"""Yields examples from one file."""
# Dataset of tf.Examples containing full episodes.
example_ds = tf.data.TFRecordDataset(filenames=str(path))
example_features = _name_to_features_encode(self.builder_config.name)
example_specs = example_features.get_serialized_info()
parser = tfds.core.example_parser.ExampleParser(example_specs)
parsed_examples = example_ds.map(parser.parse_example)
decoded_examples = parsed_examples.map(example_features.decode_example)
for index, example in enumerate(tfds.as_numpy(decoded_examples)):
if self.builder_config.name == 'rlds':
id_key = 'episode_id'
else:
id_key = 'task_code'
example_id = str(index) + str(example[id_key]) + str(hash(path))
yield example_id, example
def _generate_examples(self, paths):
"""Yields examples."""
beam = tfds.core.lazy_imports.apache_beam
file_paths = paths['file_paths']
return beam.Create(file_paths) | beam.FlatMap(
self._generate_examples_one_file
)
| null |
1,337 |
import logging
import tempfile
import unittest
from pathlib import Path
from processors import areatree
from processors.areatree.process import (
_areatree_lines,
_extract_building_prefix,
_extract_id_and_type,
_extract_names,
_split_line,
)
class AreatreeExtractNames(unittest.TestCase):
def test_extract_names_with_short_name(self) -> None:
"""If there is a short name, it is returned as well"""
names = ["Mathematics Informatics", "mi"]
expected_output = {"name": "Mathematics Informatics", "short_name": "mi"}
self.assertEqual(_extract_names(names), expected_output)
def test_extract_names_without_short_name(self) -> None:
"""If there is no short name, only the name is returned"""
names = ["Mathematics Informatics"]
expected_output = {"name": "Mathematics Informatics"}
self.assertEqual(_extract_names(names), expected_output)
def test_extract_names_with_long_short_name(self) -> None:
"""If the short name is longer than 20 chars, a warning is raised"""
names = ["Mechanical Engineering", "ThisIsAVeryLongNameForAShortName"]
expected_output = {"name": "Mechanical Engineering", "short_name": "ThisIsAVeryLongNameForAShortName"}
with self.assertLogs(level=logging.WARNING) as recorded_logs:
self.assertEqual(_extract_names(names), expected_output)
self.assertIn(
"'ThisIsAVeryLongNameForAShortName' is very long for a short name (>20 chars)",
recorded_logs.output[0],
)
def METHOD_NAME(self) -> None:
"""If there are more than two names, an error is raised"""
names = ["Name1", "Name2", "Name3"]
with self.assertRaises(RuntimeError):
_extract_names(names)
with self.assertRaises(IndexError):
_extract_names([])
class AreatreeExtractBuildingPrefix(unittest.TestCase):
def test_dash_separator(self) -> None:
"""If the building id is separated by a dash, it is returned as a string"""
expected_result = {"b_prefix": "b1-b2-b3"}
self.assertEqual(_extract_building_prefix("b1-b2-b3"), expected_result)
def test_areatree_uncertain(self) -> None:
"""If the building id starts with a dash, it is marked as uncertain"""
expected_result = {"data_quality": {"areatree_uncertain": True}, "b_prefix": "b1-b2"}
self.assertEqual(_extract_building_prefix("-b1-b2"), expected_result)
def test_comma_separator(self) -> None:
"""If the building id is separated by a comma, it is split into a list"""
expected_result = {"b_prefix": ["b1", "b2", "b3"]}
self.assertEqual(_extract_building_prefix("b1,b2,b3"), expected_result)
def test_empty(self) -> None:
"""If the building id is empty, an empty dict is returned"""
self.assertEqual(_extract_building_prefix(""), {})
def test_building_ids_without_separator(self) -> None:
"""If the building id is not separated by a dash or comma, it is returned as is"""
expected_result = {"b_prefix": "b1"}
self.assertEqual(_extract_building_prefix("b1"), expected_result)
class AreatreeExtractIdAndType(unittest.TestCase):
def test_specified_type(self) -> None:
"""If the type is specified, it is returned"""
expected = {"id": "abc", "type": "building"}
self.assertEqual(_extract_id_and_type("abc[building]", None), expected)
self.assertEqual(_extract_id_and_type("abc[building]", "cdf"), expected)
def test_comma(self) -> None:
"""If the id is inferable from the line, it is returned"""
expected = {"id": "123", "visible_id": "visible_id", "type": "area"}
self.assertEqual(_extract_id_and_type("123,visible_id", None), expected)
self.assertEqual(_extract_id_and_type("123,visible_id", "cdf"), expected)
def test_single_id(self) -> None:
"""If the id is inferable from the line, it is returned"""
expected = {"id": "xyz", "type": "building"}
self.assertEqual(_extract_id_and_type("xyz", "xyz"), expected)
def test_id_not_inferable(self) -> None:
"""If the id is not inferable from the line, an error is raised"""
with self.assertRaises(RuntimeError):
_extract_id_and_type("", ["b_prefix1", "b_prefix2"])
with self.assertRaises(RuntimeError):
_extract_id_and_type("123,visible_id,extra_id", ["b_prefix1", "b_prefix2"])
with self.assertRaises(RuntimeError):
_extract_id_and_type("123,visible_id,extra_id", None)
class AreatreeLinesTestCase(unittest.TestCase):
def test_empty_file(self) -> None:
"""Empty file returns empty list"""
with tempfile.NamedTemporaryFile() as file:
areatree.process.AREATREE_FILE = Path(file.name)
self.assertEqual(list(_areatree_lines()), [])
def test_comment_lines(self) -> None:
"""Comment lines are removed"""
with tempfile.NamedTemporaryFile(mode="w+") as file:
areatree.process.AREATREE_FILE = Path(file.name)
file.write("line1\n")
file.write("\n") # Empty line
file.write("# Comment line\n")
file.write("line2\n")
file.flush()
self.assertEqual(list(_areatree_lines()), ["line1", "line2"])
def test_inline_comments(self) -> None:
"""Inline comments are removed"""
with tempfile.NamedTemporaryFile(mode="w+") as file:
areatree.process.AREATREE_FILE = Path(file.name)
file.write("line1#comment1\n")
file.write("line2#comment2 # comment 3\n")
file.flush()
self.assertEqual(list(_areatree_lines()), ["line1", "line2"])
def test_file_preserves_indentation(self) -> None:
"""Indentation is preserved"""
with tempfile.NamedTemporaryFile(mode="w+") as file:
areatree.process.AREATREE_FILE = Path(file.name)
file.write(" line1 \n")
file.write(" line2\n")
file.write("line3")
file.flush()
self.assertEqual(list(_areatree_lines()), [" line1", " line2", "line3"])
class SplitLineTestCase(unittest.TestCase):
def test_valid_line(self) -> None:
"""Valid lines are split correctly"""
self.assertEqual(_split_line("1:Building A:123,456"), ("1", "Building A", "123,456"))
def test_invalid_line_missing_parts(self) -> None:
"""Missing parts are not allowed"""
with self.assertRaises(RuntimeError):
_split_line("1:Building A")
def test_invalid_line_extra_parts(self) -> None:
"""Extra parts are not allowed"""
with self.assertRaises(RuntimeError):
_split_line("1:Building A:123,456:extra_part")
if __name__ == "__main__":
unittest.main()
| null |
1,338 |
# Copyright 2016 The Johns Hopkins University Applied Physics Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
from intern.remote.boss import BossRemote
import random
import requests
from requests import Session, HTTPError
from requests.packages.urllib3.exceptions import InsecureRequestWarning
import unittest
API_VER = 'v1'
class ProjectUserRoleTest_v1(unittest.TestCase):
"""Integration tests of the Boss user-role API.
"""
@classmethod
def setUpClass(cls):
"""Do an initial DB clean up in case something went wrong the last time.
If a test failed really badly, the DB might be in a bad state despite
attempts to clean up during tearDown().
"""
cls.rmt = BossRemote('test.cfg', API_VER)
# Turn off SSL cert verification. This is necessary for interacting with
# developer instances of the Boss.
cls.rmt.project_service.session_send_opts = {'verify': False}
cls.rmt.metadata_service.session_send_opts = {'verify': False}
cls.rmt.volume_service.session_send_opts = {'verify': False}
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
cls.admin = 'admin'
cls.user_mgr = 'user-manager'
cls.rsrc_mgr = 'resource-manager'
cls.user = 'role_test_user{}'.format(random.randint(0, 9999))
cls.rmt.add_user(cls.user, 'John', 'Doe', 'jd{}@me.com'.format(random.randint(0, 9999)), 'password')
cls.cleanup_db()
@classmethod
def tearDownClass(cls):
cls.cleanup_db()
# Currently can't delete users unless admin
#cls.rmt.delete_user(cls.user)
@classmethod
def cleanup_db(cls):
"""Clean up the data model objects used by this test case.
This method is used by both tearDown() and setUpClass(). Don't do
anything if an exception occurs during delete_user_role(). The role
may not have existed for a particular test.
"""
try:
cls.rmt.delete_user_role(cls.user, cls.user_mgr)
except HTTPError:
pass
try:
cls.rmt.delete_user_role(cls.user, cls.rsrc_mgr)
except HTTPError:
pass
def setUp(self):
pass
def tearDown(self):
self.cleanup_db()
def test_add_role(self):
self.rmt.add_user_role(self.user, self.rsrc_mgr)
expected = [self.rsrc_mgr]
actual = self.rmt.get_user_roles(self.user)
six.assertCountEqual(self, expected, actual)
def test_fail_add_user_manager(self):
# only admin can add a user manager
with self.assertRaises(HTTPError):
self.rmt.add_user_role(self.user, self.user_mgr)
def test_add_invalid_user(self):
with self.assertRaises(HTTPError):
self.rmt.add_user_role('foo', self.admin)
def test_add_invalid_role(self):
with self.assertRaises(HTTPError):
self.rmt.add_user_role(self.user, 'foo')
def test_add_invalid_admin_role(self):
# only the root account has the admin role
with self.assertRaises(HTTPError):
self.rmt.add_user_role(self.user, self.admin)
def test_delete_role(self):
self.rmt.add_user_role(self.user, self.rsrc_mgr)
self.rmt.delete_user_role(self.user, self.rsrc_mgr)
actual = self.rmt.get_user_roles(self.user)
self.assertEqual([], actual)
def test_delete_invalid_user(self):
with self.assertRaises(HTTPError):
self.rmt.delete_user_role('foo', self.admin)
def test_delete_invalid_role(self):
with self.assertRaises(HTTPError):
self.rmt.delete_user_role(self.user, 'foo')
def METHOD_NAME(self):
actual = self.rmt.get_user_roles(self.user)
self.assertEqual([], actual)
def test_get_roles_invalid_user(self):
with self.assertRaises(HTTPError):
self.rmt.get_user_roles('foo')
if __name__ == '__main__':
unittest.main()
| null |
1,339 |
################################################################################
# Creme is a free/open-source Customer Relationship Management software
# Copyright (C) 2018-2022 Hybird
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
################################################################################
from django.urls.base import reverse
from django.utils.translation import gettext
from django.utils.translation import gettext_lazy as _
from .gui.actions import BulkEntityAction, EntityAction
from .gui.merge import merge_form_registry
class EditAction(EntityAction):
id = EntityAction.generate_id('creme_core', 'edit')
type = 'redirect'
label = _('Edit')
icon = 'edit'
@property
def url(self):
return self.instance.get_edit_absolute_url()
@property
def is_enabled(self):
return bool(self.url) and self.user.has_perm_to_change(self.instance)
class DeleteAction(EntityAction):
id = EntityAction.generate_id('creme_core', 'delete')
type = 'delete'
label = _('Delete')
icon = 'delete'
@property
def url(self):
return self.instance.get_delete_absolute_url()
@property
def is_enabled(self):
return bool(self.url) and self.user.has_perm_to_delete(self.instance)
class ViewAction(EntityAction):
id = EntityAction.generate_id('creme_core', 'view')
type = 'redirect'
label = _('See')
icon = 'view'
is_default = True
@property
def url(self):
return self.instance.get_absolute_url()
@property
def help_text(self):
return gettext('Go to the entity {entity}').format(entity=self.instance)
@property
def is_enabled(self):
return bool(self.url) and self.user.has_perm_to_view(self.instance)
class CloneAction(EntityAction):
id = EntityAction.generate_id('creme_core', 'clone')
type = 'clone'
label = _('Clone')
icon = 'clone'
@property
def url(self):
return self.instance.get_clone_absolute_url()
def _get_data(self):
return {
'id': self.instance.id,
}
@property
def is_enabled(self):
instance = self.instance
user = self.user
return (
bool(self.url)
and user.has_perm_to_create(instance)
and user.has_perm_to_view(instance)
)
class BulkEditAction(BulkEntityAction):
id = BulkEntityAction.generate_id('creme_core', 'bulk_edit')
type = 'edit-selection'
url_name = 'creme_core__bulk_update'
label = _('Multiple update')
icon = 'edit'
@property
def url(self):
return reverse(self.url_name, args=(self.ctype.id,))
class BulkDeleteAction(BulkEntityAction):
id = BulkEntityAction.generate_id('creme_core', 'bulk_delete')
type = 'delete-selection'
url_name = 'creme_core__delete_entities'
label = _('Multiple deletion')
icon = 'delete'
class BulkAddPropertyAction(BulkEntityAction):
id = BulkEntityAction.generate_id('creme_core', 'bulk_add_property')
type = 'addto-selection'
url_name = 'creme_core__add_properties_bulk'
label = _('Multiple property adding')
icon = 'property'
@property
def url(self):
return reverse(self.url_name, args=(self.ctype.id,))
class BulkAddRelationAction(BulkEntityAction):
id = BulkEntityAction.generate_id('creme_core', 'bulk_add_relation')
type = 'addto-selection'
url_name = 'creme_core__create_relations_bulk'
label = _('Multiple relationship adding')
icon = 'relations'
@property
def url(self):
return reverse(self.url_name, args=(self.ctype.id,))
class MergeAction(BulkEntityAction):
id = BulkEntityAction.generate_id('creme_core', 'merge')
type = 'merge-selection'
url_name = 'creme_core__merge_entities'
label = _('Merge 2 entities')
icon = 'merge'
bulk_max_count = 2
bulk_min_count = 2
merge_form_registry = merge_form_registry
def METHOD_NAME(self) -> bool:
return self.merge_form_registry.get(self.model) is not None
@property
def is_enabled(self):
return self.METHOD_NAME()
@property
def is_visible(self):
return self.METHOD_NAME()
| null |
1,340 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdksas.endpoint import endpoint_data
class ExportSuspEventsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Sas', '2018-12-03', 'ExportSuspEvents')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_TimeEnd(self): # String
return self.get_query_params().get('TimeEnd')
def set_TimeEnd(self, TimeEnd): # String
self.add_query_param('TimeEnd', TimeEnd)
def get_TargetType(self): # String
return self.get_query_params().get('TargetType')
def set_TargetType(self, TargetType): # String
self.add_query_param('TargetType', TargetType)
def METHOD_NAME(self): # String
return self.get_query_params().get('Remark')
def set_Remark(self, Remark): # String
self.add_query_param('Remark', Remark)
def get_ContainerFieldName(self): # String
return self.get_query_params().get('ContainerFieldName')
def set_ContainerFieldName(self, ContainerFieldName): # String
self.add_query_param('ContainerFieldName', ContainerFieldName)
def get_SourceIp(self): # String
return self.get_query_params().get('SourceIp')
def set_SourceIp(self, SourceIp): # String
self.add_query_param('SourceIp', SourceIp)
def get_ContainerFieldValue(self): # String
return self.get_query_params().get('ContainerFieldValue')
def set_ContainerFieldValue(self, ContainerFieldValue): # String
self.add_query_param('ContainerFieldValue', ContainerFieldValue)
def get_PageSize(self): # String
return self.get_query_params().get('PageSize')
def set_PageSize(self, PageSize): # String
self.add_query_param('PageSize', PageSize)
def get_From(self): # String
return self.get_query_params().get('From')
def set_From(self, _From): # String
self.add_query_param('From', _From)
def get_Lang(self): # String
return self.get_query_params().get('Lang')
def set_Lang(self, Lang): # String
self.add_query_param('Lang', Lang)
def get_Dealed(self): # String
return self.get_query_params().get('Dealed')
def set_Dealed(self, Dealed): # String
self.add_query_param('Dealed', Dealed)
def get_CurrentPage(self): # String
return self.get_query_params().get('CurrentPage')
def set_CurrentPage(self, CurrentPage): # String
self.add_query_param('CurrentPage', CurrentPage)
def get_ClusterId(self): # String
return self.get_query_params().get('ClusterId')
def set_ClusterId(self, ClusterId): # String
self.add_query_param('ClusterId', ClusterId)
def get_AssetsTypeLists(self): # RepeatList
return self.get_query_params().get('AssetsTypeList')
def set_AssetsTypeLists(self, AssetsTypeList): # RepeatList
for depth1 in range(len(AssetsTypeList)):
self.add_query_param('AssetsTypeList.' + str(depth1 + 1), AssetsTypeList[depth1])
def get_Name(self): # String
return self.get_query_params().get('Name')
def set_Name(self, Name): # String
self.add_query_param('Name', Name)
def get_TimeStart(self): # String
return self.get_query_params().get('TimeStart')
def set_TimeStart(self, TimeStart): # String
self.add_query_param('TimeStart', TimeStart)
def get_Levels(self): # String
return self.get_query_params().get('Levels')
def set_Levels(self, Levels): # String
self.add_query_param('Levels', Levels)
def get_ParentEventTypes(self): # String
return self.get_query_params().get('ParentEventTypes')
def set_ParentEventTypes(self, ParentEventTypes): # String
self.add_query_param('ParentEventTypes', ParentEventTypes)
def get_Status(self): # String
return self.get_query_params().get('Status')
def set_Status(self, Status): # String
self.add_query_param('Status', Status)
| null |
1,341 |
from typing import (
List,
Optional,
)
from galaxy_test.api._framework import ApiTestCase
from galaxy_test.base.decorators import (
requires_admin,
using_requirement,
)
from galaxy_test.base.populators import DatasetPopulator
class TestGroupUsersApi(ApiTestCase):
dataset_populator: DatasetPopulator
def setUp(self):
super().setUp()
self.dataset_populator = DatasetPopulator(self.galaxy_interactor)
@requires_admin
def test_index(self, group_name: Optional[str] = None):
group_name = group_name or "test-group_users"
group = self._create_group(group_name)
encoded_group_id = group["id"]
group_users = self._get_group_users(encoded_group_id)
assert isinstance(group_users, list)
assert len(group_users) > 0
for group_user in group_users:
self._assert_valid_group_user(group_user)
def test_index_only_admin(self):
encoded_group_id = "any-group-id"
response = self._get(f"groups/{encoded_group_id}/users")
self._assert_status_code_is(response, 403)
@requires_admin
def test_index_unknown_group_raises_400(self):
encoded_group_id = "unknown-group-id"
response = self._get(f"groups/{encoded_group_id}/users", admin=True)
self._assert_status_code_is(response, 400)
@requires_admin
def test_show(self):
encoded_user_id = self.dataset_populator.user_id()
group = self._create_group("test-group-show-user", encoded_user_ids=[encoded_user_id])
encoded_group_id = group["id"]
response = self._get(f"groups/{encoded_group_id}/users/{encoded_user_id}", admin=True)
self._assert_status_code_is(response, 200)
group_user = response.json()
self._assert_valid_group_user(group_user, assert_id=encoded_user_id)
def test_show_only_admin(self):
encoded_group_id = "any-group-id"
encoded_user_id = "any-user-id"
response = self._get(f"groups/{encoded_group_id}/users/{encoded_user_id}")
self._assert_status_code_is(response, 403)
@requires_admin
def test_show_unknown_raises_400(self):
group = self._create_group("test-group-with-unknown-user")
encoded_group_id = group["id"]
encoded_user_id = "unknown-user-id"
response = self._get(f"groups/{encoded_group_id}/users/{encoded_user_id}", admin=True)
self._assert_status_code_is(response, 400)
@requires_admin
def test_update(self):
group_name = "group-without-users"
group = self._create_group(group_name, encoded_user_ids=[])
encoded_group_id = group["id"]
group_users = self._get_group_users(encoded_group_id)
assert len(group_users) == 0
encoded_user_id = self.dataset_populator.user_id()
update_response = self._put(f"groups/{encoded_group_id}/users/{encoded_user_id}", admin=True)
self._assert_status_code_is_ok(update_response)
group_user = update_response.json()
self._assert_valid_group_user(group_user, assert_id=encoded_user_id)
assert group_user["url"] == f"/api/groups/{encoded_group_id}/user/{encoded_user_id}"
def test_update_only_admin(self):
encoded_group_id = "any-group-id"
encoded_user_id = "any-user-id"
response = self._put(f"groups/{encoded_group_id}/users/{encoded_user_id}")
self._assert_status_code_is(response, 403)
@requires_admin
def test_delete(self):
group_name = "group-with-user-to-delete"
encoded_user_id = self.dataset_populator.user_id()
group = self._create_group(group_name, encoded_user_ids=[encoded_user_id])
encoded_group_id = group["id"]
group_users = self._get_group_users(encoded_group_id)
assert len(group_users) == 1
delete_response = self._delete(f"groups/{encoded_group_id}/users/{encoded_user_id}", admin=True)
self._assert_status_code_is_ok(delete_response)
group_user = delete_response.json()
self._assert_valid_group_user(group_user, assert_id=encoded_user_id)
group_users = self._get_group_users(encoded_group_id)
assert len(group_users) == 0
def METHOD_NAME(self):
encoded_group_id = "any-group-id"
encoded_user_id = "any-user-id"
response = self._delete(f"groups/{encoded_group_id}/users/{encoded_user_id}")
self._assert_status_code_is(response, 403)
@requires_admin
def test_delete_unknown_raises_400(self):
group_name = "group-without-user-to-delete"
group = self._create_group(group_name, encoded_user_ids=[])
encoded_group_id = group["id"]
group_users = self._get_group_users(encoded_group_id)
assert len(group_users) == 0
encoded_user_id = "unknown-user-id"
delete_response = self._delete(f"groups/{encoded_group_id}/users/{encoded_user_id}", admin=True)
self._assert_status_code_is(delete_response, 400)
def _create_group(self, group_name: str, encoded_user_ids: Optional[List[str]] = None):
if encoded_user_ids is None:
encoded_user_ids = [self.dataset_populator.user_id()]
user_ids = encoded_user_ids
payload = {
"name": group_name,
"user_ids": user_ids,
}
using_requirement("admin")
response = self._post("groups", payload, admin=True, json=True)
self._assert_status_code_is(response, 200)
group = response.json()[0] # POST /api/groups returns a list
return group
def _get_group_users(self, encoded_group_id: str):
using_requirement("admin")
response = self._get(f"groups/{encoded_group_id}/users", admin=True)
self._assert_status_code_is(response, 200)
group_users = response.json()
return group_users
def _assert_valid_group_user(self, user, assert_id=None):
self._assert_has_keys(user, "id", "email", "url")
if assert_id is not None:
assert user["id"] == assert_id
| null |
1,342 |
# Copyright 2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import tqdm
import os
import numpy as np
import nnabla as nn
import nnabla.logger as logger
import nnabla.monitor as M
import nnabla.functions as F
import nnabla.parametric_functions as PF
import nnabla.solvers as S
from nnabla.ext_utils import get_extension_context
from args import get_args
from models import MLP
from datasets import point_cloud_data_source, point_cloud_data_iterator
import utils
def points_loss(y):
return F.sum(y ** 2.0).apply(persistent=True)
def normals_loss(y, x, n, with_normals):
loss_normals = 0.0
    if with_normals:
grads = nn.grad([y], [x])
g = grads[0]
loss_normals = F.sum((g - n) ** 2.0, axis=[1]) ** 0.5
loss_normals = F.sum(loss_normals).apply(persistent=True)
return loss_normals
def eikonal_reg(x, r, model, volume_factor):
# volumetric space and neighbor points
    xu = (2 * volume_factor) * F.rand(shape=x.shape) - volume_factor
xn = x + r * F.randn(shape=x.shape)
xa = F.concatenate(*[xu, xn], axis=0).apply(need_grad=True)
ya = model(xa)
grads = nn.grad([ya], [xa])
norms = [F.sum(g ** 2.0, axis=[1]) ** 0.5 for g in grads]
gp = sum([F.mean((n - 1.0) ** 2.0) for n in norms])
return gp.apply(persistent=True)
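# Note (added): eikonal_reg encourages the learned implicit function to behave
# like a signed distance field by penalizing gradient norms away from 1, i.e. it
# computes mean((||dy/dx||_2 - 1)^2) over points drawn uniformly in the
# [-volume_factor, volume_factor] cube and points perturbed around the samples.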
def feed_data(inputs, di, with_normals):
x, n, r = inputs
pts, nrm, rad = di.next()
x.d = pts
r.d = rad
if with_normals:
n.d = nrm
def METHOD_NAME(model, pts_true, grid_size, volume_factor, monitor_distances,
i, save_interval_epoch=1):
if i % save_interval_epoch != 0:
return
pts, vol = utils.compute_pts_vol(model, grid_size, volume_factor)
mesh = utils.create_mesh_from_volume(vol)
pcd = mesh.sample_points_poisson_disk(len(pts_true), seed=412)
pts_pred = np.asarray(pcd.points)
pts_pred = utils.normalize(pts_pred)
# Pair-wise distance
cd0, cd1, cd, hd0, hd1, hd = utils.chamfer_hausdorff_dists(
pts_pred, pts_true)
for m, d in zip(monitor_distances, [cd0, cd1, cd, hd0, hd1, hd]):
m.add(i, d)
def save_params(monitor_path, i, save_interval_epoch=1):
if i % save_interval_epoch != 0:
return
nn.save_parameters(os.path.join(monitor_path, "param_{:05d}.h5".format(i)))
def main(args):
# Context
ctx = get_extension_context("cudnn", device_id=args.device_id)
nn.set_default_context(ctx)
# Network
model = MLP(args.dims, args.ldims)
# points loss
x = nn.Variable([args.batch_size, 3]).apply(need_grad=True)
y = model(x)
loss_points = points_loss(y)
# normals loss
n = nn.Variable([args.batch_size, 3])
loss_normals = normals_loss(y, x, n, args.with_normals)
# eikonal regularization
r = nn.Variable([args.batch_size, 1])
reg_eikonal = eikonal_reg(x, r, model, args.volume_factor)
# total loss
loss = loss_points + args.tau * loss_normals + args.lam * reg_eikonal
# Dataset (input is normalized in [-1, 1])
ds = point_cloud_data_source(
args.fpath, args.knn, args.test_rate, test=False)
di = point_cloud_data_iterator(ds, args.batch_size)
ds = point_cloud_data_source(args.fpath, -1, test=True)
pts_true = ds.points
# Solver
solver = S.Adam(args.learning_rate, args.beta0, args.beta1)
solver.set_parameters(nn.get_parameters())
# Monitor
monitor = M.Monitor(args.monitor_path)
monitor_time = M.MonitorTimeElapsed(
"Training time", monitor, interval=100, verbose=False)
monitor_points_loss = M.MonitorSeries(
"Points loss", monitor, interval=100, verbose=False)
monitor_normals_loss = M.MonitorSeries(
"Normals loss", monitor, interval=100, verbose=False)
monitor_eikonal_reg = M.MonitorSeries(
"Eikonal reg", monitor, interval=100, verbose=False)
monitor_total_loss = M.MonitorSeries(
"Total loss", monitor, interval=100, verbose=False)
monitor_distances = [
M.MonitorSeries("Chamfer Pred True",
monitor, interval=1, verbose=False),
M.MonitorSeries("Chamfer True Pred",
monitor, interval=1, verbose=False),
M.MonitorSeries("Chamfer", monitor,
interval=1, verbose=False),
M.MonitorSeries("Hausdorff Pred True",
monitor, interval=1, verbose=False),
M.MonitorSeries("Hausdorff True Pred",
monitor, interval=1, verbose=False),
M.MonitorSeries("Hausdorff", monitor, interval=1, verbose=False)]
# Train
iter_per_epoch = di.size // args.batch_size
for i in tqdm.tqdm(range(args.train_epoch), desc="train-loop"):
# evaluate
METHOD_NAME(model, pts_true, args.grid_size, args.volume_factor, monitor_distances,
i, args.save_interval_epoch)
for j in range(iter_per_epoch):
# feed data
feed_data([x, n, r], di, args.with_normals)
# zero_grad, forward, backward, update
solver.zero_grad()
loss.forward()
loss.backward(clear_buffer=True)
solver.update()
# monitor
monitor_points_loss.add(i * iter_per_epoch + j, loss_points.d)
monitor_normals_loss.add(
i * iter_per_epoch + j, loss_normals.d) if args.with_normals else None
monitor_eikonal_reg.add(i * iter_per_epoch + j, reg_eikonal.d)
monitor_total_loss.add(i * iter_per_epoch + j, loss.d)
monitor_time.add(i)
# save
save_params(args.monitor_path, i, args.save_interval_epoch)
save_params(args.monitor_path, args.train_epoch)
METHOD_NAME(model, pts_true, args.grid_size, args.volume_factor, monitor_distances,
args.train_epoch)
if __name__ == '__main__':
args = get_args()
utils.save_args(args)
main(args)
| null |
1,343 |
# Copyright 2018 Google, Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Import('*')
if env['CONF']['USE_SYSTEMC'] and GetOption('with_systemc_tests'):
from gem5_scons import Transform
import os
import os.path
import json
src = str(Dir('.').srcdir)
class SystemCTest(object):
def __init__(self, dirname, name):
self.name = name
self.reldir = os.path.relpath(dirname, src)
self.target = os.path.join(self.reldir, name)
self.sources = []
self.deps = []
self.compile_only = False
def add_source(self, source):
self.sources.append(os.path.join(self.reldir, source))
def METHOD_NAME(self, sources):
for source in sources:
self.sources.append(os.path.join(self.reldir, '..', source))
def properties(self):
return {
'name' : self.name,
'path' : self.reldir,
'compile_only' : self.compile_only,
'deps' : self.deps
}
test_dir = Dir('.')
class SystemCTestBin(Executable):
def __init__(self, test):
all_sources = test.sources + [with_tag('main')]
super().__init__(test.target, *all_sources)
self.reldir = test.reldir
self.test_deps = test.deps
@classmethod
def declare_all(cls, env):
env = env.Clone()
# Turn off extra warnings and Werror for the tests.
to_remove = ['-Wall', '-Wundef', '-Wextra', '-Werror']
env['CCFLAGS'] = \
list(filter(lambda f: f not in to_remove, env['CCFLAGS']))
env.Append(CPPPATH=test_dir.Dir('include'))
env.Append(LIBPATH=['${BUILDDIR}'], LIBS=['gem5_${ENV_LABEL}'])
env.AddLocalRPATH('${BUILDDIR}')
env['OBJSUFFIX'] = '.sc' + env['OBJSUFFIX'][1:]
env['SHOBJSUFFIX'] = '.sc' + env['OBJSUFFIX'][1:]
super().declare_all(env)
def declare(self, env):
test_bin, _u = super().declare(env)
test_dir = self.dir.Dir(self.reldir)
for dep in self.test_deps:
env.Depends(test_bin, test_dir.File(dep))
return test_bin
tests = []
def new_test(dirname, name):
test = SystemCTest(dirname, name)
tests.append(test)
return test
def scan_dir_for_tests(subdir):
subdir_src = Dir('.').srcdir.Dir(subdir)
for root, dirs, files in os.walk(str(subdir_src)):
# If there's a 'DONTRUN' file in this directory, skip it and any
# child directories.
if 'DONTRUN' in files:
del dirs[:]
return
endswith = lambda sfx: list(filter(
lambda n: n.endswith(sfx), files))
cpps = endswith('.cpp')
if not cpps:
continue
def get_entries(fname):
with open(os.path.join(root, fname)) as content:
                    lines = content.readlines()
                    # Get rid of leading and trailing whitespace.
                    lines = map(lambda x: x.strip(), lines)
# Get rid of blank lines.
lines = list(filter(lambda x: x, lines))
return lines
            # If there's only one source file, then that file's name is the test
# name, and it's the source for that test.
if len(cpps) == 1:
cpp = cpps[0]
test = new_test(root, os.path.splitext(cpp)[0])
test.add_source(cpp)
            # Otherwise, expect there to be a file that ends in .f. That file's
            # name is the test name, and it will list the source files with
            # one preceding path component.
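            # (Illustrative, not taken from the repository: an 'example.f' next
            # to the sources might contain lines such as 'example/test_main.cpp'.)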
else:
fs = endswith('.f')
if len(fs) != 1:
print("In %s, expected 1 *.f file, but found %d.",
root, len(fs))
for f in fs:
print(os.path.join(root, f))
return
f = fs[0]
test = new_test(root, os.path.splitext(f)[0])
# Add all the sources to this test.
test.METHOD_NAME(get_entries(f))
if 'COMPILE' in files:
test.compile_only = True
if 'DEPS' in files:
test.deps = get_entries('DEPS')
scan_dir_for_tests('systemc')
scan_dir_for_tests('tlm')
def build_tests_json(target, source, env):
data = { test.target : test.properties() for test in tests }
with open(str(target[0]), "w") as tests_json:
json.dump(data, tests_json)
AlwaysBuild(env.Command(File('tests.json'), None,
MakeAction(build_tests_json, Transform("TESTJSON"))))
for test in tests:
SystemCTestBin(test)
| null |
1,344 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from __future__ import annotations
import json
from collections import OrderedDict
from copy import deepcopy
from pathlib import Path
from typing import Any, Dict, Optional, Union
import yaml
def build_arch_config(config_dict: Dict[str, Any]) -> ArchConfig:
"""Build an `ArchConfig` object from a sampled config dictionary.
Args:
config_dict: Config dictionary
Returns:
`ArchConfig` object.
"""
ARCH_CONFIGS = {"default": ArchConfig, "config_list": ArchConfigList}
config_type = config_dict.get("_config_type", "default")
return ARCH_CONFIGS[config_type](config_dict)
class ArchConfig:
"""Store architecture configs."""
def __init__(self, config_dict: Dict[str, Union[dict, float, int, str]]) -> None:
"""Initialize the class.
Args:
config_dict: Configuration dictionary.
"""
# Set that stores all parameters used to build the model instance
self._used_params = set()
# Original config dictionary
self._config_dict = deepcopy(config_dict)
# ArchConfig nodes
self.nodes = OrderedDict()
for param_name, param in self._config_dict.items():
if isinstance(param, dict):
self.nodes[param_name] = build_arch_config(param)
else:
self.nodes[param_name] = param
def __repr__(self) -> str:
class ArchConfigJsonEncoder(json.JSONEncoder):
def METHOD_NAME(self, o):
if isinstance(o, ArchConfig):
return o.to_dict(remove_metadata_info=True)
return super().METHOD_NAME(o)
cls_name = self.__class__.__name__
return f"{cls_name}({json.dumps(self, cls=ArchConfigJsonEncoder, indent=4)})"
def __contains__(self, param_name: str) -> bool:
return param_name in self.nodes
def get_used_params(self) -> Dict[str, Union[Dict, bool]]:
"""Get the parameter usage tree.
Terminal nodes with value `True` represent architecture parameters that were used
by calling `ArchConfig.pick(param_name)`.
Returns:
Used parameters.
"""
used_params = OrderedDict()
for param_name, param in self.nodes.items():
used_params[param_name] = param_name in self._used_params
if isinstance(param, ArchConfig):
used_params[param_name] = param.get_used_params()
return used_params
def pick(self, param_name: str, METHOD_NAME: Optional[Any] = None, record_usage: Optional[bool] = True) -> Any:
"""Pick an architecture parameter, possibly recording its usage.
Args:
param_name: Architecture parameter name
default: Default value to return if parameter is not found. If `None`, an
exception is raised.
record_usage: If this parameter should be recorded as 'used' in
`ArchConfig._used_params`.
Returns:
Parameter value.
"""
if param_name in self.nodes:
param_value = self.nodes[param_name]
else:
if METHOD_NAME is None:
raise ValueError(
f"Architecture parameter {param_name} not found in config and "
f"no default value provided. Available parameters are: {self.nodes.keys()}"
)
param_value = METHOD_NAME
if record_usage:
self._used_params.add(param_name)
return param_value
def to_dict(self, remove_metadata_info: Optional[bool] = False) -> OrderedDict:
"""Convert `ArchConfig` object to an ordered dictionary.
Args:
remove_metadata_info: If keys used to store extra metadata should be removed.
Returns:
Ordered dictionary.
"""
return OrderedDict(
(k, v.to_dict(remove_metadata_info)) if isinstance(v, ArchConfig) else (k, v)
for k, v in self.nodes.items()
if not remove_metadata_info or not k.startswith("_")
)
def to_file(self, path: str) -> None:
"""Save `ArchConfig` object to a file.
Args:
path: Path to save the file to.
"""
path = Path(path)
path = path.parent / f"{path.name}.json" if path.suffix == "" else path
d = self.to_dict()
if path.suffix == ".yaml":
yaml.dump(d, open(path, "w", encoding="utf-8"), default_flow_style=False, sort_keys=False)
elif path.suffix == ".json":
json.dump(d, open(path, "w", encoding="utf-8"), indent=4)
else:
raise ValueError(f"Unsupported file extension {path.suffix}")
@classmethod
def from_file(cls, path: str) -> ArchConfig:
"""Load `ArchConfig` object from a file.
Args:
path: Path to load the file from.
Returns:
`ArchConfig` object.
"""
path = Path(path)
path = path.parent / f"{path.name}.json" if path.suffix == "" else path
if path.suffix == ".yaml":
d = yaml.load(open(path, "r", encoding="utf-8"), Loader=yaml.Loader)
elif path.suffix == ".json":
d = json.load(open(path, "r", encoding="utf-8"))
else:
raise ValueError(f"Unsupported file extension {path.suffix}")
return build_arch_config(d)
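# Usage sketch (added; the parameter names and values below are made up):
#
#   config = build_arch_config({"hidden_dim": 128, "block": {"kernel_size": 3}})
#   hidden_dim = config.pick("hidden_dim")             # returns 128 and records usage
#   kernel = config.pick("block").pick("kernel_size")  # nested dicts become ArchConfig nodes
#   config.get_used_params()                           # {"hidden_dim": True, "block": {"kernel_size": True}}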
class ArchConfigList(ArchConfig):
"""Store a list of architecture configs."""
def __init__(self, config: OrderedDict):
"""Initialize the class.
Args:
config: Configuration dictionary.
"""
super().__init__(config)
assert "_configs" in config
assert "_repeat_times" in config
self.max_size = config["_repeat_times"]
def __len__(self) -> int:
self._used_params.add("_repeat_times")
return self.max_size
def __getitem__(self, idx: int) -> ArchConfig:
if 0 <= idx < len(self):
self._used_params.add("_repeat_times")
return self.nodes["_configs"].pick(str(idx))
raise IndexError
def __iter__(self):
yield from [self[i] for i in range(len(self))]
def pick(self, param_name: str, record_usage: Optional[bool] = True) -> None:
raise ValueError(
"Attempted to use .pick in an ArchConfigList instance. "
"Select a config first using indexing (e.g `config_list[i]`)."
)
def to_dict(self, remove_metadata_info: Optional[bool] = False) -> OrderedDict:
if remove_metadata_info:
return [
self.nodes["_configs"].pick(str(i), record_usage=False).to_dict(remove_metadata_info)
for i in range(self.max_size)
][:self.max_size]
return super().to_dict(remove_metadata_info)
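# --- Illustrative usage sketch (not part of the original module) ---------------
# Assumes `build_arch_config` (the factory referenced by `from_file` above) turns a
# plain nested dict into an `ArchConfig`; the parameter names here are hypothetical.
if __name__ == "__main__":
    config = build_arch_config({"hidden_dim": 128, "num_layers": 3})
    hidden_dim = config.pick("hidden_dim")          # recorded as 'used'
    dropout = config.pick("dropout", 0.1)           # missing key falls back to the default
    print(config.get_used_params())                 # e.g. hidden_dim -> True, num_layers -> False
    config.to_file("arch_config.json")              # round-trips via ArchConfig.from_file()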
| null |
1,345 |
"""
@file
@brief Process project data, scaling keyframe X coordinates by the given factor
@author Jonathan Thomas <[email protected]>
@author FeRD (Frank Dana) <[email protected]>
@section LICENSE
Copyright (c) 2008-2020 OpenShot Studios, LLC
(http://www.openshotstudios.com). This file is part of
OpenShot Video Editor (http://www.openshot.org), an open-source project
dedicated to delivering high quality video editing and animation solutions
to the world.
OpenShot Video Editor is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenShot Video Editor is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
"""
from classes.logger import log
class KeyframeScaler:
"""This factory class produces scaler objects which, when called,
will apply the assigned scaling factor to the keyframe points
in a project data dictionary. Keyframe X coordinate values are
multiplied by the scaling factor, except X=1 (because the first
frame never changes)"""
def METHOD_NAME(self, value: float) -> int:
"""Scale value by some factor, except for 1 (leave that alone)"""
if value == 1.0:
return value
# Round to nearest INT
return round(value * self._scale_factor)
def _update_prop(self, prop: dict, scale_y = False):
"""To keep keyframes at the same time in video,
update frame numbers to the new framerate.
scale_y: if the y coordinate also represents a frame number,
this flag will scale both x and y.
"""
# Create a list of lists of keyframe points for this prop
if "red" in prop:
# It's a color, one list of points for each channel
keyframes = [prop[color].get("Points", []) for color in prop]
else:
# Not a color, just a single list of points
keyframes = [prop.get("Points", [])]
for k in keyframes:
if (scale_y):
# Y represents a frame number. Scale it too
log.debug("Updating x and y coordinates of time keyframes")
[point["co"].update({
"X": self.METHOD_NAME(point["co"].get("X", 0.0)),
"Y": self.METHOD_NAME(point["co"].get("Y", 0.0))
}) for point in k if "co" in point]
else:
# Scale the X coordinate (frame #) by the stored factor
[point["co"].update({
"X": self.METHOD_NAME(point["co"].get("X", 0.0)),
}) for point in k if "co" in point]
def _process_item(self, item: dict):
"""Process all the dict sub-members of the current dict"""
props = [ prop for prop in item
if isinstance(item[prop], dict)]
for prop_name in props:
self._update_prop(item[prop_name], scale_y=prop_name == "time")
def __call__(self, data: dict) -> dict:
"""Apply the stored scaling factor to a project data dict"""
# Look for keyframe objects in clips
for clip in data.get('clips', []):
self._process_item(clip)
# Also update any effects applied to the clip
for effect in clip.get("effects", []):
self._process_item(effect)
# Look for keyframe objects in project effects (transitions)
for effect in data.get('effects', []):
self._process_item(effect)
# return the scaled project data
return data
def __init__(self, factor: float):
"""Store the scale factor assigned to this instance"""
self._scale_factor = factor
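# --- Illustrative usage sketch (not part of the original module) ---------------
# The structure below mirrors what __call__ walks: clips/effects whose keyframed
# properties hold "Points" lists. Values are hypothetical; a factor of 2.0 would
# correspond to e.g. retiming a 15 fps project to 30 fps.
if __name__ == "__main__":
    project_data = {
        "clips": [{
            "alpha": {"Points": [{"co": {"X": 1, "Y": 1.0}},
                                 {"co": {"X": 30, "Y": 0.0}}]},
            "effects": [],
        }],
        "effects": [],
    }
    scaler = KeyframeScaler(factor=2.0)
    scaled = scaler(project_data)
    # X=1 is left untouched, X=30 becomes 60; Y only changes for "time" properties
    print(scaled["clips"][0]["alpha"]["Points"][1]["co"])   # {'X': 60, 'Y': 0.0}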
| null |
1,346 |
from future.moves.urllib.parse import urlparse
from django.db import connection
from django.db.models import Sum
import requests
import logging
from django.apps import apps
from api.caching.utils import storage_usage_cache
from framework.postcommit_tasks.handlers import enqueue_postcommit_task
from api.caching import settings as cache_settings
from framework.celery_tasks import app
from website import settings
logger = logging.getLogger(__name__)
def METHOD_NAME():
# TODO: this should get the varnish servers from HAProxy or a setting
return settings.VARNISH_SERVERS
def get_bannable_urls(instance):
from osf.models import Comment
bannable_urls = []
parsed_absolute_url = {}
if not hasattr(instance, 'absolute_api_v2_url'):
        logger.warning('Tried to ban {}:{} but it didn\'t have an absolute_api_v2_url method'.format(instance.__class__, instance))
return [], ''
for host in METHOD_NAME():
# add instance url
varnish_parsed_url = urlparse(host)
parsed_absolute_url = urlparse(instance.absolute_api_v2_url)
url_string = '{scheme}://{netloc}{path}.*'.format(
scheme=varnish_parsed_url.scheme,
netloc=varnish_parsed_url.netloc,
path=parsed_absolute_url.path,
)
bannable_urls.append(url_string)
if isinstance(instance, Comment):
try:
parsed_target_url = urlparse(instance.target.referent.absolute_api_v2_url)
except AttributeError:
# some referents don't have an absolute_api_v2_url
# I'm looking at you NodeWikiPage
# Note: NodeWikiPage has been deprecated. Is this an issue with WikiPage/WikiVersion?
pass
else:
url_string = '{scheme}://{netloc}{path}.*'.format(
scheme=varnish_parsed_url.scheme,
netloc=varnish_parsed_url.netloc,
path=parsed_target_url.path,
)
bannable_urls.append(url_string)
try:
parsed_root_target_url = urlparse(instance.root_target.referent.absolute_api_v2_url)
except AttributeError:
# some root_targets don't have an absolute_api_v2_url
pass
else:
url_string = '{scheme}://{netloc}{path}.*'.format(
scheme=varnish_parsed_url.scheme,
netloc=varnish_parsed_url.netloc,
path=parsed_root_target_url.path,
)
bannable_urls.append(url_string)
return bannable_urls, parsed_absolute_url.hostname
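# Illustrative helper (not part of the OSF codebase): shows how the ban pattern built
# above is assembled -- scheme and netloc come from the Varnish host, the path from
# the resource's API URL. The URLs are hypothetical.
def _example_ban_pattern():
    varnish_parsed_url = urlparse('http://varnish.local:8080')
    parsed_absolute_url = urlparse('https://api.osf.io/v2/nodes/abc12/')
    return '{scheme}://{netloc}{path}.*'.format(
        scheme=varnish_parsed_url.scheme,
        netloc=varnish_parsed_url.netloc,
        path=parsed_absolute_url.path,
    )   # -> 'http://varnish.local:8080/v2/nodes/abc12/.*'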
@app.task(max_retries=5, default_retry_delay=60)
def ban_url(instance):
# TODO: Refactor; Pull url generation into postcommit_task handling so we only ban urls once per request
timeout = 0.3 # 300ms timeout for bans
if settings.ENABLE_VARNISH:
bannable_urls, hostname = get_bannable_urls(instance)
for url_to_ban in set(bannable_urls):
try:
response = requests.request(
'BAN', url_to_ban, timeout=timeout, headers=dict(
Host=hostname,
),
)
except Exception as ex:
logger.error('Banning {} failed: {}'.format(
url_to_ban,
                    ex,
))
else:
if not response.ok:
logger.error('Banning {} failed: {}'.format(
url_to_ban,
response.text,
))
else:
logger.info('Banning {} succeeded'.format(
url_to_ban,
))
@app.task(max_retries=5, default_retry_delay=10)
def update_storage_usage_cache(target_id, target_guid, per_page=500000):
if not settings.ENABLE_STORAGE_USAGE_CACHE:
return
sql = """
SELECT count(size), sum(size) from
(SELECT size FROM osf_basefileversionsthrough AS obfnv
LEFT JOIN osf_basefilenode file ON obfnv.basefilenode_id = file.id
LEFT JOIN osf_fileversion version ON obfnv.fileversion_id = version.id
LEFT JOIN django_content_type type on file.target_content_type_id = type.id
WHERE file.provider = 'osfstorage'
AND type.model = 'abstractnode'
AND file.deleted_on IS NULL
AND file.target_object_id=%s
ORDER BY version.id
LIMIT %s OFFSET %s) file_page
"""
count = per_page
offset = 0
storage_usage_total = 0
with connection.cursor() as cursor:
while count:
cursor.execute(sql, [target_id, per_page, offset])
result = cursor.fetchall()
storage_usage_total += int(result[0][1]) if result[0][1] else 0
count = int(result[0][0]) if result[0][0] else 0
offset += count
key = cache_settings.STORAGE_USAGE_KEY.format(target_id=target_guid)
storage_usage_cache.set(key, storage_usage_total, settings.STORAGE_USAGE_CACHE_TIMEOUT)
def update_storage_usage(target):
Preprint = apps.get_model('osf.preprint')
if settings.ENABLE_STORAGE_USAGE_CACHE and not isinstance(target, Preprint) and not target.is_quickfiles:
enqueue_postcommit_task(update_storage_usage_cache, (target.id, target._id,), {}, celery=True)
def update_storage_usage_with_size(payload):
BaseFileNode = apps.get_model('osf.basefilenode')
AbstractNode = apps.get_model('osf.abstractnode')
metadata = payload.get('metadata') or payload.get('destination')
if not metadata.get('nid'):
return
target_node = AbstractNode.load(metadata['nid'])
if target_node.is_quickfiles:
return
action = payload['action']
provider = metadata.get('provider', 'osfstorage')
target_file_id = metadata['path'].replace('/', '')
target_file_size = metadata.get('sizeInt', 0)
if target_node.storage_limit_status is settings.StorageLimits.NOT_CALCULATED:
return update_storage_usage(target_node)
current_usage = target_node.storage_usage
target_file = BaseFileNode.load(target_file_id)
if target_file and action in ['copy', 'delete', 'move']:
target_file_size = target_file.versions.aggregate(Sum('size'))['size__sum'] or target_file_size
if action in ['create', 'update', 'copy'] and provider == 'osfstorage':
current_usage += target_file_size
elif action == 'delete' and provider == 'osfstorage':
current_usage = max(current_usage - target_file_size, 0)
    elif action == 'move':
source_node = AbstractNode.load(payload['source']['nid']) # Getting the 'from' node
source_provider = payload['source']['provider']
if target_node == source_node and source_provider == provider:
return # Its not going anywhere.
if source_provider == 'osfstorage' and not source_node.is_quickfiles:
if source_node.storage_limit_status is settings.StorageLimits.NOT_CALCULATED:
return update_storage_usage(source_node)
source_node_usage = source_node.storage_usage
source_node_usage = max(source_node_usage - target_file_size, 0)
key = cache_settings.STORAGE_USAGE_KEY.format(target_id=source_node._id)
storage_usage_cache.set(key, source_node_usage, settings.STORAGE_USAGE_CACHE_TIMEOUT)
current_usage += target_file_size
if provider != 'osfstorage':
return # We don't want to update the destination node if the provider isn't osfstorage
else:
return
key = cache_settings.STORAGE_USAGE_KEY.format(target_id=target_node._id)
storage_usage_cache.set(key, current_usage, settings.STORAGE_USAGE_CACHE_TIMEOUT)
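# Illustrative only (not part of the OSF codebase): a sketch of the payload shape that
# update_storage_usage_with_size() reads above. All values are hypothetical; 'move'
# actions additionally carry a payload['source'] dict with 'nid' and 'provider' keys.
_EXAMPLE_STORAGE_PAYLOAD = {
    'action': 'create',                  # create/update/copy add to usage, delete subtracts
    'metadata': {
        'nid': 'abc12',                  # guid of the target AbstractNode
        'provider': 'osfstorage',        # only osfstorage events change the cached total
        'path': '/5f3e2a1b9c',           # file id; slashes are stripped before lookup
        'sizeInt': 1024,                 # file size in bytes
    },
}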
| null |
1,347 |
# Copyright 2017-2021 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import click
import json
import sys
from prettytable import prettytable
from src.api.dts import DTS
from src.model.dts_model import DtsEncoder
class DtsOperationsManager:
_DTS_TABLE_HEADERS = ['ID', 'Name', 'Status']
_PREF_DELIMITER = "="
def __init__(self):
pass
def create(self, url, name, schedulable, prefixes, preferences, json_out):
registry = DTS.create(url, name, schedulable, prefixes, self._convert_preferences_list_to_dict(preferences))
self._print_registry(registry, json_out)
def list(self, registry_id, json_out):
if registry_id:
registry = DTS.load(registry_id)
self._print_registry(registry, json_out)
else:
registries = DTS.load_all()
if json_out:
self._print_dts_json(registries)
elif registries:
self._print_registries_prettytable(registries)
else:
click.echo('No data transfer services are available.')
def delete(self, registry_id, json_out):
registry = DTS.delete(registry_id)
self._print_registry(registry, json_out)
def upsert_preferences(self, registry_id, preferences_list, json_out):
if not preferences_list:
click.echo('Preferences should not be empty!', err=True)
sys.exit(1)
updated_registry = DTS.update_preferences(registry_id, self._convert_preferences_list_to_dict(preferences_list))
self._print_registry(updated_registry, json_out)
def delete_preferences(self, registry_id, preferences_keys, json_out):
if not preferences_keys:
click.echo('Preferences keys to be removed should not be empty!', err=True)
sys.exit(1)
updated_registry = DTS.delete_preferences(registry_id, preferences_keys)
self._print_registry(updated_registry, json_out)
def _convert_preferences_list_to_dict(self, preferences_list):
preferences_dict = {}
for preference_entry in preferences_list:
preference_value_and_key = preference_entry.split(self._PREF_DELIMITER, 1)
if len(preference_value_and_key) != 2:
click.echo('Error [%s]: preference declaration should contain a delimiter!' % preference_entry, err=True)
sys.exit(1)
else:
preferences_dict[preference_value_and_key[0]] = preference_value_and_key[1]
return preferences_dict
def _print_registry(self, registry, json_out):
if json_out:
self._print_dts_json(registry)
else:
self._print_single_registry_pretty(registry)
def _print_dts_json(self, object):
click.echo(json.dumps(object, cls=DtsEncoder))
def _print_single_registry_pretty(self, registry):
registry_info_table = prettytable.PrettyTable()
registry_info_table.field_names = ['key', 'value']
registry_info_table.align = 'l'
registry_info_table.set_style(12)
registry_info_table.header = False
registry_info_table.add_row(['ID:', registry.id])
registry_info_table.add_row(['Name:', registry.name])
registry_info_table.add_row(['URL:', registry.url])
registry_info_table.add_row(['Created:', registry.created_date])
registry_info_table.add_row(['Schedulable:', registry.schedulable])
registry_info_table.add_row(['Status:', registry.status])
registry_info_table.add_row(['Last heartbeat:', registry.heartbeat or 'No heartbeat was received yet'])
click.echo(registry_info_table)
self._print_list_as_table('Prefixes', registry.prefixes)
self._print_list_as_table('Preferences', self.get_flat_preferences(registry))
def get_flat_preferences(self, registry):
flat_preferences = []
for preference, value in registry.preferences.items():
flat_preferences.append(preference + ': ' + value)
return flat_preferences
def _print_list_as_table(self, header_name, elements):
click.echo()
if elements:
self.METHOD_NAME('{}:'.format(header_name))
for prefix in elements:
click.echo(prefix)
else:
click.echo('No {} specified.'.format(header_name.lower()))
def METHOD_NAME(self, title, line=True):
click.echo(title)
if line:
for i in title:
click.echo('-', nl=False)
click.echo('')
def _print_registries_prettytable(self, registries):
table = self._init_table()
for registry in registries:
table.add_row(self._convert_registry_to_prettytable_row(registry))
click.echo(table)
def _init_table(self):
table = prettytable.PrettyTable()
table.field_names = self._DTS_TABLE_HEADERS
table.align = "l"
table.header = True
return table
def _convert_registry_to_prettytable_row(self, registry):
return [registry.id, registry.name, registry.status]
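# --- Illustrative usage sketch (not part of the original CLI) ------------------
# Preference arguments arrive as KEY=VALUE strings and are split on the first '='
# only, so values may themselves contain '='. The preference names are hypothetical.
if __name__ == '__main__':
    manager = DtsOperationsManager()
    prefs = manager._convert_preferences_list_to_dict(
        ['dts.heartbeat.enabled=true', 'dts.launch.cmd=java -jar dts.jar --flag=1'])
    print(prefs)   # {'dts.heartbeat.enabled': 'true', 'dts.launch.cmd': 'java -jar dts.jar --flag=1'}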
| null |
1,348 |
#!/usr/bin/env python3
# Copyright 2020 Stanford University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import glob
import os
import sys
import traceback
import chart_metg
def get_machine_parameters(machine, processor_kind, resource):
if machine == 'cori':
assert processor_kind == 'cpu'
if resource == 'flops':
return {'cores': 32, 'peak_flops': 1.263719e+12, 'peak_bytes': None}
elif resource == 'bytes':
return {'cores': 32, 'peak_flops': None, 'peak_bytes': 7.902120e+10}
else:
assert False
elif machine == 'daint':
if processor_kind == 'cpu':
if resource == 'flops':
return {'cores': 12, 'peak_flops': 5.726025e+11, 'peak_bytes': None} # CPU
elif resource == 'bytes':
assert False
else:
assert False
elif processor_kind == 'gpu':
if resource == 'flops':
# return {'cores': 1, 'peak_flops': 4.351454e+12, 'peak_bytes': None} # GPU 1 rank
return {'cores': 1, 'peak_flops': 4.761341e+12, 'peak_bytes': None} # GPU 12 ranks
elif resource == 'bytes':
assert False
else:
assert False
else:
assert False
else:
assert False
def parse_filename(filename):
fields = os.path.splitext(os.path.basename(filename))[0].split('_')
graph_idx = fields.index('ngraphs')
type_idx = fields.index('type')
try:
radix_idx = fields.index('radix')
except ValueError:
radix_idx = None
try:
imbalance_idx = fields.index('imbalance')
except ValueError:
imbalance_idx = None
try:
comm_idx = fields.index('comm')
except ValueError:
comm_idx = None
try:
cores_per_rank_idx = fields.index('coresperrank')
except ValueError:
cores_per_rank_idx = None
node_idx = fields.index('nodes')
return {
'name': ' '.join(fields[:graph_idx]),
'processor_kind': 'gpu' if 'gpu' in fields[:graph_idx] else 'cpu',
'ngraphs': int(' '.join(fields[graph_idx+1:type_idx])),
'type': ' '.join(fields[type_idx+1:radix_idx or imbalance_idx or comm_idx or cores_per_rank_idx or node_idx]),
'radix': radix_idx and ' '.join(fields[radix_idx+1:imbalance_idx or comm_idx or cores_per_rank_idx or node_idx]),
'imbalance': imbalance_idx and ' '.join(fields[imbalance_idx+1:comm_idx or cores_per_rank_idx or node_idx]),
'comm': comm_idx and ' '.join(fields[comm_idx+1:cores_per_rank_idx or node_idx]),
'cores_per_rank': cores_per_rank_idx and ' '.join(fields[cores_per_rank_idx+1:node_idx]),
'nodes': int(fields[node_idx+1]),
}
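# Illustrative only (hypothetical log name): parse_filename() decodes the
# underscore-separated fields of a run log, e.g.
#
#   parse_filename('mpi_gpu_ngraphs_1_type_trivial_nodes_2.log')
#   -> {'name': 'mpi gpu', 'processor_kind': 'gpu', 'ngraphs': 1, 'type': 'trivial',
#       'radix': None, 'imbalance': None, 'comm': None, 'cores_per_rank': None,
#       'nodes': 2}
#
# Optional markers (radix/imbalance/comm/coresperrank) that are absent come back as None.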
class Parser:
def filter(self, row):
return True
def process(self, row, data):
raise Exception('process() must be customized by the subclass')
def METHOD_NAME(self):
raise Exception('error_value() must be customized by the subclass')
def complete(self):
raise Exception('complete() must be customized by the subclass')
def parse(self, machine, resource, threshold, summary, verbose):
has_exception = False
log_filenames = glob.glob('**/*.log', recursive=False)
for filename in log_filenames:
row = parse_filename(filename)
if not self.filter(row):
continue
params = get_machine_parameters(machine, row['processor_kind'], resource)
try:
data = chart_metg.analyze(filename, row['ngraphs'], row['nodes'], params['cores'], threshold, params['peak_flops'], params['peak_bytes'], summary=summary)
except Exception as e:
if verbose:
print('%s:' % filename, file=sys.stderr)
traceback.print_exc(file=sys.stderr)
else:
print('%s: %s: %s' % (filename, type(e).__name__, e), file=sys.stderr)
data = (self.METHOD_NAME(),)
has_exception = True
self.process(row, *data)
self.complete()
if has_exception and not verbose:
print('Errors were encountered while parsing. Run with -v to see full error messages.', file=sys.stderr)
print(file=sys.stderr)
| null |
1,349 |
import IMP
import IMP.test
import IMP.core
import IMP.container
import IMP.algebra
import math
def METHOD_NAME(values, bin_low, bin_high, bin_size):
hist = [0] * (int)((bin_high - bin_low) / bin_size)
for v in values:
bin_index = (int)((v - bin_low) / bin_size)
hist[bin_index] += 1
return hist
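# Illustrative only (hypothetical values, not part of the original test): the helper
# above builds a fixed-width histogram by truncating (value - bin_low) / bin_size to a
# bin index, e.g.
#
#   METHOD_NAME([0.5, 1.5, 1.7, -0.5], -1.0, 2.0, 1.0)  ->  [1, 1, 2]
#
# Values equal to bin_high would index one past the last bin, so inputs are expected to
# stay strictly below the upper bound.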
class Tests(IMP.test.TestCase):
def setup(self, m):
# Make reference particles
ref = [IMP.Particle(m) for x in range(3)]
IMP.core.XYZ.setup_particle(ref[0], IMP.algebra.Vector3D(10,10,0))
IMP.core.XYZ.setup_particle(ref[1], IMP.algebra.Vector3D(10,14,0))
IMP.core.XYZ.setup_particle(ref[2], IMP.algebra.Vector3D(10,7,0))
# Make images
im = [IMP.Particle(m) for x in range(3)]
for i in im:
IMP.core.XYZ.setup_particle(i, IMP.algebra.Vector3D(0,0,0))
# Tell each image what its reference is
for i, r in zip(im, ref):
IMP.core.Reference.setup_particle(i, r)
# Create transformation
tr = IMP.algebra.Transformation3D(
IMP.algebra.get_rotation_about_axis(
IMP.algebra.get_basis_vector_3d(2), math.pi/6.0),
IMP.algebra.Vector3D(8, 0, 0))
sm = IMP.core.TransformationSymmetry(tr)
# Create the constraint
lsc = IMP.container.ListSingletonContainer(m, im)
c = IMP.container.SingletonsConstraint(sm, None, lsc)
m.add_score_state(c)
return ref, im, sm
def test_rotation(self):
"""Test TransformationSymmetryMover rotation"""
m = IMP.Model()
ref, im, sm = self.setup(m)
m.update()
orig_im_coor = [IMP.core.XYZ(x).get_coordinates() for x in im]
mover = IMP.core.TransformationSymmetryMover(m, sm, ref[0], 0.,
math.pi)
angles = [[], []]
for iteration in range(50):
mover.propose()
m.update()
im_coor = [IMP.core.XYZ(x).get_coordinates() for x in im]
# im[0] should not move from original position
self.assertLess(IMP.algebra.get_distance(im_coor[0],
orig_im_coor[0]), 1e-6)
# distances between im[i] and im[j] should not change
for i in range(3):
for j in range(i, 3):
orig_d = IMP.algebra.get_distance(orig_im_coor[i],
orig_im_coor[j])
d = IMP.algebra.get_distance(im_coor[i], im_coor[j])
self.assertAlmostEqual(orig_d, d, delta=1e-6)
# im[1] and im[2] should fully explore circle around im[0]
for num, i in enumerate((1,2)):
angle = math.atan2(im_coor[i][0] - im_coor[0][0],
im_coor[i][1] - im_coor[0][1])
angles[num].append(angle)
mover.accept()
for i in range(2):
hist = METHOD_NAME(angles[i], -math.pi, math.pi, math.pi / 4.)
# Histogram should be flat, but we don't have enough samples
# to assert that
print(hist)
def test_translation(self):
"""Test TransformationSymmetryMover translation"""
m = IMP.Model()
ref, im, sm = self.setup(m)
m.update()
orig_im_coor = [IMP.core.XYZ(x).get_coordinates() for x in im]
mover = IMP.core.TransformationSymmetryMover(m, sm, ref[0], 5., 0.)
displacement = [[], [], []]
for iteration in range(50):
mover.propose()
m.update()
im_coor = [IMP.core.XYZ(x).get_coordinates() for x in im]
# vectors between im[i] and im[j] should not change
for i in range(3):
for j in range(i, 3):
orig_d = orig_im_coor[i] - orig_im_coor[j]
d = im_coor[i] - im_coor[j]
self.assertLess(IMP.algebra.get_distance(orig_d, d), 1e-5)
# each im should fully explore sphere around original position
for i in range(3):
displacement[i].append(IMP.algebra.get_distance(
im_coor[i], orig_im_coor[i]))
mover.reject()
for i in range(3):
hist = METHOD_NAME(displacement[i], 0, 5.0, 1.0)
def volume_for_bin(i):
high_val = i + 1.0
low_val = i
high_volume = 4./3. * math.pi * high_val * high_val * high_val
low_volume = 4./3. * math.pi * low_val * low_val * low_val
return high_volume - low_volume
# Account for increasing volume of sphere with radius
hist = [hist[i] / volume_for_bin(i) for i in range(len(hist))]
# Histogram should be flat, but we don't have enough samples
# to assert that
print(hist)
if __name__ == '__main__':
IMP.test.main()
| null |
1,350 |
import os
import unittest
from tempfile import NamedTemporaryFile
import numpy as np
from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, \
TimeVariable, Domain, Table
from Orange.data.io import TabReader, ExcelReader
from Orange.data.io_util import guess_data_type
from Orange.misc.collections import natural_sorted
class TestTableFilters(unittest.TestCase):
def METHOD_NAME(self):
# should be ContinuousVariable
valuemap, values, coltype = guess_data_type(list(range(1, 100)))
self.assertEqual(ContinuousVariable, coltype)
self.assertIsNone(valuemap)
np.testing.assert_array_equal(np.array(list(range(1, 100))), values)
valuemap, values, coltype = guess_data_type([1, 2, 3, 1, 2, 3])
self.assertEqual(ContinuousVariable, coltype)
self.assertIsNone(valuemap)
np.testing.assert_array_equal([1, 2, 3, 1, 2, 3], values)
valuemap, values, coltype = guess_data_type(
["1", "2", "3", "1", "2", "3"])
self.assertEqual(ContinuousVariable, coltype)
self.assertIsNone(valuemap)
np.testing.assert_array_equal([1, 2, 3, 1, 2, 3], values)
def test_guess_data_type_discrete(self):
# should be DiscreteVariable
valuemap, values, coltype = guess_data_type([1, 2, 1, 2])
self.assertEqual(DiscreteVariable, coltype)
self.assertEqual([1, 2], valuemap)
np.testing.assert_array_equal([1, 2, 1, 2], values)
valuemap, values, coltype = guess_data_type(["1", "2", "1", "2", "a"])
self.assertEqual(DiscreteVariable, coltype)
self.assertEqual(["1", "2", "a"], valuemap)
np.testing.assert_array_equal(['1', '2', '1', '2', 'a'], values)
# just below the threshold for string variable
in_values = list(map(lambda x: str(x) + "a", range(24))) + ["a"] * 76
valuemap, values, coltype = guess_data_type(in_values)
self.assertEqual(DiscreteVariable, coltype)
self.assertEqual(natural_sorted(set(in_values)), valuemap)
np.testing.assert_array_equal(in_values, values)
def test_guess_data_type_string(self):
# should be StringVariable
# too many different values for discrete
in_values = list(map(lambda x: str(x) + "a", range(90)))
valuemap, values, coltype = guess_data_type(in_values)
self.assertEqual(StringVariable, coltype)
self.assertIsNone(valuemap)
np.testing.assert_array_equal(in_values, values)
# more than len(values)**0.7
in_values = list(map(lambda x: str(x) + "a", range(25))) + ["a"] * 75
valuemap, values, coltype = guess_data_type(in_values)
self.assertEqual(StringVariable, coltype)
self.assertIsNone(valuemap)
np.testing.assert_array_equal(in_values, values)
# more than 100 different values - exactly 101
# this is the case when len(values)**0.7 rule would vote for the
# DiscreteVariable
in_values = list(map(lambda x: str(x) + "a", range(100))) + ["a"] * 999
valuemap, values, coltype = guess_data_type(in_values)
self.assertEqual(StringVariable, coltype)
self.assertIsNone(valuemap)
np.testing.assert_array_equal(in_values, values)
def test_guess_data_type_time(self):
in_values = ["2019-10-10", "2019-10-10", "2019-10-10", "2019-10-01"]
valuemap, _, coltype = guess_data_type(in_values)
self.assertEqual(TimeVariable, coltype)
self.assertIsNone(valuemap)
in_values = ["2019-10-10T12:08:51", "2019-10-10T12:08:51",
"2019-10-10T12:08:51", "2019-10-01T12:08:51"]
valuemap, _, coltype = guess_data_type(in_values)
self.assertEqual(TimeVariable, coltype)
self.assertIsNone(valuemap)
in_values = ["2019-10-10 12:08:51", "2019-10-10 12:08:51",
"2019-10-10 12:08:51", "2019-10-01 12:08:51"]
valuemap, _, coltype = guess_data_type(in_values)
self.assertEqual(TimeVariable, coltype)
self.assertIsNone(valuemap)
in_values = ["2019-10-10 12:08", "2019-10-10 12:08",
"2019-10-10 12:08", "2019-10-01 12:08"]
valuemap, _, coltype = guess_data_type(in_values)
self.assertEqual(TimeVariable, coltype)
self.assertIsNone(valuemap)
def test_guess_data_type_values_order(self):
"""
Test if values are ordered naturally
"""
in_values = [
"something1", "something12", "something2", "something1",
"something20", "something1", "something2", "something12",
"something1", "something12"
]
res = ["something1", "something2", "something12", "something20"]
valuemap, _, coltype = guess_data_type(in_values)
self.assertEqual(DiscreteVariable, coltype)
self.assertListEqual(res, valuemap)
class TestWriters(unittest.TestCase):
def setUp(self):
self.domain = Domain([DiscreteVariable("a", values=tuple("xyz")),
ContinuousVariable("b", number_of_decimals=3)],
ContinuousVariable("c", number_of_decimals=0),
[StringVariable("d")])
self.data = Table.from_numpy(
self.domain,
np.array([[1, 0.5], [2, np.nan], [np.nan, 1.0625]]),
np.array([3, 1, 7]),
np.array(["foo bar baz".split()]).T
)
def test_write_tab(self):
with NamedTemporaryFile(suffix=".tab", delete=False) as f:
fname = f.name
try:
TabReader.write(fname, self.data)
with open(fname, encoding="utf-8") as f:
self.assertEqual(f.read().strip(), """
c\td\ta\tb
continuous\tstring\tx y z\tcontinuous
class\tmeta\t\t
3\tfoo\ty\t0.500
1\tbar\tz\t
7\tbaz\t\t1.06250""".strip())
finally:
os.remove(fname)
def test_roundtrip_xlsx(self):
with NamedTemporaryFile(suffix=".xlsx", delete=False) as f:
fname = f.name
try:
ExcelReader.write(fname, self.data)
data = ExcelReader(fname).read()
np.testing.assert_equal(data.X, self.data.X)
np.testing.assert_equal(data.Y, self.data.Y)
np.testing.assert_equal(data.metas, self.data.metas)
np.testing.assert_equal(data.domain, self.data.domain)
finally:
os.remove(fname)
if __name__ == "__main__":
unittest.main()
| null |
1,351 |
# Copyright 2020 The Johns Hopkins University Applied Physics Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from intern.service.service import Service
from enum import IntEnum
import numpy as np
class VoxelUnits(IntEnum):
"""Enum with valid VoxelUnits
"""
nm = 1
um = 1000
mm = 1000000
cm = 10000000
nanometers = 1
micrometers = 1000
millimeters = 1000000
centimeters = 10000000
class MeshService(Service):
""" Partial implementation of intern.service.service.Service for the Meshing' services.
"""
def __init__(self):
"""Constructor
"""
Service.__init__(self)
def set_auth(self):
""" No auth for Meshing
"""
self._auth = None
def create(self, volume,
x_range, y_range, z_range, time_range=None,
id_list=[], voxel_unit=VoxelUnits.nm,
voxel_size=[4,4,40], simp_fact=0, max_simplification_error=60,
normals=False, **kwargs):
"""Generate a mesh of the specified IDs
Args:
volume ([array]): Numpy array volume.
resolution (int): 0 indicates native resolution.
x_range (list[int]): x range such as [10, 20] which means x>=10 and x<20.
y_range (list[int]): y range such as [10, 20] which means y>=10 and y<20.
z_range (list[int]): z range such as [10, 20] which means z>=10 and z<20.
time_range (optional [list[int]]): time range such as [30, 40] which means t>=30 and t<40.
id_list (optional [list]): list of object ids to filter the volume by.
voxel_unit (optional VoxelUnit): voxel unit of measurement to derive conversion factor.
voxel_size (optional [list]): list in form [x,y,z] of voxel size. Defaults to 4x4x40nm
simp_fact (optional int): mesh simplification factor, reduces triangles by given factor
max_simplification_error (optional int): Max tolerable error in physical distance
normals (optional bool): if true will calculate normals
Returns:
(): Return type depends on volume service's implementation.
Raises:
RuntimeError when given invalid resource.
Other exceptions may be raised depending on the volume service's implementation.
"""
from zmesh import Mesher
if np.unique(volume).shape[0] == 1:
raise ValueError("The volume provided only has one unique ID (0). ID 0 is considered background.")
conv_factor = self._get_conversion_factor(voxel_unit)
# Get voxel_sizes
x_voxel_size = float(voxel_size[0]) * conv_factor
y_voxel_size = float(voxel_size[1]) * conv_factor
z_voxel_size = float(voxel_size[2]) * conv_factor
# Mesh
mesher = Mesher((x_voxel_size,y_voxel_size,z_voxel_size))
mesher.mesh(volume)
# If the list is empty then just default to all ID's found in the volume
if (id_list == []):
id_list = mesher.ids()
# Run the mesher on all specified ID's
for oid in id_list:
mesh = mesher.get_mesh(
oid,
normals=normals,
simplification_factor=simp_fact,
max_simplification_error= max_simplification_error,
)
mesh.vertices += [x_range[0]*conv_factor, y_range[0]*conv_factor, z_range[0]*conv_factor]
return Mesh([volume, mesh])
def _get_conversion_factor(self, voxel_unit):
"""
Validate the voxel unit type and derive conversion factor from it if valid
Arguments:
voxel_unit (VoxelUnits): 'nanometers', 'millimeters', <etc>
Returns:
int: conversion factor to use by meshing service
Raises:
ValueError
"""
if not isinstance(voxel_unit, VoxelUnits):
raise ValueError("{} is not a valid voxel unit".format(voxel_unit))
else:
return voxel_unit.value
class Mesh:
def __init__(self, data):
"""Constructor.
Args:
data (tuple[raw_volume, mesh]): tuple containing the raw data and the mesh data
"""
self._raw_vol = data[0]
self._mesh = data[1]
def METHOD_NAME(self):
"""Convert mesh to precompute format for Neuroglancer visualization
Args:
mesh: mesh to convert.
Returns:
(): Returns mesh precompute format
"""
return self._mesh.to_precomputed()
def obj_mesh(self):
"""Convert mesh to obj
Args:
mesh: mesh to convert.
Returns:
(): Returns mesh obj format
"""
return self._mesh.to_obj()
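# --- Illustrative usage sketch (not part of the intern library) ----------------
# Assumes the optional `zmesh` dependency is installed; the labelled volume, extents
# and IDs below are hypothetical. Label 0 is treated as background.
if __name__ == "__main__":
    labels = np.zeros((64, 64, 16), dtype=np.uint32)
    labels[8:24, 8:24, 4:12] = 1                     # a single labelled object
    service = MeshService()
    mesh = service.create(
        labels,
        x_range=[0, 64], y_range=[0, 64], z_range=[0, 16],
        id_list=[1],
        voxel_unit=VoxelUnits.nm,
        voxel_size=[4, 4, 40],
    )
    obj_export = mesh.obj_mesh()                     # Wavefront OBJ export via zmesh
    print(type(obj_export), len(obj_export))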
| null |
1,352 |
"""
CoQA: A Conversational Question Answering Challenge
https://arxiv.org/pdf/1808.07042.pdf
CoQA is a large-scale dataset for building Conversational Question Answering
systems. The goal of the CoQA challenge is to measure the ability of machines to
understand a text passage and answer a series of interconnected questions that
appear in a conversation.
Homepage: https://stanfordnlp.github.io/coqa/
"""
import inspect
import transformers.data.metrics.squad_metrics as squad_metrics
import lm_eval.datasets.coqa.coqa
from lm_eval.base import Task, rf, mean
from itertools import zip_longest
_CITATION = """
@misc{reddy2018coqa,
title={CoQA: A Conversational Question Answering Challenge},
author={Siva Reddy and Danqi Chen and Christopher D. Manning},
year={2018},
eprint={1808.07042},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
class CoQA(Task):
VERSION = 1
DATASET_PATH = inspect.getfile(lm_eval.datasets.coqa.coqa)
DATASET_NAME = None
def has_training_docs(self):
return True
def has_validation_docs(self):
return True
def has_test_docs(self):
return False
def training_docs(self):
return self.dataset["train"]
def validation_docs(self):
return self.dataset["validation"]
def test_docs(self):
pass
def doc_to_text(self, doc):
# Given a passage p, the conversation history {q1, a1, . . . qi−1, ai−1}
# and a question qi, the task is to predict the answer ai
doc_text = doc["story"] + "\n\n"
for (q, a) in zip_longest(
doc["questions"]["input_text"], doc["answers"]["input_text"][:-1]
): # omit target answer ai
question = f"Q: {q}\n\n"
answer = f"A: {a}\n\n" if a is not None else "A:"
doc_text += question + answer
return doc_text
def METHOD_NAME(self):
return True
def doc_to_decontamination_query(self, doc):
return doc["story"] + " " + "\n".join(doc["questions"]["input_text"])
@classmethod
def get_answers(cls, doc, turn_id):
# Returns unique answers and valid alternatives (Some questions in CoQA have multiple valid answers).
answers = []
answer_forturn = doc["answers"]["input_text"][turn_id - 1]
answers.append(answer_forturn)
additional_answers = doc.get("additional_answers")
if additional_answers:
for key in additional_answers:
additional_answer_for_turn = additional_answers[key]["input_text"][
turn_id - 1
]
if additional_answer_for_turn.lower() not in map(str.lower, answers):
answers.append(additional_answer_for_turn)
return answers
@classmethod
def get_answer_choice(self, raw_text):
# Function maps answers to CoQA answer categories
# ~ 1/5 of the CoQA answers are Yes/No
# ~ 2/3 of the CoQA answers are span-based
# (answers overlap with the passage ignoring punctuation and case mismatch)
if raw_text == "unknown":
return "0"
if squad_metrics.normalize_answer(raw_text) == "yes":
return "1"
if squad_metrics.normalize_answer(raw_text) == "no":
return "2"
return "3" # Not a yes/no question
@staticmethod
def compute_scores(gold_list, pred):
# tests for exact match and on the normalised answer (compute_exact)
# test for overlap (compute_f1)
f1_sum = 0.0
em_sum = 0.0
if len(gold_list) > 1:
for i in range(len(gold_list)):
gold_answers = gold_list[0:i] + gold_list[i + 1 :]
# predictions compared against (n) golds and take maximum
em_sum += max(
squad_metrics.compute_exact(a, pred) for a in gold_answers
)
f1_sum += max(squad_metrics.compute_f1(a, pred) for a in gold_answers)
else:
em_sum += max(squad_metrics.compute_exact(a, pred) for a in gold_list)
f1_sum += max(squad_metrics.compute_f1(a, pred) for a in gold_list)
return {
"em": em_sum / max(1, len(gold_list)),
"f1": f1_sum / max(1, len(gold_list)),
}
def doc_to_target(self, doc, turnid=None):
# Default to prediction of last turn.
if turnid is None:
turnid = len(doc["questions"]["input_text"])
raw_text = doc["answers"]["input_text"][turnid - 1]
return " " + raw_text
def construct_requests(self, doc, ctx):
"""Uses RequestFactory to construct Requests and returns an iterable of
Requests which will be sent to the LM.
:param doc:
The document as returned from training_docs, validation_docs, or test_docs.
:param ctx: str
The context string, generated by fewshot_context. This includes the natural
language description, as well as the few shot examples, and the question
part of the document for `doc`.
"""
cont_request = rf.greedy_until(ctx, {"until": ["\nQ:"]})
return cont_request
def process_results(self, doc, results):
"""Take a single document and the LM results and evaluates, returning a
dict where keys are the names of submetrics and values are the values of
the metric for that one document
:param doc:
The document as returned from training_docs, validation_docs, or test_docs.
:param results:
The results of the requests created in construct_requests.
"""
turn_id = len(doc["questions"]["input_text"])
gold_list = self.get_answers(doc, turn_id)
pred = results[0].strip().split("\n")[0]
scores = self.compute_scores(gold_list, pred)
return {
"f1": scores["f1"],
"em": scores["em"],
}
def higher_is_better(self):
return {
"f1": True,
"em": True,
}
def aggregation(self):
return {
"f1": mean,
"em": mean,
}
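# Illustrative only (not taken from the dataset): for a two-turn conversation,
# doc_to_text() above produces a prompt of the form
#
#   <story text>
#
#   Q: <question 1>
#
#   A: <answer 1>
#
#   Q: <question 2>
#
#   A:
#
# i.e. every prior turn is written out in full and the final turn ends with a bare
# "A:" for the model to complete; construct_requests() then stops generation at the
# next "\nQ:".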
| null |
1,353 |
##########################################################################
#
# Copyright (c) 2010, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from __future__ import with_statement
import unittest
import os
import weakref
import nuke
import IECore
import IECoreNuke
class ObjectKnobTest( IECoreNuke.TestCase ) :
def testNameAndLabel( self ) :
n = nuke.createNode( "ieObject" )
k = n.knob( "object" )
self.assertEqual( k.name(), "object" )
self.assertEqual( k.label(), "Object" )
def testAccessors( self ) :
n = nuke.createNode( "ieObject" )
k = n.knob( "object" )
self.assertTrue( isinstance( k, IECoreNuke.ObjectKnob ) )
self.assertEqual( k.getValue(), None )
i = IECore.IntData( 10 )
k.setValue( i )
self.assertFalse( k.getValue().isSame( i ) )
self.assertEqual( k.getValue(), IECore.IntData( 10 ) )
i.value = 20
self.assertEqual( k.getValue(), IECore.IntData( 10 ) )
@unittest.skip("Foundry explained that Knob memory clean up is not a bug but a feature when calling scriptClear")
def METHOD_NAME( self ) :
n = nuke.createNode( "ieObject" )
k = n.knob( "object" )
nuke.scriptClear()
self.assertRaises( RuntimeError, k.name )
self.assertRaises( RuntimeError, k.label )
self.assertRaises( RuntimeError, k.setValue, None )
self.assertRaises( RuntimeError, k.getValue )
w = weakref.ref( k )
self.assertFalse( w() is None )
del k
self.assertFalse( w() is not None )
def testSetValueReturn( self ) :
n = nuke.createNode( "ieObject" )
k = n.knob( "object" )
self.assertEqual( k.setValue( None ), False )
self.assertEqual( k.setValue( IECore.IntData( 1 ) ), True )
self.assertEqual( k.setValue( IECore.IntData( 1 ) ), False )
self.assertEqual( k.setValue( IECore.IntData( 10 ) ), True )
self.assertEqual( k.setValue( None ), True )
def testCopyPaste( self ) :
n = nuke.createNode( "ieObject" )
self.assertEqual( nuke.selectedNodes(), [ n ] )
n.knob( "object" ).setValue( IECore.IntData( 10 ) )
nuke.nodeCopy( "test/IECoreNuke/objectKnob.nk" )
nuke.scriptClear()
n2 = nuke.nodePaste( "test/IECoreNuke/objectKnob.nk" )
self.assertEqual( n2.knob( "object" ).getValue(), IECore.IntData( 10 ) )
def testCopyPasteNoValue( self ) :
n = nuke.createNode( "ieObject" )
self.assertEqual( nuke.selectedNodes(), [ n ] )
nuke.nodeCopy( "test/IECoreNuke/objectKnob.nk" )
nuke.scriptClear()
n2 = nuke.nodePaste( "test/IECoreNuke/objectKnob.nk" )
self.assertEqual( n2.knob( "object" ).getValue(), None )
def testUndo( self ) :
# check our custom knob undoes in the same way as
# standard knobs
n = nuke.createNode( "ieObject" )
n2 = nuke.createNode( "Blur" )
self.assertEqual( n.knob( "object" ).getValue(), None )
self.assertEqual( n2.knob( "size" ).getValue(), 0 )
self.assertEqual( nuke.Undo.disabled(), True )
with IECoreNuke.UndoEnabled() :
self.assertEqual( nuke.Undo.disabled(), False )
with IECoreNuke.UndoBlock() :
n.knob( "object" ).setValue( IECore.IntData( 10 ) )
self.assertEqual( n.knob( "object" ).getValue(), IECore.IntData( 10 ) )
n2.knob( "size" ).setValue( 10 )
self.assertEqual( n2.knob( "size" ).getValue(), 10 )
self.assertEqual( nuke.Undo.disabled(), True )
self.assertEqual( n.knob( "object" ).getValue(), IECore.IntData( 10 ) )
self.assertEqual( n2.knob( "size" ).getValue(), 10 )
nuke.undo()
self.assertEqual( n2.knob( "size" ).getValue(), 0 )
self.assertEqual( n.knob( "object" ).getValue(), None )
def tearDown( self ) :
for f in [
"test/IECoreNuke/objectKnob.nk",
] :
if os.path.exists( f ) :
os.remove( f )
if __name__ == "__main__":
unittest.main()
| null |
1,354 |
# Copyright 2017-2019 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import getopt
import sys
import time
from internal.config import Config, ConfigNotFoundError
from internal.synchronization.pipeline_server import PipelineServer
from exceptions import KeyboardInterrupt
def configure(argv):
opts, args = getopt.getopt(
argv,
"a:k:p:",
["api=", "key=", "proxy=", "email-attribute=", "name-attribute=", "ssh-pub-attribute=",
"ssh-prv-attribute=", "admins-group=", "git-group-prefix="]
)
api = None
key = None
proxy = None
email = None
name = None
ssh_pub = None
ssh_prv = None
admins_group = None
git_group_prefix = None
for opt, arg in opts:
if opt in ("-a", "--api"):
api = arg
elif opt in ("-k", "--key"):
key = arg
elif opt in ("-p", "--proxy"):
proxy = arg
elif opt == "--email-attribute":
email = arg
elif opt == "--name-attribute":
name = arg
elif opt == "--ssh-pub-attribute":
ssh_pub = arg
elif opt == "--ssh-prv-attribute":
ssh_prv = arg
elif opt == "--admins-group":
admins_group = arg
elif opt == "--git-group-prefix":
git_group_prefix = arg
Config.store(key, api, proxy, email, name, ssh_pub, ssh_prv, admins_group, git_group_prefix)
print 'syncgit configuration updated'
exit(0)
def METHOD_NAME():
print 'Use \'configure\' command to setup synchronization properties and settings:'
print 'python syncgit.py configure ' \
'--api=<api path> ' \
'--key=<api token> ' \
'--email-attribute=<attribute name for \'email\' field, case sensitive, default - \'Email\'> ' \
'--name-attribute=<attribute name for \'name\' field, case sensitive, default - \'Name\'> ' \
'--ssh-pub-attribute=<attribute name for \'ssh_pub\' field, case sensitive, default - \'ssh_pub\'> ' \
'--ssh-prv-attribute=<attribute name for \'ssh_prv\' field, case sensitive, default - \'ssh_prv\'> ' \
'--admins-group=<administrators group name, defualt - \'ROLE_ADMIN\'> ' \
'--git-group-prefix=<prefix for group names, default - \'PIPELINE-\'>'
print ''
print 'Use \'sync\' command to synchronize git users, groups and project members.'
print 'python syncgit.py sync'
print ''
print 'Use \'purge\' command to remove git users, groups and project members.'
print 'python syncgit.py purge'
print ''
def main(argv):
if len(argv) > 0:
command = argv[0]
if command == 'help' or command == '-h' or command == '--help':
METHOD_NAME()
elif command == 'configure':
configure(argv[1:])
elif command == 'sync':
try:
config = Config.instance()
if config.api is None:
print 'API path is not configured'
METHOD_NAME()
exit(1)
elif config.access_key is None:
print 'API token is not configured'
METHOD_NAME()
exit(1)
except ConfigNotFoundError as error:
print error.message
METHOD_NAME()
exit(1)
start = time.time()
pipeline_server = PipelineServer()
try:
pipeline_server.synchronize(map(lambda pipeline_id: int(pipeline_id), argv[1:]))
except KeyboardInterrupt:
exit(2)
print ''
print 'Synchronization time: {} seconds'.format(time.time() - start)
elif command == 'sync-users':
try:
config = Config.instance()
if config.api is None:
print 'API path is not configured'
METHOD_NAME()
exit(1)
elif config.access_key is None:
print 'API token is not configured'
METHOD_NAME()
exit(1)
except ConfigNotFoundError as error:
print error.message
METHOD_NAME()
exit(1)
start = time.time()
pipeline_server = PipelineServer()
try:
pipeline_server.synchronize_users(argv[1:])
except KeyboardInterrupt:
exit(2)
print ''
print 'Synchronization time: {} seconds'.format(time.time() - start)
elif command == 'purge':
try:
config = Config.instance()
if config.api is None:
print 'API path is not configured'
METHOD_NAME()
exit(1)
elif config.access_key is None:
print 'API token is not configured'
METHOD_NAME()
exit(1)
except ConfigNotFoundError as error:
print error.message
METHOD_NAME()
exit(1)
choice = ''
while choice not in ('y', 'n'):
sys.stdout.write('This command will remove all users (except root) and generated groups for git server.'
' Are you sure? y/n: ')
choice = raw_input().lower()
if choice == 'n':
sys.exit()
pipeline_server = PipelineServer()
for server in pipeline_server.get_distinct_git_servers():
server.clear_users_and_groups()
print ''
else:
print 'Unknown command {}'.format(command)
exit(1)
if __name__ == "__main__":
main(sys.argv[1:])
| null |
1,355 |
# -*- coding: utf-8 -*-
import mock
import pytest
from django.utils import timezone
from api.base.settings.defaults import API_BASE
from api.users.views import ClaimUser
from api_tests.utils import only_supports_methods
from framework.auth.core import Auth
from osf_tests.factories import (
AuthUserFactory,
ProjectFactory,
PreprintFactory,
)
@pytest.mark.django_db
class TestClaimUser:
@pytest.fixture
def mock_mail(self):
with mock.patch('website.project.views.contributor.mails.send_mail') as patch:
yield patch
@pytest.fixture()
def referrer(self):
return AuthUserFactory()
@pytest.fixture()
def project(self, referrer):
return ProjectFactory(creator=referrer)
@pytest.fixture()
def preprint(self, referrer, project):
return PreprintFactory(creator=referrer, project=project)
@pytest.fixture()
def wrong_preprint(self, referrer):
return PreprintFactory(creator=referrer)
@pytest.fixture()
def unreg_user(self, referrer, project):
return project.add_unregistered_contributor(
'David Davidson',
'[email protected]',
auth=Auth(referrer),
save=True
)
@pytest.fixture()
def claimer(self):
return AuthUserFactory()
@pytest.fixture()
def url(self):
return '/{}users/{{}}/claim/'.format(API_BASE)
def payload(self, **kwargs):
payload = {
'data': {
'attributes': {}
}
}
_id = kwargs.pop('id', None)
if _id:
payload['data']['id'] = _id
if kwargs:
payload['data']['attributes'] = kwargs
return payload
def test_unacceptable_methods(self):
assert only_supports_methods(ClaimUser, ['POST'])
def METHOD_NAME(self, app, url, unreg_user, project, wrong_preprint):
_url = url.format(unreg_user._id)
# no record locator
payload = self.payload(email='[email protected]')
res = app.post_json_api(
_url,
payload,
expect_errors=True
)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'Must specify record "id".'
# bad record locator
payload = self.payload(email='[email protected]', id='notaguid')
res = app.post_json_api(
_url,
payload,
expect_errors=True
)
assert res.status_code == 404
assert res.json['errors'][0]['detail'] == 'Unable to find specified record.'
# wrong record locator
payload = self.payload(email='[email protected]', id=wrong_preprint._id)
res = app.post_json_api(
_url,
payload,
expect_errors=True
)
assert res.status_code == 404
assert res.json['errors'][0]['detail'] == 'Unable to find specified record.'
# no email
payload = self.payload(id=project._id)
res = app.post_json_api(
_url,
payload,
expect_errors=True
)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'Must either be logged in or specify claim email.'
# active user
_url = url.format(project.creator._id)
payload = self.payload(email=project.creator.email, id=project._id)
res = app.post_json_api(
_url,
payload,
expect_errors=True
)
assert res.status_code == 401
def test_claim_unauth_success_with_original_email(self, app, url, project, unreg_user, mock_mail):
res = app.post_json_api(
url.format(unreg_user._id),
self.payload(email='[email protected]', id=project._id),
)
assert res.status_code == 204
assert mock_mail.call_count == 1
def test_claim_unauth_success_with_claimer_email(self, app, url, unreg_user, project, claimer, mock_mail):
res = app.post_json_api(
url.format(unreg_user._id),
self.payload(email=claimer.username, id=project._id)
)
assert res.status_code == 204
assert mock_mail.call_count == 2
def test_claim_unauth_success_with_unknown_email(self, app, url, project, unreg_user, mock_mail):
res = app.post_json_api(
url.format(unreg_user._id),
self.payload(email='[email protected]', id=project._id),
)
assert res.status_code == 204
assert mock_mail.call_count == 2
def test_claim_unauth_success_with_preprint_id(self, app, url, preprint, unreg_user, mock_mail):
res = app.post_json_api(
url.format(unreg_user._id),
self.payload(email='[email protected]', id=preprint._id),
)
assert res.status_code == 204
assert mock_mail.call_count == 1
def test_claim_auth_failure(self, app, url, claimer, wrong_preprint, project, unreg_user, referrer):
_url = url.format(unreg_user._id)
# no record locator
payload = self.payload(email='[email protected]')
res = app.post_json_api(
_url,
payload,
auth=claimer.auth,
expect_errors=True
)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'Must specify record "id".'
# bad record locator
payload = self.payload(email='[email protected]', id='notaguid')
res = app.post_json_api(
_url,
payload,
auth=claimer.auth,
expect_errors=True
)
assert res.status_code == 404
assert res.json['errors'][0]['detail'] == 'Unable to find specified record.'
# wrong record locator
payload = self.payload(email='[email protected]', id=wrong_preprint._id)
res = app.post_json_api(
_url,
payload,
auth=claimer.auth,
expect_errors=True
)
assert res.status_code == 404
assert res.json['errors'][0]['detail'] == 'Unable to find specified record.'
# referrer auth
payload = self.payload(email='[email protected]', id=project._id)
res = app.post_json_api(
_url,
payload,
auth=referrer.auth,
expect_errors=True
)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'Referrer cannot claim user.'
# active user
_url = url.format(project.creator._id)
payload = self.payload(email=project.creator.email, id=project._id)
res = app.post_json_api(
_url,
payload,
auth=claimer.auth,
expect_errors=True
)
assert res.status_code == 403
def test_claim_auth_throttle_error(self, app, url, claimer, unreg_user, project, mock_mail):
unreg_user.unclaimed_records[project._id]['last_sent'] = timezone.now()
unreg_user.save()
res = app.post_json_api(
url.format(unreg_user._id),
self.payload(id=project._id),
auth=claimer.auth,
expect_errors=True
)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'User account can only be claimed with an existing user once every 24 hours'
assert mock_mail.call_count == 0
def test_claim_auth_success(self, app, url, claimer, unreg_user, project, mock_mail):
res = app.post_json_api(
url.format(unreg_user._id),
self.payload(id=project._id),
auth=claimer.auth
)
assert res.status_code == 204
assert mock_mail.call_count == 2
| null |
1,356 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkvpc.endpoint import endpoint_data
class ModifyVpnAttachmentAttributeRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Vpc', '2016-04-28', 'ModifyVpnAttachmentAttribute','vpc')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_IkeConfig(self): # String
return self.get_query_params().get('IkeConfig')
def set_IkeConfig(self, IkeConfig): # String
self.add_query_param('IkeConfig', IkeConfig)
def get_AutoConfigRoute(self): # Boolean
return self.get_query_params().get('AutoConfigRoute')
def set_AutoConfigRoute(self, AutoConfigRoute): # Boolean
self.add_query_param('AutoConfigRoute', AutoConfigRoute)
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_IpsecConfig(self): # String
return self.get_query_params().get('IpsecConfig')
def set_IpsecConfig(self, IpsecConfig): # String
self.add_query_param('IpsecConfig', IpsecConfig)
def get_BgpConfig(self): # String
return self.get_query_params().get('BgpConfig')
def set_BgpConfig(self, BgpConfig): # String
self.add_query_param('BgpConfig', BgpConfig)
def get_NetworkType(self): # String
return self.get_query_params().get('NetworkType')
def set_NetworkType(self, NetworkType): # String
self.add_query_param('NetworkType', NetworkType)
def get_HealthCheckConfig(self): # String
return self.get_query_params().get('HealthCheckConfig')
def set_HealthCheckConfig(self, HealthCheckConfig): # String
self.add_query_param('HealthCheckConfig', HealthCheckConfig)
def get_CustomerGatewayId(self): # String
return self.get_query_params().get('CustomerGatewayId')
def set_CustomerGatewayId(self, CustomerGatewayId): # String
self.add_query_param('CustomerGatewayId', CustomerGatewayId)
def get_LocalSubnet(self): # String
return self.get_query_params().get('LocalSubnet')
def set_LocalSubnet(self, LocalSubnet): # String
self.add_query_param('LocalSubnet', LocalSubnet)
def get_RemoteCaCert(self): # String
return self.get_query_params().get('RemoteCaCert')
def set_RemoteCaCert(self, RemoteCaCert): # String
self.add_query_param('RemoteCaCert', RemoteCaCert)
def get_RemoteSubnet(self): # String
return self.get_query_params().get('RemoteSubnet')
def set_RemoteSubnet(self, RemoteSubnet): # String
self.add_query_param('RemoteSubnet', RemoteSubnet)
def get_EffectImmediately(self): # Boolean
return self.get_query_params().get('EffectImmediately')
def set_EffectImmediately(self, EffectImmediately): # Boolean
self.add_query_param('EffectImmediately', EffectImmediately)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_EnableDpd(self): # Boolean
return self.get_query_params().get('EnableDpd')
def set_EnableDpd(self, EnableDpd): # Boolean
self.add_query_param('EnableDpd', EnableDpd)
def get_VpnConnectionId(self): # String
return self.get_query_params().get('VpnConnectionId')
def set_VpnConnectionId(self, VpnConnectionId): # String
self.add_query_param('VpnConnectionId', VpnConnectionId)
def get_Name(self): # String
return self.get_query_params().get('Name')
def set_Name(self, Name): # String
self.add_query_param('Name', Name)
def METHOD_NAME(self): # Boolean
return self.get_query_params().get('EnableNatTraversal')
def set_EnableNatTraversal(self, EnableNatTraversal): # Boolean
self.add_query_param('EnableNatTraversal', EnableNatTraversal)
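# A minimal usage sketch (not part of the generated SDK file): sending this request
# with aliyunsdkcore's AcsClient. The credentials, region and connection id below are
# placeholders; adjust them before running against a real account.
if __name__ == "__main__":
	from aliyunsdkcore.client import AcsClient

	client = AcsClient("<your-access-key-id>", "<your-access-key-secret>", "cn-hangzhou")
	request = ModifyVpnAttachmentAttributeRequest()
	request.set_VpnConnectionId("vco-example")          # placeholder connection id
	request.set_Name("renamed-vpn-attachment")
	request.set_EnableDpd(True)
	request.set_EnableNatTraversal(True)
	# do_action_with_exception returns the raw JSON response body
	response = client.do_action_with_exception(request)
	print(response)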
| null |
1,357 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecs.endpoint import endpoint_data
class CreateSnapshotGroupRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'CreateSnapshotGroup','ecs')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_InstantAccess(self): # Boolean
return self.get_query_params().get('InstantAccess')
def set_InstantAccess(self, InstantAccess): # Boolean
self.add_query_param('InstantAccess', InstantAccess)
def get_ExcludeDiskIds(self): # RepeatList
return self.get_query_params().get('ExcludeDiskId')
def METHOD_NAME(self, ExcludeDiskId): # RepeatList
for depth1 in range(len(ExcludeDiskId)):
self.add_query_param('ExcludeDiskId.' + str(depth1 + 1), ExcludeDiskId[depth1])
def get_Description(self): # String
return self.get_query_params().get('Description')
def set_Description(self, Description): # String
self.add_query_param('Description', Description)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_InstantAccessRetentionDays(self): # Integer
return self.get_query_params().get('InstantAccessRetentionDays')
def set_InstantAccessRetentionDays(self, InstantAccessRetentionDays): # Integer
self.add_query_param('InstantAccessRetentionDays', InstantAccessRetentionDays)
def get_StorageLocationArn(self): # String
return self.get_query_params().get('StorageLocationArn')
def set_StorageLocationArn(self, StorageLocationArn): # String
self.add_query_param('StorageLocationArn', StorageLocationArn)
def get_DiskIds(self): # RepeatList
return self.get_query_params().get('DiskId')
def set_DiskIds(self, DiskId): # RepeatList
for depth1 in range(len(DiskId)):
self.add_query_param('DiskId.' + str(depth1 + 1), DiskId[depth1])
def get_Tags(self): # RepeatList
return self.get_query_params().get('Tag')
def set_Tags(self, Tag): # RepeatList
for depth1 in range(len(Tag)):
if Tag[depth1].get('Key') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key'))
if Tag[depth1].get('Value') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value'))
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_InstanceId(self): # String
return self.get_query_params().get('InstanceId')
def set_InstanceId(self, InstanceId): # String
self.add_query_param('InstanceId', InstanceId)
def get_Name(self): # String
return self.get_query_params().get('Name')
def set_Name(self, Name): # String
self.add_query_param('Name', Name)
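# A minimal usage sketch (not part of the generated file) showing how the RepeatList
# setters above expand into indexed query parameters (DiskId.1, DiskId.2, Tag.1.Key,
# Tag.1.Value, ...). Credentials, region and resource ids are placeholders.
if __name__ == "__main__":
	from aliyunsdkcore.client import AcsClient

	client = AcsClient("<your-access-key-id>", "<your-access-key-secret>", "cn-hangzhou")
	request = CreateSnapshotGroupRequest()
	request.set_InstanceId("i-example")                      # placeholder instance id
	request.set_DiskIds(["d-disk1", "d-disk2"])              # -> DiskId.1, DiskId.2
	request.set_Tags([{"Key": "env", "Value": "test"}])      # -> Tag.1.Key, Tag.1.Value
	print(request.get_query_params())                        # inspect the flattened params
	# response = client.do_action_with_exception(request)    # uncomment to actually call the API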
| null |
1,358 |
# coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://github.com/openapi-json-schema-tools/openapi-json-schema-generator
"""
from __future__ import annotations
from petstore_api.shared_imports.schema_imports import * # pyright: ignore [reportWildcardImportFromLibrary]
HasBaleen: typing_extensions.TypeAlias = schemas.BoolSchema
HasTeeth: typing_extensions.TypeAlias = schemas.BoolSchema
class ClassNameEnums:
@schemas.classproperty
def METHOD_NAME(cls) -> typing.Literal["whale"]:
return ClassName.validate("whale")
@dataclasses.dataclass(frozen=True)
class ClassName(
schemas.Schema
):
types: typing.FrozenSet[typing.Type] = frozenset({
str,
})
enum_value_to_name: typing.Mapping[typing.Union[int, float, str, schemas.Bool, None], str] = dataclasses.field(
default_factory=lambda: {
"whale": "WHALE",
}
)
enums = ClassNameEnums
@typing.overload
@classmethod
def validate(
cls,
arg: typing.Literal["whale"],
configuration: typing.Optional[schema_configuration.SchemaConfiguration] = None
) -> typing.Literal["whale"]: ...
@typing.overload
@classmethod
def validate(
cls,
arg: str,
configuration: typing.Optional[schema_configuration.SchemaConfiguration] = None
) -> typing.Literal["whale",]: ...
@classmethod
def validate(
cls,
arg,
configuration: typing.Optional[schema_configuration.SchemaConfiguration] = None
) -> typing.Literal[
"whale",
]:
validated_arg = super().validate_base(
arg,
configuration=configuration,
)
return typing.cast(typing.Literal[
"whale",
],
validated_arg
)
Properties = typing.TypedDict(
'Properties',
{
"hasBaleen": typing.Type[HasBaleen],
"hasTeeth": typing.Type[HasTeeth],
"className": typing.Type[ClassName],
}
)
class WhaleDict(schemas.immutabledict[str, schemas.OUTPUT_BASE_TYPES]):
__required_keys__: typing.FrozenSet[str] = frozenset({
"className",
})
__optional_keys__: typing.FrozenSet[str] = frozenset({
"hasBaleen",
"hasTeeth",
})
def __new__(
cls,
*,
className: typing.Literal[
"whale"
],
hasBaleen: typing.Union[
bool,
schemas.Unset
] = schemas.unset,
hasTeeth: typing.Union[
bool,
schemas.Unset
] = schemas.unset,
configuration_: typing.Optional[schema_configuration.SchemaConfiguration] = None,
**kwargs: schemas.INPUT_TYPES_ALL,
):
arg_: typing.Dict[str, typing.Any] = {
"className": className,
}
for key, val in (
("hasBaleen", hasBaleen),
("hasTeeth", hasTeeth),
):
if isinstance(val, schemas.Unset):
continue
arg_[key] = val
arg_.update(kwargs)
used_arg_ = typing.cast(WhaleDictInput, arg_)
return Whale.validate(used_arg_, configuration=configuration_)
@staticmethod
def from_dict_(
arg: typing.Union[
WhaleDictInput,
WhaleDict
],
configuration: typing.Optional[schema_configuration.SchemaConfiguration] = None
) -> WhaleDict:
return Whale.validate(arg, configuration=configuration)
@property
def className(self) -> typing.Literal["whale"]:
return typing.cast(
typing.Literal["whale"],
self.__getitem__("className")
)
@property
def hasBaleen(self) -> typing.Union[bool, schemas.Unset]:
val = self.get("hasBaleen", schemas.unset)
if isinstance(val, schemas.Unset):
return val
return typing.cast(
bool,
val
)
@property
def hasTeeth(self) -> typing.Union[bool, schemas.Unset]:
val = self.get("hasTeeth", schemas.unset)
if isinstance(val, schemas.Unset):
return val
return typing.cast(
bool,
val
)
def get_additional_property_(self, name: str) -> typing.Union[schemas.OUTPUT_BASE_TYPES, schemas.Unset]:
schemas.raise_if_key_known(name, self.__required_keys__, self.__optional_keys__)
return self.get(name, schemas.unset)
WhaleDictInput = typing.Mapping[str, schemas.INPUT_TYPES_ALL]
@dataclasses.dataclass(frozen=True)
class Whale(
schemas.Schema[WhaleDict, tuple]
):
"""NOTE: This class is auto generated by OpenAPI JSON Schema Generator.
Ref: https://github.com/openapi-json-schema-tools/openapi-json-schema-generator
Do not edit the class manually.
"""
types: typing.FrozenSet[typing.Type] = frozenset({schemas.immutabledict})
required: typing.FrozenSet[str] = frozenset({
"className",
})
properties: Properties = dataclasses.field(default_factory=lambda: schemas.typed_dict_to_instance(Properties)) # type: ignore
type_to_output_cls: typing.Mapping[
typing.Type,
typing.Type
] = dataclasses.field(
default_factory=lambda: {
schemas.immutabledict: WhaleDict
}
)
@classmethod
def validate(
cls,
arg: typing.Union[
WhaleDictInput,
WhaleDict,
],
configuration: typing.Optional[schema_configuration.SchemaConfiguration] = None
) -> WhaleDict:
return super().validate_base(
arg,
configuration=configuration,
)
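# A minimal usage sketch (not part of the generated model file): validating a payload
# against the Whale schema defined above. The dict keys mirror the Properties TypedDict;
# the exact output representation depends on the generated petstore_api package.
if __name__ == "__main__":
    whale = Whale.validate({"className": "whale", "hasBaleen": True})
    print(whale.className)   # expected: "whale"
    print(whale.hasBaleen)   # expected: truthy bool value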
| null |
1,359 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkalidns.endpoint import endpoint_data
class AddDnsGtmAccessStrategyRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Alidns', '2015-01-09', 'AddDnsGtmAccessStrategy','alidns')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_DefaultLbaStrategy(self): # String
return self.get_query_params().get('DefaultLbaStrategy')
def set_DefaultLbaStrategy(self, DefaultLbaStrategy): # String
self.add_query_param('DefaultLbaStrategy', DefaultLbaStrategy)
def get_FailoverAddrPoolType(self): # String
return self.get_query_params().get('FailoverAddrPoolType')
def set_FailoverAddrPoolType(self, FailoverAddrPoolType): # String
self.add_query_param('FailoverAddrPoolType', FailoverAddrPoolType)
def get_DefaultAddrPoolType(self): # String
return self.get_query_params().get('DefaultAddrPoolType')
def set_DefaultAddrPoolType(self, DefaultAddrPoolType): # String
self.add_query_param('DefaultAddrPoolType', DefaultAddrPoolType)
def get_FailoverMaxReturnAddrNum(self): # Integer
return self.get_query_params().get('FailoverMaxReturnAddrNum')
def set_FailoverMaxReturnAddrNum(self, FailoverMaxReturnAddrNum): # Integer
self.add_query_param('FailoverMaxReturnAddrNum', FailoverMaxReturnAddrNum)
def get_FailoverLbaStrategy(self): # String
return self.get_query_params().get('FailoverLbaStrategy')
def set_FailoverLbaStrategy(self, FailoverLbaStrategy): # String
self.add_query_param('FailoverLbaStrategy', FailoverLbaStrategy)
def get_DefaultAddrPools(self): # RepeatList
return self.get_query_params().get('DefaultAddrPool')
def set_DefaultAddrPools(self, DefaultAddrPool): # RepeatList
for depth1 in range(len(DefaultAddrPool)):
if DefaultAddrPool[depth1].get('Id') is not None:
self.add_query_param('DefaultAddrPool.' + str(depth1 + 1) + '.Id', DefaultAddrPool[depth1].get('Id'))
if DefaultAddrPool[depth1].get('LbaWeight') is not None:
self.add_query_param('DefaultAddrPool.' + str(depth1 + 1) + '.LbaWeight', DefaultAddrPool[depth1].get('LbaWeight'))
def get_FailoverMinAvailableAddrNum(self): # Integer
return self.get_query_params().get('FailoverMinAvailableAddrNum')
def set_FailoverMinAvailableAddrNum(self, FailoverMinAvailableAddrNum): # Integer
self.add_query_param('FailoverMinAvailableAddrNum', FailoverMinAvailableAddrNum)
def get_DefaultMaxReturnAddrNum(self): # Integer
return self.get_query_params().get('DefaultMaxReturnAddrNum')
def set_DefaultMaxReturnAddrNum(self, DefaultMaxReturnAddrNum): # Integer
self.add_query_param('DefaultMaxReturnAddrNum', DefaultMaxReturnAddrNum)
def get_DefaultMinAvailableAddrNum(self): # Integer
return self.get_query_params().get('DefaultMinAvailableAddrNum')
def set_DefaultMinAvailableAddrNum(self, DefaultMinAvailableAddrNum): # Integer
self.add_query_param('DefaultMinAvailableAddrNum', DefaultMinAvailableAddrNum)
def get_StrategyMode(self): # String
return self.get_query_params().get('StrategyMode')
def METHOD_NAME(self, StrategyMode): # String
self.add_query_param('StrategyMode', StrategyMode)
def get_Lang(self): # String
return self.get_query_params().get('Lang')
def set_Lang(self, Lang): # String
self.add_query_param('Lang', Lang)
def get_Lines(self): # String
return self.get_query_params().get('Lines')
def set_Lines(self, Lines): # String
self.add_query_param('Lines', Lines)
def get_StrategyName(self): # String
return self.get_query_params().get('StrategyName')
def set_StrategyName(self, StrategyName): # String
self.add_query_param('StrategyName', StrategyName)
def get_DefaultLatencyOptimization(self): # String
return self.get_query_params().get('DefaultLatencyOptimization')
def set_DefaultLatencyOptimization(self, DefaultLatencyOptimization): # String
self.add_query_param('DefaultLatencyOptimization', DefaultLatencyOptimization)
def get_InstanceId(self): # String
return self.get_query_params().get('InstanceId')
def set_InstanceId(self, InstanceId): # String
self.add_query_param('InstanceId', InstanceId)
def get_FailoverLatencyOptimization(self): # String
return self.get_query_params().get('FailoverLatencyOptimization')
def set_FailoverLatencyOptimization(self, FailoverLatencyOptimization): # String
self.add_query_param('FailoverLatencyOptimization', FailoverLatencyOptimization)
def get_FailoverAddrPools(self): # RepeatList
return self.get_query_params().get('FailoverAddrPool')
def set_FailoverAddrPools(self, FailoverAddrPool): # RepeatList
for depth1 in range(len(FailoverAddrPool)):
if FailoverAddrPool[depth1].get('Id') is not None:
self.add_query_param('FailoverAddrPool.' + str(depth1 + 1) + '.Id', FailoverAddrPool[depth1].get('Id'))
if FailoverAddrPool[depth1].get('LbaWeight') is not None:
self.add_query_param('FailoverAddrPool.' + str(depth1 + 1) + '.LbaWeight', FailoverAddrPool[depth1].get('LbaWeight'))
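# A minimal usage sketch (not part of the generated file) showing how the nested
# RepeatList setters expand address pools into indexed parameters such as
# DefaultAddrPool.1.Id and DefaultAddrPool.1.LbaWeight. All ids are placeholders.
if __name__ == "__main__":
	request = AddDnsGtmAccessStrategyRequest()
	request.set_InstanceId("gtm-example")                    # placeholder GTM instance id
	request.set_StrategyName("primary-backup")
	request.set_DefaultAddrPools([{"Id": "pool-a", "LbaWeight": 1}])
	request.set_FailoverAddrPools([{"Id": "pool-b", "LbaWeight": 1}])
	print(request.get_query_params())                        # inspect the flattened params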
| null |
1,360 |
# Test methods with long descriptive names can omit docstrings
# pylint: disable=missing-docstring
import unittest
import numpy as np
import scipy.sparse as sp
from Orange.data import Table
from Orange.preprocess import Randomize
class TestRandomizer(unittest.TestCase):
@classmethod
def setUpClass(cls):
np.random.seed(42)
cls.zoo = Table("zoo")
def test_randomize_default(self):
data = self.zoo
randomizer = Randomize()
data_rand = randomizer(data)
self.assertTrue((data.X == data_rand.X).all())
self.assertTrue((data.metas == data_rand.metas).all())
self.assertTrue((data.Y != data_rand.Y).any())
self.assertTrue((np.sort(data.Y, axis=0) == np.sort(
data_rand.Y, axis=0)).all())
def test_randomize_classes(self):
data = self.zoo
randomizer = Randomize(rand_type=Randomize.RandomizeClasses)
data_rand = randomizer(data)
self.assertTrue((data.X == data_rand.X).all())
self.assertTrue((data.metas == data_rand.metas).all())
self.assertTrue((data.Y != data_rand.Y).any())
self.assertTrue((np.sort(data.Y, axis=0) == np.sort(
data_rand.Y, axis=0)).all())
def test_randomize_attributes(self):
data = self.zoo
randomizer = Randomize(rand_type=Randomize.RandomizeAttributes)
data_rand = randomizer(data)
self.assertTrue((data.Y == data_rand.Y).all())
self.assertTrue((data.metas == data_rand.metas).all())
self.assertTrue((data.X != data_rand.X).any())
self.assertTrue((np.sort(data.X, axis=0) == np.sort(
data_rand.X, axis=0)).all())
def test_randomize_metas(self):
data = self.zoo
randomizer = Randomize(rand_type=Randomize.RandomizeMetas)
data_rand = randomizer(data)
self.assertTrue((data.X == data_rand.X).all())
self.assertTrue((data.Y == data_rand.Y).all())
self.assertTrue((data.metas != data_rand.metas).any())
self.assertTrue((np.sort(data.metas, axis=0) == np.sort(
data_rand.metas, axis=0)).all())
def test_randomize_all(self):
data = self.zoo
rand_type = Randomize.RandomizeClasses | Randomize.RandomizeAttributes \
| Randomize.RandomizeMetas
randomizer = Randomize(rand_type=rand_type)
data_rand = randomizer(data)
self.assertTrue((data.Y != data_rand.Y).any())
self.assertTrue((np.sort(data.Y, axis=0) == np.sort(
data_rand.Y, axis=0)).all())
self.assertTrue((data.X != data_rand.X).any())
self.assertTrue((np.sort(data.X, axis=0) == np.sort(
data_rand.X, axis=0)).all())
self.assertTrue((data.metas != data_rand.metas).any())
self.assertTrue((np.sort(data.metas, axis=0) == np.sort(
data_rand.metas, axis=0)).all())
def METHOD_NAME(self):
data_orig = self.zoo
data = Table("zoo")
_ = Randomize(rand_type=Randomize.RandomizeClasses)(data)
_ = Randomize(rand_type=Randomize.RandomizeAttributes)(data)
_ = Randomize(rand_type=Randomize.RandomizeMetas)(data)
self.assertTrue((data.X == data_orig.X).all())
self.assertTrue((data.metas == data_orig.metas).all())
self.assertTrue((data.Y == data_orig.Y).all())
def test_randomize_replicate(self):
randomizer1 = Randomize(rand_seed=1)
rand_data11 = randomizer1(self.zoo)
rand_data12 = randomizer1(self.zoo)
randomizer2 = Randomize(rand_seed=1)
rand_data2 = randomizer2(self.zoo)
np.testing.assert_array_equal(rand_data11.Y, rand_data12.Y)
np.testing.assert_array_equal(rand_data11.Y, rand_data2.Y)
def test_randomize(self):
x = np.arange(10000, dtype=int).reshape((100, 100))
randomized = Randomize().randomize(x.copy())
# Do not mix data between columns
np.testing.assert_equal(randomized % 100, x % 100)
# Do not shuffle entire rows:
# lexical sorting of rows should not equal the original table
randomized = np.array(sorted(list(map(list, randomized))), dtype=int)
self.assertFalse(np.all(randomized == x))
def test_randomize_sparse(self):
x = np.array([[0, 0, 3, 0],
[1, 0, 2, 0],
[4, 5, 6, 7]])
randomize = Randomize().randomize
randomized = randomize(sp.csr_matrix(x), rand_state=1)
randomized = randomized.toarray()
# Data is shuffled (rand_seed=1 should always shuffle it)
self.assertFalse(np.all(x == randomized))
# Data remains within a column
self.assertTrue(all(sorted(x[:, i]) == sorted(randomized[:, i])
for i in range(4)))
# Do not shuffle entire rows
randomized = np.array(sorted(list(map(list, randomized))), dtype=int)
self.assertFalse(np.all(randomized == x))
# Test that shuffle is not sparse structure dependent
x = np.array([[1, 2, 3, 4],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]])
randomized = randomize(sp.csr_matrix(x), rand_state=0x393f)
self.assertFalse(np.all(x == randomized.todense()))
        # Do not just assign some indices, i.e. make sure that the shuffling
        # depends on the input's non-zero indices.
r_once = randomize(sp.csr_matrix(x), rand_state=1)
r_twice = randomize(r_once.copy(), rand_state=1)
self.assertFalse(np.all(r_once.todense() == r_twice.todense()))
| null |
1,361 |
#!/usr/bin/env python
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Common objects shared by all _ps* modules."""
from __future__ import division
import errno
import functools
import os
import socket
import stat
import warnings
try:
import threading
except ImportError:
import dummy_threading as threading
from collections import namedtuple
from socket import AF_INET, SOCK_STREAM, SOCK_DGRAM
# --- constants
AF_INET6 = getattr(socket, 'AF_INET6', None)
AF_UNIX = getattr(socket, 'AF_UNIX', None)
STATUS_RUNNING = "running"
STATUS_SLEEPING = "sleeping"
STATUS_DISK_SLEEP = "disk-sleep"
STATUS_STOPPED = "stopped"
STATUS_TRACING_STOP = "tracing-stop"
STATUS_ZOMBIE = "zombie"
STATUS_DEAD = "dead"
STATUS_WAKE_KILL = "wake-kill"
STATUS_WAKING = "waking"
STATUS_IDLE = "idle" # BSD
STATUS_LOCKED = "locked" # BSD
STATUS_WAITING = "waiting" # BSD
CONN_ESTABLISHED = "ESTABLISHED"
CONN_SYN_SENT = "SYN_SENT"
CONN_SYN_RECV = "SYN_RECV"
CONN_FIN_WAIT1 = "FIN_WAIT1"
CONN_FIN_WAIT2 = "FIN_WAIT2"
CONN_TIME_WAIT = "TIME_WAIT"
CONN_CLOSE = "CLOSE"
CONN_CLOSE_WAIT = "CLOSE_WAIT"
CONN_LAST_ACK = "LAST_ACK"
CONN_LISTEN = "LISTEN"
CONN_CLOSING = "CLOSING"
CONN_NONE = "NONE"
# --- functions
def usage_percent(used, total, _round=None):
"""Calculate percentage usage of 'used' against 'total'."""
try:
ret = (used / total) * 100
except ZeroDivisionError:
ret = 0
if _round is not None:
return round(ret, _round)
else:
return ret
def memoize(fun):
"""A simple memoize decorator for functions supporting (hashable)
positional arguments.
It also provides a cache_clear() function for clearing the cache:
>>> @memoize
    ... def foo():
... return 1
...
>>> foo()
1
>>> foo.cache_clear()
>>>
"""
@functools.wraps(fun)
def wrapper(*args, **kwargs):
key = (args, frozenset(sorted(kwargs.items())))
lock.acquire()
try:
try:
return cache[key]
except KeyError:
ret = cache[key] = fun(*args, **kwargs)
finally:
lock.release()
return ret
def cache_clear():
"""Clear cache."""
lock.acquire()
try:
cache.clear()
finally:
lock.release()
lock = threading.RLock()
cache = {}
wrapper.cache_clear = cache_clear
return wrapper
# http://code.activestate.com/recipes/577819-deprecated-decorator/
def deprecated(replacement=None):
"""A decorator which can be used to mark functions as deprecated."""
def outer(fun):
msg = "psutil.%s is deprecated" % fun.__name__
if replacement is not None:
msg += "; use %s instead" % replacement
if fun.__doc__ is None:
fun.__doc__ = msg
@functools.wraps(fun)
def inner(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
return fun(*args, **kwargs)
return inner
return outer
def deprecated_method(replacement):
"""A decorator which can be used to mark a method as deprecated
    'replacement' is the method name which will be called instead.
"""
def outer(fun):
msg = "%s() is deprecated; use %s() instead" % (
fun.__name__, replacement)
if fun.__doc__ is None:
fun.__doc__ = msg
@functools.wraps(fun)
def inner(self, *args, **kwargs):
warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
return getattr(self, replacement)(*args, **kwargs)
return inner
return outer
def METHOD_NAME(path):
"""Same as os.path.isfile() but does not swallow EACCES / EPERM
exceptions, see:
http://mail.python.org/pipermail/python-dev/2012-June/120787.html
"""
try:
st = os.stat(path)
except OSError as err:
if err.errno in (errno.EPERM, errno.EACCES):
raise
return False
else:
return stat.S_ISREG(st.st_mode)
# --- Process.connections() 'kind' parameter mapping
conn_tmap = {
"all": ([AF_INET, AF_INET6, AF_UNIX], [SOCK_STREAM, SOCK_DGRAM]),
"tcp": ([AF_INET, AF_INET6], [SOCK_STREAM]),
"tcp4": ([AF_INET], [SOCK_STREAM]),
"udp": ([AF_INET, AF_INET6], [SOCK_DGRAM]),
"udp4": ([AF_INET], [SOCK_DGRAM]),
"inet": ([AF_INET, AF_INET6], [SOCK_STREAM, SOCK_DGRAM]),
"inet4": ([AF_INET], [SOCK_STREAM, SOCK_DGRAM]),
"inet6": ([AF_INET6], [SOCK_STREAM, SOCK_DGRAM]),
}
if AF_INET6 is not None:
conn_tmap.update({
"tcp6": ([AF_INET6], [SOCK_STREAM]),
"udp6": ([AF_INET6], [SOCK_DGRAM]),
})
if AF_UNIX is not None:
conn_tmap.update({
"unix": ([AF_UNIX], [SOCK_STREAM, SOCK_DGRAM]),
})
del AF_INET, AF_INET6, AF_UNIX, SOCK_STREAM, SOCK_DGRAM, socket
# --- namedtuples for psutil.* system-related functions
# psutil.swap_memory()
sswap = namedtuple('sswap', ['total', 'used', 'free', 'percent', 'sin',
'sout'])
# psutil.disk_usage()
sdiskusage = namedtuple('sdiskusage', ['total', 'used', 'free', 'percent'])
# psutil.disk_io_counters()
sdiskio = namedtuple('sdiskio', ['read_count', 'write_count',
'read_bytes', 'write_bytes',
'read_time', 'write_time'])
# psutil.disk_partitions()
sdiskpart = namedtuple('sdiskpart', ['device', 'mountpoint', 'fstype', 'opts'])
# psutil.net_io_counters()
snetio = namedtuple('snetio', ['bytes_sent', 'bytes_recv',
'packets_sent', 'packets_recv',
'errin', 'errout',
'dropin', 'dropout'])
# psutil.users()
suser = namedtuple('suser', ['name', 'terminal', 'host', 'started'])
# psutil.net_connections()
sconn = namedtuple('sconn', ['fd', 'family', 'type', 'laddr', 'raddr',
'status', 'pid'])
# --- namedtuples for psutil.Process methods
# psutil.Process.memory_info()
pmem = namedtuple('pmem', ['rss', 'vms'])
# psutil.Process.cpu_times()
pcputimes = namedtuple('pcputimes', ['user', 'system'])
# psutil.Process.open_files()
popenfile = namedtuple('popenfile', ['path', 'fd'])
# psutil.Process.threads()
pthread = namedtuple('pthread', ['id', 'user_time', 'system_time'])
# psutil.Process.uids()
puids = namedtuple('puids', ['real', 'effective', 'saved'])
# psutil.Process.gids()
pgids = namedtuple('pgids', ['real', 'effective', 'saved'])
# psutil.Process.io_counters()
pio = namedtuple('pio', ['read_count', 'write_count',
'read_bytes', 'write_bytes'])
# psutil.Process.ionice()
pionice = namedtuple('pionice', ['ioclass', 'value'])
# psutil.Process.ctx_switches()
pctxsw = namedtuple('pctxsw', ['voluntary', 'involuntary'])
# --- misc
# backward compatibility layer for Process.connections() ntuple
class pconn(
namedtuple('pconn',
['fd', 'family', 'type', 'laddr', 'raddr', 'status'])):
__slots__ = ()
@property
def local_address(self):
warnings.warn("'local_address' field is deprecated; use 'laddr'"
"instead", category=DeprecationWarning, stacklevel=2)
return self.laddr
@property
def remote_address(self):
warnings.warn("'remote_address' field is deprecated; use 'raddr'"
"instead", category=DeprecationWarning, stacklevel=2)
return self.raddr
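if __name__ == "__main__":
    # Small self-check sketch (not part of the original module) exercising the
    # helpers defined above; the values used here are illustrative only.
    print(usage_percent(50, 200, _round=1))   # -> 25.0

    @memoize
    def expensive(x):
        print("computing", x)
        return x * x

    print(expensive(3))   # prints "computing 3" then 9
    print(expensive(3))   # cached: prints only 9
    expensive.cache_clear()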
| null |
1,362 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdksas.endpoint import endpoint_data
class DescribePropertyScaDetailRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Sas', '2018-12-03', 'DescribePropertyScaDetail')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_SearchItemSub(self): # String
return self.get_query_params().get('SearchItemSub')
def set_SearchItemSub(self, SearchItemSub): # String
self.add_query_param('SearchItemSub', SearchItemSub)
def get_Remark(self): # String
return self.get_query_params().get('Remark')
def set_Remark(self, Remark): # String
self.add_query_param('Remark', Remark)
def get_Pid(self): # String
return self.get_query_params().get('Pid')
def set_Pid(self, Pid): # String
self.add_query_param('Pid', Pid)
def get_SearchItem(self): # String
return self.get_query_params().get('SearchItem')
def set_SearchItem(self, SearchItem): # String
self.add_query_param('SearchItem', SearchItem)
def get_Uuid(self): # String
return self.get_query_params().get('Uuid')
def set_Uuid(self, Uuid): # String
self.add_query_param('Uuid', Uuid)
def get_Biz(self): # String
return self.get_query_params().get('Biz')
def set_Biz(self, Biz): # String
self.add_query_param('Biz', Biz)
def get_PageSize(self): # Integer
return self.get_query_params().get('PageSize')
def set_PageSize(self, PageSize): # Integer
self.add_query_param('PageSize', PageSize)
def get_ProcessStartedStart(self): # Long
return self.get_query_params().get('ProcessStartedStart')
def set_ProcessStartedStart(self, ProcessStartedStart): # Long
self.add_query_param('ProcessStartedStart', ProcessStartedStart)
def get_ProcessStartedEnd(self): # Long
return self.get_query_params().get('ProcessStartedEnd')
def set_ProcessStartedEnd(self, ProcessStartedEnd): # Long
self.add_query_param('ProcessStartedEnd', ProcessStartedEnd)
def get_Lang(self): # String
return self.get_query_params().get('Lang')
def set_Lang(self, Lang): # String
self.add_query_param('Lang', Lang)
def get_ScaVersion(self): # String
return self.get_query_params().get('ScaVersion')
def set_ScaVersion(self, ScaVersion): # String
self.add_query_param('ScaVersion', ScaVersion)
def get_SearchInfoSub(self): # String
return self.get_query_params().get('SearchInfoSub')
def set_SearchInfoSub(self, SearchInfoSub): # String
self.add_query_param('SearchInfoSub', SearchInfoSub)
def get_SearchInfo(self): # String
return self.get_query_params().get('SearchInfo')
def set_SearchInfo(self, SearchInfo): # String
self.add_query_param('SearchInfo', SearchInfo)
def get_CurrentPage(self): # Integer
return self.get_query_params().get('CurrentPage')
def set_CurrentPage(self, CurrentPage): # Integer
self.add_query_param('CurrentPage', CurrentPage)
def get_BizType(self): # String
return self.get_query_params().get('BizType')
def set_BizType(self, BizType): # String
self.add_query_param('BizType', BizType)
def get_Port(self): # String
return self.get_query_params().get('Port')
def set_Port(self, Port): # String
self.add_query_param('Port', Port)
def get_Name(self): # Long
return self.get_query_params().get('Name')
def set_Name(self, Name): # Long
self.add_query_param('Name', Name)
def get_ScaName(self): # String
return self.get_query_params().get('ScaName')
def set_ScaName(self, ScaName): # String
self.add_query_param('ScaName', ScaName)
def METHOD_NAME(self): # String
return self.get_query_params().get('ScaNamePattern')
def set_ScaNamePattern(self, ScaNamePattern): # String
self.add_query_param('ScaNamePattern', ScaNamePattern)
def get_User(self): # String
return self.get_query_params().get('User')
def set_User(self, User): # String
self.add_query_param('User', User)
| null |
1,363 |
# Copyright (c) ZenML GmbH 2022. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Utility functions for networking."""
import socket
from typing import Optional, cast
from urllib.parse import urlparse
from zenml.environment import Environment
from zenml.logger import get_logger
logger = get_logger(__name__)
# default scanning port range for allocating ports
SCAN_PORT_RANGE = (8000, 65535)
def port_available(port: int, address: str = "127.0.0.1") -> bool:
"""Checks if a local port is available.
Args:
port: TCP port number
address: IP address on the local machine
Returns:
True if the port is available, otherwise False
"""
try:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if hasattr(socket, "SO_REUSEPORT"):
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
else:
# The SO_REUSEPORT socket option is not supported on Windows.
# This if clause exists just for mypy to not complain about
# missing code paths.
pass
s.bind((address, port))
except socket.error as e:
logger.debug("Port %d unavailable on %s: %s", port, address, e)
return False
return True
def find_available_port() -> int:
"""Finds a local random unoccupied TCP port.
Returns:
A random unoccupied TCP port.
"""
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind(("127.0.0.1", 0))
_, port = s.getsockname()
return cast(int, port)
def scan_for_available_port(
start: int = SCAN_PORT_RANGE[0], stop: int = SCAN_PORT_RANGE[1]
) -> Optional[int]:
"""Scan the local network for an available port in the given range.
Args:
start: the beginning of the port range value to scan
stop: the (inclusive) end of the port range value to scan
Returns:
The first available port in the given range, or None if no available
port is found.
"""
for port in range(start, stop + 1):
if port_available(port):
return port
logger.debug(
"No free TCP ports found in the range %d - %d",
start,
stop,
)
return None
def port_is_open(hostname: str, port: int) -> bool:
"""Check if a TCP port is open on a remote host.
Args:
hostname: hostname of the remote machine
port: TCP port number
Returns:
True if the port is open, False otherwise
"""
try:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
result = sock.connect_ex((hostname, port))
return result == 0
except socket.error as e:
logger.debug(
f"Error checking TCP port {port} on host {hostname}: {str(e)}"
)
return False
def METHOD_NAME(url: str) -> str:
"""Replaces the localhost with an internal Docker or K3D hostname in a given URL.
Localhost URLs that are directly accessible on the host machine are not
accessible from within a Docker or K3D container running on that same
machine, but there are special hostnames featured by both Docker
(`host.docker.internal`) and K3D (`host.k3d.internal`) that can be used to
access host services from within the containers.
Use this method to attempt to replace `localhost` in a URL with one of these
special hostnames, if they are available inside a container.
Args:
url: The URL to update.
Returns:
The updated URL.
"""
if not Environment.in_container():
return url
parsed_url = urlparse(url)
if parsed_url.hostname in ("localhost", "127.0.0.1"):
for internal_hostname in (
"host.docker.internal",
"host.k3d.internal",
):
try:
socket.gethostbyname(internal_hostname)
parsed_url = parsed_url._replace(
netloc=parsed_url.netloc.replace(
parsed_url.hostname,
internal_hostname,
)
)
logger.debug(
f"Replacing localhost with {internal_hostname} in URL: "
f"{url}"
)
return parsed_url.geturl()
except socket.gaierror:
continue
return url
def replace_internal_hostname_with_localhost(hostname: str) -> str:
"""Replaces an internal Docker or K3D hostname with localhost.
Localhost URLs that are directly accessible on the host machine are not
accessible from within a Docker or K3D container running on that same
machine, but there are special hostnames featured by both Docker
(`host.docker.internal`) and K3D (`host.k3d.internal`) that can be used to
access host services from within the containers.
Use this method to replace one of these special hostnames with localhost
if used outside a container or in a container where special hostnames are
not available.
Args:
hostname: The hostname to replace.
Returns:
The original or replaced hostname.
"""
if hostname not in ("host.docker.internal", "host.k3d.internal"):
return hostname
if Environment.in_container():
# Try to resolve one of the special hostnames to see if it is available
# inside the container and use that if it is.
for internal_hostname in (
"host.docker.internal",
"host.k3d.internal",
):
try:
socket.gethostbyname(internal_hostname)
if internal_hostname != hostname:
logger.debug(
f"Replacing internal hostname {hostname} with "
f"{internal_hostname}"
)
return internal_hostname
except socket.gaierror:
continue
logger.debug(f"Replacing internal hostname {hostname} with localhost.")
return "127.0.0.1"
def get_or_create_ngrok_tunnel(ngrok_token: str, port: int) -> str:
"""Get or create an ngrok tunnel at the given port.
Args:
ngrok_token: The ngrok auth token.
port: The port to tunnel.
Returns:
The public URL of the ngrok tunnel.
Raises:
ImportError: If the `pyngrok` package is not installed.
"""
try:
from pyngrok import ngrok as ngrok_client
except ImportError:
raise ImportError(
"The `pyngrok` package is required to create ngrok tunnels. "
"Please install it by running `pip install pyngrok`."
)
# Check if ngrok is already tunneling the port
tunnels = ngrok_client.get_tunnels()
for tunnel in tunnels:
if tunnel.config and isinstance(tunnel.config, dict):
tunnel_protocol = tunnel.config.get("proto")
tunnel_port = tunnel.config.get("addr")
if tunnel_protocol == "http" and tunnel_port == port:
return str(tunnel.public_url)
# Create new tunnel
ngrok_client.set_auth_token(ngrok_token)
return str(ngrok_client.connect(port).public_url)
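if __name__ == "__main__":
    # Small demonstration sketch (not part of the original module) of the port
    # helpers above; the actual port numbers depend on the local machine.
    free_port = find_available_port()
    print(f"random free port: {free_port}")
    print(f"port 8080 available locally: {port_available(8080)}")
    print(f"first free port in 8000-8010: {scan_for_available_port(8000, 8010)}")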
| null |
1,364 |
# Copyright 2018-2021 The glTF-Blender-IO authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bpy
from mathutils import Vector
from ...io.imp.gltf2_io_user_extensions import import_user_extensions
from ...io.imp.gltf2_io_binary import BinaryData
from .gltf2_blender_animation_utils import make_fcurve
from .gltf2_blender_vnode import VNode
class BlenderNodeAnim():
"""Blender Object Animation."""
def __new__(cls, *args, **kwargs):
raise RuntimeError("%s should not be instantiated" % cls)
@staticmethod
def anim(gltf, anim_idx, node_idx):
"""Manage animation targeting a node's TRS."""
animation = gltf.data.animations[anim_idx]
node = gltf.data.nodes[node_idx]
if anim_idx not in node.animations.keys():
return
for channel_idx in node.animations[anim_idx]:
channel = animation.channels[channel_idx]
if channel.target.path not in ['translation', 'rotation', 'scale']:
continue
BlenderNodeAnim.do_channel(gltf, anim_idx, node_idx, channel)
@staticmethod
def do_channel(gltf, anim_idx, node_idx, channel):
animation = gltf.data.animations[anim_idx]
vnode = gltf.vnodes[node_idx]
path = channel.target.path
import_user_extensions('gather_import_animation_channel_before_hook', gltf, animation, vnode, path, channel)
action = BlenderNodeAnim.METHOD_NAME(gltf, node_idx, animation.track_name)
keys = BinaryData.get_data_from_accessor(gltf, animation.samplers[channel.sampler].input)
values = BinaryData.get_data_from_accessor(gltf, animation.samplers[channel.sampler].output)
if animation.samplers[channel.sampler].interpolation == "CUBICSPLINE":
# TODO manage tangent?
values = values[1::3]
# Convert the curve from glTF to Blender.
if path == "translation":
blender_path = "location"
group_name = "Location"
num_components = 3
values = [gltf.loc_gltf_to_blender(vals) for vals in values]
values = vnode.base_locs_to_final_locs(values)
elif path == "rotation":
blender_path = "rotation_quaternion"
group_name = "Rotation"
num_components = 4
values = [gltf.quaternion_gltf_to_blender(vals) for vals in values]
values = vnode.base_rots_to_final_rots(values)
elif path == "scale":
blender_path = "scale"
group_name = "Scale"
num_components = 3
values = [gltf.scale_gltf_to_blender(vals) for vals in values]
values = vnode.base_scales_to_final_scales(values)
# Objects parented to a bone are translated to the bone tip by default.
# Correct for this by translating backwards from the tip to the root.
if vnode.type == VNode.Object and path == "translation":
if vnode.parent is not None and gltf.vnodes[vnode.parent].type == VNode.Bone:
bone_length = gltf.vnodes[vnode.parent].bone_length
off = Vector((0, -bone_length, 0))
values = [vals + off for vals in values]
if vnode.type == VNode.Bone:
# Need to animate the pose bone when the node is a bone.
group_name = vnode.blender_bone_name
blender_path = 'pose.bones["%s"].%s' % (
bpy.utils.escape_identifier(vnode.blender_bone_name),
blender_path
)
# We have the final TRS of the bone in values. We need to give
# the TRS of the pose bone though, which is relative to the edit
# bone.
#
# Final = EditBone * PoseBone
# where
# Final = Trans[ft] Rot[fr] Scale[fs]
# EditBone = Trans[et] Rot[er]
# PoseBone = Trans[pt] Rot[pr] Scale[ps]
#
# Solving for PoseBone gives
#
# pt = Rot[er^{-1}] (ft - et)
# pr = er^{-1} fr
# ps = fs
if path == 'translation':
edit_trans, edit_rot = vnode.editbone_trans, vnode.editbone_rot
edit_rot_inv = edit_rot.conjugated()
values = [
edit_rot_inv @ (trans - edit_trans)
for trans in values
]
elif path == 'rotation':
edit_rot = vnode.editbone_rot
edit_rot_inv = edit_rot.conjugated()
values = [
edit_rot_inv @ rot
for rot in values
]
elif path == 'scale':
pass # no change needed
# To ensure rotations always take the shortest path, we flip
# adjacent antipodal quaternions.
if path == 'rotation':
for i in range(1, len(values)):
if values[i].dot(values[i-1]) < 0:
values[i] = -values[i]
fps = bpy.context.scene.render.fps
coords = [0] * (2 * len(keys))
coords[::2] = (key[0] * fps for key in keys)
for i in range(0, num_components):
coords[1::2] = (vals[i] for vals in values)
make_fcurve(
action,
coords,
data_path=blender_path,
index=i,
group_name=group_name,
interpolation=animation.samplers[channel.sampler].interpolation,
)
import_user_extensions('gather_import_animation_channel_after_hook', gltf, animation, vnode, path, channel, action)
@staticmethod
def METHOD_NAME(gltf, node_idx, anim_name):
vnode = gltf.vnodes[node_idx]
if vnode.type == VNode.Bone:
# For bones, the action goes on the armature.
vnode = gltf.vnodes[vnode.bone_arma]
obj = vnode.blender_object
action = gltf.action_cache.get(obj.name)
if not action:
name = anim_name + "_" + obj.name
action = bpy.data.actions.new(name)
action.id_root = 'OBJECT'
gltf.needs_stash.append((obj, action))
gltf.action_cache[obj.name] = action
return action
| null |
1,365 |
# -*- coding: utf-8 -*-
#
# This file is part of Glances.
#
# SPDX-FileCopyrightText: 2022 Nicolas Hennion <[email protected]>
#
# SPDX-License-Identifier: LGPL-3.0-only
#
"""Manage the Glances web/url list (Ports plugin)."""
from glances.globals import urlparse
from glances.logger import logger
class GlancesWebList(object):
"""Manage the Web/Url list for the ports plugin."""
_section = "ports"
_default_refresh = 60
_default_timeout = 3
def __init__(self, config=None, args=None):
# web_list is a list of dict (JSON compliant)
# [ {'url': 'http://blog.nicolargo.com',
# 'refresh': 30,
# 'description': 'My blog',
# 'status': 404} ... ]
# Load the configuration file
self._web_list = self.load(config)
def load(self, config):
"""Load the web list from the configuration file."""
web_list = []
if config is None:
logger.debug("No configuration file available. Cannot load ports list.")
elif not config.has_section(self._section):
logger.debug("No [%s] section in the configuration file. Cannot load ports list." % self._section)
else:
logger.debug("Start reading the [%s] section in the configuration file" % self._section)
refresh = int(config.get_value(self._section, 'refresh', default=self._default_refresh))
timeout = int(config.get_value(self._section, 'timeout', default=self._default_timeout))
# Read the web/url list
for i in range(1, 256):
new_web = {}
postfix = 'web_%s_' % str(i)
# Read mandatory configuration key: host
new_web['url'] = config.get_value(self._section, '%s%s' % (postfix, 'url'))
if new_web['url'] is None:
continue
url_parse = urlparse(new_web['url'])
if not bool(url_parse.scheme) or not bool(url_parse.netloc):
logger.error(
'Bad URL (%s) in the [%s] section of configuration file.' % (new_web['url'], self._section)
)
continue
                # Read optional configuration keys
# Default description is the URL without the http://
new_web['description'] = config.get_value(
self._section, '%sdescription' % postfix, default="%s" % url_parse.netloc
)
# Default status
new_web['status'] = None
new_web['elapsed'] = 0
# Refresh rate in second
new_web['refresh'] = refresh
# Timeout in second
new_web['timeout'] = int(config.get_value(self._section, '%stimeout' % postfix, default=timeout))
# RTT warning
new_web['rtt_warning'] = config.get_value(self._section, '%srtt_warning' % postfix, default=None)
if new_web['rtt_warning'] is not None:
# Convert to second
new_web['rtt_warning'] = int(new_web['rtt_warning']) / 1000.0
# Indice
new_web['indice'] = 'web_' + str(i)
# ssl_verify
new_web['ssl_verify'] = config.get_value(self._section, '%sssl_verify' % postfix, default=True)
# Proxy
http_proxy = config.get_value(self._section, '%shttp_proxy' % postfix, default=None)
https_proxy = config.get_value(self._section, '%shttps_proxy' % postfix, default=None)
if https_proxy is None and http_proxy is None:
new_web['proxies'] = None
else:
new_web['proxies'] = {'http': http_proxy, 'https': https_proxy}
# Add the server to the list
logger.debug("Add Web URL %s to the static list" % new_web['url'])
web_list.append(new_web)
# Ports list loaded
logger.debug("Web list loaded: %s" % web_list)
return web_list
def get_web_list(self):
"""Return the current server list (dict of dict)."""
return self._web_list
def METHOD_NAME(self, pos, key, value):
"""Set the key to the value for the pos (position in the list)."""
self._web_list[pos][key] = value
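# Illustrative configuration sketch (an assumption derived from the keys read in
# load() above, not an excerpt from the Glances documentation): a [ports] section
# that this loader would turn into a single web_list entry.
#
# [ports]
# refresh=30
# timeout=3
# web_1_url=https://blog.nicolargo.com
# web_1_description=My blog
# web_1_timeout=5
# web_1_rtt_warning=500
# web_1_ssl_verify=True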
| null |
1,366 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class SaveSingleTaskForCreatingOrderActivateRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Domain-intl', '2017-12-18', 'SaveSingleTaskForCreatingOrderActivate','domain')
def get_Country(self):
return self.get_query_params().get('Country')
def set_Country(self,Country):
self.add_query_param('Country',Country)
def get_SubscriptionDuration(self):
return self.get_query_params().get('SubscriptionDuration')
def set_SubscriptionDuration(self,SubscriptionDuration):
self.add_query_param('SubscriptionDuration',SubscriptionDuration)
def get_PermitPremiumActivation(self):
return self.get_query_params().get('PermitPremiumActivation')
def set_PermitPremiumActivation(self,PermitPremiumActivation):
self.add_query_param('PermitPremiumActivation',PermitPremiumActivation)
def get_City(self):
return self.get_query_params().get('City')
def set_City(self,City):
self.add_query_param('City',City)
def get_Dns2(self):
return self.get_query_params().get('Dns2')
def set_Dns2(self,Dns2):
self.add_query_param('Dns2',Dns2)
def get_Dns1(self):
return self.get_query_params().get('Dns1')
def set_Dns1(self,Dns1):
self.add_query_param('Dns1',Dns1)
def get_RegistrantProfileId(self):
return self.get_query_params().get('RegistrantProfileId')
def set_RegistrantProfileId(self,RegistrantProfileId):
self.add_query_param('RegistrantProfileId',RegistrantProfileId)
def get_CouponNo(self):
return self.get_query_params().get('CouponNo')
def set_CouponNo(self,CouponNo):
self.add_query_param('CouponNo',CouponNo)
def get_AliyunDns(self):
return self.get_query_params().get('AliyunDns')
def set_AliyunDns(self,AliyunDns):
self.add_query_param('AliyunDns',AliyunDns)
def get_TelExt(self):
return self.get_query_params().get('TelExt')
def set_TelExt(self,TelExt):
self.add_query_param('TelExt',TelExt)
def get_Province(self):
return self.get_query_params().get('Province')
def set_Province(self,Province):
self.add_query_param('Province',Province)
def get_PostalCode(self):
return self.get_query_params().get('PostalCode')
def set_PostalCode(self,PostalCode):
self.add_query_param('PostalCode',PostalCode)
def get_Lang(self):
return self.get_query_params().get('Lang')
def set_Lang(self,Lang):
self.add_query_param('Lang',Lang)
def get_Email(self):
return self.get_query_params().get('Email')
def set_Email(self,Email):
self.add_query_param('Email',Email)
def get_Address(self):
return self.get_query_params().get('Address')
def set_Address(self,Address):
self.add_query_param('Address',Address)
def get_TelArea(self):
return self.get_query_params().get('TelArea')
def set_TelArea(self,TelArea):
self.add_query_param('TelArea',TelArea)
def get_DomainName(self):
return self.get_query_params().get('DomainName')
def set_DomainName(self,DomainName):
self.add_query_param('DomainName',DomainName)
def get_RegistrantType(self):
return self.get_query_params().get('RegistrantType')
def set_RegistrantType(self,RegistrantType):
self.add_query_param('RegistrantType',RegistrantType)
def get_Telephone(self):
return self.get_query_params().get('Telephone')
def set_Telephone(self,Telephone):
self.add_query_param('Telephone',Telephone)
def get_TrademarkDomainActivation(self):
return self.get_query_params().get('TrademarkDomainActivation')
def set_TrademarkDomainActivation(self,TrademarkDomainActivation):
self.add_query_param('TrademarkDomainActivation',TrademarkDomainActivation)
def get_UseCoupon(self):
return self.get_query_params().get('UseCoupon')
def set_UseCoupon(self,UseCoupon):
self.add_query_param('UseCoupon',UseCoupon)
def get_RegistrantOrganization(self):
return self.get_query_params().get('RegistrantOrganization')
def set_RegistrantOrganization(self,RegistrantOrganization):
self.add_query_param('RegistrantOrganization',RegistrantOrganization)
def get_PromotionNo(self):
return self.get_query_params().get('PromotionNo')
def set_PromotionNo(self,PromotionNo):
self.add_query_param('PromotionNo',PromotionNo)
def get_UserClientIp(self):
return self.get_query_params().get('UserClientIp')
def set_UserClientIp(self,UserClientIp):
self.add_query_param('UserClientIp',UserClientIp)
def get_EnableDomainProxy(self):
return self.get_query_params().get('EnableDomainProxy')
def set_EnableDomainProxy(self,EnableDomainProxy):
self.add_query_param('EnableDomainProxy',EnableDomainProxy)
def METHOD_NAME(self):
return self.get_query_params().get('RegistrantName')
def set_RegistrantName(self,RegistrantName):
self.add_query_param('RegistrantName',RegistrantName)
def get_UsePromotion(self):
return self.get_query_params().get('UsePromotion')
def set_UsePromotion(self,UsePromotion):
		self.add_query_param('UsePromotion',UsePromotion)
| null |
1,367 |
## \example kernel/dependency_graph.py
# When trying to understand what is going on in \imp, it can often be useful
# to view the dependency graph, that is, the graph showing how various
# entities relate to one another. In it, an arrow leads from an IMP::Container
# or IMP::Particle to an IMP::Restraint if the IMP::Restraint reads from that
# container or particle. Similarly, an arrow leads from an IMP::Container or
# IMP::Particle to an IMP::ScoreState if the score state reads from it, and
# an arrow leads from an IMP::ScoreState to an IMP::Container or IMP::Particle
# if the score state updates the particle.
#
# The resulting pruned dependency graph is:
# \dotgraph{ \dot
# digraph pruned_dependencies {
# 0[label="Nup85-12"];
# 1[label="Fragment [213-218)"];
# 2[label="Nup145C-4"];
# 3[label="Atom k distance restraint 2"];
# 4[label="Nup1202-8"];
# 5[label="Model 0"];
# 6[label="Fragment [239-244)"];
# 7[label="Nup1332-10"];
# 8[label="Nup1201-12"];
# 9[label="Connectivity particles"];
# 10[label="Nup84-14"];
# 11[label="Atom k distance restraint 3"];
# 12[label="MovedSingletonContainer2"];
# 13[label="Nup1333-16"];
# 14[label="Atom k distance restraint 5"];
# 15[label="Atom k distance restraint 4"];
# 16[label="normalize rigid bodies"];
# 17[label="Atom k distance restraint 0"];
# 18[label="ListSingletonContainer2"];
# 19[label="Chain A rigid body"];
# 20[label="ConnectivityRestraint 0"];
# 21[label="MovedSingletonContainer1"];
# 22[label="ListSingletonContainer1"];
# 23[label="HarmonicUpperBoundSphereDistancePairScore2 and ConnectingPairContainer"];
# 24[label="HarmonicUpperBoundSphereDistancePairScore1 and ConnectingPairContainer"];
# 25[label="Sec13"];
# 26[label="Hierarchy EV"];
# 27[label="Chain A rigid body rigid body positions"];
# 28[label="rigid bodies list"];
# 29[label="Atom k distance restraint 1"];
# 30[label="HarmonicUpperBoundSphereDistancePairScore0 and ConnectingPairContainer"];
# 31[label="ConnectingPairContainer"];
# 32[label="ConnectingPairContainer"];
# 33[label="Chain A rigid body"];
# 34[label="ListSingletonContainer0"];
# 35[label="ConnectivityRestraint 1"];
# 36[label="MovedSingletonContainer0"];
# 37[label="Chain A rigid body rigid body positions"];
# 38[label="ConnectingPairContainer"];
# 39[label="Seh1"];
# 40[label="Hierarchy EV particles"];
# 16->19 ;
# 16->33 ;
# 27->6 ;
# 27->39 ;
# 37->25 ;
# 37->1 ;
# 4->3 ;
# 2->3 ;
# 11->5 ;
# 17->5 ;
# 23->5 ;
# 14->5 ;
# 35->5 ;
# 15->5 ;
# 30->5 ;
# 24->5 ;
# 29->5 ;
# 26->5 ;
# 3->5 ;
# 20->5 ;
# 13->11 ;
# 10->11 ;
# 2->12 ;
# 18->12 ;
# 25->14 ;
# 2->14 ;
# 1->14 ;
# 33->14 ;
# 0->15 ;
# 6->15 ;
# 19->15 ;
# 39->15 ;
# 28->16 ;
# 8->17 ;
# 4->17 ;
# 13->20 ;
# 7->20 ;
# 22->21 ;
# 10->21 ;
# 2->23 ;
# 38->23 ;
# 31->24 ;
# 10->24 ;
# 0->26 ;
# 13->26 ;
# 8->26 ;
# 7->26 ;
# 4->26 ;
# 2->26 ;
# 6->26 ;
# 19->26 ;
# 10->26 ;
# 40->26 ;
# 1->26 ;
# 33->26 ;
# 19->27 ;
# 0->29 ;
# 2->29 ;
# 0->30 ;
# 32->30 ;
# 21->31 ;
# 22->31 ;
# 10->31 ;
# 0->32 ;
# 36->32 ;
# 34->32 ;
# 25->35 ;
# 2->35 ;
# 10->35 ;
# 1->35 ;
# 33->35 ;
# 0->36 ;
# 34->36 ;
# 33->37 ;
# 12->38 ;
# 2->38 ;
# 18->38 ;
# }
# \enddot
# }
import IMP.atom
import IMP.container
import sys
IMP.setup_from_argv(sys.argv, "Example of dependency graphs")
def create_representation():
m = IMP.Model()
all = IMP.atom.Hierarchy.setup_particle(IMP.Particle(m))
all.set_name("the universe")
rs = []
def METHOD_NAME(name, ds):
h = IMP.atom.METHOD_NAME(m, name, 10, ds)
# leaves = IMP.atom.get_leaves(h)
all.add_child(h)
r = IMP.atom.create_connectivity_restraint(
[IMP.atom.Selection(c) for c in h.get_children()], 1)
if r:
rs.append(r)
def create_protein_from_pdbs(name, files):
def create_from_pdb(file):
with IMP.SetLogState(IMP.NONE):
t = IMP.atom.read_pdb(
IMP.get_example_path("data/" + file), m,
IMP.atom.ATOMPDBSelector())
# IMP.atom.show_molecular_hierarchy(t)
c = IMP.atom.Chain(IMP.atom.get_by_type(t, IMP.atom.CHAIN_TYPE)[0])
if c.get_number_of_children() == 0:
IMP.atom.show_molecular_hierarchy(t)
# there is no reason to use all atoms, just approximate the pdb
# shape instead
s = IMP.atom.create_simplified_along_backbone(c,
10.0 / 2.0)
IMP.atom.destroy(t)
# make the simplified structure rigid
rb = IMP.atom.create_rigid_body(s)
rb.set_coordinates_are_optimized(True)
return s
if len(files) > 1:
p = IMP.Particle(m)
h = IMP.atom.Hierarchy.setup_particle(p)
h.set_name(name)
for i, f in enumerate(files):
c = create_from_pdb(f)
h.add_child(c)
c.set_name(name + " chain " + str(i))
r = IMP.atom.create_connectivity_restraint(
[IMP.atom.Selection(c) for c in h.get_children()], 1)
if r:
rs.append(r)
else:
h = create_from_pdb(files[0])
h.set_name(name)
all.add_child(h)
METHOD_NAME("Nup85", 570)
METHOD_NAME("Nup84", 460)
METHOD_NAME("Nup145C", 442)
METHOD_NAME("Nup120", [0, 500, 761])
METHOD_NAME("Nup133", [0, 450, 778, 1160])
create_protein_from_pdbs("Seh1", ["seh1.pdb"])
create_protein_from_pdbs("Sec13", ["sec13.pdb"])
return (m, rs, all)
def create_restraints(m, rs, all):
def add_connectivity_restraint(s):
r = IMP.atom.create_connectivity_restraint(s, 1)
rs.append(r)
def add_distance_restraint(s0, s1):
r = IMP.atom.create_distance_restraint(s0, s1, 0, 1)
rs.append(r)
evr = IMP.atom.create_excluded_volume_restraint([all])
rs.append(evr)
s0 = IMP.atom.Selection(hierarchy=all, molecule="Nup145C",
residue_indexes=range(0, 423))
s1 = IMP.atom.Selection(hierarchy=all, molecule="Nup84")
s2 = IMP.atom.Selection(hierarchy=all, molecule="Sec13")
add_connectivity_restraint([s0, s1, s2])
add_distance_restraint(
IMP.atom.Selection(hierarchy=all, molecule="Nup145C",
residue_indexes=range(0, 423)),
IMP.atom.Selection(hierarchy=all, molecule="Nup85"))
add_distance_restraint(
IMP.atom.Selection(hierarchy=all, molecule="Nup145C",
residue_indexes=range(0, 423)),
IMP.atom.Selection(hierarchy=all, molecule="Nup120",
residue_indexes=range(500, 762)))
add_distance_restraint(IMP.atom.Selection(hierarchy=all, molecule="Nup84"),
IMP.atom.Selection(
hierarchy=all, molecule="Nup133",
residue_indexes=range(778, 1160)))
add_distance_restraint(IMP.atom.Selection(hierarchy=all, molecule="Nup85"),
IMP.atom.Selection(hierarchy=all, molecule="Seh1"))
add_distance_restraint(
IMP.atom.Selection(hierarchy=all, molecule="Nup145C",
residue_indexes=range(0, 423)),
IMP.atom.Selection(hierarchy=all, molecule="Sec13"))
# now do the actual work
(m, rs, all) = create_representation()
create_restraints(m, rs, all)
sf = IMP.core.RestraintsScoringFunction(rs)
# we can get the full dependency graph for the whole model with all
# the restraints but it is pretty complex
dg = IMP.get_dependency_graph(m)
IMP.show_graphviz(dg)
# better thing to do is to get the "pruned" graph
pdg = IMP.get_pruned_dependency_graph(m)
IMP.show_graphviz(pdg)
| null |
1,368 |
# Copyright (c) 2021 The Regents of the University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .abstract_classic_cache_hierarchy import AbstractClassicCacheHierarchy
from ..abstract_cache_hierarchy import AbstractCacheHierarchy
from ...boards.abstract_board import AbstractBoard
from ....isas import ISA
from m5.objects import Bridge, BaseXBar, SystemXBar, BadAddr, Port
from ....utils.override import *
class NoCache(AbstractClassicCacheHierarchy):
"""
No cache hierarchy. The CPUs are connected straight to the memory bus.
By default a 64-bit-wide SystemXBar is used, though this can be
configured via the constructor.
NOTE: At present this does not work with FS. The following error is
received:
```
...
build/X86/mem/snoop_filter.cc:277: panic: panic condition
(sf_item.requested & req_mask).none() occurred: SF value
0000000000000000000000000000000000000000000000000000000000000000 ...
missing the original request
Memory Usage: 3554472 KBytes
Program aborted at tick 1668400099164
--- BEGIN LIBC BACKTRACE ---
...
```
"""
@staticmethod
def _get_default_membus() -> SystemXBar:
"""
A method used to obtain the default 64-bit-wide memory bus for the
NoCache CacheHierarchy.
:returns: The default memory bus for the NoCache CacheHierarchy.
:rtype: SystemXBar
"""
membus = SystemXBar(width=64)
membus.badaddr_responder = BadAddr()
membus.default = membus.badaddr_responder.pio
# the max. routing table size needs to be set
# to a higher value for HBM2 stack
membus.max_routing_table_size = 2048
return membus
def __init__(
self, membus: BaseXBar = _get_default_membus.__func__()
) -> None:
"""
:param membus: The memory bus for this setup. This parameter is
optional and will default to a 64-bit-wide SystemXBar if not specified.
:type membus: BaseXBar
"""
super().__init__()
self.membus = membus
@overrides(AbstractClassicCacheHierarchy)
def get_mem_side_port(self) -> Port:
return self.membus.mem_side_ports
@overrides(AbstractClassicCacheHierarchy)
def METHOD_NAME(self) -> Port:
return self.membus.cpu_side_ports
@overrides(AbstractCacheHierarchy)
def incorporate_cache(self, board: AbstractBoard) -> None:
if board.has_coherent_io():
self._setup_coherent_io_bridge(board)
for core in board.get_processor().get_cores():
core.connect_icache(self.membus.cpu_side_ports)
core.connect_dcache(self.membus.cpu_side_ports)
core.connect_walker_ports(
self.membus.cpu_side_ports, self.membus.cpu_side_ports
)
if board.get_processor().get_isa() == ISA.X86:
int_req_port = self.membus.mem_side_ports
int_resp_port = self.membus.cpu_side_ports
core.connect_interrupt(int_req_port, int_resp_port)
else:
core.connect_interrupt()
# Set up the system port for functional access from the simulator.
board.connect_system_port(self.membus.cpu_side_ports)
for _, port in board.get_memory().get_mem_ports():
self.membus.mem_side_ports = port
def _setup_coherent_io_bridge(self, board: AbstractBoard) -> None:
"""Create a bridge from I/O back to membus"""
self.iobridge = Bridge(delay="10ns", ranges=board.mem_ranges)
self.iobridge.mem_side_port = self.membus.cpu_side_ports
self.iobridge.cpu_side_port = board.get_mem_side_coherent_io_port()
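# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of instantiating this hierarchy inside a gem5 config
# script (gem5 runs such scripts with __name__ == "__m5_main__"); the
# 128-bit bus width is an arbitrary value chosen for illustration.
if __name__ == "__m5_main__":
    default_hierarchy = NoCache()  # uses the default 64-bit-wide SystemXBar
    wide_hierarchy = NoCache(membus=SystemXBar(width=128))  # custom memory bus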
| null |
1,369 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdomain.endpoint import endpoint_data
class VerifyContactFieldRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Domain', '2018-01-29', 'VerifyContactField')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_Country(self): # String
return self.get_query_params().get('Country')
def set_Country(self, Country): # String
self.add_query_param('Country', Country)
def get_City(self): # String
return self.get_query_params().get('City')
def set_City(self, City): # String
self.add_query_param('City', City)
def get_ZhCity(self): # String
return self.get_query_params().get('ZhCity')
def set_ZhCity(self, ZhCity): # String
self.add_query_param('ZhCity', ZhCity)
def get_TelExt(self): # String
return self.get_query_params().get('TelExt')
def set_TelExt(self, TelExt): # String
self.add_query_param('TelExt', TelExt)
def METHOD_NAME(self): # String
return self.get_query_params().get('Province')
def set_Province(self, Province): # String
self.add_query_param('Province', Province)
def get_ZhRegistrantName(self): # String
return self.get_query_params().get('ZhRegistrantName')
def set_ZhRegistrantName(self, ZhRegistrantName): # String
self.add_query_param('ZhRegistrantName', ZhRegistrantName)
def get_PostalCode(self): # String
return self.get_query_params().get('PostalCode')
def set_PostalCode(self, PostalCode): # String
self.add_query_param('PostalCode', PostalCode)
def get_Lang(self): # String
return self.get_query_params().get('Lang')
def set_Lang(self, Lang): # String
self.add_query_param('Lang', Lang)
def get_Email(self): # String
return self.get_query_params().get('Email')
def set_Email(self, Email): # String
self.add_query_param('Email', Email)
def get_ZhRegistrantOrganization(self): # String
return self.get_query_params().get('ZhRegistrantOrganization')
def set_ZhRegistrantOrganization(self, ZhRegistrantOrganization): # String
self.add_query_param('ZhRegistrantOrganization', ZhRegistrantOrganization)
def get_Address(self): # String
return self.get_query_params().get('Address')
def set_Address(self, Address): # String
self.add_query_param('Address', Address)
def get_TelArea(self): # String
return self.get_query_params().get('TelArea')
def set_TelArea(self, TelArea): # String
self.add_query_param('TelArea', TelArea)
def get_ZhAddress(self): # String
return self.get_query_params().get('ZhAddress')
def set_ZhAddress(self, ZhAddress): # String
self.add_query_param('ZhAddress', ZhAddress)
def get_RegistrantType(self): # String
return self.get_query_params().get('RegistrantType')
def set_RegistrantType(self, RegistrantType): # String
self.add_query_param('RegistrantType', RegistrantType)
def get_DomainName(self): # String
return self.get_query_params().get('DomainName')
def set_DomainName(self, DomainName): # String
self.add_query_param('DomainName', DomainName)
def get_Telephone(self): # String
return self.get_query_params().get('Telephone')
def set_Telephone(self, Telephone): # String
self.add_query_param('Telephone', Telephone)
def get_ZhProvince(self): # String
return self.get_query_params().get('ZhProvince')
def set_ZhProvince(self, ZhProvince): # String
self.add_query_param('ZhProvince', ZhProvince)
def get_RegistrantOrganization(self): # String
return self.get_query_params().get('RegistrantOrganization')
def set_RegistrantOrganization(self, RegistrantOrganization): # String
self.add_query_param('RegistrantOrganization', RegistrantOrganization)
def get_UserClientIp(self): # String
return self.get_query_params().get('UserClientIp')
def set_UserClientIp(self, UserClientIp): # String
self.add_query_param('UserClientIp', UserClientIp)
def get_RegistrantName(self): # String
return self.get_query_params().get('RegistrantName')
def set_RegistrantName(self, RegistrantName): # String
self.add_query_param('RegistrantName', RegistrantName)
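# --- Illustrative usage (not part of the generated SDK file) ---
# A minimal sketch of issuing this request through the core SDK client; the
# credentials, region and field values below are placeholders.
if __name__ == "__main__":
    from aliyunsdkcore.client import AcsClient

    client = AcsClient("<access-key-id>", "<access-key-secret>", "cn-hangzhou")
    request = VerifyContactFieldRequest()
    request.set_DomainName("example.com")
    request.set_Email("owner@example.com")
    request.set_RegistrantName("Example Registrant")
    print(client.do_action_with_exception(request))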
| null |
1,370 |
"""Utilities for constructing Galaxy integration tests.
Tests that start an actual Galaxy server with a particular configuration in
order to test something that cannot be tested with the default functional/api
testing configuration.
"""
import os
import re
from typing import (
ClassVar,
Iterator,
Optional,
Type,
TYPE_CHECKING,
TypeVar,
)
from unittest import (
skip,
SkipTest,
)
import pytest
from galaxy.app import UniverseApplication
from galaxy.tool_util.verify.test_data import TestDataResolver
from galaxy.util import safe_makedirs
from galaxy.util.unittest import TestCase
from galaxy.util.unittest_utils import (
_identity,
skip_unless_executable,
)
from galaxy_test.base.api import (
UsesApiTestCaseMixin,
UsesCeleryTasks,
)
from .driver_util import GalaxyTestDriver
if TYPE_CHECKING:
from galaxy_test.base.populators import BaseDatasetPopulator
NO_APP_MESSAGE = "test_case._app called though no Galaxy has been configured."
# Following should be for Homebrew Rabbitmq and Docker on Mac "amqp://guest:guest@localhost:5672//"
AMQP_URL = os.environ.get("GALAXY_TEST_AMQP_URL", None)
POSTGRES_CONFIGURED = "postgres" in os.environ.get("GALAXY_TEST_DBURI", "")
SCRIPT_DIRECTORY = os.path.abspath(os.path.dirname(__file__))
VAULT_CONF = os.path.join(SCRIPT_DIRECTORY, "vault_conf.yml")
def skip_if_jenkins(cls):
if os.environ.get("BUILD_NUMBER", ""):
return skip
return cls
def skip_unless_amqp():
if AMQP_URL is not None:
return _identity
return pytest.mark.skip("AMQP_URL is not set, required for this test.")
def skip_unless_postgres():
if POSTGRES_CONFIGURED:
return _identity
return pytest.mark.skip("GALAXY_TEST_DBURI does not point to postgres database, required for this test.")
def skip_unless_docker():
return skip_unless_executable("docker")
def skip_unless_kubernetes():
return skip_unless_executable("kubectl")
def k8s_config_path():
return os.environ.get("GALAXY_TEST_KUBE_CONFIG_PATH", "~/.kube/config")
def skip_unless_fixed_port():
if os.environ.get("GALAXY_TEST_PORT_RANDOM") != "1":
return _identity
return pytest.mark.skip("GALAXY_TEST_PORT must be set for this test.")
def skip_if_github_workflow():
if os.environ.get("GITHUB_ACTIONS", None) is None:
return _identity
return pytest.mark.skip("This test is skipped for Github actions.")
def skip_unless_environ(env_var):
if os.environ.get(env_var):
return _identity
return pytest.mark.skip(f"{env_var} must be set for this test")
class IntegrationInstance(UsesApiTestCaseMixin, UsesCeleryTasks):
"""Unit test case with utilities for spinning up Galaxy."""
_test_driver: GalaxyTestDriver # Optional in parent class, but required for integration tests.
_app_available: ClassVar[bool]
prefer_template_database = True
# Don't pull in default configs for un-configured things from Galaxy's
# config directory and such.
isolate_galaxy_config = True
dataset_populator: Optional["BaseDatasetPopulator"]
@classmethod
def setUpClass(cls):
"""Configure and start Galaxy for a test."""
cls._app_available = False
cls._test_driver = GalaxyTestDriver()
cls.METHOD_NAME()
cls._test_driver.setup(config_object=cls)
cls._app_available = True
cls._configure_app()
@classmethod
def tearDownClass(cls):
"""Shutdown Galaxy server and cleanup temp directory."""
cls._test_driver.tear_down()
cls._app_available = False
def tearDown(self):
logs = self._test_driver.get_logs()
if logs:
print(logs)
return super().tearDown()
def setUp(self):
self.test_data_resolver = TestDataResolver()
self._configure_interactor()
def _configure_interactor(self):
# Setup attributes needed for API testing...
server_wrapper = self._test_driver.server_wrappers[0]
host = server_wrapper.host
port = server_wrapper.port
prefix = server_wrapper.prefix or ""
self.url = f"http://{host}:{port}{prefix.rstrip('/')}/"
self._setup_interactor()
def restart(self, handle_reconfig=None):
self._test_driver.restart(config_object=self.__class__, handle_config=handle_reconfig)
self._configure_app()
self._configure_interactor()
@property
def _app(self) -> UniverseApplication:
assert self._app_available, NO_APP_MESSAGE
app = self._test_driver.app
assert app, NO_APP_MESSAGE
return app
@property
def _tempdir(self):
return self._test_driver.galaxy_test_tmp_dir
@classmethod
def METHOD_NAME(cls):
"""Extension point for subclasses called before Galaxy is launched."""
@classmethod
def _configure_app(cls):
"""Extension point for subclasses called after Galaxy is launched.
```self._app``` can be used to access Galaxy core app.
"""
def _skip_unless_postgres(self):
if not self._app.config.database_connection.startswith("post"):
raise SkipTest("Test only valid for postgres")
def _run_tool_test(self, *args, **kwargs):
return self._test_driver.run_tool_test(*args, **kwargs)
@classmethod
def temp_config_dir(cls, name):
# realpath here to get around problems with symlinks being blocked.
return os.path.realpath(os.path.join(cls._test_driver.galaxy_test_tmp_dir, name))
@pytest.fixture
def history_id(self) -> Iterator[str]:
assert self.dataset_populator
with self.dataset_populator.test_history() as history_id:
yield history_id
class IntegrationTestCase(IntegrationInstance, TestCase):
"""Unit TestCase with utilities for spinning up Galaxy."""
IntegrationInstanceObject = TypeVar("IntegrationInstanceObject", bound=IntegrationInstance)
def integration_module_instance(clazz: Type[IntegrationInstanceObject]):
def _instance() -> Iterator[IntegrationInstanceObject]:
instance = clazz()
instance.setUpClass()
instance.setUp()
yield instance
instance.tearDownClass()
return pytest.fixture(scope="module")(_instance)
def integration_tool_runner(tool_ids):
def test_tools(instance, tool_id):
instance._run_tool_test(tool_id)
return pytest.mark.parametrize("tool_id", tool_ids)(test_tools)
class ConfiguresObjectStores:
object_stores_parent: ClassVar[str]
_test_driver: GalaxyTestDriver
@classmethod
def _configure_object_store(cls, template, config):
temp_directory = cls._test_driver.mkdtemp()
cls.object_stores_parent = temp_directory
config_path = os.path.join(temp_directory, "object_store_conf.xml")
xml = template.safe_substitute({"temp_directory": temp_directory})
with open(config_path, "w") as f:
f.write(xml)
config["object_store_config_file"] = config_path
for path in re.findall(r'files_dir path="([^"]*)"', xml):
assert path.startswith(temp_directory)
dir_name = os.path.basename(path)
os.path.join(temp_directory, dir_name)
safe_makedirs(path)
setattr(cls, f"{dir_name}_path", path)
class ConfiguresDatabaseVault:
@classmethod
def _configure_database_vault(cls, config):
config["vault_config_file"] = VAULT_CONF
| null |
1,371 |
from sources import *
from content import *
from utils import *
class Formatter:
def __init__( self, processor ):
self.processor = processor
self.identifiers = {}
self.chapters = processor.chapters
self.sections = processor.sections.values()
self.block_index = []
# store all blocks in a dictionary
self.blocks = []
for section in self.sections:
for block in section.blocks.values():
self.add_identifier( block.name, block )
# add enumeration values to the index, since this is useful
for markup in block.markups:
if markup.tag == 'values':
for field in markup.fields:
self.add_identifier( field.name, block )
self.block_index = self.identifiers.keys()
self.block_index.sort( index_sort )
def add_identifier( self, name, block ):
if self.identifiers.has_key( name ):
# duplicate name !!
sys.stderr.write( \
"WARNING: duplicate definition for '" + name + "' in " + \
block.location() + ", previous definition in " + \
self.identifiers[ name ].location() + "\n" )
else:
self.identifiers[name] = block
#
# Formatting the table of contents
#
def toc_enter( self ):
pass
def toc_chapter_enter( self, chapter ):
pass
def toc_section_enter( self, section ):
pass
def toc_section_exit( self, section ):
pass
def toc_chapter_exit( self, chapter ):
pass
def toc_index( self, index_filename ):
pass
def toc_exit( self ):
pass
def toc_dump( self, toc_filename = None, index_filename = None ):
output = None
if toc_filename:
output = open_output( toc_filename )
self.toc_enter()
for chap in self.processor.chapters:
self.toc_chapter_enter( chap )
for section in chap.sections:
self.toc_section_enter( section )
self.toc_section_exit( section )
self.toc_chapter_exit ( chap )
self.toc_index( index_filename )
self.toc_exit()
if output:
close_output( output )
#
# Formatting the index
#
def index_enter( self ):
pass
def index_name_enter( self, name ):
pass
def index_name_exit( self, name ):
pass
def index_exit( self ):
pass
def index_dump( self, index_filename = None ):
output = None
if index_filename:
output = open_output( index_filename )
self.index_enter()
for name in self.block_index:
self.index_name_enter( name )
self.index_name_exit ( name )
self.index_exit()
if output:
close_output( output )
#
# Formatting a section
#
def section_enter( self, section ):
pass
def block_enter( self, block ):
pass
def markup_enter( self, markup, block = None ):
pass
def field_enter( self, field, markup = None, block = None ):
pass
def field_exit( self, field, markup = None, block = None ):
pass
def METHOD_NAME( self, markup, block = None ):
pass
def block_exit( self, block ):
pass
def section_exit( self, section ):
pass
def section_dump( self, section, section_filename = None ):
output = None
if section_filename:
output = open_output( section_filename )
self.section_enter( section )
for name in section.block_names:
block = self.identifiers[ name ]
self.block_enter( block )
for markup in block.markups[1:]: # always ignore first markup !!
self.markup_enter( markup, block )
for field in markup.fields:
self.field_enter( field, markup, block )
self.field_exit ( field, markup, block )
self.METHOD_NAME( markup, block )
self.block_exit( block )
self.section_exit ( section )
if output:
close_output( output )
def section_dump_all( self ):
for section in self.sections:
self.section_dump( section )
#
# Formatting a block
#
| null |
1,372 |
"""GE41RT Detector Distortion"""
import numpy as np
from hexrd import constants as cnst
from hexrd.constants import USE_NUMBA
if USE_NUMBA:
import numba
from .distortionabc import DistortionABC
from .registry import _RegisterDistortionClass
from .utils import newton
RHO_MAX = 204.8 # max radius in mm for ge detector
if USE_NUMBA:
@numba.njit(nogil=True, cache=True)
def _ge_41rt_inverse_distortion(out, in_, rhoMax, params):
maxiter = 100
prec = cnst.epsf
p0, p1, p2, p3, p4, p5 = params[0:6]
rxi = 1.0/rhoMax
for el in range(len(in_)):
xi, yi = in_[el, 0:2]
ri = np.sqrt(xi*xi + yi*yi)
if ri < cnst.sqrt_epsf:
ri_inv = 0.0
else:
ri_inv = 1.0/ri
sinni = yi*ri_inv
cosni = xi*ri_inv
ro = ri
cos2ni = cosni*cosni - sinni*sinni
sin2ni = 2*sinni*cosni
cos4ni = cos2ni*cos2ni - sin2ni*sin2ni
# newton solver iteration
for i in range(maxiter):
ratio = ri*rxi
fx = (p0*ratio**p3*cos2ni +
p1*ratio**p4*cos4ni +
p2*ratio**p5 + 1)*ri - ro # f(x)
fxp = (p0*ratio**p3*cos2ni*(p3+1) +
p1*ratio**p4*cos4ni*(p4+1) +
p2*ratio**p5*(p5+1) + 1) # f'(x)
delta = fx/fxp
ri = ri - delta
# convergence check for newton
if np.abs(delta) <= prec*np.abs(ri):
break
xi = ri*cosni
yi = ri*sinni
out[el, 0] = xi
out[el, 1] = yi
return out
@numba.njit(nogil=True, cache=True)
def _ge_41rt_distortion(out, in_, rhoMax, params):
p0, p1, p2, p3, p4, p5 = params[0:6]
rxi = 1.0/rhoMax
for el in range(len(in_)):
xi, yi = in_[el, 0:2]
ri = np.sqrt(xi*xi + yi*yi)
if ri < cnst.sqrt_epsf:
ri_inv = 0.0
else:
ri_inv = 1.0/ri
sinni = yi*ri_inv
cosni = xi*ri_inv
cos2ni = cosni*cosni - sinni*sinni
sin2ni = 2*sinni*cosni
cos4ni = cos2ni*cos2ni - sin2ni*sin2ni
ratio = ri*rxi
ri = (p0*ratio**p3*cos2ni
+ p1*ratio**p4*cos4ni
+ p2*ratio**p5
+ 1)*ri
xi = ri*cosni
yi = ri*sinni
out[el, 0] = xi
out[el, 1] = yi
return out
else:
# non-numba versions for the direct and inverse distortion
def _ge_41rt_inverse_distortion(out, in_, rhoMax, params):
maxiter = 100
prec = cnst.epsf
p0, p1, p2, p3, p4, p5 = params[0:6]
rxi = 1.0/rhoMax
xi, yi = in_[:, 0], in_[:, 1]
ri = np.sqrt(xi*xi + yi*yi)
# !!! fix for TypeError when processing a list of coords
zfix = []
if np.any(ri < cnst.sqrt_epsf):
zfix = ri < cnst.sqrt_epsf
ri[zfix] = 1.0
ri_inv = 1.0/ri
ri_inv[zfix] = 0.
sinni = yi*ri_inv
cosni = xi*ri_inv
ro = ri
cos2ni = cosni*cosni - sinni*sinni
sin2ni = 2*sinni*cosni
cos4ni = cos2ni*cos2ni - sin2ni*sin2ni
# newton solver iteration
#
# FIXME: looks like we have a problem here,
# should iterate over single coord pairs?
for i in range(maxiter):
ratio = ri*rxi
fx = (p0*ratio**p3*cos2ni +
p1*ratio**p4*cos4ni +
p2*ratio**p5 + 1)*ri - ro # f(x)
fxp = (p0*ratio**p3*cos2ni*(p3+1) +
p1*ratio**p4*cos4ni*(p4+1) +
p2*ratio**p5*(p5+1) + 1) # f'(x)
delta = fx/fxp
ri = ri - delta
# convergence check for newton
if np.max(np.abs(delta/ri)) <= prec:
break
out[:, 0] = ri*cosni
out[:, 1] = ri*sinni
return out
def _ge_41rt_distortion(out, in_, rhoMax, params):
p0, p1, p2, p3, p4, p5 = params[0:6]
rxi = 1.0/rhoMax
xi, yi = in_[:, 0], in_[:, 1]
# !!! included fix for ValueError with array-like in_
ri = np.sqrt(xi*xi + yi*yi)
ri[ri < cnst.sqrt_epsf] = np.inf
ri_inv = 1.0/ri
sinni = yi*ri_inv
cosni = xi*ri_inv
cos2ni = cosni*cosni - sinni*sinni
sin2ni = 2*sinni*cosni
cos4ni = cos2ni*cos2ni - sin2ni*sin2ni
ratio = ri*rxi
ri = (p0*ratio**p3*cos2ni
+ p1*ratio**p4*cos4ni
+ p2*ratio**p5
+ 1)*ri
out[:, 0] = ri*cosni
out[:, 1] = ri*sinni
return out
def _rho_scl_func_inv(ri, ni, ro, rx, p):
retval = (p[0]*(ri/rx)**p[3] * np.cos(2.0 * ni) +
p[1]*(ri/rx)**p[4] * np.cos(4.0 * ni) +
p[2]*(ri/rx)**p[5] + 1)*ri - ro
return retval
def _rho_scl_dfunc_inv(ri, ni, ro, rx, p):
retval = p[0]*(ri/rx)**p[3] * np.cos(2.0 * ni) * (p[3] + 1) + \
p[1]*(ri/rx)**p[4] * np.cos(4.0 * ni) * (p[4] + 1) + \
p[2]*(ri/rx)**p[5] * (p[5] + 1) + 1
return retval
def inverse_distortion_numpy(rho0, eta0, rhoMax, params):
return newton(rho0, _rho_scl_func_inv, _rho_scl_dfunc_inv,
(eta0, rho0, rhoMax, params))
class GE_41RT(DistortionABC, metaclass=_RegisterDistortionClass):
maptype = "GE_41RT"
def __init__(self, params, **kwargs):
self._params = np.asarray(params, dtype=float).flatten()
@property
def params(self):
return self._params
@params.setter
def params(self, x):
assert len(x) == 6, "parameter list must have len of 6"
self._params = np.asarray(x, dtype=float).flatten()
@property
def is_trivial(self):
return \
self.params[0] == 0 and \
self.params[1] == 0 and \
self.params[2] == 0
def apply(self, xy_in):
if self.is_trivial:
return xy_in
else:
xy_in = np.asarray(xy_in, dtype=float)
xy_out = np.empty_like(xy_in)
_ge_41rt_distortion(
xy_out, xy_in, float(RHO_MAX), np.asarray(self.params)
)
return xy_out
def METHOD_NAME(self, xy_in):
if self.is_trivial:
return xy_in
else:
xy_in = np.asarray(xy_in, dtype=float)
xy_out = np.empty_like(xy_in)
_ge_41rt_inverse_distortion(
xy_out, xy_in, float(RHO_MAX), np.asarray(self.params)
)
return xy_out
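# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of applying the distortion and its inverse to a couple of
# detector coordinates; the parameter values are made up for illustration.
if __name__ == "__main__":
    params = [2.0e-4, -1.0e-4, 5.0e-5, 2.0, 2.0, 2.0]
    dist = GE_41RT(params)
    xy = np.array([[10.0, 20.0], [150.0, -75.0]])
    xy_warped = dist.apply(xy)
    xy_round_trip = dist.METHOD_NAME(xy_warped)  # the inverse mapping (name masked in this corpus)
    print(np.max(np.abs(xy_round_trip - xy)))  # ~0 when the Newton solve converges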
| null |
1,373 |
"""
The class used to define an adjoint source for use in the adjoint
(i.e., reverse-mode) simulation.
"""
from meep import CustomSource
import numpy as np
from scipy import linalg, signal
class FilteredSource(CustomSource):
def __init__(
self,
center_frequency: float,
frequencies: np.ndarray,
frequency_response,
dt,
time_src=None,
):
# divide by two to compensate for staggered E, H time interval
dt = dt / 2
self.dt = dt
self.frequencies = frequencies
self.center_frequencies = frequencies
# For now, the basis functions cannot overlap much in the frequency
# domain. Otherwise, the resulting nodes are wildly large and induce
# numerical precision errors. We can always produce a safe result
# by forcing the length of each basis function to meet the minimum
# frequency requirements. This method still minimizes storage
# requirements.
self.T = np.max(np.abs(1 / np.diff(frequencies)))
self.N = np.rint(self.T / self.dt)
self.t = np.arange(0, dt * (self.N), dt)
self.n = np.arange(self.N)
f = self.func()
# frequency bandwidth of the Nuttall window function
fwidth = self.METHOD_NAME()
self.bf = [
lambda t, i=i: 0
if t > self.T
else (
self.nuttall(t, self.center_frequencies)
/ (self.dt / np.sqrt(2 * np.pi))
)[i]
for i in range(len(self.center_frequencies))
]
self.time_src_bf = [
CustomSource(
src_func=bfi,
center_frequency=center_frequency,
is_integrated=False,
end_time=self.T,
fwidth=fwidth,
)
for bfi in self.bf
]
if time_src:
# get the cutoff of the input signal
signal_t = np.array(
[time_src.swigobj.current(ti, dt) for ti in self.t]
) # time domain signal
signal_dtft = self.dtft(signal_t, self.frequencies)
else:
signal_dtft = 1
# Multiply sampled dft of input signal with filter transfer function.
H = signal_dtft * frequency_response
# Estimate the impulse response using a sinc function RBN.
self.nodes, self.err = self.estimate_impulse_response(H)
# initialize super
super().__init__(
src_func=f,
center_frequency=center_frequency,
is_integrated=False,
end_time=self.T,
fwidth=fwidth,
)
def cos_window_td(self, a, t, f0):
cos_sum = np.sum(
[
(-1) ** k * a[k] * np.cos(2 * np.pi * t * k / self.T)
for k in range(len(a))
],
axis=0,
)
return np.exp(-1j * 2 * np.pi * f0 * t) * (cos_sum)
def cos_window_fd(self, a, f, f0):
df = 1 / (self.N * self.dt)
cos_sum = a[0] * self.sinc(f, f0)
for k in range(1, len(a)):
cos_sum += (-1) ** k * a[k] / 2 * self.sinc(f, f0 - k * df) + (-1) ** k * a[
k
] / 2 * self.sinc(f, f0 + k * df)
return cos_sum
def sinc(self, f, f0):
num = np.where(
f == f0,
self.N + 1,
(1 - np.exp(1j * (self.N + 1) * (2 * np.pi) * (f - f0) * self.dt)),
)
den = np.where(f == f0, 1, (1 - np.exp(1j * (2 * np.pi) * (f - f0) * self.dt)))
return num / den
def rect(self, t, f0):
n = np.rint((t) / self.dt)
return np.where(
n.any() < 0.0 or n.any() > self.N,
0,
np.exp(-1j * 2 * np.pi * f0 * t),
)
def hann(self, t, f0):
a = [0.5, 0.5]
return self.cos_window_td(a, t, f0)
def hann_dtft(self, f, f0):
a = [0.5, 0.5]
return self.cos_window_fd(a, f, f0)
def nuttall(self, t, f0):
a = [0.355768, 0.4873960, 0.144232, 0.012604]
return self.cos_window_td(a, t, f0)
def nuttall_dtft(self, f, f0):
a = [0.355768, 0.4873960, 0.144232, 0.012604]
return self.cos_window_fd(a, f, f0)
# Compute the bandwidth of the DTFT of the Nuttall window function
# (magnitude) assuming it has decayed from its peak value by some
# tolerance by fitting it to an asymptotic power law of the form
# C / f^3 where C is a constant and f is the frequency.
def METHOD_NAME(self):
tol = 1e-7
fwidth = 1 / (self.N * self.dt)
frq_inf = 10000 * fwidth
na_dtft = self.nuttall_dtft(frq_inf, 0)
coeff = frq_inf**3 * np.abs(na_dtft)
na_dtft_max = self.nuttall_dtft(0, 0)
bw = 2 * np.power(coeff / (tol * na_dtft_max), 1 / 3)
return bw.real
def dtft(self, y, f):
return (
np.matmul(
np.exp(1j * 2 * np.pi * f[:, np.newaxis] * np.arange(y.size) * self.dt),
y,
)
* self.dt
/ np.sqrt(2 * np.pi)
)
def __call__(self, t):
if t > self.T:
return 0
vec = self.nuttall(t, self.center_frequencies) / (
self.dt / np.sqrt(2 * np.pi)
) # compensate for meep dtft
return np.inner(vec, self.nodes)
def func(self):
def _f(t):
return self(t)
return _f
def estimate_impulse_response(self, H):
# Use a Vandermonde matrix to calculate the weights of each Gaussian
# term. Each window is centered at each frequency point.
#
# TODO: come up with a more sophisticated way to choose the temporal
# window size and basis locations that will minimize l2 estimation
# error and the node weights (since matrix is ill-conditioned).
vandermonde = self.nuttall_dtft(
self.frequencies[:, np.newaxis],
self.center_frequencies[np.newaxis, :],
)
nodes = np.matmul(linalg.pinv(vandermonde), H.T)
H_hat = np.matmul(vandermonde, nodes)
l2_err = np.sum(np.abs(H - H_hat.T) ** 2 / np.abs(H) ** 2)
return nodes, l2_err
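# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of constructing the source with a flat target frequency
# response; the numbers are made up, and Meep must be installed because the
# class derives from meep.CustomSource.
if __name__ == "__main__":
    freqs = np.linspace(0.9, 1.1, 5)
    src = FilteredSource(
        center_frequency=1.0,
        frequencies=freqs,
        frequency_response=np.ones(len(freqs), dtype=complex),
        dt=0.05,
    )
    print("impulse-response fit error:", src.err)
    print("source amplitude at t=0:", src(0.0))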
| null |
1,374 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdklive.endpoint import endpoint_data
class AddRtsLiveStreamTranscodeRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'live', '2016-11-01', 'AddRtsLiveStreamTranscode','live')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_Template(self): # String
return self.get_query_params().get('Template')
def set_Template(self, Template): # String
self.add_query_param('Template', Template)
def get_DeleteBframes(self): # Boolean
return self.get_query_params().get('DeleteBframes')
def set_DeleteBframes(self, DeleteBframes): # Boolean
self.add_query_param('DeleteBframes', DeleteBframes)
def get_Lazy(self): # String
return self.get_query_params().get('Lazy')
def set_Lazy(self, Lazy): # String
self.add_query_param('Lazy', Lazy)
def get_Gop(self): # String
return self.get_query_params().get('Gop')
def set_Gop(self, Gop): # String
self.add_query_param('Gop', Gop)
def get_Opus(self): # Boolean
return self.get_query_params().get('Opus')
def METHOD_NAME(self, Opus): # Boolean
self.add_query_param('Opus', Opus)
def get_AudioCodec(self): # String
return self.get_query_params().get('AudioCodec')
def set_AudioCodec(self, AudioCodec): # String
self.add_query_param('AudioCodec', AudioCodec)
def get_TemplateType(self): # String
return self.get_query_params().get('TemplateType')
def set_TemplateType(self, TemplateType): # String
self.add_query_param('TemplateType', TemplateType)
def get_AudioProfile(self): # String
return self.get_query_params().get('AudioProfile')
def set_AudioProfile(self, AudioProfile): # String
self.add_query_param('AudioProfile', AudioProfile)
def get_Height(self): # Integer
return self.get_query_params().get('Height')
def set_Height(self, Height): # Integer
self.add_query_param('Height', Height)
def get_App(self): # String
return self.get_query_params().get('App')
def set_App(self, App): # String
self.add_query_param('App', App)
def get_AudioChannelNum(self): # Integer
return self.get_query_params().get('AudioChannelNum')
def set_AudioChannelNum(self, AudioChannelNum): # Integer
self.add_query_param('AudioChannelNum', AudioChannelNum)
def get_Profile(self): # Integer
return self.get_query_params().get('Profile')
def set_Profile(self, Profile): # Integer
self.add_query_param('Profile', Profile)
def get_FPS(self): # Integer
return self.get_query_params().get('FPS')
def set_FPS(self, FPS): # Integer
self.add_query_param('FPS', FPS)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_AudioRate(self): # Integer
return self.get_query_params().get('AudioRate')
def set_AudioRate(self, AudioRate): # Integer
self.add_query_param('AudioRate', AudioRate)
def get_AudioBitrate(self): # Integer
return self.get_query_params().get('AudioBitrate')
def set_AudioBitrate(self, AudioBitrate): # Integer
self.add_query_param('AudioBitrate', AudioBitrate)
def get_Domain(self): # String
return self.get_query_params().get('Domain')
def set_Domain(self, Domain): # String
self.add_query_param('Domain', Domain)
def get_Width(self): # Integer
return self.get_query_params().get('Width')
def set_Width(self, Width): # Integer
self.add_query_param('Width', Width)
def get_VideoBitrate(self): # Integer
return self.get_query_params().get('VideoBitrate')
def set_VideoBitrate(self, VideoBitrate): # Integer
self.add_query_param('VideoBitrate', VideoBitrate)
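# --- Illustrative usage (not part of the generated SDK file) ---
# A minimal sketch of configuring an RTS transcode template; the domain, app
# and template names are placeholders.
if __name__ == "__main__":
    from aliyunsdkcore.client import AcsClient

    client = AcsClient("<access-key-id>", "<access-key-secret>", "cn-shanghai")
    request = AddRtsLiveStreamTranscodeRequest()
    request.set_Domain("push.example.com")
    request.set_App("live")
    request.set_Template("rts-720p")
    print(client.do_action_with_exception(request))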
| null |
1,375 |
import enum
import re
import sys
from array import array
from typing import Any, List, Optional, Dict, Tuple, Union, overload
if sys.version_info < (3, 8):
from typing_extensions import Literal
else:
from typing import Literal
from pysam import AlignmentHeader # type: ignore
CMATCH: int
CINS: int
CDEL: int
CREF_SKIP: int
CSOFT_CLIP: int
CHARD_CLIP: int
CPAD: int
CEQUAL: int
CDIFF: int
CBACK: int
FPAIRED: int
FPROPER_PAIR: int
FUNMAP: int
FMUNMAP: int
FREVERSE: int
FMREVERSE: int
FREAD1: int
FREAD2: int
FSECONDARY: int
FQCFAIL: int
FDUP: int
FSUPPLEMENTARY: int
CIGAR2CODE: Dict[int, str]
CIGAR_REGEX: re.Pattern
DATATYPE2FORMAT: Dict[int, Tuple[str, int]]
KEY_NAMES: List[str]
TagValue = Union[str, int, float, array]
class CIGAR_OPS(enum.IntEnum):
CBACK: int
CDEL: int
CDIFF: int
CEQUAL: int
CHARD_CLIP: int
CINS: int
CMATCH: int
CPAD: int
CREF_SKIP: int
CSOFT_CLIP: int
class SAM_FLAGS(enum.IntEnum):
FDUP: int
FMREVERSE: int
FMUNMAP: int
FPAIRED: int
FPROPER_PAIR: int
FQCFAIL: int
FREAD1: int
FREAD2: int
FREVERSE: int
FSECONDARY: int
FSUPPLEMENTARY: int
FUNMAP: int
class AlignedSegment:
header: AlignmentHeader
query_name: Optional[str]
flag: int
reference_name: Optional[str]
reference_id: int
reference_start: int
mapping_quality: int
cigarstring: Optional[str]
next_reference_id: int
next_reference_name: Optional[str]
next_reference_start: int
template_length: int
query_sequence: Optional[str]
query_qualities: Optional[array]
bin: int
is_paired: bool
is_proper_pair: bool
is_unmapped: bool
mate_is_unmapped: bool
is_reverse: bool
mate_is_reverse: bool
is_read1: bool
is_read2: bool
is_secondary: bool
is_qcfail: bool
is_duplicate: bool
is_supplementary: bool
cigartuples: Optional[List[Tuple[int, int]]]
def __init__(self, header: Optional[AlignmentHeader] = ...) -> None: ...
def compare(self, other: Any) -> int: ...
def to_string(self) -> str: ...
@classmethod
def fromstring(cls, sam: str, header: AlignmentHeader) -> AlignedSegment: ...
def to_dict(self) -> Dict: ...
@classmethod
def from_dict(cls, sam_dict: Dict[str, Any], header: AlignmentHeader) -> Any: ...
def get_reference_positions(self, full_length: bool = ...) -> List[int]: ...
@property
def query_length(self) -> int: ...
@property
def reference_end(self) -> Optional[int]: ...
@property
def reference_length(self) -> Optional[int]: ...
@property
def query_alignment_sequence(self) -> Optional[str]: ...
@property
def query_alignment_qualities(self) -> Optional[array]: ...
@property
def query_alignment_start(self) -> int: ...
@property
def query_alignment_end(self) -> int: ...
@property
def query_alignment_length(self) -> int: ...
def infer_query_length(self) -> Optional[int]: ...
def infer_read_length(self) -> Optional[int]: ...
def get_reference_sequence(self) -> str: ...
def get_forward_sequence(self) -> Optional[str]: ...
def get_forward_qualities(self) -> Optional[array]: ...
def get_aligned_pairs(
self, matches_only: bool = ..., with_seq: bool = ...
) -> List[Tuple[int, int]]: ...
def get_blocks(self) -> List[Tuple[int, int]]: ...
def get_overlap(self, start: int, end: int) -> Optional[int]: ...
def get_cigar_stats(self) -> Tuple[array, array]: ...
def set_tag(
self,
tag: str,
value: Union[int, float, str, bytes, array, List, Tuple, None],
value_type: Optional[
Literal["A", "i", "f", "Z", "H", "B", "c", "C", "s", "S", "I"]
] = ...,
replace: bool = ...,
) -> None: ...
def has_tag(self, tag: str) -> bool: ...
@overload
def get_tag(self, tag: str, with_value_type: Literal[False] = ...) -> TagValue: ...
@overload
def get_tag(
self, tag: str, with_value_type: Literal[True]
) -> Tuple[TagValue, str]: ...
@overload
def get_tag(
self, tag: str, with_value_type: bool
) -> Union[TagValue, Tuple[TagValue, str]]: ...
@overload
def get_tags(
self, with_value_type: Literal[False] = ...
) -> List[Tuple[str, TagValue]]: ...
@overload
def get_tags(
self, with_value_type: Literal[True]
) -> List[Tuple[str, TagValue, str]]: ...
@overload
def get_tags(
self, with_value_type: bool
) -> Union[List[Tuple[str, TagValue]], List[Tuple[str, TagValue, str]]]: ...
@overload
def get_tags(
self, with_value_type: bool = ...
) -> Union[List[Tuple[str, TagValue, str]], List[Tuple[str, TagValue]]]: ...
def METHOD_NAME(self, tags: Any) -> None: ...
def __eq__(self, other): ...
def __ge__(self, other): ...
def __gt__(self, other): ...
def __le__(self, other): ...
def __lt__(self, other): ...
def __ne__(self, other): ...
class PileupRead:
@property
def alignment(self) -> AlignedSegment: ...
@property
def query_position(self) -> Optional[int]: ...
@property
def query_position_or_next(self) -> int: ...
@property
def indel(self) -> int: ...
@property
def level(self) -> int: ...
@property
def is_del(self) -> int: ...
@property
def is_head(self) -> int: ...
@property
def is_tail(self) -> int: ...
@property
def is_refskip(self) -> int: ...
class PileupColumn:
nsegments: int
def set_min_base_quality(self, min_base_quality: int) -> None: ...
def __len__(self) -> int: ...
@property
def reference_id(self) -> int: ...
@property
def reference_name(self) -> Optional[str]: ...
@property
def reference_pos(self) -> int: ...
@property
def pileups(self) -> List[PileupRead]: ...
def get_num_aligned(self) -> int: ...
def get_query_sequences(
self,
mark_matches: bool = ...,
mark_ends: bool = ...,
add_indels: bool = ...,
) -> List[str]: ...
def get_query_qualities(self) -> List[int]: ...
def get_mapping_qualities(self) -> List[int]: ...
def get_query_positions(self) -> List[int]: ...
def get_query_names(self) -> List[str]: ...
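# --- Illustrative usage of the annotated API (not part of the stub itself) ---
# Kept as a comment because .pyi stubs are never executed; it assumes an
# indexed BAM file "example.bam" is available.
#
#     import pysam
#     with pysam.AlignmentFile("example.bam", "rb") as bam:
#         for read in bam.fetch():
#             if read.has_tag("NM"):
#                 print(read.query_name, read.get_tag("NM"), read.cigarstring)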
| null |
1,376 |
# Copyright 2017-2022 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pipeline.hpc.logger import Logger
def _perform_command(action, msg, error_msg, skip_on_failure):
Logger.info(msg)
try:
action()
except RuntimeError as e:
Logger.warn(error_msg)
if not skip_on_failure:
raise RuntimeError(error_msg, e)
class GridEngineType:
SGE = "SGE"
SLURM = "SLURM"
def __init__(self):
pass
class AllocationRuleParsingError(RuntimeError):
pass
class AllocationRule:
ALLOWED_VALUES = ['$pe_slots', '$fill_up', '$round_robin']
def __init__(self, value):
if value in AllocationRule.ALLOWED_VALUES:
self.value = value
else:
raise AllocationRuleParsingError('Wrong AllocationRule value, only %s is available!' % AllocationRule.ALLOWED_VALUES)
@staticmethod
def pe_slots():
return AllocationRule('$pe_slots')
@staticmethod
def METHOD_NAME():
return AllocationRule('$fill_up')
@staticmethod
def round_robin():
return AllocationRule('$round_robin')
@staticmethod
def fractional_rules():
return [AllocationRule.round_robin(), AllocationRule.METHOD_NAME()]
@staticmethod
def integral_rules():
return [AllocationRule.pe_slots()]
def __eq__(self, other):
if not isinstance(other, AllocationRule):
# don't attempt to compare against unrelated types
return False
return other.value == self.value
class GridEngineJobState:
RUNNING = 'running'
PENDING = 'pending'
SUSPENDED = 'suspended'
ERROR = 'errored'
DELETED = 'deleted'
COMPLETED = 'completed'
UNKNOWN = 'unknown'
_letter_codes_to_states = {
# Job statuses: [SGE] + [SLURM]
RUNNING: ['r', 't', 'Rr', 'Rt'] + ['RUNNING'],
PENDING: ['qw', 'qw', 'hqw', 'hqw', 'hRwq', 'hRwq', 'hRwq', 'qw', 'qw'] + ['PENDING'],
SUSPENDED: ['s', 'ts', 'S', 'tS', 'T', 'tT', 'Rs', 'Rts', 'RS', 'RtS', 'RT', 'RtT'] + ['SUSPENDED', 'STOPPED'],
ERROR: ['Eqw', 'Ehqw', 'EhRqw'] + ['DEADLINE', ' FAILED'],
DELETED: ['dr', 'dt', 'dRr', 'dRt', 'ds', 'dS', 'dT', 'dRs', 'dRS', 'dRT'] + ['DELETED', 'CANCELLED'],
COMPLETED: [] + ['COMPLETED', 'COMPLETING']
}
@staticmethod
def from_letter_code(code):
for key in GridEngineJobState._letter_codes_to_states:
if code in GridEngineJobState._letter_codes_to_states[key]:
return key
return GridEngineJobState.UNKNOWN
class GridEngineJob:
def __init__(self, id, root_id, name, user, state, datetime, hosts=None, cpu=0, gpu=0, mem=0, pe='local'):
self.id = id
self.root_id = root_id
self.name = name
self.user = user
self.state = state
self.datetime = datetime
self.hosts = hosts if hosts else []
self.cpu = cpu
self.gpu = gpu
self.mem = mem
self.pe = pe
def __repr__(self):
return str(self.__dict__)
class GridEngine:
def get_jobs(self):
pass
def disable_host(self, host):
"""
Disables host to prevent receiving new jobs from the queue.
This command does not abort currently running jobs.
:param host: Host to be disabled.
"""
pass
def enable_host(self, host):
"""
Enables host to make it available to receive new jobs from the queue.
:param host: Host to be enabled.
"""
pass
def get_pe_allocation_rule(self, pe):
"""
Returns allocation rule of the pe
:param pe: Parallel environment to return allocation rule.
"""
pass
def delete_host(self, host, skip_on_failure=False):
"""
Completely deletes host from GE:
1. Shutdown host execution daemon.
2. Removes host from queue settings.
3. Removes host from host group.
4. Removes host from administrative hosts.
5. Removes host from GE.
:param host: Host to be removed.
:param skip_on_failure: Specifies if the host killing should be continued even if some of
the commands has failed.
"""
pass
def get_host_supplies(self):
pass
def get_host_supply(self, host):
pass
def get_engine_type(self):
pass
def is_valid(self, host):
"""
Validates host in GE checking corresponding execution host availability and its states.
:param host: Host to be checked.
:return: True if execution host is valid.
"""
return True
def kill_jobs(self, jobs, force=False):
"""
Kills jobs in GE.
:param jobs: Grid engine jobs.
:param force: Specifies if this command should be performed with -f flag.
"""
pass
class GridEngineDemandSelector:
def select(self, jobs):
pass
class GridEngineJobValidator:
def validate(self, jobs):
pass
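# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of the value classes defined above; running it standalone
# assumes the pipeline package (needed for the Logger import) is importable.
if __name__ == "__main__":
    print(GridEngineJobState.from_letter_code("qw"))       # -> 'pending'
    print(GridEngineJobState.from_letter_code("RUNNING"))  # -> 'running'
    rule = AllocationRule("$pe_slots")
    print(rule in AllocationRule.integral_rules())         # -> True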
| null |
1,377 |
# IODATA is an input and output module for quantum chemistry.
# Copyright (C) 2011-2019 The IODATA Development Team
#
# This file is part of IODATA.
#
# IODATA is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# IODATA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
# --
# pylint: disable=unsubscriptable-object
"""Test iodata.formats.cube module."""
import numpy as np
from numpy.testing import assert_equal, assert_allclose
from ..api import load_one, dump_one
try:
from importlib_resources import as_file, files
except ImportError:
from importlib.resources import as_file, files
def test_load_aelta():
with as_file(files("iodata.test.data").joinpath("aelta.cube")) as fn_cube:
mol = load_one(str(fn_cube))
assert mol.title == 'Some random cube for testing (sort of) useless data'
assert_equal(mol.natom, 72)
assert_allclose(mol.atcoords[5, 0], 27.275511, atol=1.e-5)
assert_allclose(mol.atcoords[-2, 2], 26.460812, atol=1.e-5)
assert_equal(mol.cube.shape, (12, 12, 12))
my_cellvecs = np.array([[1.8626, 0.1, 0.0],
[0.0, 1.8626, 0.0],
[0.0, 0.0, 1.8626]], dtype=float) * 12
assert_allclose(mol.cellvecs, my_cellvecs, atol=1.e-5)
my_axes = np.array([[1.8626, 0.1, 0.0],
[0.0, 1.8626, 0.0],
[0.0, 0.0, 1.8626]], dtype=float)
assert_allclose(mol.cube.axes, my_axes, atol=1.e-5)
assert_allclose(mol.cube.origin, np.array([0.0, 1.2, 0.0]), atol=1.e-10)
assert_allclose(mol.cube.data[0, 0, 0], 9.49232e-06, atol=1.e-12)
assert_allclose(mol.cube.data[-1, -1, -1], 2.09856e-04, atol=1.e-10)
pn = mol.atcorenums
assert_allclose(pn[0], 1.0, atol=1.e-10)
assert_allclose(pn[1], 0.1, atol=1.e-10)
assert_allclose(pn[-2], 0.2, atol=1.e-10)
assert_allclose(pn[-1], mol.atnums[-1], atol=1.e-10)
def METHOD_NAME(tmpdir):
with as_file(files("iodata.test.data").joinpath("aelta.cube")) as fn_cube1:
mol1 = load_one(str(fn_cube1))
fn_cube2 = '%s/%s' % (tmpdir, 'aelta.cube')
dump_one(mol1, fn_cube2)
mol2 = load_one(fn_cube2)
with open(fn_cube2) as f:
line_counter = 0
block_counter = 0
for line in f:
line_counter += 1
if line_counter > 6 + len(mol2.atnums):
if mol2.cube.shape[2] % 6 == 0:
assert len(line.split()) == 6
if mol2.cube.shape[2] % 6 != 0:
block_line_counter = line_counter - (
6
+ len(mol2.atnums)
+ block_counter * (mol2.cube.shape[2] // 6 + 1)
)
if 1 <= block_line_counter <= mol2.cube.shape[2] // 6:
assert len(line.split()) == 6
if block_line_counter == mol2.cube.shape[2] // 6 + 1:
assert len(line.split()) == mol2.cube.shape[2] % 6
assert mol1.title == mol2.title
assert_allclose(mol1.atcoords, mol2.atcoords, atol=1.e-4)
assert_equal(mol1.atnums, mol2.atnums)
cube1 = mol1.cube
cube2 = mol2.cube
assert_allclose(cube1.axes, cube2.axes, atol=1.e-4)
assert_equal(cube1.shape, cube2.shape)
assert_allclose(mol1.cube.data, mol2.cube.data, atol=1.e-4)
assert_allclose(mol1.atcorenums, mol2.atcorenums, atol=1.e-4)
def test_load_dump_h2o_5points(tmpdir):
# load cube file
with as_file(files("iodata.test.data").joinpath("cubegen_h2o_5points.cube")) as fn_cube1:
mol1 = load_one(str(fn_cube1))
# write cube file in a temporary directory
fn_cube2 = tmpdir.join('iodata_h2o_5points.cube')
dump_one(mol1, fn_cube2)
# read the contents as string (skip the first 2 lines) & compare
with open(fn_cube1, "r") as f:
content1 = f.read().split("\n", 2)[-1]
content2 = fn_cube2.read().split("\n", 2)[-1]
assert content1 == content2
def test_load_dump_ch4_6points(tmpdir):
# load cube file
with as_file(files("iodata.test.data").joinpath("cubegen_ch4_6points.cube")) as fn_cube1:
mol1 = load_one(str(fn_cube1))
# write cube file in a temporary directory
fn_cube2 = tmpdir.join('iodata_ch4_6points.cube')
dump_one(mol1, fn_cube2)
# read the contents as string (skip the first 2 lines) & compare
with open(fn_cube1, "r") as f:
content1 = f.read().split("\n", 2)[-1]
content2 = fn_cube2.read().split("\n", 2)[-1]
assert content1 == content2
def test_load_dump_nh3_7points(tmpdir):
# load cube file
with as_file(files("iodata.test.data").joinpath("cubegen_nh3_7points.cube")) as fn_cube1:
mol1 = load_one(str(fn_cube1))
# write cube file in a temporary directory
fn_cube2 = tmpdir.join('iodata_nh3_7points.cube')
dump_one(mol1, fn_cube2)
# read the contents as string (skip the first 2 lines) & compare
with open(fn_cube1, "r") as f:
content1 = f.read().split("\n", 2)[-1]
content2 = fn_cube2.read().split("\n", 2)[-1]
assert content1 == content2
| null |
1,378 |
#!/usr/bin/env python3
# SPDX-FileCopyrightText: 2009 Fermi Research Alliance, LLC
# SPDX-License-Identifier: Apache-2.0
"""
Project:
glideinWMS
Description:
unit test for glideinwms/creation/lib/cvWParams.py
Author:
Dennis Box [email protected]
"""
import copy
import os
import sys
import tempfile
import unittest
from unittest import mock
import xmlrunner
from glideinwms.creation.lib.cvWParams import extract_attr_val, VOFrontendParams, VOFrontendSubParams
from glideinwms.creation.lib.cWParams import Params, SubParams
ARGV = ["fixtures/frontend.xml", "fixtures/frontend.xml"]
SRC_DIR = "fixtures/frontend"
USAGE_PREFIX = "create_frontend"
class TestVOFrontendSubParams(unittest.TestCase):
def setUp(self):
v_o_frontend_params = VOFrontendParams(USAGE_PREFIX, SRC_DIR, ARGV)
self.sub_params = VOFrontendSubParams(v_o_frontend_params.data)
def test_init(self):
self.assertTrue(isinstance(self.sub_params, SubParams))
def test__eq__(self):
cpy = copy.deepcopy(self.sub_params)
self.assertEqual(cpy, self.sub_params)
self.assertTrue(cpy == self.sub_params)
self.assertFalse(self.sub_params is None)
def test_extract_attr_val(self):
monkey = mock.Mock()
monkey.type = "string"
monkey.value = "monkey"
self.assertEqual("monkey", self.sub_params.extract_attr_val(monkey))
def test_looks_like_dict(self):
self.assertTrue(len(list(self.sub_params.keys())) > 0)
# for k in self.sub_params: FAILS in the __getitem__ step
# for k in self.sub_params.keys(): PASSES __getitem__
for k in list(self.sub_params.keys()):
self.assertTrue(k in self.sub_params)
val1 = self.sub_params.__getitem__(k)
val2 = self.sub_params[k]
self.assertEqual(val1, val2)
class TestVOFrontendParams(unittest.TestCase):
def setUp(self):
self.v_o_frontend_params = VOFrontendParams(USAGE_PREFIX, SRC_DIR, ARGV)
def test_init(self):
self.assertTrue(isinstance(self.v_o_frontend_params, Params))
def test_buildDir(self):
self.assertEqual(SRC_DIR, self.v_o_frontend_params.buildDir("", SRC_DIR))
def test_derive(self):
try:
self.v_o_frontend_params.derive()
except RuntimeError as err:
self.fail(err)
def test_extract_attr_val(self):
p = self.v_o_frontend_params
self.assertEqual("1", p.extract_attr_val(p.attrs["GLIDECLIENT_Rank"]))
def METHOD_NAME(self):
sc = self.v_o_frontend_params.get_subparams_class()
self.assertNotEqual(None, sc)
def test_get_top_element(self):
self.assertEqual("frontend", self.v_o_frontend_params.get_top_element())
def test_get_xml_format(self):
fmt_dict = self.v_o_frontend_params.get_xml_format()
self.assertTrue("dicts_params" in fmt_dict)
self.assertTrue("lists_params" in fmt_dict)
def test_get_xml(self):
self.assertTrue(len(self.v_o_frontend_params.get_xml().__repr__()) > 0)
def test_get_description(self):
self.assertTrue(len(self.v_o_frontend_params.get_description().__repr__()) > 0)
def test_init_defaults(self):
try:
self.v_o_frontend_params.init_defaults()
except RuntimeError as err:
self.fail(err)
def test_validate_names(self):
try:
self.v_o_frontend_params.validate_names()
except RuntimeError as err:
self.fail(err)
def test_file_read_and_write(self):
fn = tempfile.NamedTemporaryFile(prefix="/tmp/", delete=False)
fn.close()
self.v_o_frontend_params.save_into_file(fn.name)
new_param_obj = VOFrontendParams("", "", [fn.name, fn.name])
new_param_obj.load_file(fn.name)
def test__eq__(self):
cpy = copy.deepcopy(self.v_o_frontend_params)
self.assertTrue(cpy == self.v_o_frontend_params)
class TestExtractAttrVal(unittest.TestCase):
def test_extract_attr_val(self):
monkey = mock.Mock()
monkey.type = "string"
monkey.value = "monkey"
self.assertEqual(monkey.value, extract_attr_val(monkey))
if __name__ == "__main__":
unittest.main(testRunner=xmlrunner.XMLTestRunner(output="unittests-reports"))
| null |
1,379 |
from nose.tools import * # noqa:
from addons.wiki.models import WikiVersion
from api.base.settings.defaults import API_BASE
from tests.base import ApiWikiTestCase
from osf_tests.factories import ProjectFactory, RegistrationFactory
class TestWikiVersionContentView(ApiWikiTestCase):
def _set_up_public_project_with_wiki_page(self):
self.public_project = ProjectFactory(is_public=True, creator=self.user)
self.public_wiki = self._add_project_wiki_version(self.public_project, self.user)
self.public_url = '/{}wikis/{}/versions/{}/content/'.format(API_BASE, self.public_wiki.wiki_page._id, self.public_wiki.identifier)
def _set_up_private_project_with_wiki_page(self):
self.private_project = ProjectFactory(creator=self.user)
self.private_wiki = self._add_project_wiki_version(self.private_project, self.user)
self.private_url = '/{}wikis/{}/versions/{}/content/'.format(API_BASE, self.private_wiki.wiki_page._id, self.private_wiki.identifier)
def _set_up_public_registration_with_wiki_page(self):
self._set_up_public_project_with_wiki_page()
self.public_registration = RegistrationFactory(project=self.public_project, user=self.user, is_public=True)
self.public_registration_wiki = WikiVersion.objects.get_for_node(self.public_registration, 'home')
self.public_registration.save()
self.public_registration_url = '/{}wikis/{}/versions/{}/content/'.format(API_BASE, self.public_registration_wiki.wiki_page._id, self.public_registration_wiki.identifier)
def test_logged_out_user_can_get_public_wiki_content(self):
self._set_up_public_project_with_wiki_page()
res = self.app.get(self.public_url)
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'text/markdown')
assert_equal(res.body.decode(), self.public_wiki.content)
def METHOD_NAME(self):
self._set_up_public_project_with_wiki_page()
res = self.app.get(self.public_url, auth=self.non_contributor.auth)
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'text/markdown')
assert_equal(res.body.decode(), self.public_wiki.content)
def test_logged_in_contributor_can_get_public_wiki_content(self):
self._set_up_public_project_with_wiki_page()
res = self.app.get(self.public_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'text/markdown')
assert_equal(res.body.decode(), self.public_wiki.content)
def test_logged_out_user_cannot_get_private_wiki_content(self):
self._set_up_private_project_with_wiki_page()
res = self.app.get(self.private_url, expect_errors=True)
assert_equal(res.status_code, 401)
def test_logged_in_non_contributor_cannot_get_private_wiki_content(self):
self._set_up_private_project_with_wiki_page()
res = self.app.get(self.private_url, auth=self.non_contributor.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_logged_in_contributor_can_get_private_wiki_content(self):
self._set_up_private_project_with_wiki_page()
res = self.app.get(self.private_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'text/markdown')
assert_equal(res.body.decode(), self.private_wiki.content)
def test_older_versions_content_can_be_accessed(self):
self._set_up_private_project_with_wiki_page()
# Create a second version
wiki_version = self.private_wiki.wiki_page.update(self.user, 'Second draft of wiki')
wiki_page = wiki_version.wiki_page
res = self.app.get(self.private_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'text/markdown')
assert_equal(res.body.decode(), self.private_wiki.content)
self.private_url_latest = '/{}wikis/{}/versions/{}/content/'.format(API_BASE, wiki_page._id, wiki_version.identifier)
res = self.app.get(self.private_url_latest, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'text/markdown')
assert_equal(res.body.decode(), wiki_version.content)
def test_user_cannot_get_withdrawn_registration_wiki_content(self):
self._set_up_public_registration_with_wiki_page()
withdrawal = self.public_registration.retract_registration(user=self.user, save=True)
token = list(withdrawal.approval_state.values())[0]['approval_token']
withdrawal.approve_retraction(self.user, token)
withdrawal.save()
res = self.app.get(self.public_registration_url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 403)
| null |
1,380 |
import pytest
import pytz
import datetime
from addons.wiki.exceptions import NameMaximumLengthError
from addons.wiki.models import WikiPage, WikiVersion
from addons.wiki.tests.factories import WikiFactory, WikiVersionFactory
from osf_tests.factories import NodeFactory, UserFactory, ProjectFactory
from tests.base import OsfTestCase, fake
pytestmark = pytest.mark.django_db
# from website/addons/wiki/tests/test_wiki.py
class TestWikiPageModel:
@pytest.mark.enable_implicit_clean
def test_page_name_cannot_be_greater_than_100_characters(self):
bad_name = 'a' * 101
page = WikiPage(page_name=bad_name)
with pytest.raises(NameMaximumLengthError):
page.save()
def test_is_current_with_single_version(self):
user = UserFactory()
node = NodeFactory()
page = WikiPage(page_name='foo', node=node)
page.save()
version = page.update(user=user, content='hello')
assert version.is_current is True
def test_is_current_with_multiple_versions(self):
user = UserFactory()
node = NodeFactory()
page = WikiPage(page_name='foo', node=node)
page.save()
ver1 = page.update(user=user, content='draft1')
ver2 = page.update(user=user, content='draft2')
assert ver1.is_current is False
assert ver2.is_current is True
def test_is_current_deleted_page(self):
user = UserFactory()
node = NodeFactory()
page = WikiPage(page_name='foo', node=node)
page.save()
ver1 = page.update(user=user, content='draft1')
page.deleted = datetime.datetime(2017, 1, 1, 1, 00, tzinfo=pytz.utc)
page.save()
assert ver1.is_current is False
class TestWikiPage(OsfTestCase):
def setUp(self):
super(TestWikiPage, self).setUp()
self.user = UserFactory()
self.project = ProjectFactory(creator=self.user)
self.wiki = WikiFactory(user=self.user, node=self.project)
def test_wiki_factory(self):
wiki = WikiFactory()
assert wiki.page_name == 'home'
assert bool(wiki.user)
assert bool(wiki.node)
def test_wiki_version_factory(self):
version = WikiVersionFactory()
assert version.identifier == 1
assert version.content == 'First draft of wiki'
assert bool(version.user)
assert bool(version.wiki_page)
def test_url(self):
assert self.wiki.url == '{project_url}wiki/home/'.format(project_url=self.project.url)
def test_url_for_wiki_page_name_with_spaces(self):
wiki = WikiFactory(user=self.user, node=self.project, page_name='Test Wiki')
url = '{}wiki/{}/'.format(self.project.url, wiki.page_name)
assert wiki.url == url
def METHOD_NAME(self):
wiki = WikiFactory(user=self.user, node=self.project)
wiki.page_name = 'Wiki!@#$%^&*()+'
wiki.save()
url = '{}wiki/{}/'.format(self.project.url, wiki.page_name)
assert wiki.url == url
# Regression test for an issue on prod:
# https://www.flowdock.com/app/cos/archiver/threads/I09794CXgkkFK22_2kpEQfeIws2
# We can't assume that WikiVersion.identifier follows a contiguous
# sequence. There was a WikiPage that had versions (ordered by creation):
# 1, 2, 3, 4, 5, 6, 7, 8, 2, 3, 4, 5
# This test reproduces that state and makes sure that
# WikiPage.current_version_number, WikiPage.get_version, and WikiVersion.is_current
# behave as expected
def test_current_version_number_with_non_contiguous_version_numbers(self):
wiki = WikiFactory()
for i in range(1, 9):
WikiVersion(wiki_page=wiki, identifier=i, content=fake.sentence()).save()
for i in range(2, 6):
WikiVersion(wiki_page=wiki, identifier=i, content=fake.sentence()).save()
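        # After the two loops, the versions ordered by creation carry the
        # identifiers 1..8 followed by 2..5, reproducing the non-contiguous
        # state described above: the most recently created version is "5".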
assert wiki.current_version_number == 5
latest_version = wiki.versions.order_by('-created')[0]
assert latest_version.is_current
assert wiki.get_version(5) == latest_version
| null |
1,381 |
# Use this extension for showing layer status with three leds
import pwmio
import time
from kmk.extensions import Extension, InvalidExtensionEnvironment
from kmk.keys import make_key
class statusLED(Extension):
def __init__(
self,
led_pins,
brightness=30,
brightness_step=5,
brightness_limit=100,
):
self._leds = []
for led in led_pins:
try:
self._leds.append(pwmio.PWMOut(led))
except Exception as e:
print(e)
raise InvalidExtensionEnvironment(
                    'Unable to create pwmio.PWMOut() instance with provided led_pin'
)
self._led_count = len(self._leds)
self.brightness = brightness
self._layer_last = -1
self.brightness_step = brightness_step
self.brightness_limit = brightness_limit
make_key(names=('SLED_INC',), on_press=self._key_led_inc)
make_key(names=('SLED_DEC',), on_press=self._key_led_dec)
def _layer_indicator(self, layer_active, *args, **kwargs):
'''
Indicates layer with leds
For the time being just a simple consecutive single led
indicator. And when there are more layers than leds it
wraps around to the first led again.
(Also works for a single led, which just lights when any
layer is active)
'''
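        # With three leds: layers 1-3 light leds 1-3, layer 4 wraps back to
        # led 1, and layer 0 (base layer) only switches the previous led off.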
if self._layer_last != layer_active:
led_last = 0 if self._layer_last == 0 else 1 + (self._layer_last - 1) % 3
if layer_active > 0:
led_active = 0 if layer_active == 0 else 1 + (layer_active - 1) % 3
self.set_brightness(self.brightness, led_active)
self.set_brightness(0, led_last)
else:
self.set_brightness(0, led_last)
self._layer_last = layer_active
def __repr__(self):
return f'SLED({self._to_dict()})'
def _to_dict(self):
return {
            'brightness': self.brightness,
'brightness_step': self.brightness_step,
'brightness_limit': self.brightness_limit,
}
def on_runtime_enable(self, sandbox):
return
def on_runtime_disable(self, sandbox):
return
def during_bootup(self, sandbox):
'''Light up every single led once for 200 ms'''
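        # The loop runs two extra iterations so the trailing leds are switched
        # off again; each led stays lit across two 0.1 s sleeps (about 200 ms).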
for i in range(self._led_count + 2):
if i < self._led_count:
self._leds[i].duty_cycle = int(self.brightness / 100 * 65535)
i_off = i - 2
if i_off >= 0 and i_off < self._led_count:
self._leds[i_off].duty_cycle = int(0)
time.sleep(0.1)
for led in self._leds:
led.duty_cycle = int(0)
return
def before_matrix_scan(self, sandbox):
return
def METHOD_NAME(self, sandbox):
self._layer_indicator(sandbox.active_layers[0])
return
def before_hid_send(self, sandbox):
return
def after_hid_send(self, sandbox):
return
def on_powersave_enable(self, sandbox):
self.set_brightness(0)
return
def on_powersave_disable(self, sandbox):
        self.set_brightness(self.brightness)
self._leds[2].duty_cycle = int(50 / 100 * 65535)
time.sleep(0.2)
self._leds[2].duty_cycle = int(0)
return
def set_brightness(self, percent, layer_id=-1):
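        # percent is a 0-100 duty-cycle percentage; a negative layer_id targets
        # every led, otherwise layer_id is treated as a 1-based led index.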
if layer_id < 0:
for led in self._leds:
led.duty_cycle = int(percent / 100 * 65535)
else:
self._leds[layer_id - 1].duty_cycle = int(percent / 100 * 65535)
    def increase_brightness(self, step=None):
        if not step:
            self.brightness += self.brightness_step
        else:
            self.brightness += step
        if self.brightness > 100:
            self.brightness = 100
        self.set_brightness(self.brightness, self._layer_last)
    def decrease_brightness(self, step=None):
        if not step:
            self.brightness -= self.brightness_step
        else:
            self.brightness -= step
        if self.brightness < 0:
            self.brightness = 0
        self.set_brightness(self.brightness, self._layer_last)
def _key_led_inc(self, *args, **kwargs):
self.increase_brightness()
def _key_led_dec(self, *args, **kwargs):
self.decrease_brightness()
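# Illustrative usage sketch (module path, pin names and keyboard object are
# assumptions, not defined here):
#
#     import board
#     from kmk.kmk_keyboard import KMKKeyboard
#
#     keyboard = KMKKeyboard()
#     status_led = statusLED(led_pins=[board.GP0, board.GP1, board.GP2])
#     keyboard.extensions.append(status_led)
#
# The SLED_INC / SLED_DEC keys created in __init__ can then be placed on the
# keymap to adjust brightness at runtime.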
| null |
1,382 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkoceanbasepro.endpoint import endpoint_data
import json
class CreateProjectRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'OceanBasePro', '2019-09-01', 'CreateProject','oceanbase')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_SinkEndpointId(self): # String
return self.get_body_params().get('SinkEndpointId')
def set_SinkEndpointId(self, SinkEndpointId): # String
self.add_body_params('SinkEndpointId', SinkEndpointId)
def get_UseOss(self): # Boolean
return self.get_body_params().get('UseOss')
def set_UseOss(self, UseOss): # Boolean
self.add_body_params('UseOss', UseOss)
def get_OssKey(self): # String
return self.get_body_params().get('OssKey')
def set_OssKey(self, OssKey): # String
self.add_body_params('OssKey', OssKey)
def get_SourceEndpointId(self): # String
return self.get_body_params().get('SourceEndpointId')
def set_SourceEndpointId(self, SourceEndpointId): # String
self.add_body_params('SourceEndpointId', SourceEndpointId)
def get_Type(self): # String
return self.get_body_params().get('Type')
def set_Type(self, Type): # String
self.add_body_params('Type', Type)
def get_FullTransferConfig(self): # Struct
return self.get_body_params().get('FullTransferConfig')
def set_FullTransferConfig(self, FullTransferConfig): # Struct
self.add_body_params("FullTransferConfig", json.dumps(FullTransferConfig))
def get_EnableStructTransfer(self): # Boolean
return self.get_body_params().get('EnableStructTransfer')
def set_EnableStructTransfer(self, EnableStructTransfer): # Boolean
self.add_body_params('EnableStructTransfer', EnableStructTransfer)
def get_TransferMapping(self): # Struct
return self.get_body_params().get('TransferMapping')
def set_TransferMapping(self, TransferMapping): # Struct
self.add_body_params("TransferMapping", json.dumps(TransferMapping))
def get_WorkerGradeId(self): # String
return self.get_body_params().get('WorkerGradeId')
def set_WorkerGradeId(self, WorkerGradeId): # String
self.add_body_params('WorkerGradeId', WorkerGradeId)
def get_CommonTransferConfig(self): # Struct
return self.get_body_params().get('CommonTransferConfig')
def set_CommonTransferConfig(self, CommonTransferConfig): # Struct
self.add_body_params("CommonTransferConfig", json.dumps(CommonTransferConfig))
def get_StructTransferConfig(self): # Struct
return self.get_body_params().get('StructTransferConfig')
def set_StructTransferConfig(self, StructTransferConfig): # Struct
self.add_body_params("StructTransferConfig", json.dumps(StructTransferConfig))
def get_EnableIncrTransfer(self): # Boolean
return self.get_body_params().get('EnableIncrTransfer')
def METHOD_NAME(self, EnableIncrTransfer): # Boolean
self.add_body_params('EnableIncrTransfer', EnableIncrTransfer)
def get_EnableFullTransfer(self): # Boolean
return self.get_body_params().get('EnableFullTransfer')
def set_EnableFullTransfer(self, EnableFullTransfer): # Boolean
self.add_body_params('EnableFullTransfer', EnableFullTransfer)
def get_EnableFullVerify(self): # Boolean
return self.get_body_params().get('EnableFullVerify')
def set_EnableFullVerify(self, EnableFullVerify): # Boolean
self.add_body_params('EnableFullVerify', EnableFullVerify)
def get_Name(self): # String
return self.get_body_params().get('Name')
def set_Name(self, Name): # String
self.add_body_params('Name', Name)
def get_LabelIds(self): # Array
return self.get_body_params().get('LabelIds')
def set_LabelIds(self, LabelIds): # Array
self.add_body_params("LabelIds", json.dumps(LabelIds))
def get_IncrTransferConfig(self): # Struct
return self.get_body_params().get('IncrTransferConfig')
def set_IncrTransferConfig(self, IncrTransferConfig): # Struct
self.add_body_params("IncrTransferConfig", json.dumps(IncrTransferConfig))
def get_EnableReverseIncrTransfer(self): # Boolean
return self.get_body_params().get('EnableReverseIncrTransfer')
def set_EnableReverseIncrTransfer(self, EnableReverseIncrTransfer): # Boolean
self.add_body_params('EnableReverseIncrTransfer', EnableReverseIncrTransfer)
| null |
1,383 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkehpc.endpoint import endpoint_data
class SubmitJobRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'EHPC', '2018-04-12', 'SubmitJob')
self.set_method('GET')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_StderrRedirectPath(self): # String
return self.get_query_params().get('StderrRedirectPath')
def set_StderrRedirectPath(self, StderrRedirectPath): # String
self.add_query_param('StderrRedirectPath', StderrRedirectPath)
def get_RunasUserPassword(self): # String
return self.get_query_params().get('RunasUserPassword')
def set_RunasUserPassword(self, RunasUserPassword): # String
self.add_query_param('RunasUserPassword', RunasUserPassword)
def get_ClockTime(self): # String
return self.get_query_params().get('ClockTime')
def set_ClockTime(self, ClockTime): # String
self.add_query_param('ClockTime', ClockTime)
def get_JobRetryPriority(self): # Integer
return self.get_query_params().get('JobRetry.Priority')
def set_JobRetryPriority(self, JobRetryPriority): # Integer
self.add_query_param('JobRetry.Priority', JobRetryPriority)
def get_CommandLine(self): # String
return self.get_query_params().get('CommandLine')
def set_CommandLine(self, CommandLine): # String
self.add_query_param('CommandLine', CommandLine)
def get_JobQueue(self): # String
return self.get_query_params().get('JobQueue')
def METHOD_NAME(self, JobQueue): # String
self.add_query_param('JobQueue', JobQueue)
def get_ArrayRequest(self): # String
return self.get_query_params().get('ArrayRequest')
def set_ArrayRequest(self, ArrayRequest): # String
self.add_query_param('ArrayRequest', ArrayRequest)
def get_UnzipCmd(self): # String
return self.get_query_params().get('UnzipCmd')
def set_UnzipCmd(self, UnzipCmd): # String
self.add_query_param('UnzipCmd', UnzipCmd)
def get_PackagePath(self): # String
return self.get_query_params().get('PackagePath')
def set_PackagePath(self, PackagePath): # String
self.add_query_param('PackagePath', PackagePath)
def get_Mem(self): # String
return self.get_query_params().get('Mem')
def set_Mem(self, Mem): # String
self.add_query_param('Mem', Mem)
def get_JobRetryCount(self): # Integer
return self.get_query_params().get('JobRetry.Count')
def set_JobRetryCount(self, JobRetryCount): # Integer
self.add_query_param('JobRetry.Count', JobRetryCount)
def get_StdoutRedirectPath(self): # String
return self.get_query_params().get('StdoutRedirectPath')
def set_StdoutRedirectPath(self, StdoutRedirectPath): # String
self.add_query_param('StdoutRedirectPath', StdoutRedirectPath)
def get_Variables(self): # String
return self.get_query_params().get('Variables')
def set_Variables(self, Variables): # String
self.add_query_param('Variables', Variables)
def get_PostCmdLine(self): # String
return self.get_query_params().get('PostCmdLine')
def set_PostCmdLine(self, PostCmdLine): # String
self.add_query_param('PostCmdLine', PostCmdLine)
def get_RunasUser(self): # String
return self.get_query_params().get('RunasUser')
def set_RunasUser(self, RunasUser): # String
self.add_query_param('RunasUser', RunasUser)
def get_Cpu(self): # Integer
return self.get_query_params().get('Cpu')
def set_Cpu(self, Cpu): # Integer
self.add_query_param('Cpu', Cpu)
def get_ClusterId(self): # String
return self.get_query_params().get('ClusterId')
def set_ClusterId(self, ClusterId): # String
self.add_query_param('ClusterId', ClusterId)
def get_ReRunable(self): # Boolean
return self.get_query_params().get('ReRunable')
def set_ReRunable(self, ReRunable): # Boolean
self.add_query_param('ReRunable', ReRunable)
def get_Thread(self): # Integer
return self.get_query_params().get('Thread')
def set_Thread(self, Thread): # Integer
self.add_query_param('Thread', Thread)
def get_Priority(self): # Integer
return self.get_query_params().get('Priority')
def set_Priority(self, Priority): # Integer
self.add_query_param('Priority', Priority)
def get_Gpu(self): # Integer
return self.get_query_params().get('Gpu')
def set_Gpu(self, Gpu): # Integer
self.add_query_param('Gpu', Gpu)
def get_JobRetryOnExitCode(self): # Integer
return self.get_query_params().get('JobRetry.OnExitCode')
def set_JobRetryOnExitCode(self, JobRetryOnExitCode): # Integer
self.add_query_param('JobRetry.OnExitCode', JobRetryOnExitCode)
def get_Node(self): # Integer
return self.get_query_params().get('Node')
def set_Node(self, Node): # Integer
self.add_query_param('Node', Node)
def get_Async(self): # Boolean
return self.get_query_params().get('Async')
def set_Async(self, _Async): # Boolean
self.add_query_param('Async', _Async)
def get_Task(self): # Integer
return self.get_query_params().get('Task')
def set_Task(self, Task): # Integer
self.add_query_param('Task', Task)
def get_InputFileUrl(self): # String
return self.get_query_params().get('InputFileUrl')
def set_InputFileUrl(self, InputFileUrl): # String
self.add_query_param('InputFileUrl', InputFileUrl)
def get_Name(self): # String
return self.get_query_params().get('Name')
def set_Name(self, Name): # String
self.add_query_param('Name', Name)
def get_ContainerId(self): # String
return self.get_query_params().get('ContainerId')
def set_ContainerId(self, ContainerId): # String
self.add_query_param('ContainerId', ContainerId)
| null |
1,384 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkadcp.endpoint import endpoint_data
import json
class UpdateHubClusterFeatureRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'adcp', '2022-01-01', 'UpdateHubClusterFeature','adcp')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_AccessControlList(self): # Array
return self.get_query_params().get('AccessControlList')
def set_AccessControlList(self, AccessControlList): # Array
self.add_query_param("AccessControlList", json.dumps(AccessControlList))
def get_MonitorEnabled(self): # Boolean
return self.get_query_params().get('MonitorEnabled')
def set_MonitorEnabled(self, MonitorEnabled): # Boolean
self.add_query_param('MonitorEnabled', MonitorEnabled)
def get_DeletionProtection(self): # Boolean
return self.get_query_params().get('DeletionProtection')
def set_DeletionProtection(self, DeletionProtection): # Boolean
self.add_query_param('DeletionProtection', DeletionProtection)
def get_EnableMesh(self): # Boolean
return self.get_query_params().get('EnableMesh')
def set_EnableMesh(self, EnableMesh): # Boolean
self.add_query_param('EnableMesh', EnableMesh)
def get_ArgoCDHAEnabled(self): # Boolean
return self.get_query_params().get('ArgoCDHAEnabled')
def set_ArgoCDHAEnabled(self, ArgoCDHAEnabled): # Boolean
self.add_query_param('ArgoCDHAEnabled', ArgoCDHAEnabled)
def METHOD_NAME(self): # Boolean
return self.get_query_params().get('ArgoCDEnabled')
def set_ArgoCDEnabled(self, ArgoCDEnabled): # Boolean
self.add_query_param('ArgoCDEnabled', ArgoCDEnabled)
def get_VSwitches(self): # Array
return self.get_query_params().get('VSwitches')
def set_VSwitches(self, VSwitches): # Array
self.add_query_param("VSwitches", json.dumps(VSwitches))
def get_Name(self): # String
return self.get_query_params().get('Name')
def set_Name(self, Name): # String
self.add_query_param('Name', Name)
def get_PublicAccessEnabled(self): # Boolean
return self.get_query_params().get('PublicAccessEnabled')
def set_PublicAccessEnabled(self, PublicAccessEnabled): # Boolean
self.add_query_param('PublicAccessEnabled', PublicAccessEnabled)
def get_PublicApiServerEnabled(self): # Boolean
return self.get_query_params().get('PublicApiServerEnabled')
def set_PublicApiServerEnabled(self, PublicApiServerEnabled): # Boolean
self.add_query_param('PublicApiServerEnabled', PublicApiServerEnabled)
def get_ArgoServerEnabled(self): # Boolean
return self.get_query_params().get('ArgoServerEnabled')
def set_ArgoServerEnabled(self, ArgoServerEnabled): # Boolean
self.add_query_param('ArgoServerEnabled', ArgoServerEnabled)
def get_WorkflowScheduleMode(self): # String
return self.get_query_params().get('WorkflowScheduleMode')
def set_WorkflowScheduleMode(self, WorkflowScheduleMode): # String
self.add_query_param('WorkflowScheduleMode', WorkflowScheduleMode)
def get_AuditLogEnabled(self): # Boolean
return self.get_query_params().get('AuditLogEnabled')
def set_AuditLogEnabled(self, AuditLogEnabled): # Boolean
self.add_query_param('AuditLogEnabled', AuditLogEnabled)
def get_ClusterId(self): # String
return self.get_query_params().get('ClusterId')
def set_ClusterId(self, ClusterId): # String
self.add_query_param('ClusterId', ClusterId)
def get_PriceLimit(self): # String
return self.get_query_params().get('PriceLimit')
def set_PriceLimit(self, PriceLimit): # String
self.add_query_param('PriceLimit', PriceLimit)
def get_ApiServerEipId(self): # String
return self.get_query_params().get('ApiServerEipId')
def set_ApiServerEipId(self, ApiServerEipId): # String
self.add_query_param('ApiServerEipId', ApiServerEipId)
| null |
1,385 |
from pathlib import Path
from typing import Optional
import pytest
from click.testing import CliRunner
from ggshield.__main__ import cli
from ggshield.core.errors import ExitCode
from ggshield.utils.os import cd
from tests.repository import Repository
from tests.unit.conftest import my_vcr
@pytest.mark.parametrize(
"scan_arg, cassette",
[
(None, "test_sca_scan_prepush_output_diff"),
("--all", "test_sca_scan_prepush_output_all"),
],
)
def METHOD_NAME(
tmp_path: Path,
cli_fs_runner: CliRunner,
    scan_arg: Optional[str],
cassette: str,
pipfile_lock_with_vuln,
) -> None:
"""
GIVEN a remote repository and a local clone
WHEN executing the prepush command with or without the '--all' option
THEN the scan output format is correct
"""
remote_repo = Repository.create(tmp_path / "remote", bare=True)
local_repo = Repository.clone(remote_repo.path, tmp_path / "local")
sha = local_repo.create_commit()
dep_file = local_repo.path / "Pipfile.lock"
dep_file.write_text(pipfile_lock_with_vuln)
local_repo.add("Pipfile.lock")
local_repo.create_commit()
with cd(str(tmp_path / "local")):
args = ["sca", "scan", "pre-push"]
if scan_arg is not None:
args.append(scan_arg)
with my_vcr.use_cassette(cassette):
result = cli_fs_runner.invoke(
cli,
args,
env={"PRE_COMMIT_FROM_REF": "", "PRE_COMMIT_TO_REF": sha},
)
assert result.exit_code == ExitCode.SCAN_FOUND_PROBLEMS
assert "> Pipfile.lock: 1 incident detected" in result.stdout
assert (
"""
Severity: Medium
Summary: sqlparse contains a regular expression that is vulnerable to Regular Expression Denial of Service
A fix is available at version 0.4.4
Identifier: GHSA-rrm6-wvj7-cwh2
CVE IDs: CVE-2023-30608"""
in result.stdout
)
@pytest.mark.parametrize(
"scan_arg, cassette",
[
(None, "test_sca_scan_prepush_no_sca_changes"),
("--all", "test_sca_scan_prepush_no_sca_changes_all"),
],
)
def test_sca_scan_prepush_no_sca_changes(
tmp_path: Path,
cli_fs_runner: CliRunner,
    scan_arg: Optional[str],
cassette: str,
) -> None:
"""
GIVEN a remote repository and a local clone
WHEN executing the prepush command with no SCA file in commits
THEN the scan is performed if and only if '--all' is specified
"""
remote_repo = Repository.create(tmp_path / "remote", bare=True)
local_repo = Repository.clone(remote_repo.path, tmp_path / "local")
file = local_repo.path / "Pipfile"
file.write_text("")
local_repo.add("Pipfile")
local_repo.create_commit()
local_repo.push()
file = local_repo.path / "non_sca.txt"
file.write_text("This should not be detected")
local_repo.add("non_sca.txt")
sha = local_repo.create_commit()
with cd(str(tmp_path / "local")):
args = ["sca", "scan", "pre-push"]
if scan_arg is not None:
args.append(scan_arg)
with my_vcr.use_cassette(cassette):
result = cli_fs_runner.invoke(
cli,
args,
env={"PRE_COMMIT_FROM_REF": "", "PRE_COMMIT_TO_REF": sha},
)
assert result.exit_code == ExitCode.SUCCESS
if scan_arg is None:
assert "No SCA vulnerability has been added" in result.stdout
else:
assert "No SCA vulnerability has been found" in result.stdout
@my_vcr.use_cassette("test_sca_scan_prepush_no_sca_files.yaml")
def test_sca_scan_prepush_no_sca_files(
tmp_path: Path, cli_fs_runner: CliRunner
) -> None:
"""
GIVEN a remote repository and a local clone
WHEN executing the prepush command with --all option and no SCA file in repo
THEN no scan is performed
"""
remote_repo = Repository.create(tmp_path / "remote", bare=True)
local_repo = Repository.clone(remote_repo.path, tmp_path / "local")
file = local_repo.path / "before_hook.txt"
file.write_text("This should not be detected")
local_repo.add("before_hook.txt")
local_repo.create_commit()
local_repo.push()
file = local_repo.path / "non_sca.txt"
file.write_text("This should not be detected")
local_repo.add("non_sca.txt")
sha = local_repo.create_commit()
with cd(str(tmp_path / "local")):
result = cli_fs_runner.invoke(
cli,
["sca", "scan", "pre-push", "--all"],
env={"PRE_COMMIT_FROM_REF": "", "PRE_COMMIT_TO_REF": sha},
)
assert result.exit_code == ExitCode.SUCCESS
assert "No file to scan." in result.stdout
assert "No SCA vulnerability has been found" in result.stdout
| null |
1,386 |
# Drakkar-Software OctoBot-Tentacles
# Copyright (c) Drakkar-Software, All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library.
import collections
import logging
import abc
import os.path
import octobot_commons.logging as bot_logging
import octobot_commons.timestamp_util as timestamp_util
class Notifier:
@abc.abstractmethod
def send_notifications(self) -> bool:
raise NotImplementedError("send_notifications is not implemented")
notifiers = {}
def register_notifier(notification_key, notifier):
if notification_key not in notifiers:
notifiers[notification_key] = []
notifiers[notification_key].append(notifier)
GENERAL_NOTIFICATION_KEY = "general_notifications"
BACKTESTING_NOTIFICATION_KEY = "backtesting_notifications"
DATA_COLLECTOR_NOTIFICATION_KEY = "data_collector_notifications"
STRATEGY_OPTIMIZER_NOTIFICATION_KEY = "strategy_optimizer_notifications"
DASHBOARD_NOTIFICATION_KEY = "dashboard_notifications"
# Make WebInterface visible to imports
from tentacles.Services.Interfaces.web_interface.web import WebInterface
# disable server logging
for logger in ('engineio.server', 'socketio.server', 'geventwebsocket.handler'):
logging.getLogger(logger).setLevel(logging.WARNING)
notifications_history = collections.deque(maxlen=1000)
notifications = []
TIME_AXIS_TITLE = "Time"
def dir_last_updated(folder):
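    # Most recent modification time of any file under `folder`, returned as a
    # string; used below to track when the served static files last changed.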
return str(max(os.path.getmtime(os.path.join(root_path, f))
for root_path, dirs, files in os.walk(folder)
for f in files))
LAST_UPDATED_STATIC_FILES = 0
def update_registered_plugins(plugins):
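    # Keep LAST_UPDATED_STATIC_FILES at the newest mtime across the built-in
    # static folder and every registered plugin's static folder.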
global LAST_UPDATED_STATIC_FILES
last_update_time = float(LAST_UPDATED_STATIC_FILES)
for plugin in plugins:
if plugin.static_folder:
last_update_time = max(
last_update_time,
float(dir_last_updated(os.path.join(os.path.dirname(__file__), "static"))),
float(dir_last_updated(plugin.static_folder))
)
LAST_UPDATED_STATIC_FILES = last_update_time
def flush_notifications():
notifications.clear()
def _send_notification(notification_key, **kwargs) -> bool:
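    # Fan the notification out to every notifier registered for this key and
    # report whether at least one of them delivered it to connected clients.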
if notification_key in notifiers:
return any(notifier.all_clients_send_notifications(**kwargs)
for notifier in notifiers[notification_key])
return False
def send_general_notifications(**kwargs):
if _send_notification(GENERAL_NOTIFICATION_KEY, **kwargs):
flush_notifications()
def send_backtesting_status(**kwargs):
_send_notification(BACKTESTING_NOTIFICATION_KEY, **kwargs)
def send_data_collector_status(**kwargs):
_send_notification(DATA_COLLECTOR_NOTIFICATION_KEY, **kwargs)
def send_strategy_optimizer_status(**kwargs):
_send_notification(STRATEGY_OPTIMIZER_NOTIFICATION_KEY, **kwargs)
def send_new_trade(dict_new_trade, exchange_id, symbol):
_send_notification(DASHBOARD_NOTIFICATION_KEY, exchange_id=exchange_id, trades=[dict_new_trade], symbol=symbol)
def send_order_update(dict_order, exchange_id, symbol):
_send_notification(DASHBOARD_NOTIFICATION_KEY, exchange_id=exchange_id, order=dict_order, symbol=symbol)
async def add_notification(level, title, message, sound=None):
notification = {
"Level": level.value,
"Title": title,
"Message": message.replace("<br>", " "),
"Sound": sound,
"Time": timestamp_util.get_now_time()
}
notifications.append(notification)
notifications_history.append(notification)
send_general_notifications()
def get_notifications():
return notifications
def METHOD_NAME() -> list:
return list(notifications_history)
def get_logs():
return bot_logging.logs_database[bot_logging.LOG_DATABASE]
def get_errors_count():
return bot_logging.logs_database[bot_logging.LOG_NEW_ERRORS_COUNT]
def flush_errors_count():
bot_logging.reset_errors_count()
| null |
1,387 |
# Copyright 2017-2023 Posit Software, PBC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import curses
import logging
from tabview import tabview
ViewerBase = tabview.Viewer
viewer_help = """
F1 or ? Show this help
s Sort by current column (ascending)
S Sort by current column (descending)
1 Sort numerically by current column (ascending)
! or 2 Sort numerically by current column (descending)
a Reset sort order
A Reset sort order (reversed)
r Reload file/data (resets sort order)
{{actions}}
Cursor keys or h,j,k,l Move highlighted cell
Q or q Quit
Home, 0, ^, C-a Move to start of the current line
End, $, C-e Move to end of the current line
[num]| Move to column <num> (defaults to first column)
PgUp/PgDn or J/K Move a page up or down
H,L Page left or right
g Move to top of current column
[num]G Move to line <num> (defaults to last line)
Insert or m Memorize current position
Delete or ' Move to last memorized position
Enter View current cell details
/ Specify a search term
n Move to next search result
p Move to previous search result
< > Decrease / increase column width (all columns)
, . Decrease / increase column width (current column)
- + Decrease / increase column gap
` Show logs (used for troubleshooting issues)
[num]c Toggle variable column width mode or set width to [num]
[num]C Maximize current column or set width to [num]
[num][ Skip to [num]th change in row value (backward)
[num]] Skip to [num]th change in row value (forward)
[num]{ Skip to [num]th change in column value (backward)
[num]} Skip to [num]th change in column value (forward)
"""
viewer_help_key_width = 25
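# The {{actions}} placeholder in viewer_help is filled in by
# Viewer._insert_actions_help with one line per registered action, keys
# left-justified to viewer_help_key_width.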
class Viewer(ViewerBase):
get_data = None
get_detail = None
actions = None
max_header_width = 12
max_data_width = 20
def __init__(self, *args, **kw):
assert self.get_data is not None
assert self.get_detail is not None
with StatusWin("Reading data"):
data, logs = self._init_data()
self.logs = logs
args = (args[0], data) + args[2:]
kw["column_widths"] = self._column_widths(data)
# pylint: disable=non-parent-init-called
ViewerBase.__init__(self, *args, **kw)
def _init_data(self):
data, logs = self.get_data() # pylint: disable=not-callable
return tabview.process_data(data), logs
def _column_widths(self, data):
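        # The first row is treated as the header (capped at max_header_width);
        # the remaining rows are data (capped at max_data_width).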
if not data:
return None
widths = {}
self._update_column_widths(widths, data[0], self.max_header_width)
for row in data[1:]:
self._update_column_widths(widths, row, self.max_data_width)
return [widths[col] for col in sorted(widths)]
@staticmethod
def _update_column_widths(widths, vals, max_width):
for col, val in zip(range(len(vals)), vals):
widths[col] = min(max(len(val or ""), widths.get(col, 0)), max_width)
def location_string(self, yp, xp):
lstr = ViewerBase.location_string(self, yp, xp)
return lstr.replace("-,", "")
def define_keys(self):
ViewerBase.define_keys(self)
del self.keys["t"]
del self.keys["y"]
del self.keys[tabview.KEY_CTRL('g')]
self.keys["1"] = self.sort_by_column_numeric
self.keys["!"] = self.sort_by_column_numeric_reverse
self.keys["2"] = self.sort_by_column_numeric_reverse
self.keys["`"] = self.show_logs
# pylint: disable=not-an-iterable
self.keys.update(
{
key: self._action_handler(action_cb)
for (key, _), action_cb in self.actions
}
)
def show_logs(self):
self.text_box(self._format_logs(), "Logs")
def text_box(self, contents, title):
tabview.TextBox(self.scr, data=contents, title=title)()
self.resize()
def _format_logs(self):
logs = self.logs or []
fmt = logging.Formatter("%(asctime)s: %(levelname)s %(message)s")
return "\n".join([fmt.format(r) for r in logs])
def _action_handler(self, action_cb):
return lambda: action_cb(self)
def METHOD_NAME(self):
help_text = self._insert_actions_help(viewer_help.strip())
tabview.TextBox(self.scr, help_text, "Key bindings")()
self.resize()
def _insert_actions_help(self, text):
# pylint: disable=not-an-iterable
actions_help = "\n".join(
[
key.ljust(viewer_help_key_width) + help_text
for (key, help_text), _ in self.actions
]
)
if actions_help:
actions_help += "\n"
return text.replace("{{actions}}\n", actions_help)
def sort_by_column_numeric(self):
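        # Cells that cannot be parsed as floats sort as +inf, so non-numeric
        # values sink to the bottom of an ascending numeric sort.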
from operator import itemgetter
xp = self.x + self.win_x
self.data = sorted(
self.data, key=lambda x: self.float_key(itemgetter(xp)(x), float('inf'))
)
def sort_by_column_numeric_reverse(self):
from operator import itemgetter
xp = self.x + self.win_x
self.data = sorted(
self.data,
key=lambda x: self.float_key(itemgetter(xp)(x), float('-inf')),
reverse=True,
)
@staticmethod
def float_key(value, default):
try:
return float(value)
except ValueError:
return default
def show_cell(self):
yp = self.y + self.win_y
xp = self.x + self.win_x
# pylint: disable=not-callable
detail = self.get_detail(self.data, yp, xp)
if not detail:
return
content, title = detail
tabview.TextBox(self.scr, content, title)()
self.resize()
class StatusWin:
def __init__(self, msg):
self.msg = msg
def __enter__(self):
scr = curses.initscr()
scr.clear()
scr_h, scr_w = scr.getmaxyx()
win_h, win_w = 5, 25
win_y = (scr_h - win_h) // 2
win_x = (scr_w - win_w) // 2
win = curses.newwin(win_h, win_w, win_y, win_x)
win.addstr(2, 3, "Refreshing data...")
win.border()
win.refresh()
def __exit__(self, *_):
curses.endwin()
def view_runs(get_data_cb, get_detail_cb, actions):
Viewer.get_data = staticmethod(get_data_cb)
Viewer.get_detail = staticmethod(get_detail_cb)
Viewer.actions = actions
tabview.Viewer = Viewer
tabview.view(
[[]],
column_width="max",
info="Guild run comparison",
)
def _patch_for_tabview():
_patch_os_unsetenv()
_patch_curses_resizeterm()
def _patch_os_unsetenv():
import os
if not hasattr(os, "unsetenv"):
os.unsetenv = lambda _val: None
def _patch_curses_resizeterm():
if not hasattr(curses, "resizeterm"):
assert hasattr(curses, "resize_term")
curses.resizeterm = curses.resize_term
_patch_for_tabview()
| null |
1,388 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecs.endpoint import endpoint_data
class PurchaseStorageCapacityUnitRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'PurchaseStorageCapacityUnit','ecs')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def METHOD_NAME(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_Description(self): # String
return self.get_query_params().get('Description')
def set_Description(self, Description): # String
self.add_query_param('Description', Description)
def get_StartTime(self): # String
return self.get_query_params().get('StartTime')
def set_StartTime(self, StartTime): # String
self.add_query_param('StartTime', StartTime)
def get_Capacity(self): # Integer
return self.get_query_params().get('Capacity')
def set_Capacity(self, Capacity): # Integer
self.add_query_param('Capacity', Capacity)
def get_Tags(self): # RepeatList
return self.get_query_params().get('Tag')
def set_Tags(self, Tag): # RepeatList
for depth1 in range(len(Tag)):
if Tag[depth1].get('Key') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key'))
if Tag[depth1].get('Value') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value'))
def get_Period(self): # Integer
return self.get_query_params().get('Period')
def set_Period(self, Period): # Integer
self.add_query_param('Period', Period)
def get_Amount(self): # Integer
return self.get_query_params().get('Amount')
def set_Amount(self, Amount): # Integer
self.add_query_param('Amount', Amount)
def get_FromApp(self): # String
return self.get_query_params().get('FromApp')
def set_FromApp(self, FromApp): # String
self.add_query_param('FromApp', FromApp)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_PeriodUnit(self): # String
return self.get_query_params().get('PeriodUnit')
def set_PeriodUnit(self, PeriodUnit): # String
self.add_query_param('PeriodUnit', PeriodUnit)
def get_Name(self): # String
return self.get_query_params().get('Name')
def set_Name(self, Name): # String
self.add_query_param('Name', Name)
| null |
1,389 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkhbase.endpoint import endpoint_data
class CreateMultiZoneClusterRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'HBase', '2019-01-01', 'CreateMultiZoneCluster','hbase')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ArchVersion(self):
return self.get_query_params().get('ArchVersion')
def set_ArchVersion(self,ArchVersion):
self.add_query_param('ArchVersion',ArchVersion)
def get_ClusterName(self):
return self.get_query_params().get('ClusterName')
def set_ClusterName(self,ClusterName):
self.add_query_param('ClusterName',ClusterName)
def get_EngineVersion(self):
return self.get_query_params().get('EngineVersion')
def set_EngineVersion(self,EngineVersion):
self.add_query_param('EngineVersion',EngineVersion)
def get_LogDiskType(self):
return self.get_query_params().get('LogDiskType')
def set_LogDiskType(self,LogDiskType):
self.add_query_param('LogDiskType',LogDiskType)
def get_ResourceGroupId(self):
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self,ResourceGroupId):
self.add_query_param('ResourceGroupId',ResourceGroupId)
def get_PrimaryVSwitchId(self):
return self.get_query_params().get('PrimaryVSwitchId')
def set_PrimaryVSwitchId(self,PrimaryVSwitchId):
self.add_query_param('PrimaryVSwitchId',PrimaryVSwitchId)
def get_LogInstanceType(self):
return self.get_query_params().get('LogInstanceType')
def set_LogInstanceType(self,LogInstanceType):
self.add_query_param('LogInstanceType',LogInstanceType)
def get_AutoRenewPeriod(self):
return self.get_query_params().get('AutoRenewPeriod')
def set_AutoRenewPeriod(self,AutoRenewPeriod):
self.add_query_param('AutoRenewPeriod',AutoRenewPeriod)
def get_Period(self):
return self.get_query_params().get('Period')
def set_Period(self,Period):
self.add_query_param('Period',Period)
def get_LogNodeCount(self):
return self.get_query_params().get('LogNodeCount')
def set_LogNodeCount(self,LogNodeCount):
self.add_query_param('LogNodeCount',LogNodeCount)
def get_SecurityIPList(self):
return self.get_query_params().get('SecurityIPList')
def set_SecurityIPList(self,SecurityIPList):
self.add_query_param('SecurityIPList',SecurityIPList)
def get_PeriodUnit(self):
return self.get_query_params().get('PeriodUnit')
def set_PeriodUnit(self,PeriodUnit):
self.add_query_param('PeriodUnit',PeriodUnit)
def get_CoreDiskType(self):
return self.get_query_params().get('CoreDiskType')
def set_CoreDiskType(self,CoreDiskType):
self.add_query_param('CoreDiskType',CoreDiskType)
def get_ArbiterZoneId(self):
return self.get_query_params().get('ArbiterZoneId')
def set_ArbiterZoneId(self,ArbiterZoneId):
self.add_query_param('ArbiterZoneId',ArbiterZoneId)
def METHOD_NAME(self):
return self.get_query_params().get('ClientToken')
def set_ClientToken(self,ClientToken):
self.add_query_param('ClientToken',ClientToken)
def get_MultiZoneCombination(self):
return self.get_query_params().get('MultiZoneCombination')
def set_MultiZoneCombination(self,MultiZoneCombination):
self.add_query_param('MultiZoneCombination',MultiZoneCombination)
def get_PrimaryZoneId(self):
return self.get_query_params().get('PrimaryZoneId')
def set_PrimaryZoneId(self,PrimaryZoneId):
self.add_query_param('PrimaryZoneId',PrimaryZoneId)
def get_Engine(self):
return self.get_query_params().get('Engine')
def set_Engine(self,Engine):
self.add_query_param('Engine',Engine)
def get_StandbyVSwitchId(self):
return self.get_query_params().get('StandbyVSwitchId')
def set_StandbyVSwitchId(self,StandbyVSwitchId):
self.add_query_param('StandbyVSwitchId',StandbyVSwitchId)
def get_StandbyZoneId(self):
return self.get_query_params().get('StandbyZoneId')
def set_StandbyZoneId(self,StandbyZoneId):
self.add_query_param('StandbyZoneId',StandbyZoneId)
def get_MasterInstanceType(self):
return self.get_query_params().get('MasterInstanceType')
def set_MasterInstanceType(self,MasterInstanceType):
self.add_query_param('MasterInstanceType',MasterInstanceType)
def get_CoreNodeCount(self):
return self.get_query_params().get('CoreNodeCount')
def set_CoreNodeCount(self,CoreNodeCount):
self.add_query_param('CoreNodeCount',CoreNodeCount)
def get_LogDiskSize(self):
return self.get_query_params().get('LogDiskSize')
def set_LogDiskSize(self,LogDiskSize):
self.add_query_param('LogDiskSize',LogDiskSize)
def get_CoreInstanceType(self):
return self.get_query_params().get('CoreInstanceType')
def set_CoreInstanceType(self,CoreInstanceType):
self.add_query_param('CoreInstanceType',CoreInstanceType)
def get_CoreDiskSize(self):
return self.get_query_params().get('CoreDiskSize')
def set_CoreDiskSize(self,CoreDiskSize):
self.add_query_param('CoreDiskSize',CoreDiskSize)
def get_VpcId(self):
return self.get_query_params().get('VpcId')
def set_VpcId(self,VpcId):
self.add_query_param('VpcId',VpcId)
def get_PayType(self):
return self.get_query_params().get('PayType')
def set_PayType(self,PayType):
self.add_query_param('PayType',PayType)
def get_ArbiterVSwitchId(self):
return self.get_query_params().get('ArbiterVSwitchId')
def set_ArbiterVSwitchId(self,ArbiterVSwitchId):
        self.add_query_param('ArbiterVSwitchId',ArbiterVSwitchId)
| null |
1,390 |
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2022-2023 Valory AG
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""The module contains test_helpers for module tests."""
from pathlib import Path
from types import ModuleType
from typing import cast
from unittest import mock
import pytest
from _pytest.capture import CaptureFixture # type: ignore
from autonomy.cli.helpers import docstring
from autonomy.cli.helpers.docstring import analyse_docstrings, import_rounds_module
import packages
from packages.valory.skills import test_abci
from packages.valory.skills.test_abci import rounds as test_abci_rounds
@pytest.mark.parametrize("module", [test_abci, test_abci_rounds])
@pytest.mark.parametrize("with_package_dir", [True, False])
def test_import_rounds_module(module: ModuleType, with_package_dir: bool) -> None:
"""Test import_rounds_module"""
packages_dir = (
Path(cast(str, packages.__file__)).parent if with_package_dir else None
)
module_path = Path(cast(str, module.__file__))
module = import_rounds_module(module_path, packages_dir)
assert module is test_abci_rounds
def test_import_rounds_module_failure() -> None:
"""Test import_rounds_module"""
packages_module = Path(cast(str, packages.__file__))
module_path = Path(cast(str, test_abci.__file__))
with pytest.raises(ModuleNotFoundError, match="No module named 'packages.rounds'"):
import_rounds_module(packages_module)
with pytest.raises(ModuleNotFoundError, match="No module named 'skills'"):
import_rounds_module(module_path, packages_dir=module_path.parent.parent)
@pytest.mark.parametrize("module", [test_abci, test_abci_rounds])
def METHOD_NAME(module: ModuleType) -> None:
"""Test analyse_docstrings"""
module_path = Path(cast(str, module.__file__))
updated_needed = analyse_docstrings(module_path)
assert not updated_needed
def test_analyse_docstrings_no_abci_app_definition(capsys: CaptureFixture) -> None:
"""Test analyse_docstrings no ABCIApp definition found"""
with mock.patch.object(docstring, "import_rounds_module", return_value=docstring):
module_path = Path(cast(str, test_abci.__file__))
updated_needed = analyse_docstrings(module_path)
stdout = capsys.readouterr().out
expected = f"WARNING: No AbciApp definition found in: {docstring.__file__}"
assert updated_needed
assert expected in stdout
def test_analyse_docstrings_with_update(capsys: CaptureFixture) -> None:
"""Test analyse_docstrings with update"""
module_path = Path(cast(str, test_abci_rounds.__file__))
doc = cast(str, test_abci_rounds.TestAbciApp.__doc__)
content_with_mutated_abci_doc = module_path.read_text().replace(doc, doc + " ")
with mock.patch.object(Path, "write_text") as mock_write_text:
with mock.patch.object(Path, "read_text", return_value=""):
updated_needed = analyse_docstrings(module_path, update=True)
assert updated_needed
out, _ = capsys.readouterr()
expected = (
"does not contain well formatted docstring, please update it manually"
)
assert expected in out
with mock.patch.object(
Path, "read_text", return_value=content_with_mutated_abci_doc
):
updated_needed = analyse_docstrings(module_path, update=True)
assert updated_needed
mock_write_text.assert_called_once()
| null |
1,391 |
# -*- coding: utf-8 -*-
from __future__ import print_function
from acq4.util import Qt
from ..Stage import Stage, StageInterface, MoveFuture
from acq4.drivers.ThorlabsMFC1 import MFC1 as MFC1_Driver
from acq4.util.Mutex import Mutex
from acq4.util.Thread import Thread
from pyqtgraph import debug
import time
class ChangeNotifier(Qt.QObject):
sigPosChanged = Qt.Signal(object, object, object)
class ThorlabsMFC1(Stage):
"""Thorlabs motorized focus controller (MFC1)
"""
def __init__(self, man, config, name):
self.port = config.pop('port')
self.scale = config.pop('scale', (1, 1, 1))
params = config.pop('motorParams', {})
self.dev = MFC1_Driver(self.port, **params)
man.sigAbortAll.connect(self.dev.stop)
# Optionally use ROE-200 z axis to control focus
roe = config.pop('roe', None)
self._roeDev = None
self._roeEnabled = "waiting" # ROE control is disabled until after the first update
if roe is not None:
dev = man.getDevice(roe)
self._roeDev = dev
# need to connect to internal change signal because
# the public signal should already have z-axis information removed.
dev._notifier.sigPosChanged.connect(self._roeChanged)
self._lastPos = None
Stage.__init__(self, man, config, name)
self.getPosition(refresh=True)
# Optionally read limits from config
limits = list(config.pop('limits', (None, None)))
self.setLimits(z=limits)
self._monitor = MonitorThread(self)
self._monitor.start()
def capabilities(self):
# device only reads/writes z-axis
return {
'getPos': (False, False, True),
'setPos': (False, False, True),
'limits': (False, False, True),
}
def mfcPosChanged(self, pos, oldpos):
self.posChanged(pos)
def _getPosition(self):
pos = self.dev.position() * self.scale[2]
if pos != self._lastPos:
oldpos = self._lastPos
self._lastPos = pos
self.posChanged([0, 0, pos])
return [0, 0, pos]
def _move(self, pos, speed, linear):
pos = self._toAbsolutePosition(pos)
limits = self.getLimits()[2]
if limits[0] is not None:
pos[2] = max(pos[2], limits[0])
if limits[1] is not None:
pos[2] = min(pos[2], limits[1])
return MFC1MoveFuture(self, pos, speed)
def targetPosition(self):
return [0, 0, self.dev.target_position() * self.scale[2]]
def METHOD_NAME(self):
self._monitor.stop()
Stage.METHOD_NAME(self)
def _roeChanged(self, drive, pos, oldpos):
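        # Mirror z-axis motion of the ROE-200 input device onto the focus
        # motor by shifting the current motor target by the same delta.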
if drive != self._roeDev.drive:
return
if self._roeEnabled is not True:
if self._roeEnabled == 'waiting':
self._roeEnabled = True
return
dz = pos[2] - oldpos[2]
if dz == 0:
return
target = self.dev.target_position() * self.scale[2] + dz
self.moveTo([0, 0, target], 'fast')
def deviceInterface(self, win):
return MFC1StageInterface(self, win)
def setRoeEnabled(self, enable):
self._roeEnabled = enable
def setZero(self):
"""Reset the device position to 0 (without moving the motor).
"""
self.dev.set_encoder(0)
self._getPosition()
def stop(self):
self.dev.stop()
def setHolding(self, hold):
self.dev.set_holding(hold)
class MonitorThread(Thread):
def __init__(self, dev):
self.dev = dev
self.lock = Mutex(recursive=True)
self.stopped = False
self.interval = 0.3
Thread.__init__(self)
def start(self):
self.stopped = False
Thread.start(self)
def stop(self):
with self.lock:
self.stopped = True
def setInterval(self, i):
with self.lock:
self.interval = i
def run(self):
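        # Poll the controller position, tightening the poll interval to
        # minInterval while the stage is moving and backing off exponentially
        # (up to self.interval) once the position stops changing.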
minInterval = 100e-3
interval = minInterval
lastPos = None
while True:
try:
with self.lock:
if self.stopped:
break
maxInterval = self.interval
pos = self.dev._getPosition()[2]
if pos != lastPos:
# stage is moving; request more frequent updates
interval = minInterval
else:
interval = min(maxInterval, interval*2)
lastPos = pos
time.sleep(interval)
except:
debug.printExc('Error in MFC1 monitor thread:')
time.sleep(maxInterval)
class MFC1StageInterface(StageInterface):
def __init__(self, dev, win):
StageInterface.__init__(self, dev, win)
if dev._roeDev is not None:
self.connectRoeBtn = Qt.QPushButton('Enable ROE')
self.connectRoeBtn.setCheckable(True)
self.connectRoeBtn.setChecked(True)
self.layout.addWidget(self.connectRoeBtn, self.nextRow, 0, 1, 2)
self.connectRoeBtn.toggled.connect(self.connectRoeToggled)
self.setZeroBtn = Qt.QPushButton('Set Zero')
self.layout.addWidget(self.setZeroBtn, self.nextRow, 2, 1, 1)
self.setZeroBtn.clicked.connect(self.setZeroClicked)
def setZeroClicked(self):
self.dev.setZero()
def connectRoeToggled(self, b):
self.dev.setRoeEnabled(b)
class MFC1MoveFuture(MoveFuture):
"""Provides access to a move-in-progress on an MPC200 drive.
"""
def __init__(self, dev, pos, speed):
MoveFuture.__init__(self, dev, pos, speed)
self.startPos = dev.getPosition()
self.stopPos = pos
self._moveStatus = {'status': None}
self.id = dev.dev.move(pos[2] / dev.scale[2])
def wasInterrupted(self):
"""Return True if the move was interrupted before completing.
"""
return self._getStatus()['status'] in ('interrupted', 'failed')
def percentDone(self):
"""Return an estimate of the percent of move completed based on the
device's speed table.
"""
if self.isDone():
return 100
pos = self.dev.getPosition()[2] - self.startPos[2]
target = self.stopPos[2] - self.startPos[2]
if target == 0:
return 99
return 100 * pos / target
def isDone(self):
"""Return True if the move is complete.
"""
return self._getStatus()['status'] in ('interrupted', 'failed', 'done')
def errorMessage(self):
stat = self._getStatus()
if stat['status'] == 'interrupted':
return "move was interrupted"
elif stat['status'] == 'failed':
return "did not reach the expected position (%s != %s)" % (stat['final_pos'], stat['target'])
else:
return None
def _getStatus(self):
# check status of move unless we already know it is complete.
if self._moveStatus['status'] in (None, 'moving'):
self._moveStatus = self.dev.dev.move_status(self.id)
return self._moveStatus
| null |
1,392 |
# Copyright 2021-2023 AIPlan4EU project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unified_planning as up
from unified_planning.model.problem import Problem
from unified_planning.model.expression import ConstantExpression
from unified_planning.model.fluent import get_all_fluent_exp
from typing import Dict, Optional, Iterable, Set, List, Union
class ContingentProblem(Problem):
"""This class represent a contingent planning problem."""
def __init__(
self,
name: Optional[str] = None,
environment: Optional["up.environment.Environment"] = None,
*,
initial_defaults: Dict["up.model.types.Type", "ConstantExpression"] = {},
):
Problem.__init__(self, name, environment, initial_defaults=initial_defaults)
self._hidden_fluents: Set["up.model.fnode.FNode"] = set()
self._or_initial_constraints: List[List["up.model.fnode.FNode"]] = []
self._oneof_initial_constraints: List[List["up.model.fnode.FNode"]] = []
def __repr__(self) -> str:
s = []
s.append(super().__repr__())
s.append("initial constraints = [\n")
for c in self._or_initial_constraints:
s.append(f" (or {' '.join([str(f) for f in c])})\n")
for c in self._oneof_initial_constraints:
s.append(f" (oneof {' '.join([str(f) for f in c])})\n")
s.append("]\n\n")
return "".join(s)
def __eq__(self, oth: object) -> bool:
if not isinstance(oth, ContingentProblem):
return False
elif not super().__eq__(oth):
return False
elif self._hidden_fluents != oth._hidden_fluents:
return False
# use frozenset for the inner collections: plain sets are unhashable and
# cannot be members of the outer set used for order-insensitive comparison
elif set(frozenset(c) for c in self._or_initial_constraints) != set(
frozenset(c) for c in oth._or_initial_constraints
):
return False
elif set(frozenset(c) for c in self._oneof_initial_constraints) != set(
frozenset(c) for c in oth._oneof_initial_constraints
):
return False
else:
return True
def __hash__(self) -> int:
res = super().__hash__()
for c in self._or_initial_constraints:
for f in c:
res += hash(f)
for c in self._oneof_initial_constraints:
for f in c:
res += hash(f)
return res
def clone(self):
new_p = ContingentProblem(self._name, self._env)
new_p._fluents = self._fluents[:]
new_p._actions = [a.clone() for a in self._actions]
new_p._user_types = self._user_types[:]
new_p._user_types_hierarchy = self._user_types_hierarchy.copy()
new_p._objects = self._objects[:]
new_p._initial_value = self._initial_value.copy()
new_p._timed_effects = {
t: [e.clone() for e in el] for t, el in self._timed_effects.items()
}
new_p._timed_goals = {i: [g for g in gl] for i, gl in self._timed_goals.items()}
new_p._goals = self._goals[:]
new_p._metrics = []
for m in self._metrics:
if m.is_minimize_action_costs():
assert isinstance(m, up.model.metrics.MinimizeActionCosts)
costs: Dict["up.model.Action", "up.model.Expression"] = {
new_p.action(a.name): c for a, c in m.costs.items()
}
new_p._metrics.append(up.model.metrics.MinimizeActionCosts(costs))
else:
new_p._metrics.append(m)
new_p._initial_defaults = self._initial_defaults.copy()
new_p._fluents_defaults = self._fluents_defaults.copy()
new_p._hidden_fluents = self._hidden_fluents.copy()
new_p._or_initial_constraints = self._or_initial_constraints.copy()
new_p._oneof_initial_constraints = self._oneof_initial_constraints.copy()
return new_p
def add_oneof_initial_constraint(
self, fluents: Iterable[Union["up.model.fnode.FNode", "up.model.fluent.Fluent"]]
):
"""
Adds a `oneof` initial constraint on some hidden fluents.
:param fluents: a sequence of fluent expressions; exactly one of them must hold in the initial state.
"""
em = self._env.expression_manager
constraints: List["up.model.fnode.FNode"] = em.auto_promote(fluents)
for f_exp in constraints:
self._hidden_fluents.add(f_exp)
self._oneof_initial_constraints.append(constraints)
def add_or_initial_constraint(
self, fluents: Iterable[Union["up.model.fnode.FNode", "up.model.fluent.Fluent"]]
):
"""
Adds an `or` initial constraint on some hidden fluents.
:param fluents: a list of fluent expressions; at least one of them must hold in the initial state.
"""
em = self._env.expression_manager
constraints: List["up.model.fnode.FNode"] = em.auto_promote(fluents)
for f_exp in constraints:
self._hidden_fluents.add(f_exp)
self._or_initial_constraints.append(constraints)
def add_unknown_initial_constraint(
self, fluent: Union["up.model.fnode.FNode", "up.model.fluent.Fluent"]
):
"""
Adds an unknown initial constraint on a hidden fluent.
:param fluent: the unknown hidden fluent.
"""
em = self._env.expression_manager
(fluent_exp,) = em.auto_promote(fluent)
self._hidden_fluents.add(fluent_exp)
self._hidden_fluents.add(em.Not(fluent_exp))
c = [em.Not(fluent_exp), fluent_exp]
self._or_initial_constraints.append(c)
@property
def initial_values(self) -> Dict["up.model.fnode.FNode", "up.model.fnode.FNode"]:
"""Gets the initial value of the fluents.
IMPORTANT NOTE: this property does a lot of computation, so it should be called as
seldom as possible."""
res = self._initial_value
for f in self._fluents:
for f_exp in get_all_fluent_exp(self, f):
res[f_exp] = self.initial_value(f_exp)
return res
@property
def kind(self) -> "up.model.problem_kind.ProblemKind":
"""Returns the problem kind of this planning problem.
IMPORTANT NOTE: this property does a lot of computation, so it should be called as
seldom as possible."""
self._kind = super().kind
self._kind.set_problem_class("CONTINGENT")
return self._kind
@property
def METHOD_NAME(self) -> List[List["up.model.fnode.FNode"]]:
"""Returns the `or` initial constraints on the hidden fluents."""
return self._or_initial_constraints
@property
def oneof_constraints(self) -> List[List["up.model.fnode.FNode"]]:
"""Returns the `oneof` initial constraints on the hidden fluents."""
return self._oneof_initial_constraints
@property
def hidden_fluents(self) -> Set["up.model.fnode.FNode"]:
"""Returns the hidden fluents."""
return self._hidden_fluents
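# A brief usage sketch for the constraint helpers above, assuming the Fluent and
# BoolType shortcuts from unified_planning; the fluent names and the problem
# name are invented for illustration only.
#
# from unified_planning.shortcuts import Fluent, BoolType
#
# problem = ContingentProblem("find_key")
# key_in_drawer = Fluent("key_in_drawer", BoolType())
# key_on_table = Fluent("key_on_table", BoolType())
# problem.add_fluent(key_in_drawer, default_initial_value=False)
# problem.add_fluent(key_on_table, default_initial_value=False)
#
# # exactly one of the two hidden locations holds in the initial state
# problem.add_oneof_initial_constraint([key_in_drawer(), key_on_table()])
#
# # a single hidden fluent whose initial truth value is simply unknown
# door_locked = Fluent("door_locked", BoolType())
# problem.add_fluent(door_locked, default_initial_value=False)
# problem.add_unknown_initial_constraint(door_locked())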
| null |
1,393 |
# Copyright 2018-2021 The glTF-Blender-IO authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
def METHOD_NAME(key):
"""
Decorates functions whose result should be cached. Use it like:
@cached_by_key(key=...)
def func(..., export_settings):
...
The decorated function, func, must always take an "export_settings" arg
(the cache is stored here).
The key argument to the decorator is a function that computes the key to
cache on. It is passed all the arguments to func.
"""
def inner(func):
@functools.wraps(func)
def wrapper_cached(*args, **kwargs):
if kwargs.get("export_settings"):
export_settings = kwargs["export_settings"]
else:
export_settings = args[-1]
cache_key = key(*args, **kwargs)
# invalidate cache if export settings have changed
if not hasattr(func, "__export_settings") or export_settings != func.__export_settings:
func.__cache = {}
func.__export_settings = export_settings
# use or fill cache
if cache_key in func.__cache:
return func.__cache[cache_key]
else:
result = func(*args, **kwargs)
func.__cache[cache_key] = result
return result
return wrapper_cached
return inner
def default_key(*args, **kwargs):
"""
Default cache key for @cached functions.
Cache on all arguments (except export_settings).
"""
assert len(args) >= 2 and 0 <= len(kwargs) <= 1, "Wrong signature for cached function"
cache_key_args = args
# make a shallow copy of the keyword arguments so that 'export_settings' can be removed
cache_key_kwargs = dict(kwargs)
if kwargs.get("export_settings"):
del cache_key_kwargs["export_settings"]
else:
cache_key_args = args[:-1]
cache_key = ()
for i in cache_key_args:
cache_key += (i,)
for i in cache_key_kwargs.values():
cache_key += (i,)
return cache_key
def cached(func):
return METHOD_NAME(key=default_key)(func)
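# A small commented sketch of how the decorator above is intended to be used,
# following its docstring. The gather function and its arguments are
# hypothetical; the only requirement is that export_settings is passed as the
# last positional argument or as a keyword, since default_key excludes it from
# the cache key.
#
# @cached
# def gather_material(blender_material, export_settings):
#     ...  # expensive computation, re-run only when export_settings changes
#
# m1 = gather_material(mat, export_settings)   # computed and stored in func.__cache
# m2 = gather_material(mat, export_settings)   # returned from the cache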
def datacache(func):
def reset_all_cache():
func.__cache = {}
func.reset_cache = reset_all_cache
@functools.wraps(func)
def wrapper_objectcache(*args, **kwargs):
# 0 : path
# 1 : object_uuid
# 2 : bone (can be, of course, None for path other than 'bone')
# 3 : action_name
# 4 : current_frame
# 5 : step
# 6 : export_settings
# only_gather_provided : only_gather_provided
cache_key_args = args
cache_key_args = args[:-1]
if not hasattr(func, "__cache"):
func.reset_cache()
# object is not cached yet
if cache_key_args[1] not in func.__cache.keys():
result = func(*args)
func.__cache = result
# Here are the keys used: result[obj_uuid][action_name][path][bone][frame]
return result[cache_key_args[1]][cache_key_args[3]][cache_key_args[0]][cache_key_args[2]][cache_key_args[4]]
# object is in cache, but not this action
# We need to keep other actions
elif cache_key_args[3] not in func.__cache[cache_key_args[1]].keys():
result = func(*args, only_gather_provided=True)
# The result can contain multiple animations, in case this is an armature with drivers
# Need to create all newly retrieved animations
func.__cache.update(result)
# Here are the keys used: result[obj_uuid][action_name][path][bone][frame]
return result[cache_key_args[1]][cache_key_args[3]][cache_key_args[0]][cache_key_args[2]][cache_key_args[4]]
# all is already cached
else:
# Here are the keys used: result[obj_uuid][action_name][path][bone][frame]
return func.__cache[cache_key_args[1]][cache_key_args[3]][cache_key_args[0]][cache_key_args[2]][cache_key_args[4]]
return wrapper_objectcache
# TODO: replace "cached" with "unique" in all cases where the caching is functional and not only for performance reasons
call_or_fetch = cached
unique = cached
def skdriverdiscovercache(func):
def reset_cache_skdriverdiscovercache():
func.__current_armature_uuid = None
func.__skdriverdiscover = {}
func.reset_cache = reset_cache_skdriverdiscovercache
@functools.wraps(func)
def wrapper_skdriverdiscover(*args, **kwargs):
# 0 : armature_uuid
# 1 : export_settings
cache_key_args = args
cache_key_args = args[:-1]
if not hasattr(func, "__current_armature_uuid") or func.__current_armature_uuid is None:
func.reset_cache()
if cache_key_args[0] != func.__current_armature_uuid:
result = func(*args)
func.__skdriverdiscover[cache_key_args[0]] = result
func.__current_armature_uuid = cache_key_args[0]
return result
else:
return func.__skdriverdiscover[cache_key_args[0]]
return wrapper_skdriverdiscover
| null |
1,394 |
from unittest import TestCase
from pcs import settings
from pcs.common import file_type_codes
from pcs.common.file import RawFileError
from pcs.common.reports import codes as report_codes
from pcs.lib.commands import dr
from pcs_test.tools import fixture
from pcs_test.tools.command_env import get_env_tools
REASON = "error msg"
class Config(TestCase):
def setUp(self):
self.env_assist, self.config = get_env_tools(self)
def test_success(self):
(
self.config.raw_file.exists(
file_type_codes.PCS_DR_CONFIG,
settings.pcsd_dr_config_location,
).raw_file.read(
file_type_codes.PCS_DR_CONFIG,
settings.pcsd_dr_config_location,
content="""
{
"local": {
"role": "PRIMARY"
},
"remote_sites": [
{
"nodes": [
{
"name": "recovery-node"
}
],
"role": "RECOVERY"
}
]
}
""",
)
)
self.assertEqual(
dr.get_config(self.env_assist.get_env()),
{
"local_site": {
"node_list": [],
"site_role": "PRIMARY",
},
"remote_site_list": [
{
"node_list": [
{"name": "recovery-node"},
],
"site_role": "RECOVERY",
},
],
},
)
def METHOD_NAME(self):
(
self.config.raw_file.exists(
file_type_codes.PCS_DR_CONFIG,
settings.pcsd_dr_config_location,
exists=False,
)
)
self.env_assist.assert_raise_library_error(
lambda: dr.get_config(self.env_assist.get_env()),
)
self.env_assist.assert_reports(
[
fixture.error(
report_codes.DR_CONFIG_DOES_NOT_EXIST,
),
]
)
def test_config_read_error(self):
(
self.config.raw_file.exists(
file_type_codes.PCS_DR_CONFIG,
settings.pcsd_dr_config_location,
).raw_file.read(
file_type_codes.PCS_DR_CONFIG,
settings.pcsd_dr_config_location,
exception_msg=REASON,
)
)
self.env_assist.assert_raise_library_error(
lambda: dr.get_config(self.env_assist.get_env()),
)
self.env_assist.assert_reports(
[
fixture.error(
report_codes.FILE_IO_ERROR,
file_type_code=file_type_codes.PCS_DR_CONFIG,
file_path=settings.pcsd_dr_config_location,
operation=RawFileError.ACTION_READ,
reason=REASON,
),
]
)
def test_config_parse_error(self):
(
self.config.raw_file.exists(
file_type_codes.PCS_DR_CONFIG,
settings.pcsd_dr_config_location,
).raw_file.read(
file_type_codes.PCS_DR_CONFIG,
settings.pcsd_dr_config_location,
content="bad content",
)
)
self.env_assist.assert_raise_library_error(
lambda: dr.get_config(self.env_assist.get_env()),
)
self.env_assist.assert_reports(
[
fixture.error(
report_codes.PARSE_ERROR_JSON_FILE,
file_type_code=file_type_codes.PCS_DR_CONFIG,
file_path=settings.pcsd_dr_config_location,
line_number=1,
column_number=1,
position=0,
reason="Expecting value",
full_msg="Expecting value: line 1 column 1 (char 0)",
),
]
)
| null |
1,395 |
# Copyright 2018-2021 The glTF-Blender-IO authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import typing
import math
from mathutils import Matrix, Vector, Quaternion, Euler
from .gltf2_blender_data_path import get_target_property_name
def list_to_mathutils(values: typing.List[float], data_path: str) -> typing.Union[Vector, Quaternion, Euler]:
"""Transform a list to blender py object."""
target = get_target_property_name(data_path)
if target == 'delta_location':
return Vector(values) # TODO Should be Vector(values) - Vector(something)?
elif target == 'delta_rotation_euler':
return Euler(values).to_quaternion() # TODO Should be Euler(values).to_quaternion() @ something?
elif target == 'location':
return Vector(values)
elif target == 'rotation_axis_angle':
angle = values[0]
axis = values[1:]
return Quaternion(axis, math.radians(angle))
elif target == 'rotation_euler':
return Euler(values).to_quaternion()
elif target == 'rotation_quaternion':
return Quaternion(values)
elif target == 'scale':
return Vector(values)
elif target == 'value':
return Vector(values)
return values
def mathutils_to_gltf(x: typing.Union[Vector, Quaternion]) -> typing.List[float]:
"""Transform a py object to glTF list."""
if isinstance(x, Vector):
return list(x)
if isinstance(x, Quaternion):
# Blender has w-first quaternion notation
return [x[1], x[2], x[3], x[0]]
else:
return list(x)
def to_yup() -> Matrix:
"""Transform to Yup."""
return Matrix(
((1.0, 0.0, 0.0, 0.0),
(0.0, 0.0, 1.0, 0.0),
(0.0, -1.0, 0.0, 0.0),
(0.0, 0.0, 0.0, 1.0))
)
to_zup = to_yup
def swizzle_yup(v: typing.Union[Vector, Quaternion], data_path: str) -> typing.Union[Vector, Quaternion]:
"""Manage Yup."""
target = get_target_property_name(data_path)
swizzle_func = {
"delta_location": swizzle_yup_location,
"delta_rotation_euler": swizzle_yup_rotation,
"location": swizzle_yup_location,
"rotation_axis_angle": swizzle_yup_rotation,
"rotation_euler": swizzle_yup_rotation,
"rotation_quaternion": swizzle_yup_rotation,
"scale": swizzle_yup_scale,
"value": swizzle_yup_value
}.get(target)
if swizzle_func is None:
raise RuntimeError("Cannot transform values at {}".format(data_path))
return swizzle_func(v)
def swizzle_yup_location(loc: Vector) -> Vector:
"""Manage Yup location."""
return Vector((loc[0], loc[2], -loc[1]))
def swizzle_yup_rotation(rot: Quaternion) -> Quaternion:
"""Manage Yup rotation."""
return Quaternion((rot[0], rot[1], rot[3], -rot[2]))
def swizzle_yup_scale(scale: Vector) -> Vector:
"""Manage Yup scale."""
return Vector((scale[0], scale[2], scale[1]))
def swizzle_yup_value(value: typing.Any) -> typing.Any:
"""Manage Yup value."""
return value
def METHOD_NAME(v: typing.Union[Vector, Quaternion], data_path: str, METHOD_NAME: Matrix = Matrix.Identity(4), need_rotation_correction: bool = False) -> typing.Union[Vector, Quaternion]:
"""Manage transformations."""
target = get_target_property_name(data_path)
transform_func = {
"delta_location": transform_location,
"delta_rotation_euler": transform_rotation,
"location": transform_location,
"rotation_axis_angle": transform_rotation,
"rotation_euler": transform_rotation,
"rotation_quaternion": transform_rotation,
"scale": transform_scale,
"value": transform_value
}.get(target)
if transform_func is None:
raise RuntimeError("Cannot transform values at {}".format(data_path))
return transform_func(v, METHOD_NAME, need_rotation_correction)
def transform_location(location: Vector, METHOD_NAME: Matrix = Matrix.Identity(4), need_rotation_correction:bool = False) -> Vector:
"""Transform location."""
correction = Quaternion((2**0.5/2, -2**0.5/2, 0.0, 0.0))
m = Matrix.Translation(location)
if need_rotation_correction:
m @= correction.to_matrix().to_4x4()
m = METHOD_NAME @ m
return m.to_translation()
def transform_rotation(rotation: Quaternion, METHOD_NAME: Matrix = Matrix.Identity(4), need_rotation_correction: bool = False) -> Quaternion:
"""Transform rotation."""
rotation.normalize()
correction = Quaternion((2**0.5/2, -2**0.5/2, 0.0, 0.0))
m = rotation.to_matrix().to_4x4()
if need_rotation_correction:
m @= correction.to_matrix().to_4x4()
m = METHOD_NAME @ m
return m.to_quaternion()
def transform_scale(scale: Vector, METHOD_NAME: Matrix = Matrix.Identity(4), need_rotation_correction: bool = False) -> Vector:
"""Transform scale."""
m = Matrix.Identity(4)
m[0][0] = scale.x
m[1][1] = scale.y
m[2][2] = scale.z
m = METHOD_NAME @ m
return m.to_scale()
def transform_value(value: Vector, _: Matrix = Matrix.Identity(4), need_rotation_correction: bool = False) -> Vector:
"""Transform value."""
return value
def round_if_near(value: float, target: float) -> float:
"""If value is very close to target, round to target."""
return value if abs(value - target) > 2.0e-6 else target
def scale_rot_swap_matrix(rot):
"""Returns a matrix m st. Scale[s] Rot[rot] = Rot[rot] Scale[m s].
If rot.to_matrix() is a signed permutation matrix, works for any s.
Otherwise works only if s is a uniform scaling.
"""
m = nearby_signed_perm_matrix(rot) # snap to signed perm matrix
m.transpose() # invert permutation
for i in range(3):
for j in range(3):
m[i][j] = abs(m[i][j]) # discard sign
return m
def nearby_signed_perm_matrix(rot):
"""Returns a signed permutation matrix close to rot.to_matrix().
(A signed permutation matrix is like a permutation matrix, except
the non-zero entries can be ±1.)
"""
m = rot.to_matrix()
x, y, z = m[0], m[1], m[2]
# Set the largest entry in the first row to ±1
a, b, c = abs(x[0]), abs(x[1]), abs(x[2])
i = 0 if a >= b and a >= c else 1 if b >= c else 2
x[i] = 1 if x[i] > 0 else -1
x[(i+1) % 3] = 0
x[(i+2) % 3] = 0
# Same for second row: only two columns to consider now.
a, b = abs(y[(i+1) % 3]), abs(y[(i+2) % 3])
j = (i+1) % 3 if a >= b else (i+2) % 3
y[j] = 1 if y[j] > 0 else -1
y[(j+1) % 3] = 0
y[(j+2) % 3] = 0
# Same for third row: only one column left
k = (0 + 1 + 2) - i - j
z[k] = 1 if z[k] > 0 else -1
z[(k+1) % 3] = 0
z[(k+2) % 3] = 0
return m
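# A short worked example of the identity documented on scale_rot_swap_matrix.
# For a 90-degree rotation about X the rotation matrix is already a signed
# permutation, so the helper simply swaps the y and z scale components; the
# numbers below are illustrative only.
#
# rot = Quaternion((1.0, 0.0, 0.0), math.radians(90))
# m = scale_rot_swap_matrix(rot)     # permutation part of the rotation, signs dropped
# s = Vector((2.0, 3.0, 4.0))
# m @ s                              # Vector((2.0, 4.0, 3.0)): Scale[s] Rot == Rot Scale[m @ s]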
| null |
1,396 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkunimkt.endpoint import endpoint_data
class ListMediaRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'UniMkt', '2018-12-12', 'ListMedia')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_UserId(self): # String
return self.get_query_params().get('UserId')
def set_UserId(self, UserId): # String
self.add_query_param('UserId', UserId)
def get_OriginSiteUserId(self): # String
return self.get_query_params().get('OriginSiteUserId')
def set_OriginSiteUserId(self, OriginSiteUserId): # String
self.add_query_param('OriginSiteUserId', OriginSiteUserId)
def get_PageNumber(self): # Integer
return self.get_query_params().get('PageNumber')
def set_PageNumber(self, PageNumber): # Integer
self.add_query_param('PageNumber', PageNumber)
def get_MediaName(self): # String
return self.get_query_params().get('MediaName')
def set_MediaName(self, MediaName): # String
self.add_query_param('MediaName', MediaName)
def get_AppName(self): # String
return self.get_query_params().get('AppName')
def set_AppName(self, AppName): # String
self.add_query_param('AppName', AppName)
def get_TenantId(self): # String
return self.get_query_params().get('TenantId')
def set_TenantId(self, TenantId): # String
self.add_query_param('TenantId', TenantId)
def get_PageSize(self): # Integer
return self.get_query_params().get('PageSize')
def set_PageSize(self, PageSize): # Integer
self.add_query_param('PageSize', PageSize)
def get_AccessStatus(self): # String
return self.get_query_params().get('AccessStatus')
def set_AccessStatus(self, AccessStatus): # String
self.add_query_param('AccessStatus', AccessStatus)
def get_FirstScene(self): # String
return self.get_query_params().get('FirstScene')
def set_FirstScene(self, FirstScene): # String
self.add_query_param('FirstScene', FirstScene)
def get_EndCreateTime(self): # Long
return self.get_query_params().get('EndCreateTime')
def set_EndCreateTime(self, EndCreateTime): # Long
self.add_query_param('EndCreateTime', EndCreateTime)
def get_Business(self): # String
return self.get_query_params().get('Business')
def set_Business(self, Business): # String
self.add_query_param('Business', Business)
def get_Os(self): # String
return self.get_query_params().get('Os')
def set_Os(self, Os): # String
self.add_query_param('Os', Os)
def get_MediaStatus(self): # String
return self.get_query_params().get('MediaStatus')
def set_MediaStatus(self, MediaStatus): # String
self.add_query_param('MediaStatus', MediaStatus)
def METHOD_NAME(self): # String
return self.get_query_params().get('Environment')
def set_Environment(self, Environment): # String
self.add_query_param('Environment', Environment)
def get_StartCreateTime(self): # Long
return self.get_query_params().get('StartCreateTime')
def set_StartCreateTime(self, StartCreateTime): # Long
self.add_query_param('StartCreateTime', StartCreateTime)
def get_UserSite(self): # String
return self.get_query_params().get('UserSite')
def set_UserSite(self, UserSite): # String
self.add_query_param('UserSite', UserSite)
def get_SecondScene(self): # String
return self.get_query_params().get('SecondScene')
def set_SecondScene(self, SecondScene): # String
self.add_query_param('SecondScene', SecondScene)
def get_MediaType(self): # String
return self.get_query_params().get('MediaType')
def set_MediaType(self, MediaType): # String
self.add_query_param('MediaType', MediaType)
| null |
1,397 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkros.endpoint import endpoint_data
class CreateChangeSetRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'ROS', '2019-09-10', 'CreateChangeSet','ros')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_TimeoutInMinutes(self):
return self.get_query_params().get('TimeoutInMinutes')
def set_TimeoutInMinutes(self,TimeoutInMinutes):
self.add_query_param('TimeoutInMinutes',TimeoutInMinutes)
def get_StackPolicyDuringUpdateBody(self):
return self.get_query_params().get('StackPolicyDuringUpdateBody')
def set_StackPolicyDuringUpdateBody(self,StackPolicyDuringUpdateBody):
self.add_query_param('StackPolicyDuringUpdateBody',StackPolicyDuringUpdateBody)
def get_TemplateVersion(self):
return self.get_query_params().get('TemplateVersion')
def set_TemplateVersion(self,TemplateVersion):
self.add_query_param('TemplateVersion',TemplateVersion)
def get_StackName(self):
return self.get_query_params().get('StackName')
def set_StackName(self,StackName):
self.add_query_param('StackName',StackName)
def get_ChangeSetType(self):
return self.get_query_params().get('ChangeSetType')
def set_ChangeSetType(self,ChangeSetType):
self.add_query_param('ChangeSetType',ChangeSetType)
def get_DisableRollback(self):
return self.get_query_params().get('DisableRollback')
def set_DisableRollback(self,DisableRollback):
self.add_query_param('DisableRollback',DisableRollback)
def get_TemplateId(self):
return self.get_query_params().get('TemplateId')
def set_TemplateId(self,TemplateId):
self.add_query_param('TemplateId',TemplateId)
def get_Parameters(self):
return self.get_query_params().get('Parameters')
def set_Parameters(self, Parameterss):
for depth1 in range(len(Parameterss)):
if Parameterss[depth1].get('ParameterValue') is not None:
self.add_query_param('Parameters.' + str(depth1 + 1) + '.ParameterValue', Parameterss[depth1].get('ParameterValue'))
if Parameterss[depth1].get('ParameterKey') is not None:
self.add_query_param('Parameters.' + str(depth1 + 1) + '.ParameterKey', Parameterss[depth1].get('ParameterKey'))
def get_ClientToken(self):
return self.get_query_params().get('ClientToken')
def set_ClientToken(self,ClientToken):
self.add_query_param('ClientToken',ClientToken)
def METHOD_NAME(self):
return self.get_query_params().get('TemplateBody')
def set_TemplateBody(self,TemplateBody):
self.add_query_param('TemplateBody',TemplateBody)
def get_StackId(self):
return self.get_query_params().get('StackId')
def set_StackId(self,StackId):
self.add_query_param('StackId',StackId)
def get_Description(self):
return self.get_query_params().get('Description')
def set_Description(self,Description):
self.add_query_param('Description',Description)
def get_TemplateURL(self):
return self.get_query_params().get('TemplateURL')
def set_TemplateURL(self,TemplateURL):
self.add_query_param('TemplateURL',TemplateURL)
def get_NotificationURLs(self):
return self.get_query_params().get('NotificationURLs')
def set_NotificationURLs(self, NotificationURLss):
for depth1 in range(len(NotificationURLss)):
if NotificationURLss[depth1] is not None:
self.add_query_param('NotificationURLs.' + str(depth1 + 1) , NotificationURLss[depth1])
def get_ResourcesToImport(self):
return self.get_query_params().get('ResourcesToImport')
def set_ResourcesToImport(self, ResourcesToImports):
for depth1 in range(len(ResourcesToImports)):
if ResourcesToImports[depth1].get('ResourceIdentifier') is not None:
self.add_query_param('ResourcesToImport.' + str(depth1 + 1) + '.ResourceIdentifier', ResourcesToImports[depth1].get('ResourceIdentifier'))
if ResourcesToImports[depth1].get('LogicalResourceId') is not None:
self.add_query_param('ResourcesToImport.' + str(depth1 + 1) + '.LogicalResourceId', ResourcesToImports[depth1].get('LogicalResourceId'))
if ResourcesToImports[depth1].get('ResourceType') is not None:
self.add_query_param('ResourcesToImport.' + str(depth1 + 1) + '.ResourceType', ResourcesToImports[depth1].get('ResourceType'))
def get_StackPolicyBody(self):
return self.get_query_params().get('StackPolicyBody')
def set_StackPolicyBody(self,StackPolicyBody):
self.add_query_param('StackPolicyBody',StackPolicyBody)
def get_StackPolicyDuringUpdateURL(self):
return self.get_query_params().get('StackPolicyDuringUpdateURL')
def set_StackPolicyDuringUpdateURL(self,StackPolicyDuringUpdateURL):
self.add_query_param('StackPolicyDuringUpdateURL',StackPolicyDuringUpdateURL)
def get_RamRoleName(self):
return self.get_query_params().get('RamRoleName')
def set_RamRoleName(self,RamRoleName):
self.add_query_param('RamRoleName',RamRoleName)
def get_UsePreviousParameters(self):
return self.get_query_params().get('UsePreviousParameters')
def set_UsePreviousParameters(self,UsePreviousParameters):
self.add_query_param('UsePreviousParameters',UsePreviousParameters)
def get_ReplacementOption(self):
return self.get_query_params().get('ReplacementOption')
def set_ReplacementOption(self,ReplacementOption):
self.add_query_param('ReplacementOption',ReplacementOption)
def get_StackPolicyURL(self):
return self.get_query_params().get('StackPolicyURL')
def set_StackPolicyURL(self,StackPolicyURL):
self.add_query_param('StackPolicyURL',StackPolicyURL)
def get_ChangeSetName(self):
return self.get_query_params().get('ChangeSetName')
def set_ChangeSetName(self,ChangeSetName):
self.add_query_param('ChangeSetName',ChangeSetName)
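# A brief sketch of issuing this request through the core SDK client. The
# credentials, region and template values are placeholders; AcsClient and
# do_action_with_exception are the standard aliyunsdkcore entry points.
#
# from aliyunsdkcore.client import AcsClient
#
# client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
# request = CreateChangeSetRequest()
# request.set_StackName('demo-stack')
# request.set_ChangeSetName('demo-change-set')
# request.set_ChangeSetType('CREATE')
# request.set_TemplateBody('{"ROSTemplateFormatVersion": "2015-09-01"}')
# # each dict is flattened to Parameters.N.ParameterKey / Parameters.N.ParameterValue
# request.set_Parameters([{'ParameterKey': 'VpcName', 'ParameterValue': 'demo-vpc'}])
# response = client.do_action_with_exception(request)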
| null |
1,398 |
import difflib
import math
import re
import string
from collections import Counter
def remove_words(phrase):
# Removes words and punctuation that don't help the diff comparison.
stop_words = (
r"a|an|and|as|at|but|by|en|etc|for|if|in|is|of|on|or|the|to|v\.?|via"
+ r"|vs\.?|united|states?|et|al|appellants?|defendants?|administrator|plaintiffs?|error"
+ r"|others|against|ex|parte|complainants?|original|claimants?|devisee"
+ r"|executrix|executor"
)
stop_words_reg = re.compile(r"^(%s)$" % stop_words, re.IGNORECASE)
# strips punctuation
exclude = set(string.punctuation)
phrase = "".join(ch for ch in phrase if ch not in exclude)
words = re.split("[\t ]", phrase)
result = []
for word in words:
word = stop_words_reg.sub("", word)
result.append(word)
return "".join(result)
def gen_diff_ratio(left, right):
"""
Generates a difference between two strings.
Returns a value between 0 and 1. 0 means the strings are totally different.
1 means they are identical.
This is a case sensitive comparison. If you want case-insensitive, ensure
that you run lower() on your strings before passing them in.
"""
# Remove common strings from all case names /before/ comparison.
# Doing so lowers the opportunity for false positives.
left = remove_words(left)
right = remove_words(right)
# compute the difference value
diff = difflib.SequenceMatcher(None, left.strip(), right.strip()).ratio()
return diff
def METHOD_NAME(items, s, case_sensitive=True):
"""Find the string in the list that is the closest match to the string
:param items: The list to search within
:param s: The string to attempt to match
:param case_sensitive: Whether comparisons should honor case
:return dict with the index of the best matching value, its value, and its
match ratio.
"""
diff_ratios = []
if not case_sensitive:
s = s.lower()
for item in items:
# Calculate its diff_ratio, and add it to an array
if not case_sensitive:
item = item.lower()
diff = gen_diff_ratio(item, s)
diff_ratios.append(diff)
# Find the max ratio, and grab the corresponding result
max_ratio = max(diff_ratios)
i = diff_ratios.index(max_ratio)
return {
"match_index": i,
"match_str": items[i],
"ratio": max_ratio,
}
def find_confidences(results, case_name):
"""Returns all matches above a threshold.
This is nearly identical to find_best_match, but returns any good matches
in an array, and returns their confidence thresholds in a second array.
"""
diff_ratios = []
for result in results:
# Calculate its diff_ratio, and add it to an array
candidate_case_name = result["caseName"]
diff = gen_diff_ratio(candidate_case_name, case_name)
diff_ratios.append(diff)
return diff_ratios
def string_to_vector(text: str) -> Counter:
"""Convert strings to counter dict.
:param text: Text to vectorize
:return: A dictionary of words by count
"""
WORD = re.compile(r"\w+")
words = WORD.findall(text)
return Counter(words)
def get_cosine_similarity(left_str: str, right_str: str) -> float:
"""Calculate the cosine similarity of two strings.
This can be useful in circumstances when the counts of the words in the
strings have more meaning than the order of the characters or the edit
distances of individual words.
Better for long strings with sentence-length differences, where difflib's
ratio() can fall down.
"""
left, right = string_to_vector(left_str), string_to_vector(right_str)
intersection = set(left.keys()) & set(right.keys())
numerator = sum([left[x] * right[x] for x in intersection])
sum1 = sum([left[x] ** 2 for x in left.keys()])
sum2 = sum([right[x] ** 2 for x in right.keys()])
denominator = math.sqrt(sum1) * math.sqrt(sum2)
if not denominator:
return 0.0
else:
return float(numerator) / denominator
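# Two tiny illustrative comparisons of the helpers above; the case names and
# sentences are invented for illustration.
#
# gen_diff_ratio("smith v. united states", "smith v. united states et al.")
# #   -> 1.0, because remove_words() strips punctuation and the shared stop words
# get_cosine_similarity("the cat sat on the mat", "the mat sat on the cat")
# #   -> 1.0, identical word counts even though the word order differs
# get_cosine_similarity("cat", "dog")
# #   -> 0.0, no words in common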
| null |
1,399 |
# Copyright (c) 2015 - 2023, Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
'''
AppConf class for HPL reference (netlib) benchmark.
'''
import os
import sys
import math
import textwrap
from apps import apps
def METHOD_NAME(parser):
""" Add common arguments for all run scripts:
--frac-dram
"""
help_text = 'Ratio of the total node DRAM that should be used for the HPL ' + \
'matrix (assuming DP). Value should be between 0 and 1. ' + \
'Default is 0.7. 0.8-0.9 is a better value but might fail due to ' + \
'out-of-memory.'
parser.add_argument('--frac-dram', dest='frac_dram_per_node',
action='store', type=float, default=0.7,
help=help_text)
def create_appconf(mach, args):
return HplNetlibAppConf(args.node_count, mach, args.frac_dram_per_node)
class HplNetlibAppConf(apps.AppConf):
@staticmethod
def name():
return 'hpl_netlib'
def __init__(self, num_nodes, mach, frac_dram_per_node, cores_per_node=None):
'''
num_nodes: Number of MPI ranks (1 node per rank) -- 1, 2, 4, 8 or 16.
frac_dram_per_node: Ratio of the total node DRAM that should be used for the
HPL matrix (assuming DP).
80-90% is a good amount to maximize efficiency.
cores_per_node: Number of Xeon cores that each MPI process can offload to via OMP.
Total number of physical cores will be selected if this is set to None.
'''
dram_for_app = num_nodes * mach.total_node_memory_bytes() * frac_dram_per_node
if cores_per_node is None:
cores_per_node = mach.num_core()
benchmark_dir = os.path.dirname(os.path.abspath(__file__))
self.exec_path = os.path.join(benchmark_dir, 'hpl-2.3/bin/Linux_Intel64/xhpl')
self.NBs = 384 # This is the recommended size for Intel (R) Xeon (R) Scalable family.
process_grid_ratios = {
1: {'P': 1, 'Q': 1},
2: {'P': 1, 'Q': 2},
4: {'P': 2, 'Q': 2},
8: {'P': 2, 'Q': 4},
16: {'P': 4, 'Q': 4}
}
if num_nodes not in process_grid_ratios:
raise RuntimeError("Number of nodes {} is not defined for HPL.".format(num_nodes))
self.P = process_grid_ratios[num_nodes]['P']
self.Q = process_grid_ratios[num_nodes]['Q']
self.N = int(round(math.sqrt(dram_for_app / 8)))
self._cpu_per_rank = cores_per_node
sys.stdout.write('DRAM reserved for APP: {dram_for_app:0.2f}GB\n'.format(dram_for_app=dram_for_app/2**30))
sys.stdout.write('Cores for app: {}\n'.format(cores_per_node))
sys.stdout.write('N={}\n'.format(self.N))
def trial_setup(self, run_id, output_dir):
dat_file_path = os.path.join(output_dir + "/HPL.dat")
if not os.path.isfile(dat_file_path):
dat_file_text = textwrap.dedent('''\
HPLinpack benchmark input file
Innovative Computing Laboratory, University of Tennessee
HPL.out output file name (if any)
6 device out (6=stdout,7=stderr,file)
1 # of problems sizes (N)
{N} Ns
1 # of NBs
{NBs} NBs
0 PMAP process mapping (0=Row-,1=Column-major)
1 # of process grids (P x Q)
{P} Ps
{Q} Qs
16.0 threshold
1 # of panel fact
1 PFACTs (0=left, 1=Crout, 2=Right)
1 # of recursive stopping criterium
4 NBMINs (>= 1)
1 # of panels in recursion
2 NDIVs
1 # of recursive panel fact.
1 RFACTs (0=left, 1=Crout, 2=Right)
1 # of broadcast
0 BCASTs (0=1rg,1=1rM,2=2rg,3=2rM,4=Lng,5=LnM)
1 # of lookahead depth
0 DEPTHs (>=0)
2 SWAP (0=bin-exch,1=long,2=mix)
64 swapping threshold
0 L1 in (0=transposed,1=no-transposed) form
0 U in (0=transposed,1=no-transposed) form
1 Equilibration (0=no,1=yes)
8 memory alignment in double (> 0)
'''.format(N=self.N, NBs=self.NBs, P=self.P, Q=self.Q))
with open(dat_file_path, "w") as dat_file:
dat_file.write(dat_file_text)
def get_rank_per_node(self):
return 1
def get_cpu_per_rank(self):
return self._cpu_per_rank
def get_bash_exec_path(self):
return self.exec_path
def get_bash_exec_args(self):
return ''
def get_custom_geopm_args(self):
# See README.md for an explanation of why
# HPL cannot start in process control mode.
# Also hyperthreading does not benefit HPL and
# it is turned off.
return ['--geopm-ctl=application',
'--geopm-hyperthreads-disable']
def parse_fom(self, log_path):
result = None
key = 'WR00'
with open(log_path) as fid:
for line in fid.readlines():
if key in line:
result = float(line.split(' ')[-1])
break
return result
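# A worked example of the problem-size arithmetic above, using assumed hardware
# numbers (2 nodes with 256 GiB of DRAM each and the default frac_dram_per_node
# of 0.7). Each matrix element is a double (8 bytes):
#
#   dram_for_app = 2 * 256 * 2**30 * 0.7         # ~3.85e11 bytes reserved for HPL
#   N = int(round(math.sqrt(dram_for_app / 8)))  # ~219,000 rows and columns
#
# NB (384) and the P x Q process grid are then written into the generated HPL.dat.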
| null |