id (int64, 0 to 300k) | label (string, 1 to 74 chars, may be ⌀) | text (string, 4k to 8k chars) |
---|---|---|
2,700 |
block inc
|
from __future__ import annotations
import random
from concurrent.futures import (
FIRST_COMPLETED,
FIRST_EXCEPTION,
Future,
TimeoutError,
as_completed,
wait,
)
from time import sleep
import pytest
from tlz import take
from distributed.event import Event
from distributed.metrics import time
from distributed.utils import CancelledError
from distributed.utils_test import inc, slowadd, slowinc, throws, varying
def number_of_processing_tasks(client):
return sum(len(v) for k, v in client.processing().items())
def test_submit(client):
with client.get_executor() as e:
f1 = e.submit(slowadd, 1, 2)
assert isinstance(f1, Future)
f2 = e.submit(slowadd, 3, y=4)
f3 = e.submit(throws, "foo")
f4 = e.submit(slowadd, x=5, y=6)
assert f1.result() == 3
assert f2.result() == 7
with pytest.raises(RuntimeError):
f3.result()
assert f4.result() == 11
def test_as_completed(client):
with client.get_executor() as e:
N = 10
fs = [e.submit(slowinc, i, delay=0.02) for i in range(N)]
expected = set(range(1, N + 1))
for f in as_completed(fs):
res = f.result()
assert res in expected
expected.remove(res)
assert not expected
def test_wait(client):
def METHOD_NAME(x, ev):
ev.wait()
return x + 1
with client.get_executor(pure=False) as e:
ev = Event()
N = 10
fs = [e.submit(METHOD_NAME, i, ev, pure=False) for i in range(N)]
res = wait(fs, timeout=0.01)
assert len(res.not_done) > 0
ev.set()
res = wait(fs)
assert len(res.not_done) == 0
assert res.done == set(fs)
ev.clear()
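        # Occupy all but one worker thread with tasks blocked on the event so the
        # plain inc() submitted below can complete first.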
nthreads = sum(client.nthreads().values())
fs = [e.submit(METHOD_NAME, i, ev, pure=False) for i in range(nthreads - 1)]
fs.append(e.submit(inc, 0))
fs.extend([e.submit(METHOD_NAME, i, ev, pure=False) for i in range(nthreads, N)])
res = wait(fs, return_when=FIRST_COMPLETED)
assert len(res.not_done) > 0
assert len(res.done) >= 1
ev.set()
res = wait(fs)
assert len(res.not_done) == 0
assert res.done == set(fs)
ev.clear()
fs = [e.submit(inc, i) for i in range(N)]
fs += [e.submit(throws, None)]
fs += [e.submit(METHOD_NAME, i, ev, pure=False) for i in range(N)]
res = wait(fs, return_when=FIRST_EXCEPTION)
assert any(f.exception() for f in res.done)
assert res.not_done
errors = []
        for fut in res.done:
            try:
                fut.result()
            except RuntimeError as exc:
                errors.append(exc)
assert len(errors) == 1
assert "hello" in str(errors[0])
ev.set()
def test_cancellation(client):
with client.get_executor(pure=False) as e:
fut = e.submit(sleep, 2.0)
start = time()
while number_of_processing_tasks(client) == 0:
assert time() < start + 30
sleep(0.01)
assert not fut.done()
fut.cancel()
assert fut.cancelled()
start = time()
while number_of_processing_tasks(client) != 0:
assert time() < start + 30
sleep(0.01)
with pytest.raises(CancelledError):
fut.result()
def test_cancellation_wait(client):
with client.get_executor(pure=False) as e:
fs = [e.submit(slowinc, i, delay=0.2) for i in range(10)]
fs[3].cancel()
res = wait(fs, return_when=FIRST_COMPLETED, timeout=30)
assert len(res.not_done) > 0
assert len(res.done) >= 1
assert fs[3] in res.done
assert fs[3].cancelled()
def test_cancellation_as_completed(client):
with client.get_executor(pure=False) as e:
fs = [e.submit(slowinc, i, delay=0.2) for i in range(10)]
fs[3].cancel()
fs[8].cancel()
n_cancelled = sum(f.cancelled() for f in as_completed(fs, timeout=30))
assert n_cancelled == 2
@pytest.mark.slow()
def test_map(client):
with client.get_executor() as e:
N = 10
it = e.map(inc, range(N))
expected = set(range(1, N + 1))
for x in it:
expected.remove(x)
assert not expected
with client.get_executor(pure=False) as e:
N = 10
it = e.map(slowinc, range(N), [0.3] * N, timeout=1.2)
results = []
with pytest.raises(TimeoutError):
for x in it:
results.append(x)
assert 2 <= len(results) < 7
with client.get_executor(pure=False) as e:
N = 10
# Not consuming the iterator will cancel remaining tasks
it = e.map(slowinc, range(N), [0.3] * N)
for _ in take(2, it):
pass
# Some tasks still processing
assert number_of_processing_tasks(client) > 0
# Garbage collect the iterator => remaining tasks are cancelled
del it
sleep(0.5)
assert number_of_processing_tasks(client) == 0
def get_random():
return random.random()
def test_pure(client):
N = 10
with client.get_executor() as e:
fs = [e.submit(get_random) for i in range(N)]
res = [fut.result() for fut in as_completed(fs)]
assert len(set(res)) < len(res)
with client.get_executor(pure=False) as e:
fs = [e.submit(get_random) for i in range(N)]
res = [fut.result() for fut in as_completed(fs)]
assert len(set(res)) == len(res)
def test_workers(client, s, a, b):
N = 10
with client.get_executor(workers=[b["address"]]) as e:
fs = [e.submit(slowinc, i) for i in range(N)]
wait(fs)
has_what = client.has_what()
assert not has_what.get(a["address"])
assert len(has_what[b["address"]]) == N
def test_unsupported_arguments(client, s, a, b):
with pytest.raises(TypeError) as excinfo:
client.get_executor(workers=[b["address"]], foo=1, bar=2)
assert "unsupported arguments to ClientExecutor: ['bar', 'foo']" in str(
excinfo.value
)
def test_retries(client):
args = [ZeroDivisionError("one"), ZeroDivisionError("two"), 42]
with client.get_executor(retries=5, pure=False) as e:
future = e.submit(varying(args))
assert future.result() == 42
with client.get_executor(retries=4) as e:
future = e.submit(varying(args))
result = future.result()
assert result == 42
with client.get_executor(retries=2) as e:
future = e.submit(varying(args))
with pytest.raises(ZeroDivisionError, match="two"):
res = future.result()
with client.get_executor(retries=0) as e:
future = e.submit(varying(args))
with pytest.raises(ZeroDivisionError, match="one"):
res = future.result()
def test_shutdown_wait(client):
# shutdown(wait=True) waits for pending tasks to finish
e = client.get_executor()
start = time()
fut = e.submit(sleep, 1.0)
e.shutdown()
assert time() >= start + 1.0
sleep(0.1) # wait for future outcome to propagate
assert fut.done()
fut.result() # doesn't raise
with pytest.raises(RuntimeError):
e.submit(sleep, 1.0)
def test_shutdown_nowait(client):
# shutdown(wait=False) cancels pending tasks
e = client.get_executor()
start = time()
fut = e.submit(sleep, 5.0)
e.shutdown(wait=False)
assert time() < start + 2.0
sleep(0.1) # wait for future outcome to propagate
assert fut.cancelled()
with pytest.raises(RuntimeError):
e.submit(sleep, 1.0)
|
2,701 |
slice by element id
|
from io import StringIO
from numpy import zeros, searchsorted, where, asarray, array
from pyNastran.dev.bdf_vectorized.cards.elements.element import Element
class SolidElement(Element):
def __init__(self, model):
Element.__init__(self, model)
def __getitem__(self, i):
return self.slice_by_index(i)
def METHOD_NAME(self, element_ids):
"""
Allows for slicing:
- elements[1:10]
- elements[4]
- elements[1:10:2]
- elements[[1,2,5]]
- elements[array([1,2,5])]
"""
try:
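            # build() keeps element_id sorted, so searchsorted returns the
            # positional index of each requested element id.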
i = searchsorted(self.element_id, element_ids)
elements = self.slice_by_index(i)
except TypeError:
msg = 'Error with Solid slice; did you mean to use slice_by_index?\n'
msg += ' self.element_id=%s\n' % self.element_id
msg += ' element_ids=%s\n' % element_ids
raise TypeError(msg)
return elements
def __repr__(self):
f = StringIO()
f.write('<%s object> n=%s\n' % (self.type, self.n))
self.write_card(f)
return f.getvalue()
def get_property_id_by_element_index(self, i=None):
#i = self._get_sorted_index(self.element_id, element_id,
# 'element_id', 'element_id in %s' % self.type, check=True)
return self.property_id[i]
def get_property_id_by_element_id(self, element_id=None):
i = self._get_sorted_index(self.element_id, element_id,
'element_id', 'element_id in %s' % self.type, check=True)
return self.get_property_id_by_element_index(i)
def get_material_id_by_element_id(self, element_id=None):
return self.model.elements.elements_solid.get_material_id_by_element_id(element_id)
def _get_node_locations_by_element_id(self, element_id=None, xyz_cid0=None):
i = self._get_sorted_index(self.element_id, element_id,
'element_id', 'element_id in %s' % self.type, check=True)
self.model.log.debug('ielem = %s' % i)
if xyz_cid0 is None:
xyz_cid0 = self.model.grid.get_position_by_node_index()
return self._get_node_locations_by_index(i, xyz_cid0)
def allocate(self, ncards):
#ncards = card_count[self.type]
#self.model.log.debug('%s.allocate(%s)' % (self.type, ncards))
if ncards:
self.n = ncards
#float_fmt = self.model.float_fmt
self.element_id = zeros(ncards, 'int32')
self.property_id = zeros(ncards, 'int32')
self.node_ids = zeros((ncards, self.nnodes), 'int32')
#self._comments.append(comment)
def build(self):
#self.model.log.debug('self.n = %i' % self.n)
if self.n:
i = self.element_id.argsort()
self.element_id = self.element_id[i]
self.property_id = self.property_id[i]
self.node_ids = self.node_ids[i, :]
self._cards = []
self._comments = []
else:
self.element_id = array([], dtype='int32')
self.property_id = array([], dtype='int32')
def get_mass_by_element_id(self, element_id=None, xyz_cid0=None, total=False):
"""
Gets the mass for one or more SolidElement elements.
Parameters
----------
element_id : (N, ) int ndarray; (default=None -> all)
the elements to consider
xyz_cid0 : dict[int node_id] : (3, ) float ndarray xyz (default=None -> auto)
the positions of the GRIDs in CID=0
total : bool; default=False
should the centroid be summed
"""
if element_id is None:
element_id = self.element_id
        V = self.get_volume_by_element_id(element_id, xyz_cid0)
mid = self.model.properties_solid.get_material_id_by_property_id(self.property_id)
rho = self.model.materials.get_density_by_material_id(mid)
rho = self.model.properties_solid.psolid.get_density_by_property_id(self.property_id)
#rho = self.model.materials.get_density_by_material_id(mid)
try:
mass = V * rho
except ValueError:
msg = 'element_id = %s; n=%s\n' % (element_id, len(element_id))
msg += 'mid=%s\n' % mid
msg += 'rho=%s\n' % rho
msg += 'V.shape = %s\n' % str(V.shape)
msg += 'rho.shape = %s' % str(rho.shape)
print(msg)
raise
n = len(element_id)
assert mass.shape == (n, ), mass.shape
if total:
mass = mass.sum()
return mass
    def get_mass_centroid_inertia_by_element_id(self, p=None, element_id=None, xyz_cid0=None, total=False):
"""
        Calculates the mass, centroid, and (3, 3) moment of inertia
matrix. Considers position, but not the (hopefully) small
elemental term.
:param p: the point to take the moment of inertia about (default=None -> origin)
a = integral(mu * (y^2 + z^2), dV)
b = integral(mu * (x^2 + z^2), dV)
        c = integral(mu * (x^2 + y^2), dV)
a' = integral(mu * (yz), dV)
b' = integral(mu * (xz), dV)
c' = integral(mu * (xy), dV)
        I = [ a   -c'  -b']
            [-c'   b   -a']
            [-b'  -a'   c ]
Exact MOI for tetrahedron
http://www.thescipub.com/abstract/?doi=jmssp.2005.8.11
"""
        if p is None:
            p = zeros(3, self.model.float_fmt)
        # NOTE: ``mass`` and ``centroid`` are never computed in this method as
        # written; presumably the mass would come from get_mass_by_element_id and
        # the centroid from the concrete element class.
        r = centroid - p  # 2D array - 1D array
        I = mass * r**2  # column vector * 2D array
        return mass, centroid, I
def get_density_by_element_id(self, element_id=None):
if element_id is None:
element_id = self.element_id
n = len(element_id)
rho = zeros(n, dtype='float64')
i = where(element_id == self.element_id)[0]
for pid in self.property_id[i]:
j = where(pid == self.property_id[i])[0]
rhoi = self.model.properties_solid.psolid.get_density_by_property_id(pid)
rho[j] = rhoi
assert rho.shape == (n, ), rho.shape
return rho
def slice_by_index(self, i):
i = asarray(i)
#name = self.__class__.__name__
#obj_class = type(name, (SolidElement, ), {})
obj_class = self.__class__#.__class__
obj = obj_class(self.model)
try:
n = len(i)
except TypeError:
msg = 'i=%s; self.n=%s' % (i, self.n)
raise TypeError(msg)
obj.n = n
obj.i = n
#obj._cards = self._cards[i]
#obj._comments = obj._comments[i]
#obj.comments = obj.comments[i]
obj.element_id = self.element_id[i]
obj.property_id = self.property_id[i]
obj.node_ids = self.node_ids[i, :]
return obj
|
2,702 |
test check strategy might suggest sampled from
|
# This file is part of Hypothesis, which may be found at
# https://github.com/HypothesisWorks/hypothesis/
#
# Copyright the Hypothesis Authors.
# Individual contributors are listed in AUTHORS.rst and the git log.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
import functools
import pytest
from hypothesis import find, given
from hypothesis.errors import InvalidArgument
from hypothesis.internal.validation import check_type
from hypothesis.strategies import (
SearchStrategy as ActualSearchStrategy,
binary,
booleans,
data,
dictionaries,
floats,
frozensets,
integers,
lists,
nothing,
recursive,
sets,
text,
)
from hypothesis.strategies._internal.strategies import check_strategy
from tests.common.utils import fails_with
def test_errors_when_given_varargs():
@given(integers())
def has_varargs(*args):
pass
with pytest.raises(InvalidArgument) as e:
has_varargs()
assert "varargs" in e.value.args[0]
def test_varargs_without_positional_arguments_allowed():
@given(somearg=integers())
def has_varargs(somearg, *args):
pass
def test_errors_when_given_varargs_and_kwargs_with_positional_arguments():
@given(integers())
def has_varargs(*args, **kw):
pass
with pytest.raises(InvalidArgument) as e:
has_varargs()
assert "varargs" in e.value.args[0]
def test_varargs_and_kwargs_without_positional_arguments_allowed():
@given(somearg=integers())
def has_varargs(*args, **kw):
pass
def test_bare_given_errors():
@given()
def test():
pass
with pytest.raises(InvalidArgument):
test()
def test_errors_on_unwanted_kwargs():
@given(hello=int, world=int)
def greet(world):
pass
with pytest.raises(InvalidArgument):
greet()
def test_errors_on_too_many_positional_args():
@given(integers(), int, int)
def foo(x, y):
pass
with pytest.raises(InvalidArgument):
foo()
def test_errors_on_any_varargs():
@given(integers())
def oops(*args):
pass
with pytest.raises(InvalidArgument):
oops()
def test_can_put_arguments_in_the_middle():
@given(y=integers())
def foo(x, y, z):
pass
foo(1, 2)
def test_float_ranges():
with pytest.raises(InvalidArgument):
floats(float("nan"), 0).example()
with pytest.raises(InvalidArgument):
floats(1, -1).example()
def test_float_range_and_allow_nan_cannot_both_be_enabled():
with pytest.raises(InvalidArgument):
floats(min_value=1, allow_nan=True).example()
with pytest.raises(InvalidArgument):
floats(max_value=1, allow_nan=True).example()
def test_float_finite_range_and_allow_infinity_cannot_both_be_enabled():
with pytest.raises(InvalidArgument):
floats(0, 1, allow_infinity=True).example()
def test_does_not_error_if_min_size_is_bigger_than_default_size():
lists(integers(), min_size=50).example()
sets(integers(), min_size=50).example()
frozensets(integers(), min_size=50).example()
lists(integers(), min_size=50, unique=True).example()
def test_list_unique_and_unique_by_cannot_both_be_enabled():
@given(lists(integers(), unique=True, unique_by=lambda x: x))
def boom(t):
pass
with pytest.raises(InvalidArgument) as e:
boom()
assert "unique " in e.value.args[0]
assert "unique_by" in e.value.args[0]
def test_min_before_max():
with pytest.raises(InvalidArgument):
integers(min_value=1, max_value=0).validate()
def test_filter_validates():
with pytest.raises(InvalidArgument):
integers(min_value=1, max_value=0).filter(bool).validate()
def test_recursion_validates_base_case():
with pytest.raises(InvalidArgument):
recursive(integers(min_value=1, max_value=0), lists).validate()
def test_recursion_validates_recursive_step():
with pytest.raises(InvalidArgument):
recursive(integers(), lambda x: lists(x, min_size=3, max_size=1)).validate()
@fails_with(InvalidArgument)
@given(x=integers())
def test_stuff_keyword(x=1):
pass
@fails_with(InvalidArgument)
@given(integers())
def test_stuff_positional(x=1):
pass
@fails_with(InvalidArgument)
@given(integers(), integers())
def test_too_many_positional(x):
pass
def test_given_warns_on_use_of_non_strategies():
@given(bool)
def test(x):
pass
with pytest.raises(InvalidArgument):
test()
def test_given_warns_when_mixing_positional_with_keyword():
@given(booleans(), y=booleans())
def test(x, y):
pass
with pytest.raises(InvalidArgument):
test()
def test_cannot_find_non_strategies():
with pytest.raises(InvalidArgument):
find(bool, bool)
@pytest.mark.parametrize(
"strategy",
[
functools.partial(lists, elements=integers()),
functools.partial(dictionaries, keys=integers(), values=integers()),
text,
binary,
],
)
@pytest.mark.parametrize("min_size,max_size", [(0, "10"), ("0", 10)])
def test_invalid_sizes(strategy, min_size, max_size):
@given(strategy(min_size=min_size, max_size=max_size))
def test(x):
pass
with pytest.raises(InvalidArgument):
test()
def test_check_type_with_tuple_of_length_two():
def type_checker(x):
check_type((int, str), x, "x")
type_checker(1)
type_checker("1")
with pytest.raises(InvalidArgument, match="Expected one of int, str but got "):
type_checker(1.0)
def test_validation_happens_on_draw():
@given(data())
def test(data):
data.draw(integers().flatmap(lambda _: lists(nothing(), min_size=1)))
with pytest.raises(InvalidArgument, match="has no values"):
test()
class SearchStrategy:
"""Not the SearchStrategy type you were looking for."""
def check_type_(*args):
return check_type(*args)
def test_check_type_suggests_check_strategy():
check_type_(SearchStrategy, SearchStrategy(), "this is OK")
with pytest.raises(AssertionError, match="use check_strategy instead"):
check_type_(ActualSearchStrategy, None, "SearchStrategy assertion")
def check_strategy_(*args):
return check_strategy(*args)
def METHOD_NAME():
with pytest.raises(InvalidArgument) as excinfo:
check_strategy_("not a strategy")
assert "sampled_from" not in str(excinfo.value)
with pytest.raises(InvalidArgument, match="such as st.sampled_from"):
check_strategy_([1, 2, 3])
with pytest.raises(InvalidArgument, match="such as st.sampled_from"):
check_strategy_((1, 2, 3))
check_strategy_(integers(), "passes for our custom coverage check")
|
2,703 |
build list request
|
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._serialization import Serializer
from .._vendor import CognitiveServicesManagementClientMixinABC, _convert_request, _format_url_section
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def METHOD_NAME(
location: str, subscription_id: str, *, filter: Optional[str] = None, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-05-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/providers/Microsoft.CognitiveServices/locations/{location}/usages",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"location": _SERIALIZER.url("location", location, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
if filter is not None:
_params["$filter"] = _SERIALIZER.query("filter", filter, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class UsagesOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.cognitiveservices.CognitiveServicesManagementClient`'s
:attr:`usages` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list(self, location: str, filter: Optional[str] = None, **kwargs: Any) -> Iterable["_models.Usage"]:
"""Get usages for the requested subscription.
:param location: Resource location. Required.
:type location: str
:param filter: An OData filter expression that describes a subset of usages to return. The
supported parameter is name.value (name of the metric, can have an or of multiple names).
Default value is None.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Usage or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.cognitiveservices.models.Usage]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.UsageListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = METHOD_NAME(
location=location,
subscription_id=self._config.subscription_id,
filter=filter,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("UsageListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list.metadata = {
"url": "/subscriptions/{subscriptionId}/providers/Microsoft.CognitiveServices/locations/{location}/usages"
}
|
2,704 |
keys
|
# MONKEY PATCH!!!
import json
import os
from unittest import mock
import mockredis
import redis
import swsssdk
from sonic_py_common import multi_asic
from swsssdk import SonicDBConfig, SonicV2Connector, ConfigDBConnector, ConfigDBPipeConnector
from swsscommon import swsscommon
topo = None
dedicated_dbs = {}
def clean_up_config():
# Set SonicDBConfig variables to initial state
# so that it can be loaded with single or multiple
# namespaces before the test begins.
SonicDBConfig._sonic_db_config = {}
SonicDBConfig._sonic_db_global_config_init = False
SonicDBConfig._sonic_db_config_init = False
def load_namespace_config():
# To support multi asic testing
# SonicDBConfig load_sonic_global_db_config
# is invoked to load multiple namespaces
clean_up_config()
SonicDBConfig.load_sonic_global_db_config(
global_db_file_path=os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'database_global.json'))
def load_database_config():
# Load local database_config.json for single namespace test scenario
clean_up_config()
SonicDBConfig.load_sonic_db_config(
sonic_db_file_path=os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'database_config.json'))
_old_connect_SonicV2Connector = SonicV2Connector.connect
def connect_SonicV2Connector(self, db_name, retry_on=True):
# add topo to kwargs for testing different topology
self.dbintf.redis_kwargs['topo'] = topo
# add the namespace to kwargs for testing multi asic
self.dbintf.redis_kwargs['namespace'] = self.namespace
# Mock DB filename for unit-test
global dedicated_dbs
if dedicated_dbs and dedicated_dbs.get(db_name):
self.dbintf.redis_kwargs['db_name'] = dedicated_dbs[db_name]
else:
self.dbintf.redis_kwargs['db_name'] = db_name
self.dbintf.redis_kwargs['decode_responses'] = True
_old_connect_SonicV2Connector(self, db_name, retry_on)
def _subscribe_keyspace_notification(self, db_name, client):
pass
def config_set(self, *args):
pass
class MockPubSub:
def get_message(self):
return None
def psubscribe(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return self
def listen(self):
return []
def punsubscribe(self, *args, **kwargs):
pass
def clear(self):
pass
INPUT_DIR = os.path.dirname(os.path.abspath(__file__))
class SwssSyncClient(mockredis.MockRedis):
def __init__(self, *args, **kwargs):
super(SwssSyncClient, self).__init__(strict=True, *args, **kwargs)
# Namespace is added in kwargs specifically for unit-test
# to identify the file path to load the db json files.
topo = kwargs.pop('topo')
namespace = kwargs.pop('namespace')
db_name = kwargs.pop('db_name')
self.decode_responses = kwargs.pop('decode_responses', False) == True
fname = db_name.lower() + ".json"
self.pubsub = MockPubSub()
        if namespace is not None and namespace != multi_asic.DEFAULT_NAMESPACE:
fname = os.path.join(INPUT_DIR, namespace, fname)
elif topo is not None:
fname = os.path.join(INPUT_DIR, topo, fname)
else:
fname = os.path.join(INPUT_DIR, fname)
if os.path.exists(fname):
with open(fname) as f:
js = json.load(f)
for k, v in js.items():
if 'expireat' in v and 'ttl' in v and 'type' in v and 'value' in v:
# database is in redis-dump format
if v['type'] == 'hash':
# ignore other types for now since sonic has hset keys only in the db
for attr, value in v['value'].items():
self.hset(k, attr, value)
else:
for attr, value in v.items():
self.hset(k, attr, value)
    # Patch mockredis/mockredis/client.py
    # The official implementation assumes decode_responses=False.
    # Here we detect the option and decode after doing the encode.
    def _encode(self, value):
        "Return a bytestring representation of the value. Taken from redis-py connection.py"
        value = super(SwssSyncClient, self)._encode(value)
        if self.decode_responses:
            return value.decode('utf-8')
        return value
# Patch mockredis/mockredis/client.py
# The official implementation will filter out keys with a slash '/'
# ref: https://github.com/locationlabs/mockredis/blob/master/mockredis/client.py
def METHOD_NAME(self, pattern='*'):
"""Emulate keys."""
import fnmatch
import re
# Make regex out of glob styled pattern.
regex = fnmatch.translate(pattern)
regex = re.compile(regex)
# Find every key that matches the pattern
return [key for key in self.redis if regex.match(key)]
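# Wire the mocks defined above into swsssdk/swsscommon so that any test importing
# these modules transparently talks to the JSON-backed fake redis instead of a
# real database.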
swsssdk.interface.DBInterface._subscribe_keyspace_notification = _subscribe_keyspace_notification
mockredis.MockRedis.config_set = config_set
redis.StrictRedis = SwssSyncClient
SonicV2Connector.connect = connect_SonicV2Connector
swsscommon.SonicV2Connector = SonicV2Connector
swsscommon.ConfigDBConnector = ConfigDBConnector
swsscommon.ConfigDBPipeConnector = ConfigDBPipeConnector
|
2,705 |
test var
|
# ***************************************************************
# Copyright (c) 2023 Jittor. All Rights Reserved.
# Maintainers: Dun Liang <[email protected]>.
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************
import unittest
import jittor as jt
import numpy as np
from jittor import compile_extern
from jittor.test.test_core import expect_error
class TestArray(unittest.TestCase):
def test_data(self):
a = jt.array([1,2,3])
assert (a.data == [1,2,3]).all()
d = a.data
a.data[1] = -2
assert (a.data == [1,-2,3]).all()
assert (a.fetch_sync()==[1,-2,3]).all()
li = jt.liveness_info()
del a
assert li == jt.liveness_info()
del d
assert li != jt.liveness_info()
def test_set_data(self):
a = jt.array([1,2,3])
assert (a.fetch_sync()==[1,2,3]).all()
a.data = [4,5,6]
assert (a.fetch_sync()==[4,5,6]).all()
a.data = jt.array([7,8,9])
assert (a.fetch_sync()==[7,8,9]).all()
@unittest.skipIf(not jt.has_cuda, "Cuda not found")
@jt.flag_scope(use_cuda=1)
def test_memcopy_overlap(self):
import time
from jittor.models import resnet
im=np.random.rand(100,3,224,224).astype(np.float32)
net = resnet.Resnet34()
net.eval()
# warm up
x = jt.array(im).stop_grad()
for i in range(10):
a = net(x)
a.sync()
jt.sync(device_sync=True)
# pure compute
time_start=time.time()
x = jt.array(im).stop_grad()
for i in range(10):
a = net(x)
a.sync()
jt.sync(device_sync=True)
t1 = time.time() - time_start
# warm up
for i in range(3):
x = jt.array(im)
b = net(x)
b.fetch(lambda b: None)
b.sync()
jt.sync(device_sync=True)
# overlap
time_start=time.time()
results = []
for i in range(10):
x = jt.array(im)
b = net(x)
b.fetch(lambda b: results.append(b))
b.sync()
# del c
jt.sync(device_sync=True)
t2 = time.time() - time_start
assert t2-t1 < 0.010, (t2, t1, t2-t1)
assert np.allclose(a.data, b.data)
assert len(results) == 10
for v in results:
assert np.allclose(a.data, v), (v.shape, a.data.shape)
jt.LOG.v(f"pure compute: {t1}, overlap: {t2}")
def test_segfault(self):
a = jt.array([1.0,2.0,3.0])
b = (jt.maximum(a, 0)).sum() * 2.0
da = jt.grad(b, a)
jt.sync_all()
assert (a.data==[1,2,3]).all()
assert (da.data==[2,2,2]).all()
def test_segfault2(self):
assert (jt.array([1,2,3]).reshape((1,3)).data==[1,2,3]).all()
if jt.has_cuda:
with jt.flag_scope(use_cuda=1):
assert (jt.array([1,2,3]).reshape((1,3)).data==[1,2,3]).all()
@unittest.skipIf(not jt.has_cuda, "Cuda not found")
def test_array_dual(self):
with jt.flag_scope(use_cuda=1):
a = jt.array(np.float32([1,2,3]))
assert (a.data==[1,2,3]).all()
@unittest.skipIf(not jt.has_cuda, "Cuda not found")
def test_array_migrate(self):
with jt.flag_scope(use_cuda=1):
a = jt.array(np.float32([1,2,3]))
b = jt.code(a.shape, a.dtype, [a], cpu_src="""
for (int i=0; i<in0_shape0; i++)
@out(i) = @in0(i)*@in0(i)*2;
""")
assert (b.data==[2,8,18]).all()
def test_not_c_style(self):
a = np.array([1,2,3])
b = a[::-1]
x = jt.array(b)
x = x + b
assert (x.data == [6,4,2]).all()
def test_scalar(self):
assert jt.array(1).data == 1
assert jt.array(np.float64(1)).data == 1
assert jt.array(np.float32(1)).data == 1
assert jt.array(np.int32(1)).data == 1
assert jt.array(np.int64(1)).data == 1
def test_array_dtype(self):
a = jt.array([1,2,3], dtype=jt.NanoString("float32"))
a = jt.array([1,2,3], dtype=jt.float32)
def METHOD_NAME(self):
a = jt.Var([1,2,3])
b = jt.Var([1,2,3], "float32")
assert a.dtype == "int32"
assert b.dtype == "float32"
assert (a.numpy() == [1,2,3]).all()
assert (b.numpy() == [1,2,3]).all()
def test_np_array(self):
a = jt.Var([1,2,3])
b = np.array(a)
assert (b==[1,2,3]).all()
def test_pickle(self):
import pickle
a = jt.Var([1,2,3,4])
s = pickle.dumps(a, pickle.HIGHEST_PROTOCOL)
b = pickle.loads(s)
assert isinstance(b, jt.Var)
assert (b.data == [1,2,3,4]).all()
def test_tuple_array(self):
a = jt.array((4,5))
expect_error(lambda : jt.array({}))
expect_error(lambda : jt.array("asdasd"))
expect_error(lambda : jt.array(jt))
def test_64_bit(self):
a = np.random.rand(10)
b = jt.array(a)
assert b.dtype == "float32"
with jt.flag_scope(auto_convert_64_to_32=0):
a = np.random.rand(10)
b = jt.array(a)
assert b.dtype == "float64"
a = np.random.rand(10)
b = jt.array64(a)
assert b.dtype == "float64"
def test_all_type(self):
with jt.flag_scope(auto_convert_64_to_32=0):
types = [
"bool",
"int8", "uint8",
"int16", "uint16",
"int32", "uint32",
"int64", "uint64",
"float32", "float64",
]
for t in types:
a = np.random.random(1000).astype(t)
b = jt.array(a)
assert str(b.dtype) == t
c = b.numpy()
assert str(c.dtype) == t
np.testing.assert_allclose(a, c)
def test_scalar_fuse_unary(self):
c = jt.ones(10)
jt.sync_all()
with jt.profile_scope() as rep:
b = c-1
assert b.data[1] == 0
assert len(rep) == 2
@unittest.skipIf(not jt.has_cuda, "Cuda not found")
def test_scalar_fuse_unary_cuda(self):
with jt.flag_scope(use_cuda=1):
self.test_scalar_fuse_unary()
if __name__ == "__main__":
    unittest.main()
|
2,706 |
test propagation failure on run exception
|
from ipaddress import IPv4Address
from threading import Event
from typing import Callable, Tuple, Type
from unittest.mock import MagicMock
from uuid import UUID
import pytest
from agent_plugins.exploiters.mssql.src.mssql_client import MSSQLClient
from agent_plugins.exploiters.mssql.src.mssql_exploit_client import MSSQLExploitClient
from agent_plugins.exploiters.mssql.src.mssql_options import MSSQLOptions
from tests.data_for_tests.propagation_credentials import IDENTITIES, SECRETS
from common import OperatingSystem
from common.agent_events import ExploitationEvent, PropagationEvent
from common.credentials import Credentials
from common.event_queue import IAgentEventPublisher
from common.types import NetworkPort, NetworkProtocol, NetworkService, PortStatus
from infection_monkey.i_puppet import PortScanData, TargetHost
EXPLOITER_NAME = "MSSQL"
AGENT_ID = UUID("9614480d-471b-4568-86b5-cb922a34ed8a")
DOWNLOAD_COMMAND = "echo download"
LAUNCH_COMMAND = "echo launch"
CREDENTIALS = (Credentials(identity=IDENTITIES[0], secret=SECRETS[0]),)
TCP_PORT = NetworkPort(12345)
TCP_PORT_DATA = PortScanData(
port=TCP_PORT,
status=PortStatus.OPEN,
protocol=NetworkProtocol.TCP,
service=NetworkService.MSSQL,
)
CLOSED_PORT = NetworkPort(12346)
CLOSED_PORT_DATA = PortScanData(
port=CLOSED_PORT,
status=PortStatus.CLOSED,
protocol=NetworkProtocol.TCP,
service=NetworkService.MSSQL,
)
UNKNOWN_PORT = NetworkPort(12347)
UNKNOWN_PORT_DATA = PortScanData(
port=UNKNOWN_PORT,
status=PortStatus.OPEN,
protocol=NetworkProtocol.TCP,
service=NetworkService.UNKNOWN,
)
@pytest.fixture
def mock_mssql_client() -> MagicMock:
client = MagicMock(spec=MSSQLClient)
return client
@pytest.fixture
def mock_agent_event_publisher() -> IAgentEventPublisher:
return MagicMock(spec=IAgentEventPublisher)
@pytest.fixture
def target_host() -> TargetHost:
return TargetHost(ip=IPv4Address("1.1.1.1"), operating_system=OperatingSystem.WINDOWS)
@pytest.fixture
def agent_binary_downloaded() -> Event:
agent_binary_downloaded_event = Event()
agent_binary_downloaded_event.set()
return agent_binary_downloaded_event
@pytest.fixture
def mssql_exploit_client(
mock_agent_event_publisher: IAgentEventPublisher,
mock_mssql_client,
) -> MSSQLExploitClient:
return MSSQLExploitClient(
EXPLOITER_NAME,
AGENT_ID,
mock_agent_event_publisher,
mock_mssql_client,
)
@pytest.fixture
def exploit_host(
target_host, agent_binary_downloaded, mssql_exploit_client
) -> Callable[[], Tuple[bool, bool]]:
def _inner() -> Tuple[bool, bool]:
return mssql_exploit_client.exploit_host(
target_host,
MSSQLOptions(agent_binary_download_timeout=0.001),
CREDENTIALS,
DOWNLOAD_COMMAND,
LAUNCH_COMMAND,
agent_binary_downloaded,
set(),
)
return _inner
def test_exploit__registers_command(
mock_mssql_client, exploit_host: Callable[[], Tuple[bool, bool]]
):
exploit_host()
assert mock_mssql_client.run_command.call_count == 2
def test_exploit__success(exploit_host: Callable[[], Tuple[bool, bool]]):
exploitation_success, propagation_success = exploit_host()
assert exploitation_success
assert propagation_success
def test_exploit__failure_if_login_fails(
mock_mssql_client, exploit_host: Callable[[], Tuple[bool, bool]]
):
mock_mssql_client.login.side_effect = Exception("test")
exploitation_success, propagation_success = exploit_host()
assert not exploitation_success
assert not propagation_success
def _assert_published_events(agent_event_publisher: MagicMock, success: bool):
published_events = agent_event_publisher.publish.call_args_list
published_events = [param[0][0] for param in published_events]
assert ExploitationEvent in [type(event) for event in published_events]
assert PropagationEvent in [type(event) for event in published_events]
assert all([event.success == success for event in published_events])
def test_exploit__sends_events_on_success(
exploit_host: Callable[[], Tuple[bool, bool]],
mock_agent_event_publisher: MagicMock,
):
exploit_host()
_assert_published_events(mock_agent_event_publisher, success=True)
def test_exploit__sends_events_on_failure(
mock_mssql_client,
exploit_host: Callable[[], Tuple[bool, bool]],
mock_agent_event_publisher: MagicMock,
):
mock_mssql_client.login.side_effect = Exception("test")
exploit_host()
published_events = mock_agent_event_publisher.publish.call_args_list
published_events = [param[0][0] for param in published_events]
assert ExploitationEvent in [type(event) for event in published_events]
assert all([event.success is False for event in published_events])
def get_event_by_type(agent_event_publisher: MagicMock, event_type: Type[Event]):
published_events = agent_event_publisher.publish.call_args_list
published_events = [param[0][0] for param in published_events]
return next(event for event in published_events if isinstance(event, event_type))
def test_propagation_fails_if_binary_not_downloaded(
agent_binary_downloaded: Event,
exploit_host: Callable[[], Tuple[bool, bool]],
mock_agent_event_publisher: MagicMock,
):
agent_binary_downloaded.clear()
exploitation_success, propagation_success = exploit_host()
propagation_event = get_event_by_type(mock_agent_event_publisher, PropagationEvent)
assert exploitation_success
assert not propagation_success
assert not propagation_event.success
def test_exploit__failure_on_run_exception(
mock_mssql_client, exploit_host: Callable[[], Tuple[bool, bool]]
):
mock_mssql_client.login.side_effect = Exception("test")
exploitation_success, propagation_success = exploit_host()
assert not exploitation_success
assert not propagation_success
def METHOD_NAME(
mock_mssql_client, exploit_host: Callable[[], Tuple[bool, bool]]
):
mock_mssql_client.run_command.side_effect = Exception("test")
exploitation_success, propagation_success = exploit_host()
assert exploitation_success
assert not propagation_success
|
2,707 |
test get shipping methods for checkout use
|
from datetime import timedelta
from decimal import Decimal
from unittest import mock
from django.utils import timezone
from ....webhook.event_types import WebhookEventSyncType
from ....webhook.payloads import generate_checkout_payload
from ..plugin import CACHE_TIME_SHIPPING_LIST_METHODS_FOR_CHECKOUT
from ..shipping import get_cache_data_for_shipping_list_methods_for_checkout
from ..utils import generate_cache_key_for_webhook
@mock.patch("saleor.plugins.webhook.tasks.cache.set")
@mock.patch("saleor.plugins.webhook.tasks.send_webhook_request_sync")
def test_get_shipping_methods_for_checkout_set_cache(
mocked_webhook,
mocked_cache_set,
webhook_plugin,
checkout_with_item,
shipping_app,
):
# given
mocked_webhook.return_value = [
{
"id": "method-1",
"name": "Standard Shipping",
"amount": Decimal("5.5"),
"currency": "GBP",
}
]
plugin = webhook_plugin()
# when
plugin.get_shipping_methods_for_checkout(checkout_with_item, None)
# then
assert mocked_webhook.called
assert mocked_cache_set.called
@mock.patch("saleor.plugins.webhook.tasks.cache.set")
@mock.patch("saleor.plugins.webhook.tasks.send_webhook_request_sync")
def test_get_shipping_methods_no_webhook_response_does_not_set_cache(
mocked_webhook,
mocked_cache_set,
webhook_plugin,
checkout_with_item,
shipping_app,
):
# given
mocked_webhook.return_value = None
plugin = webhook_plugin()
# when
plugin.get_shipping_methods_for_checkout(checkout_with_item, None)
# then
assert mocked_webhook.called
assert not mocked_cache_set.called
@mock.patch("saleor.plugins.webhook.tasks.cache.get")
@mock.patch("saleor.plugins.webhook.tasks.send_webhook_request_sync")
def test_get_shipping_methods_for_checkout_use_cache(
mocked_webhook,
mocked_cache_get,
webhook_plugin,
checkout_with_item,
shipping_app,
):
# given
mocked_cache_get.return_value = [
{
"id": "method-1",
"name": "Standard Shipping",
"amount": Decimal("5.5"),
"currency": "GBP",
}
]
plugin = webhook_plugin()
# when
plugin.get_shipping_methods_for_checkout(checkout_with_item, None)
# then
assert not mocked_webhook.called
assert mocked_cache_get.called
@mock.patch("saleor.plugins.webhook.tasks.cache.get")
@mock.patch("saleor.plugins.webhook.tasks.send_webhook_request_sync")
def METHOD_NAME(
mocked_webhook,
mocked_cache_get,
webhook_plugin,
checkout_with_item,
shipping_app,
):
# given
mocked_cache_get.return_value = []
plugin = webhook_plugin()
# when
plugin.get_shipping_methods_for_checkout(checkout_with_item, None)
# then
assert not mocked_webhook.called
assert mocked_cache_get.called
@mock.patch("saleor.plugins.webhook.tasks.cache.set")
@mock.patch("saleor.plugins.webhook.tasks.cache.get")
@mock.patch("saleor.plugins.webhook.tasks.send_webhook_request_sync")
def test_checkout_change_invalidates_cache_key(
mocked_webhook,
mocked_cache_get,
mocked_cache_set,
webhook_plugin,
checkout_with_item,
shipping_app,
):
# given
mocked_webhook_response = [
{
"id": "method-1",
"name": "Standard Shipping",
"amount": Decimal("5.5"),
"currency": "GBP",
}
]
mocked_webhook.return_value = mocked_webhook_response
mocked_cache_get.return_value = None
payload = generate_checkout_payload(checkout_with_item)
key_data = get_cache_data_for_shipping_list_methods_for_checkout(payload)
target_url = shipping_app.webhooks.first().target_url
cache_key = generate_cache_key_for_webhook(
key_data,
target_url,
WebhookEventSyncType.SHIPPING_LIST_METHODS_FOR_CHECKOUT,
shipping_app.id,
)
plugin = webhook_plugin()
# when
checkout_with_item.email = "[email protected]"
checkout_with_item.save(update_fields=["email"])
new_payload = generate_checkout_payload(checkout_with_item)
new_key_data = get_cache_data_for_shipping_list_methods_for_checkout(new_payload)
new_cache_key = generate_cache_key_for_webhook(
new_key_data,
target_url,
WebhookEventSyncType.SHIPPING_LIST_METHODS_FOR_CHECKOUT,
shipping_app.id,
)
plugin.get_shipping_methods_for_checkout(checkout_with_item, None)
# then
assert cache_key != new_cache_key
mocked_cache_get.assert_called_once_with(new_cache_key)
mocked_cache_set.assert_called_once_with(
new_cache_key,
mocked_webhook_response,
timeout=CACHE_TIME_SHIPPING_LIST_METHODS_FOR_CHECKOUT,
)
@mock.patch("saleor.plugins.webhook.tasks.cache.set")
@mock.patch("saleor.plugins.webhook.tasks.cache.get")
@mock.patch("saleor.plugins.webhook.tasks.send_webhook_request_sync")
def test_ignore_selected_fields_on_generating_cache_key(
mocked_webhook,
mocked_cache_get,
mocked_cache_set,
webhook_plugin,
checkout_with_item,
shipping_app,
):
# given
mocked_webhook_response = [
{
"id": "method-1",
"name": "Standard Shipping",
"amount": Decimal("5.5"),
"currency": "GBP",
}
]
mocked_webhook.return_value = mocked_webhook_response
mocked_cache_get.return_value = None
payload = generate_checkout_payload(checkout_with_item)
key_data = get_cache_data_for_shipping_list_methods_for_checkout(payload)
target_url = shipping_app.webhooks.first().target_url
cache_key = generate_cache_key_for_webhook(
key_data,
target_url,
WebhookEventSyncType.SHIPPING_LIST_METHODS_FOR_CHECKOUT,
shipping_app.id,
)
plugin = webhook_plugin()
# when
checkout_with_item.last_change = timezone.now() + timedelta(seconds=30)
checkout_with_item.save(update_fields=["last_change"])
new_payload = generate_checkout_payload(checkout_with_item)
new_key_data = get_cache_data_for_shipping_list_methods_for_checkout(new_payload)
new_cache_key = generate_cache_key_for_webhook(
new_key_data,
target_url,
WebhookEventSyncType.SHIPPING_LIST_METHODS_FOR_CHECKOUT,
shipping_app.id,
)
plugin.get_shipping_methods_for_checkout(checkout_with_item, None)
# then
assert cache_key == new_cache_key
mocked_cache_get.assert_called_once_with(new_cache_key)
mocked_cache_set.assert_called_once_with(
new_cache_key,
mocked_webhook_response,
timeout=CACHE_TIME_SHIPPING_LIST_METHODS_FOR_CHECKOUT,
)
|
2,708 |
dependencies
|
import os
import re
import subprocess
import sys
from contextlib import contextmanager
FORBIDDEN_CODECOV_FLAG_CHARS = re.compile(r'[^\w\.\-]')
class GoModule:
"""
A Go module abstraction.
    independent specifies whether this module is supposed to exist independently of the datadog-agent module.
If True, a check will run to ensure this is true.
"""
def __init__(
self,
path,
targets=None,
condition=lambda: True,
should_tag=True,
importable=True,
independent=False,
lint_targets=None,
):
self.path = path
self.targets = targets if targets else ["."]
self.lint_targets = lint_targets if lint_targets else self.targets
self.condition = condition
self.should_tag = should_tag
# HACK: Workaround for modules that can be tested, but not imported (eg. gohai), because
# they define a main package
# A better solution would be to automatically detect if a module contains a main package,
# at the cost of spending some time parsing the module.
self.importable = importable
self.independent = independent
self._dependencies = None
def __version(self, agent_version):
"""Return the module version for a given Agent version.
>>> mods = [GoModule("."), GoModule("pkg/util/log")]
>>> [mod.__version("7.27.0") for mod in mods]
["v7.27.0", "v0.27.0"]
"""
if self.path == ".":
return "v" + agent_version
return "v0" + agent_version[1:]
def __compute_dependencies(self):
"""
Computes the list of github.com/DataDog/datadog-agent/ dependencies of the module.
"""
prefix = "github.com/DataDog/datadog-agent/"
base_path = os.getcwd()
mod_parser_path = os.path.join(base_path, "internal", "tools", "modparser")
if not os.path.isdir(mod_parser_path):
raise Exception(f"Cannot find go.mod parser in {mod_parser_path}")
try:
output = subprocess.check_output(
["go", "run", ".", "-path", os.path.join(base_path, self.path), "-prefix", prefix],
cwd=mod_parser_path,
).decode("utf-8")
except subprocess.CalledProcessError as e:
print(f"Error while calling go.mod parser: {e.output}")
raise e
# Remove github.com/DataDog/datadog-agent/ from each line
return [line[len(prefix) :] for line in output.strip().splitlines()]
# FIXME: Change when Agent 6 and Agent 7 releases are decoupled
def tag(self, agent_version):
"""Return the module tag name for a given Agent version.
>>> mods = [GoModule("."), GoModule("pkg/util/log")]
>>> [mod.tag("7.27.0") for mod in mods]
[["6.27.0", "7.27.0"], ["pkg/util/log/v0.27.0"]]
"""
if self.path == ".":
return ["6" + agent_version[1:], "7" + agent_version[1:]]
return [f"{self.path}/{self.__version(agent_version)}"]
def codecov_path(self):
"""Return the path of the Go module, normalized to satisfy Codecov
restrictions on flags.
https://docs.codecov.com/docs/flags
"""
if self.path == ".":
return "main"
return re.sub(FORBIDDEN_CODECOV_FLAG_CHARS, '_', self.path)
def full_path(self):
"""Return the absolute path of the Go module."""
return os.path.abspath(self.path)
def go_mod_path(self):
"""Return the absolute path of the Go module go.mod file."""
return self.full_path() + "/go.mod"
@property
def METHOD_NAME(self):
if not self._dependencies:
self._dependencies = self.__compute_dependencies()
return self._dependencies
@property
def import_path(self):
"""Return the Go import path of the Go module
>>> mods = [GoModule("."), GoModule("pkg/util/log")]
>>> [mod.import_path for mod in mods]
["github.com/DataDog/datadog-agent", "github.com/DataDog/datadog-agent/pkg/util/log"]
"""
path = "github.com/DataDog/datadog-agent"
if self.path != ".":
path += "/" + self.path
return path
def dependency_path(self, agent_version):
"""Return the versioned dependency path of the Go module
>>> mods = [GoModule("."), GoModule("pkg/util/log")]
>>> [mod.dependency_path("7.27.0") for mod in mods]
["github.com/DataDog/[email protected]", "github.com/DataDog/datadog-agent/pkg/util/[email protected]"]
"""
return f"{self.import_path}@{self.__version(agent_version)}"
DEFAULT_MODULES = {
".": GoModule(
".",
targets=["./pkg", "./cmd", "./comp"],
),
"internal/tools": GoModule("internal/tools", condition=lambda: False, should_tag=False),
"internal/tools/proto": GoModule("internal/tools/proto", condition=lambda: False, should_tag=False),
"internal/tools/modparser": GoModule("internal/tools/modparser", condition=lambda: False, should_tag=False),
"test/e2e/containers/otlp_sender": GoModule(
"test/e2e/containers/otlp_sender", condition=lambda: False, should_tag=False
),
"test/new-e2e": GoModule(
"test/new-e2e",
independent=True,
targets=["./pkg/runner", "./pkg/utils/e2e/client"],
lint_targets=["."],
),
"test/fakeintake": GoModule("test/fakeintake", independent=True),
"pkg/obfuscate": GoModule("pkg/obfuscate", independent=True),
"pkg/gohai": GoModule("pkg/gohai", independent=True, importable=False),
"pkg/proto": GoModule("pkg/proto", independent=True),
"pkg/trace": GoModule("pkg/trace", independent=True),
"pkg/security/secl": GoModule("pkg/security/secl", independent=True),
"pkg/remoteconfig/state": GoModule("pkg/remoteconfig/state", independent=True),
"pkg/util/cgroups": GoModule("pkg/util/cgroups", independent=True, condition=lambda: sys.platform == "linux"),
"pkg/util/log": GoModule("pkg/util/log", independent=True),
"pkg/util/pointer": GoModule("pkg/util/pointer", independent=True),
"pkg/util/scrubber": GoModule("pkg/util/scrubber", independent=True),
}
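# Template for a throwaway main package that blank-imports every importable
# module; used by generate_dummy_package below to exercise module resolution.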
MAIN_TEMPLATE = """package main
import (
{imports}
)
func main() {{}}
"""
PACKAGE_TEMPLATE = ' _ "{}"'
@contextmanager
def generate_dummy_package(ctx, folder):
"""
Return a generator-iterator when called.
    Allows us to wrap this function with a "with" statement to delete the created dummy package afterwards.
"""
try:
import_paths = []
for mod in DEFAULT_MODULES.values():
if mod.path != "." and mod.condition() and mod.importable:
import_paths.append(mod.import_path)
os.mkdir(folder)
with ctx.cd(folder):
print("Creating dummy 'main.go' file... ", end="")
with open(os.path.join(ctx.cwd, 'main.go'), 'w') as main_file:
main_file.write(
MAIN_TEMPLATE.format(imports="\n".join(PACKAGE_TEMPLATE.format(path) for path in import_paths))
)
print("Done")
ctx.run("go mod init example.com/testmodule")
for mod in DEFAULT_MODULES.values():
if mod.path != ".":
ctx.run(f"go mod edit -require={mod.dependency_path('0.0.0')}")
ctx.run(f"go mod edit -replace {mod.import_path}=../{mod.path}")
# yield folder waiting for a "with" block to be executed (https://docs.python.org/3/library/contextlib.html)
yield folder
# the generator is then resumed here after the "with" block is exited
finally:
# delete test_folder to avoid FileExistsError while running this task again
ctx.run(f"rm -rf ./{folder}")
|
2,709 |
forward
|
import torch
import torch.nn as nn
import torchvision.models as models
class PerceptualLoss(nn.Module):
r"""
Perceptual loss, VGG-based
https://arxiv.org/abs/1603.08155
https://github.com/dxyang/StyleTransfer/blob/master/utils.py
"""
def __init__(self, weights=[1.0, 1.0, 1.0, 1.0, 1.0]):
super(PerceptualLoss, self).__init__()
self.add_module('vgg', VGG19())
self.criterion = torch.nn.L1Loss()
self.weights = weights
def __call__(self, x, y):
# Compute features
x_vgg, y_vgg = self.vgg(x), self.vgg(y)
content_loss = 0.0
content_loss += self.weights[0] * self.criterion(x_vgg['relu1_1'], y_vgg['relu1_1'])
content_loss += self.weights[1] * self.criterion(x_vgg['relu2_1'], y_vgg['relu2_1'])
content_loss += self.weights[2] * self.criterion(x_vgg['relu3_1'], y_vgg['relu3_1'])
content_loss += self.weights[3] * self.criterion(x_vgg['relu4_1'], y_vgg['relu4_1'])
content_loss += self.weights[4] * self.criterion(x_vgg['relu5_1'], y_vgg['relu5_1'])
return content_loss
class VGG19(torch.nn.Module):
def __init__(self):
super(VGG19, self).__init__()
features = models.vgg19(pretrained=True).features
self.relu1_1 = torch.nn.Sequential()
self.relu1_2 = torch.nn.Sequential()
self.relu2_1 = torch.nn.Sequential()
self.relu2_2 = torch.nn.Sequential()
self.relu3_1 = torch.nn.Sequential()
self.relu3_2 = torch.nn.Sequential()
self.relu3_3 = torch.nn.Sequential()
self.relu3_4 = torch.nn.Sequential()
self.relu4_1 = torch.nn.Sequential()
self.relu4_2 = torch.nn.Sequential()
self.relu4_3 = torch.nn.Sequential()
self.relu4_4 = torch.nn.Sequential()
self.relu5_1 = torch.nn.Sequential()
self.relu5_2 = torch.nn.Sequential()
self.relu5_3 = torch.nn.Sequential()
self.relu5_4 = torch.nn.Sequential()
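        # Slice the pretrained vgg19.features stack into named stages: each loop
        # below copies the layers (conv / pooling / activation) up to and including
        # the corresponding ReLU into its own Sequential.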
for x in range(2):
self.relu1_1.add_module(str(x), features[x])
for x in range(2, 4):
self.relu1_2.add_module(str(x), features[x])
for x in range(4, 7):
self.relu2_1.add_module(str(x), features[x])
for x in range(7, 9):
self.relu2_2.add_module(str(x), features[x])
for x in range(9, 12):
self.relu3_1.add_module(str(x), features[x])
for x in range(12, 14):
self.relu3_2.add_module(str(x), features[x])
        for x in range(14, 16):
            self.relu3_3.add_module(str(x), features[x])
for x in range(16, 18):
self.relu3_4.add_module(str(x), features[x])
for x in range(18, 21):
self.relu4_1.add_module(str(x), features[x])
for x in range(21, 23):
self.relu4_2.add_module(str(x), features[x])
for x in range(23, 25):
self.relu4_3.add_module(str(x), features[x])
for x in range(25, 27):
self.relu4_4.add_module(str(x), features[x])
for x in range(27, 30):
self.relu5_1.add_module(str(x), features[x])
for x in range(30, 32):
self.relu5_2.add_module(str(x), features[x])
for x in range(32, 34):
self.relu5_3.add_module(str(x), features[x])
for x in range(34, 36):
self.relu5_4.add_module(str(x), features[x])
# don't need the gradients, just want the features
for param in self.parameters():
param.requires_grad = False
def METHOD_NAME(self, x):
relu1_1 = self.relu1_1(x)
relu1_2 = self.relu1_2(relu1_1)
relu2_1 = self.relu2_1(relu1_2)
relu2_2 = self.relu2_2(relu2_1)
relu3_1 = self.relu3_1(relu2_2)
relu3_2 = self.relu3_2(relu3_1)
relu3_3 = self.relu3_3(relu3_2)
relu3_4 = self.relu3_4(relu3_3)
relu4_1 = self.relu4_1(relu3_4)
relu4_2 = self.relu4_2(relu4_1)
relu4_3 = self.relu4_3(relu4_2)
relu4_4 = self.relu4_4(relu4_3)
relu5_1 = self.relu5_1(relu4_4)
relu5_2 = self.relu5_2(relu5_1)
relu5_3 = self.relu5_3(relu5_2)
relu5_4 = self.relu5_4(relu5_3)
out = {
'relu1_1': relu1_1,
'relu1_2': relu1_2,
'relu2_1': relu2_1,
'relu2_2': relu2_2,
'relu3_1': relu3_1,
'relu3_2': relu3_2,
'relu3_3': relu3_3,
'relu3_4': relu3_4,
'relu4_1': relu4_1,
'relu4_2': relu4_2,
'relu4_3': relu4_3,
'relu4_4': relu4_4,
'relu5_1': relu5_1,
'relu5_2': relu5_2,
'relu5_3': relu5_3,
'relu5_4': relu5_4,
}
return out
|
2,710 |
test add data docs site already existing
|
import copy
from unittest import mock
import pytest
import great_expectations.exceptions as gx_exceptions
from great_expectations.data_context import EphemeralDataContext
@pytest.fixture
def new_site_config() -> dict:
return {
"class_name": "SiteBuilder",
"module_name": "great_expectations.render.renderer.site_builder",
"store_backend": {
"module_name": "great_expectations.data_context.store.tuple_store_backend",
"class_name": "TupleFilesystemStoreBackend",
"base_directory": "/my_new_site/",
},
"site_index_builder": {"class_name": "DefaultSiteIndexBuilder"},
}
class TestAddDataDocsSite:
@pytest.mark.unit
def test_add_data_docs_site(
self,
ephemeral_context_with_defaults: EphemeralDataContext,
new_site_config: dict,
):
# Add a new site
new_site_name = "my_new_site"
ephemeral_context_with_defaults.add_data_docs_site(
site_name=new_site_name, site_config=new_site_config
)
# Check that the new site is present
assert new_site_name in ephemeral_context_with_defaults.get_site_names()
@pytest.mark.unit
def test_add_data_docs_site_persists(
self,
ephemeral_context_with_defaults: EphemeralDataContext,
new_site_config: dict,
):
new_site_name = "my_new_site"
with mock.patch(
"great_expectations.data_context.EphemeralDataContext._save_project_config"
) as mock_save_project_config:
ephemeral_context_with_defaults.add_data_docs_site(
site_name=new_site_name, site_config=new_site_config
)
mock_save_project_config.assert_called_once()
@pytest.mark.unit
def METHOD_NAME(
self,
ephemeral_context_with_defaults: EphemeralDataContext,
new_site_config: dict,
):
# Check fixture configuration
existing_site_name = "local_site"
assert existing_site_name in ephemeral_context_with_defaults.get_site_names()
with pytest.raises(gx_exceptions.InvalidKeyError) as e:
new_site_name = existing_site_name
ephemeral_context_with_defaults.add_data_docs_site(
site_name=new_site_name, site_config=new_site_config
)
assert "Data Docs Site `local_site` already exists in the Data Context." in str(
e.value
)
class TestListDataDocsSites:
@pytest.mark.unit
def test_list_data_docs_sites(
self, ephemeral_context_with_defaults: EphemeralDataContext
):
site_names = [
d for d in ephemeral_context_with_defaults.list_data_docs_sites().keys()
]
assert site_names == ["local_site"]
class TestUpdateDataDocsSite:
@pytest.mark.unit
def test_update_data_docs_site(
self,
ephemeral_context_with_defaults: EphemeralDataContext,
new_site_config: dict,
):
# Add a new site
new_site_name = "my_new_site"
ephemeral_context_with_defaults.add_data_docs_site(
site_name=new_site_name, site_config=new_site_config
)
# Update the new site
updated_site_config = copy.deepcopy(new_site_config)
updated_site_config["store_backend"]["base_directory"] = "/my_updated_site/"
ephemeral_context_with_defaults.update_data_docs_site(
new_site_name, updated_site_config
)
# Check the updated site config
sites = ephemeral_context_with_defaults.variables.data_docs_sites
assert (
sites[new_site_name]["store_backend"]["base_directory"]
== "/my_updated_site/"
)
@pytest.mark.unit
def test_update_data_docs_site_persists(
self,
ephemeral_context_with_defaults: EphemeralDataContext,
new_site_config: dict,
):
# Add a new site
new_site_name = "my_new_site"
ephemeral_context_with_defaults.add_data_docs_site(
site_name=new_site_name, site_config=new_site_config
)
# Update the new site
updated_site_config = copy.deepcopy(new_site_config)
updated_site_config["store_backend"]["base_directory"] = "/my_updated_site/"
with mock.patch(
"great_expectations.data_context.EphemeralDataContext._save_project_config"
) as mock_save_project_config:
ephemeral_context_with_defaults.update_data_docs_site(
new_site_name, updated_site_config
)
mock_save_project_config.assert_called_once()
@pytest.mark.unit
def test_update_data_docs_site_missing_site_raises_exception(
self,
ephemeral_context_with_defaults: EphemeralDataContext,
new_site_config: dict,
):
# Check fixture configuration
assert "missing" not in ephemeral_context_with_defaults.get_site_names()
with pytest.raises(gx_exceptions.InvalidKeyError) as e:
ephemeral_context_with_defaults.update_data_docs_site(
site_name="missing", site_config=new_site_config
)
assert (
"Data Docs Site `missing` does not already exist in the Data Context."
in str(e.value)
)
class TestDeleteDataDocsSite:
@pytest.mark.unit
def test_delete_data_docs_site(
self, ephemeral_context_with_defaults: EphemeralDataContext
):
# Check fixture configuration
existing_site_name = "local_site"
assert existing_site_name in ephemeral_context_with_defaults.get_site_names()
ephemeral_context_with_defaults.delete_data_docs_site(existing_site_name)
# Check that the site is no longer present
assert (
existing_site_name not in ephemeral_context_with_defaults.get_site_names()
)
@pytest.mark.unit
def test_delete_data_docs_site_persists(
self, ephemeral_context_with_defaults: EphemeralDataContext
):
# Check fixture configuration
existing_site_name = "local_site"
assert existing_site_name in ephemeral_context_with_defaults.get_site_names()
with mock.patch(
"great_expectations.data_context.EphemeralDataContext._save_project_config"
) as mock_save_project_config:
ephemeral_context_with_defaults.delete_data_docs_site(existing_site_name)
mock_save_project_config.assert_called_once()
@pytest.mark.unit
def test_delete_data_docs_site_missing_site_raises_exception(
self,
ephemeral_context_with_defaults: EphemeralDataContext,
):
# Check fixture configuration
assert "missing" not in ephemeral_context_with_defaults.get_site_names()
with pytest.raises(gx_exceptions.InvalidKeyError) as e:
ephemeral_context_with_defaults.delete_data_docs_site("missing")
assert (
"Data Docs Site `missing` does not already exist in the Data Context."
in str(e.value)
)
|
2,711 |
set plot options
|
import sys
import inspect
import base64
if (sys.version_info > (3, 0)):
import io
else:
import cStringIO as io
from viewerstate import *
from visitstate import RPCType as RPC
class ViewerMethods:
def __init__(self,_state):
self.state = _state
def sync(self):
gmap = sys.modules["__main__"].__dict__
gmap["InvertBackgroundColor"] = self.InvertBackgroundColor
gmap["AddWindow"] = self.AddWindow
gmap["DrawPlots"] = self.DrawPlots
gmap["OpenDatabase"] = self.OpenDatabase
gmap["AddPlot"] = self.AddPlot
gmap["AddOperator"] = self.AddOperator
gmap["SetOperatorOptions"] = self.SetOperatorOptions
gmap["SetPlotOptions"] = self.METHOD_NAME
gmap["DeleteActivePlots"] = self.DeleteActivePlots
gmap["SaveImageWindow"] = self.SaveImageWindow
gmap["ShowImageWindow"] = self.ShowImageWindow
gmap["SaveWindow"] = self.SaveWindow
def find(self,val,lst):
index = -1
for (i,n) in enumerate(lst):
if n == val:
index = i
break
return index
def getKey(self,index,key):
return int(self.state.api(index)[key])
def getContents(self,index,key):
return self.state.data(index)[self.getKey(index,key)]
def setContents(self,index,key,value):
self.state.data(index)[self.getKey(index,key)] = value
def InvertBackgroundColor(self):
self.setContents(0,"RPCType",RPC.InvertBackgroundRPC)
self.state.notify(0)
def AddWindow(self):
self.setContents(0,"RPCType",RPC.AddWindowRPC)
self.state.notify(0)
def DrawPlots(self):
#self.state.data(0)["RPCType"] = RPC.DrawPlotsRPC
self.setContents(0,"RPCType",RPC.DrawPlotsRPC)
self.state.notify(0)
def OpenDatabase(self, database, timeState = 0, addDefaultPlots = True, forcedFileType = ""):
self.setContents(0,"RPCType",RPC.OpenDatabaseRPC)
self.setContents(0,"database",database)
self.setContents(0,"intArg1",timeState)
self.setContents(0,"boolFlag",addDefaultPlots)
self.setContents(0,"stringArg1",forcedFileType)
self.state.notify(0)
def GetEnabledID(self,plot_type, name):
names = self.getContents(14,"name")
types = self.getContents(14,"type")
enabled = self.getContents(14,"enabled")
mapper = []
for i in range(len(names)):
if enabled[i] == True and plot_type == types[i]:
mapper.append(names[i])
mapper.sort()
for i in range(len(mapper)):
if name == mapper[i]:
return i
return -1
def AddPlotByID(self, plot_type, plot_var):
self.setContents(0,"RPCType",RPC.AddPlotRPC)
self.setContents(0,"plotType",plot_type)
self.setContents(0,"variable",plot_var)
self.state.notify(0)
def AddPlot(self,name, plot_var):
index = self.GetEnabledID("plot",name)
if index >= 0:
self.AddPlotByID(index,plot_var)
def AddOperatorByID(self, op_type):
self.setContents(0,"RPCType",RPC.AddOperatorRPC)
self.setContents(0,"operatorType",op_type)
self.setContents(0,"boolFlag",False)
self.state.notify(0)
def AddOperator(self,name):
index = self.GetEnabledID("operator",name)
if index >= 0:
self.AddOperatorByID(index)
def SetActivePlots(self,activePlots):
self.setContents(0,"RPCType",RPC.SetActivePlotsRPC)
self.setContents(0,"activePlotIds",activePlots)
self.state.notify(0)
def DeleteActivePlots(self):
self.setContents(0,"RPCType",RPC.DeleteActivePlotsRPC)
self.state.notify(0)
def HideActivePlots(self):
self.setContents(0,"RPCType",RPC.HideActivePlotsRPC)
self.state.notify(0)
def SetPlotOptionsByID(self,index):
self.setContents(0,"RPCType",RPC.SetPlotOptionsRPC)
self.setContents(0,"plotType",index)
self.state.notify(0)
def METHOD_NAME(self,plot):
plotname = plot.__class__.__name__
name = plotname.replace("Attributes","")
index = self.GetEnabledID("plot",name)
if index == -1 : return
plotdict = self.props(plot)
for i in range(len(self.state.states)):
if self.state.states[i].typename == plotname:
#print name, index, i, plotdict
self.state.data(i).update(plotdict)
self.state.notify(i)
self.SetPlotOptionsByID(index)
break
def SetOperatorOptionsByID(self,index):
self.setContents(0,"RPCType",RPC.SetOperatorOptionsRPC)
self.setContents(0,"operatorType",index)
self.state.notify(0)
def SetOperatorOptions(self,operator):
opname = operator.__class__.__name__
name = opname.replace("Attributes","")
index = self.GetEnabledID("operator",name)
if index == -1: return
opdict = self.props(operator)
for i in range(len(self.state.states)):
if self.state.states[i].typename == opname:
#print name,index,i,opdict
self.state.data(i).update(opdict)
self.state.notify(i)
self.SetOperatorOptionsByID(index)
break
def props(self,obj):
pr = {}
for name in dir(obj):
value = getattr(obj, name)
if not name.startswith('__') and not inspect.ismethod(value):
pr[str(obj.__data__[name])] = value
return pr
def ShowImageWindow(self):
try:
import PIL
import PIL.Image
queryAttr = self.state.data(38)["12"]
for attr in queryAttr:
data = io.StringIO(base64.b64decode(attr))
im = PIL.Image.open(data)
im.show()
except:
print("Showing Window Failed...")
def SaveImageWindow(self,filename):
try:
import PIL
import PIL.Image
queryAttr = self.state.data(38)["12"]
for (i,attr) in enumerate(queryAttr):
data = io.StringIO(base64.b64decode(attr))
im = PIL.Image.open(data)
im.save(filename + "_" + str(i) + ".jpg")
except:
print("Saving Window Failed...")
def decode_base64(self,data):
"""Decode base64, padding being optional.
:param data: Base64 data as an ASCII byte string
:returns: The decoded byte string.
"""
missing_padding = -len(data) % 4
if missing_padding:
data += b"=" * missing_padding
return base64.decodestring(data)
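# Illustration (added comment, values assumed): decode_base64(b"YWJjZA") pads the
# input to b"YWJjZA==" before decoding and returns b"abcd".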
def SaveWindow(self,filename):
try:
queryAttr = self.state.data(38)["12"]
for (i,attr) in enumerate(queryAttr):
data = base64.b64decode(attr)
f = open(filename + "_" + str(i) + ".vtk","w")
f.write(data)
f.close()
except:
print("Saving VTK File Failed...")
|
2,712 |
decode audio
|
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Text/audio processor class for MusicGen
"""
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
r"""
Constructs a MusicGen processor which wraps an EnCodec feature extractor and a T5 tokenizer into a single processor
class.
[`MusicgenProcessor`] offers all the functionalities of [`EncodecFeatureExtractor`] and [`TTokenizer`]. See
[`~MusicgenProcessor.__call__`] and [`~MusicgenProcessor.decode`] for more information.
Args:
feature_extractor (`EncodecFeatureExtractor`):
An instance of [`EncodecFeatureExtractor`]. The feature extractor is a required input.
tokenizer (`T5Tokenizer`):
An instance of [`T5Tokenizer`]. The tokenizer is a required input.
"""
feature_extractor_class = "EncodecFeatureExtractor"
tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")
def __init__(self, feature_extractor, tokenizer):
super().__init__(feature_extractor, tokenizer)
self.current_processor = self.feature_extractor
self._in_target_context_manager = False
def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)
def __call__(self, *args, **kwargs):
"""
Forwards the `audio` argument to EncodecFeatureExtractor's [`~EncodecFeatureExtractor.__call__`] and the `text`
argument to [`~T5Tokenizer.__call__`]. Please refer to the docstring of the above two methods for more
information.
"""
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*args, **kwargs)
audio = kwargs.pop("audio", None)
sampling_rate = kwargs.pop("sampling_rate", None)
text = kwargs.pop("text", None)
if len(args) > 0:
audio = args[0]
args = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process.")
if text is not None:
inputs = self.tokenizer(text, **kwargs)
if audio is not None:
audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
inputs["input_values"] = audio_inputs["input_values"]
if "padding_mask" in audio_inputs:
inputs["padding_mask"] = audio_inputs["padding_mask"]
return inputs
def batch_decode(self, *args, **kwargs):
"""
This method is used to decode either batches of audio outputs from the MusicGen model, or batches of token ids
from the tokenizer. In the case of decoding token ids, this method forwards all its arguments to T5Tokenizer's
[`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information.
"""
audio_values = kwargs.pop("audio", None)
padding_mask = kwargs.pop("padding_mask", None)
if len(args) > 0:
audio_values = args[0]
args = args[1:]
if audio_values is not None:
return self.METHOD_NAME(audio_values, padding_mask=padding_mask)
else:
return self.tokenizer.batch_decode(*args, **kwargs)
def decode(self, *args, **kwargs):
"""
This method forwards all its arguments to T5Tokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to the
docstring of this method for more information.
"""
return self.tokenizer.decode(*args, **kwargs)
def METHOD_NAME(self, audio_values, padding_mask: Optional = None) -> List[np.ndarray]:
"""
This method strips any padding from the audio values to return a list of numpy audio arrays.
"""
audio_values = to_numpy(audio_values)
bsz, channels, seq_len = audio_values.shape
if padding_mask is None:
return list(audio_values)
padding_mask = to_numpy(padding_mask)
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
difference = seq_len - padding_mask.shape[-1]
padding_value = 1 - self.feature_extractor.padding_value
padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)
audio_values = audio_values.tolist()
for i in range(bsz):
sliced_audio = np.asarray(audio_values[i])[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
audio_values[i] = sliced_audio.reshape(channels, -1)
return audio_values
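# A minimal sketch (not part of the original class) of what the padding-mask stripping
# above does on toy data; the array shapes and values below are illustrative only:
#
#   import numpy as np
#   audio = np.arange(12, dtype=float).reshape(1, 2, 6)  # (bsz, channels, seq_len)
#   mask = np.array([[1, 1, 1, 1, 0, 0]])                # 0 marks padding positions here
#   kept = audio[0][:, mask[0] != 0]                      # keeps the first 4 samples per channel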
|
2,713 |
run enabled
|
# -*- coding: utf-8 -*-
import re
from thonny import get_runner, get_workbench, ui_utils
from thonny.codeview import CodeViewText
cell_regex = re.compile(r"(^|\n)(# ?%%|##|# In\[\d+\]:)[^\n]*", re.MULTILINE) # @UndefinedVariable
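# For reference (examples added here, not part of the original plugin), the pattern
# above recognises cell headers such as:
#   # %% a new cell
#   ## another cell
#   # In[3]: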
def update_editor_cells(event):
text = event.widget
if not getattr(text, "cell_tags_configured", False):
text.tag_configure("CURRENT_CELL", borderwidth=1, relief="groove", background="LightYellow")
text.tag_configure("CELL_HEADER", font="BoldEditorFont", foreground="#665843")
text.tag_lower("CELL_HEADER")
text.tag_lower("CURRENT_CELL")
text.cell_tags_configured = True
text.tag_remove("CURRENT_CELL", "0.1", "end")
text.tag_remove("CELL_HEADER", "0.1", "end")
source = text.get("1.0", "end")
cells = []
prev_marker = 0
for match in cell_regex.finditer(source):
if match.start() == 0:
this_marker = match.start()
else:
this_marker = match.start() + 1
cell_start_index = text.index("1.0+%dc" % prev_marker)
header_end_index = text.index("1.0+%dc" % match.end())
cell_end_index = text.index("1.0+%dc" % this_marker)
text.tag_add("CELL_HEADER", cell_end_index, header_end_index)
cells.append((cell_start_index, cell_end_index))
prev_marker = this_marker
if prev_marker != 0:
cells.append((text.index("1.0+%dc" % prev_marker), "end"))
# if get_workbench().focus_get() == text:
# It's nice to have the cell highlighted even when focus is
# elsewhere; this would act as a kind of bookmark.
for start_index, end_index in cells:
if text.compare(start_index, "<=", "insert") and text.compare(end_index, ">", "insert"):
text.tag_add("CURRENT_CELL", start_index, end_index)
break
def _submit_code(code):
lines = code.splitlines()
# remove starting comments
while len(lines) > 0 and lines[0].strip().startswith("#"):
lines = lines[1:]
# remove starting empty lines
while len(lines) > 0 and lines[0].strip() == "":
lines = lines[1:]
# remove trailing empty lines
while len(lines) > 0 and lines[-1].strip() == "":
lines = lines[:-1]
if len(lines) > 0:
code = "\n".join(lines) + "\n"
# if the code is a function definition (the last line starts with whitespace),
# end the definition with a second newline
if re.match(r"^[ \t].*", lines[-1]) is not None:
code += "\n"
shell = get_workbench().show_view("ShellView", False)
shell.submit_python_code(code)
def _patch_perform_return():
original_perform_return = CodeViewText.perform_return
def _patched_perform_return(self, event):
text = event.widget
ranges = text.tag_ranges("CURRENT_CELL")
if len(ranges) == 2 and (
ui_utils.shift_is_pressed(event) or ui_utils.control_is_pressed(event)
):
if METHOD_NAME():
code = text.get(ranges[0], ranges[1]).strip()
_submit_code(code)
if ui_utils.shift_is_pressed(event):
# advance to next cell
text.mark_set("insert", ranges[1])
return "break"
else:
return original_perform_return(self, event)
CodeViewText.perform_return = _patched_perform_return
def _patch_intercept_mark():
"""Need to make cursor wider when in first column. Otherwise
the border of the cell box makes it hard to notice the cursor.
NB! Need to be careful with setting text["insertwidth"]!
My first straightforward solution caused unexplainable
infinite loop of insertions and deletions in the text
(insert a line and a word, select that word and then do Ctrl-Z).
Looks like this solution is safe, but I don't dare to include
it in the main code.
UPDATE: not safe. Select and delete a block of lines. Write a new
line and do Ctrl-Z"""
original_intercept_mark = CodeViewText.intercept_mark
def _patched_intercept_mark(self, *args):
if args[:2] == ("set", "insert") and args[2].endswith(".0"):
self.set_insertwidth(3)
else:
self.set_insertwidth(2)
original_intercept_mark(self, *args)
CodeViewText.intercept_mark = _patched_intercept_mark
def dummy(event=None):
"This is dummy method"
def run_selection(event=None):
widget = get_workbench().focus_get()
if isinstance(widget, CodeViewText):
text = widget
if text.has_selection():
code = text.get("sel.first", "sel.last")
else:
code = text.get("insert linestart", "insert lineend")
# move cursor to next row
row, col = map(int, text.index("insert").split("."))
text.mark_set("insert", "{}.{}".format(row + 1, col))
_submit_code(code)
def METHOD_NAME():
widget = get_workbench().focus_get()
return isinstance(widget, CodeViewText) and get_runner().is_waiting_toplevel_command()
def _load_plugin():
wb = get_workbench()
wb.bind_class("EditorCodeViewText", "<<CursorMove>>", update_editor_cells, True)
wb.bind_class("EditorCodeViewText", "<<TextChange>>", update_editor_cells, True)
wb.bind_class("EditorCodeViewText", "<FocusIn>", update_editor_cells, True)
wb.bind_class("EditorCodeViewText", "<FocusOut>", update_editor_cells, True)
_patch_perform_return()
# TODO: try changing insertwidth in keyup/mouseup events
# _patch_intercept_mark() # Still causes freezes
wb.add_command(
"run_cell",
"run",
("Run cell"),
handler=dummy, # actual handler is in the patch
default_sequence="<Control-Return>",
tester=METHOD_NAME,
group=11,
)
wb.add_command(
"run_cell_and_advance",
"run",
("Run cell and advance"),
handler=dummy, # actual handler is in the patch
default_sequence="<Shift-Return>",
tester=METHOD_NAME,
group=11,
)
wb.add_command(
"run_selection",
"run",
("Run selection or current line"),
handler=run_selection,
default_sequence="<F9>",
tester=METHOD_NAME,
group=11,
)
|
2,714 |
process raw data
|
import os
import dgl
import numpy
import pandas
import torch
def METHOD_NAME(raw_dir, processed_dir):
r"""
Description
-----------
Preprocess the Elliptic dataset following the EvolveGCN official instructions:
github.com/IBM/EvolveGCN/blob/master/elliptic_construction.md
The main purpose is to convert the original idx to contiguous idx starting at 0.
"""
oid_nid_path = os.path.join(processed_dir, "oid_nid.npy")
id_label_path = os.path.join(processed_dir, "id_label.npy")
id_time_features_path = os.path.join(processed_dir, "id_time_features.npy")
src_dst_time_path = os.path.join(processed_dir, "src_dst_time.npy")
if (
os.path.exists(oid_nid_path)
and os.path.exists(id_label_path)
and os.path.exists(id_time_features_path)
and os.path.exists(src_dst_time_path)
):
print(
"The preprocessed data already exists, skip the preprocess stage!"
)
return
print("starting process raw data in {}".format(raw_dir))
id_label = pandas.read_csv(
os.path.join(raw_dir, "elliptic_txs_classes.csv")
)
src_dst = pandas.read_csv(
os.path.join(raw_dir, "elliptic_txs_edgelist.csv")
)
# elliptic_txs_features.csv has no header, and it has the same order idx with elliptic_txs_classes.csv
id_time_features = pandas.read_csv(
os.path.join(raw_dir, "elliptic_txs_features.csv"), header=None
)
# get oldId_newId
oid_nid = id_label.loc[:, ["txId"]]
oid_nid = oid_nid.rename(columns={"txId": "originalId"})
oid_nid.insert(1, "newId", range(len(oid_nid)))
# map classes unknown,1,2 to -1,1,0 and construct id_label. type 1 means illicit.
id_label = pandas.concat(
[
oid_nid["newId"],
id_label["class"].map({"unknown": -1.0, "1": 1.0, "2": 0.0}),
],
axis=1,
)
# replace originalId to newId.
# Attention: the timestamp in features start at 1.
id_time_features[0] = oid_nid["newId"]
# construct originalId2newId dict
oid_nid_dict = oid_nid.set_index(["originalId"])["newId"].to_dict()
# construct newId2timestamp dict
nid_time_dict = id_time_features.set_index([0])[1].to_dict()
# Map id in edgelist to newId, and add a timestamp to each edge.
# Attention: per the EvolveGCN official instructions, the timestamps in the edgelist start at 0, rather than 1.
# see: github.com/IBM/EvolveGCN/blob/master/elliptic_construction.md
# Here we do not follow the official instructions, which means the timestamps in the edgelist also start at 1.
# In the EvolveGCN example, the edge timestamps are not used.
#
# Note: in the dataset, src and dst node has the same timestamp, so it's easy to set edge's timestamp.
new_src = src_dst["txId1"].map(oid_nid_dict).rename("newSrc")
new_dst = src_dst["txId2"].map(oid_nid_dict).rename("newDst")
edge_time = new_src.map(nid_time_dict).rename("timestamp")
src_dst_time = pandas.concat([new_src, new_dst, edge_time], axis=1)
# save oid_nid, id_label, id_time_features, src_dst_time to disk. we can convert them to numpy.
# oid_nid: type int. id_label: type int. id_time_features: type float. src_dst_time: type int.
oid_nid = oid_nid.to_numpy(dtype=int)
id_label = id_label.to_numpy(dtype=int)
id_time_features = id_time_features.to_numpy(dtype=float)
src_dst_time = src_dst_time.to_numpy(dtype=int)
numpy.save(oid_nid_path, oid_nid)
numpy.save(id_label_path, id_label)
numpy.save(id_time_features_path, id_time_features)
numpy.save(src_dst_time_path, src_dst_time)
print(
"Process Elliptic raw data done, data has saved into {}".format(
processed_dir
)
)
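# A hedged usage sketch (the directory paths below are placeholders, not part of the
# original module):
#
#   dataset = EllipticDataset(raw_dir="raw/", processed_dir="processed/")
#   g, node_mask_by_time = dataset.process()
#   first_step_nodes = node_mask_by_time[0].nonzero().squeeze()  # nodes of the first timestep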
class EllipticDataset:
def __init__(
self, raw_dir, processed_dir, self_loop=True, reverse_edge=True
):
self.raw_dir = raw_dir
self.processed_dir = processed_dir
self.self_loop = self_loop
self.reverse_edge = reverse_edge
def process(self):
METHOD_NAME(self.raw_dir, self.processed_dir)
id_time_features = torch.Tensor(
numpy.load(os.path.join(self.processed_dir, "id_time_features.npy"))
)
id_label = torch.IntTensor(
numpy.load(os.path.join(self.processed_dir, "id_label.npy"))
)
src_dst_time = torch.IntTensor(
numpy.load(os.path.join(self.processed_dir, "src_dst_time.npy"))
)
)
src = src_dst_time[:, 0]
dst = src_dst_time[:, 1]
# id_label[:, 0] is used to add self loop
if self.self_loop:
if self.reverse_edge:
g = dgl.graph(
data=(
torch.cat((src, dst, id_label[:, 0])),
torch.cat((dst, src, id_label[:, 0])),
),
num_nodes=id_label.shape[0],
)
g.edata["timestamp"] = torch.cat(
(
src_dst_time[:, 2],
src_dst_time[:, 2],
id_time_features[:, 1].int(),
)
)
else:
g = dgl.graph(
data=(
torch.cat((src, id_label[:, 0])),
torch.cat((dst, id_label[:, 0])),
),
num_nodes=id_label.shape[0],
)
g.edata["timestamp"] = torch.cat(
(src_dst_time[:, 2], id_time_features[:, 1].int())
)
else:
if self.reverse_edge:
g = dgl.graph(
data=(torch.cat((src, dst)), torch.cat((dst, src))),
num_nodes=id_label.shape[0],
)
g.edata["timestamp"] = torch.cat(
(src_dst_time[:, 2], src_dst_time[:, 2])
)
else:
g = dgl.graph(data=(src, dst), num_nodes=id_label.shape[0])
g.edata["timestamp"] = src_dst_time[:, 2]
time_features = id_time_features[:, 1:]
label = id_label[:, 1]
g.ndata["label"] = label
g.ndata["feat"] = time_features
# used to construct time-based sub-graph.
node_mask_by_time = []
start_time = int(torch.min(id_time_features[:, 1]))
end_time = int(torch.max(id_time_features[:, 1]))
for i in range(start_time, end_time + 1):
node_mask = id_time_features[:, 1] == i
node_mask_by_time.append(node_mask)
return g, node_mask_by_time
@property
def num_classes(self):
r"""Number of classes for each node."""
return 2
|
2,715 |
get validation support
|
import os
import re
from typing import Tuple
from qtpy.QtCore import QSize
from qtpy.QtWidgets import QFileDialog, QHBoxLayout, QLineEdit, QToolButton, QWidget
from ert.gui.ertwidgets import resourceIcon
from ert.gui.ertwidgets.validationsupport import ValidationSupport
class PathChooser(QWidget):
"""
PathChooser: shows, lets the user choose, and validates paths. The data
structure expected by and sent to the model's getValue and setValue is a string.
"""
PATH_DOES_NOT_EXIST_MSG = "The specified path does not exist."
FILE_IS_NOT_EXECUTABLE_MSG = "The specified file is not an executable."
PATH_IS_NOT_A_FILE_MSG = "The specified path must be a file."
PATH_IS_NOT_ABSOLUTE_MSG = "The specified path must be an absolute path."
PATH_IS_NOT_A_DIRECTORY_MSG = "The specified path must be a directory."
REQUIRED_FIELD_MSG = "A path is required."
def __init__(self, model):
"""
:type model: ert.gui.ertwidgets.models.path_model.PathModel
"""
QWidget.__init__(self)
self._validation_support = ValidationSupport(self)
self._editing = True
layout = QHBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
self._path_line = QLineEdit()
self._path_line.setMinimumWidth(250)
layout.addWidget(self._path_line)
dialog_button = QToolButton(self)
dialog_button.setIcon(resourceIcon("folder_open.svg"))
dialog_button.setIconSize(QSize(16, 16))
dialog_button.clicked.connect(self.selectPath)
layout.addWidget(dialog_button)
self.valid_color = self._path_line.palette().color(
self._path_line.backgroundRole()
)
self._path_line.setText(os.getcwd())
self._editing = False
self._model = model
self._model.valueChanged.connect(self.getPathFromModel)
self._path_line.editingFinished.connect(self.validatePath)
self._path_line.editingFinished.connect(self.contentsChanged)
self._path_line.textChanged.connect(self.validatePath)
self.setLayout(layout)
self.getPathFromModel()
def isPathValid(self, path) -> Tuple[bool, str]:
path = path.strip()
path_exists = os.path.exists(path)
is_file = os.path.isfile(path)
is_directory = os.path.isdir(path)
is_executable = os.access(path, os.X_OK)
is_absolute = os.path.isabs(path)
valid = True
message = ""
if path == "":
if self._model.pathIsRequired():
valid = False
message = PathChooser.REQUIRED_FIELD_MSG
elif not path_exists:
if self._model.pathMustExist():
valid = False
message = PathChooser.PATH_DOES_NOT_EXIST_MSG
# todo: check if new (non-existing) file has directory or file format?
elif path_exists:
if self._model.pathMustBeExecutable() and is_file and not is_executable:
valid = False
message = PathChooser.FILE_IS_NOT_EXECUTABLE_MSG
elif self._model.pathMustBeADirectory() and not is_directory:
valid = False
message = PathChooser.PATH_IS_NOT_A_DIRECTORY_MSG
elif self._model.pathMustBeAbsolute() and not is_absolute:
valid = False
message = PathChooser.PATH_IS_NOT_ABSOLUTE_MSG
elif self._model.pathMustBeAFile() and not is_file:
valid = False
message = PathChooser.PATH_IS_NOT_A_FILE_MSG
return valid, message
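# Illustration (added comment): for a model where pathMustExist() is True,
# isPathValid("/does/not/exist") returns (False, PathChooser.PATH_DOES_NOT_EXIST_MSG).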
def validatePath(self):
"""Called whenever the path is modified"""
palette = self._path_line.palette()
valid, message = self.isPathValid(self.getPath())
validity_type = ValidationSupport.WARNING
if not valid:
color = ValidationSupport.ERROR_COLOR
else:
color = self.valid_color
self._validation_support.setValidationMessage(message, validity_type)
self._path_line.setToolTip(message)
palette.setColor(self._path_line.backgroundRole(), color)
self._path_line.setPalette(palette)
def getPath(self):
"""Returns the path"""
return os.path.expanduser(str(self._path_line.text()).strip())
def selectPath(self):
"""Pops up the 'select a file/directory' dialog"""
# todo: This probably needs some reworking to work properly with
# different scenarios... (file + dir)
self._editing = True
current_directory = self.getPath()
if self._model.pathMustBeAFile():
current_directory: str = QFileDialog.getOpenFileName(
self, "Select a file path", current_directory
)[0]
else:
current_directory: str = QFileDialog.getExistingDirectory(
self, "Select a directory", current_directory
)
if current_directory != "":
if not self._model.pathMustBeAbsolute():
cwd = os.getcwd()
match = re.match(cwd + "/(.*)", current_directory)
if match:
current_directory = match.group(1)
self._path_line.setText(current_directory)
self._model.setPath(self.getPath())
self._editing = False
def contentsChanged(self):
"""Called whenever the path is changed."""
path_is_valid, message = self.isPathValid(self.getPath())
if not self._editing and path_is_valid:
self._model.setPath(self.getPath())
def getPathFromModel(self):
"""Retrieves data from the model and inserts it into the edit line"""
self._editing = True
path = self._model.getPath()
if path is None:
path = ""
self._path_line.setText(str(path))
self._editing = False
def METHOD_NAME(self):
return self._validation_support
def isValid(self):
return self._validation_support.isValid()
|
2,716 |
open process for memory access
|
#!/usr/bin/env python
"""OSX specific utils."""
import ctypes
import ctypes.util
import logging
import os
import platform
from grr_response_client import client_utils_osx_linux
from grr_response_client.osx import objc
from grr_response_client.osx import process
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import paths as rdf_paths
# Shared functions between macOS and Linux.
# pylint: disable=invalid-name
GetExtAttrs = client_utils_osx_linux.GetExtAttrs
CanonicalPathToLocalPath = client_utils_osx_linux.CanonicalPathToLocalPath
LocalPathToCanonicalPath = client_utils_osx_linux.LocalPathToCanonicalPath
VerifyFileOwner = client_utils_osx_linux.VerifyFileOwner
TransactionLog = client_utils_osx_linux.TransactionLog
CreateProcessFromSerializedFileDescriptor = process.Process.CreateFromSerializedFileDescriptor
# pylint: enable=invalid-name
def FindProxies():
"""This reads the OSX system configuration and gets the proxies."""
sc = objc.SystemConfiguration()
# Get the dictionary of network proxy settings
settings = sc.dll.SCDynamicStoreCopyProxies(None)
if not settings:
return []
try:
cf_http_enabled = sc.CFDictRetrieve(settings, "kSCPropNetProxiesHTTPEnable")
if cf_http_enabled and bool(sc.CFNumToInt32(cf_http_enabled)):
# Proxy settings for HTTP are enabled
cfproxy = sc.CFDictRetrieve(settings, "kSCPropNetProxiesHTTPProxy")
cfport = sc.CFDictRetrieve(settings, "kSCPropNetProxiesHTTPPort")
if cfproxy and cfport:
proxy = sc.CFStringToPystring(cfproxy)
port = sc.CFNumToInt32(cfport)
return ["http://%s:%d/" % (proxy, port)]
cf_auto_enabled = sc.CFDictRetrieve(
settings, "kSCPropNetProxiesProxyAutoConfigEnable")
if cf_auto_enabled and bool(sc.CFNumToInt32(cf_auto_enabled)):
cfurl = sc.CFDictRetrieve(settings,
"kSCPropNetProxiesProxyAutoConfigURLString")
if cfurl:
unused_url = sc.CFStringToPystring(cfurl)
# TODO(amoser): Auto config is enabled, what is the plan here?
# Basically, all we get is the URL of a javascript file. To get the
# correct proxy for a given URL, browsers call a Javascript function
# that returns the correct proxy URL. The question is now, do we really
# want to start running downloaded js on the client?
return []
finally:
sc.dll.CFRelease(settings)
return []
def GetMountpoints():
"""List all the filesystems mounted on the system."""
devices = {}
for filesys in GetFileSystems():
devices[filesys.f_mntonname.decode("utf-8")] = (
filesys.f_mntfromname.decode("utf-8"),
filesys.f_fstypename.decode("utf-8"))
return devices
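# Illustration (added comment, hypothetical values): GetMountpoints() returns a dict
# keyed by mount point, e.g. {"/": ("/dev/disk1s1", "apfs"),
# "/Volumes/Data": ("/dev/disk1s2", "apfs")}.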
class StatFSStruct(utils.Struct):
"""Parse filesystems getfsstat."""
_fields = [
("h", "f_otype;"),
("h", "f_oflags;"),
("l", "f_bsize;"),
("l", "f_iosize;"),
("l", "f_blocks;"),
("l", "f_bfree;"),
("l", "f_bavail;"),
("l", "f_files;"),
("l", "f_ffree;"),
("Q", "f_fsid;"),
("l", "f_owner;"),
("h", "f_reserved1;"),
("h", "f_type;"),
("l", "f_flags;"),
("2l", "f_reserved2"),
("15s", "f_fstypename"),
("90s", "f_mntonname"),
("90s", "f_mntfromname"),
("x", "f_reserved3"),
("16x", "f_reserved4")
] # pyformat:disable
class StatFS64Struct(utils.Struct):
"""Parse filesystems getfsstat for 64 bit."""
_fields = [
("<L", "f_bsize"),
("l", "f_iosize"),
("Q", "f_blocks"),
("Q", "f_bfree"),
("Q", "f_bavail"),
("Q", "f_files"),
("Q", "f_ffree"),
("l", "f_fsid1"),
("l", "f_fsid2"),
("l", "f_owner"),
("L", "f_type"),
("L", "f_flags"),
("L", "f_fssubtype"),
("16s", "f_fstypename"),
("1024s", "f_mntonname"),
("1024s", "f_mntfromname"),
("32s", "f_reserved")
] # pyformat:disable
def GetFileSystems():
"""Make syscalls to get the mounted filesystems.
Returns:
A list of Struct objects.
Based on the information for getfsstat
http://developer.apple.com/library/mac/#documentation/Darwin/
Reference/ManPages/man2/getfsstat.2.html
"""
version = OSXVersion()
major, minor = version.VersionAsMajorMinor()
libc = objc.LoadLibrary("c")
if major <= 10 and minor <= 5:
use_64 = False
fs_struct = StatFSStruct
else:
use_64 = True
fs_struct = StatFS64Struct
# Get max 20 file systems.
struct_size = fs_struct.GetSize()
buf_size = struct_size * 20
cbuf = ctypes.create_string_buffer(buf_size)
if use_64:
# MNT_NOWAIT = 2 - don't ask the filesystems, just return cache.
ret = libc.getfsstat64(ctypes.byref(cbuf), buf_size, 2)
else:
ret = libc.getfsstat(ctypes.byref(cbuf), buf_size, 2)
if ret == 0:
logging.debug("getfsstat failed err: %s", ret)
return []
return ParseFileSystemsStruct(fs_struct, ret, cbuf)
def ParseFileSystemsStruct(struct_class, fs_count, data):
"""Take the struct type and parse it into a list of structs."""
results = []
cstr = lambda x: x.split(b"\x00", 1)[0]
for count in range(0, fs_count):
struct_size = struct_class.GetSize()
s_data = data[count * struct_size:(count + 1) * struct_size]
s = struct_class(s_data)
s.f_fstypename = cstr(s.f_fstypename)
s.f_mntonname = cstr(s.f_mntonname)
s.f_mntfromname = cstr(s.f_mntfromname)
results.append(s)
return results
def GetRawDevice(path):
"""Resolve the raw device that contains the path."""
device_map = GetMountpoints()
path = utils.SmartUnicode(path)
mount_point = path = utils.NormalizePath(path, "/")
result = rdf_paths.PathSpec(pathtype=rdf_paths.PathSpec.PathType.OS)
# Assign the most specific mount point to the result
while mount_point:
try:
result.path, fs_type = device_map[mount_point]
if fs_type in [
"ext2",
"ext3",
"ext4",
"vfat",
"ntfs",
"Apple_HFS",
"hfs",
"msdos",
"apfs",
]:
# These are filesystems we can read
result.pathtype = rdf_paths.PathSpec.PathType.OS
else:
result.pathtype = rdf_paths.PathSpec.PathType.UNSET
# Drop the mount point
path = utils.NormalizePath(path[len(mount_point):])
return result, path
except KeyError:
mount_point = os.path.dirname(mount_point)
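# Illustration (added comment, hypothetical mount table): with "/" and "/Volumes/Data"
# mounted, GetRawDevice("/Volumes/Data/tmp/file") walks up from the full path until it
# finds the "/Volumes/Data" mount point, returning that device and the remaining
# path "/tmp/file".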
class OSXVersion(object):
"""Convenience functions for working with OSX versions."""
def __init__(self):
self.version = platform.mac_ver()[0]
self.splitversion = self.version.split(".")
self.majorminor = self.splitversion[0:2]
def VersionAsMajorMinor(self):
"""Get version as major minor array.
Returns:
[10, 8] for 10.8.1
"""
return [int(x) for x in self.majorminor]
def VersionString(self):
"""Get version string.
Returns:
"10.8.1" for 10.8.1
"""
return self.version
def KeepAlive():
# Not yet supported for OSX.
pass
def METHOD_NAME(pid=None):
return process.Process(pid=pid)
def MemoryRegions(proc, options):
return proc.Regions(
skip_executable_regions=options.skip_executable_regions,
skip_readonly_regions=options.skip_readonly_regions,
skip_shared_regions=options.skip_shared_regions)
|
2,717 |
one publish many subscribe
|
"""
Example of ZMQ Publish servers and Subscribe clients.
"""
import time
import zmq
from testplan.testing.multitest import MultiTest, testsuite, testcase
from testplan.testing.multitest.driver.zmq import ZMQServer, ZMQClient
from testplan.common.utils.context import context
def after_start(env):
# The subscribe client blocks all messages by default; the subscribe method
# allows messages whose prefix matches the topic_filter. Therefore an
# empty string allows all messages through.
env.client1.subscribe(topic_filter=b"")
env.client2.subscribe(topic_filter=b"")
# The ZMQ subscribe client takes a bit longer to actually connect, and an
# unconnected client results in dropped messages. There is currently no way
# to verify the connection, so we add a small delay after start.
time.sleep(1)
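# A rough sketch (plain pyzmq, not the testplan drivers used above; the address and
# topic below are illustrative) of how a SUB socket's topic filter behaves:
#
#   import zmq
#   ctx = zmq.Context.instance()
#   sub = ctx.socket(zmq.SUB)
#   sub.connect("tcp://127.0.0.1:5556")
#   sub.setsockopt(zmq.SUBSCRIBE, b"")       # empty prefix lets every message through
#   sub.setsockopt(zmq.SUBSCRIBE, b"topic")  # otherwise only messages starting with b"topic"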
@testsuite
class ZMQTestsuite:
def setup(self, env):
self.timeout = 5
# The clients must be flushed after each test to remove any extra messages.
# This would occur when running many_publish_one_subscribe before
# one_publish_many_subscribe on client 2.
def post_testcase(self, name, env, result):
env.client1.flush()
env.client2.flush()
# As above, the sleep gives the clients time to reconnect.
time.sleep(1)
@testcase
def many_publish_one_subscribe(self, env, result):
# Many publish servers send a message each to one subscription client as
# shown in the diagram below:
#
# Server1 ---msg1---+
# |
# +---msg1 & msg2---> Client1
# |
# Server2 ---msg2---+
#
# Server 1 sends a unique message to client 1.
msg1 = b"Hello 1"
result.log("Server 1 is sending: {}".format(msg1))
env.server1.send(data=msg1, timeout=self.timeout)
# Server 2 sends a unique message to client 1.
msg2 = b"Hello 2"
result.log("Server 2 is sending: {}".format(msg2))
env.server2.send(data=msg2, timeout=self.timeout)
# Client 1 receives its first message.
received1 = env.client1.receive(timeout=self.timeout)
# Client 1 receives its second message.
received2 = env.client1.receive(timeout=self.timeout)
# Check the sent messages are the same as the received messages. Note
# the messages may arrive in a different order.
sent_msgs = set([msg1, msg2])
received_msgs = set([received1, received2])
result.equal(received_msgs, sent_msgs, "Client 1 received")
@testcase
def METHOD_NAME(self, env, result):
# One publish server sends messages to many subscription clients as
# shown in the diagram below:
#
# +---msg---> Client1
# |
# Server1 ---msg---+
# |
# +---msg---> Client2
#
# Server 1 sends a unique message to the clients it is connected to
# (clients 1 & 2).
msg = b"Hello 3"
result.log("Server 1 is sending: {}".format(msg))
env.server1.send(data=msg, timeout=self.timeout)
# Client 1 receives message from server 1.
received1 = env.client1.receive(timeout=self.timeout)
result.equal(received1, msg, "Client 1 received")
# Client 2 receives message from server 1.
received2 = env.client2.receive(timeout=self.timeout)
result.equal(received2, msg, "Client 2 received")
def get_multitest(name):
# The environment contains two ZMQServers and two ZMQClients connected as
# in the diagrams below. This allows us to send messages from one publish
# server to many subscription clients and from many subscription clients to
# one publish server as in the examples above.
#
# +------> Client1
# |
# Server1 ------+
# |
# +------> Client2
#
# Server2 -------------> Client1
test = MultiTest(
name=name,
suites=[ZMQTestsuite()],
environment=[
# Both server's message patterns are defined as ZMQ
# PUB.
ZMQServer(
name="server1",
host="127.0.0.1",
port=0,
message_pattern=zmq.PUB,
),
ZMQServer(
name="server2",
host="127.0.0.1",
port=0,
message_pattern=zmq.PUB,
),
# Both client's message patterns are defined as ZMQ
# SUB.
ZMQClient(
name="client1",
hosts=[
context("server1", "{{host}}"),
context("server2", "{{host}}"),
],
ports=[
context("server1", "{{port}}"),
context("server2", "{{port}}"),
],
message_pattern=zmq.SUB,
),
ZMQClient(
name="client2",
hosts=[context("server1", "{{host}}")],
ports=[context("server1", "{{port}}")],
message_pattern=zmq.SUB,
),
],
after_start=after_start,
)
return test
|
2,718 |
test fail on bad client id
|
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import re
import tensorflow as tf
from tensorflow_federated.python.core.backends.native import execution_contexts
from tensorflow_federated.python.simulation.datasets import from_tensor_slices_client_data
from tensorflow_federated.python.simulation.datasets import transforming_client_data
TEST_DATA = {
'CLIENT A': collections.OrderedDict(
x=[[1, 2], [3, 4], [5, 6]],
y=[4.0, 5.0, 6.0],
z=['a', 'b', 'c'],
),
'CLIENT B': collections.OrderedDict(
x=[[10, 11]],
y=[7.0],
z=['d'],
),
'CLIENT C': collections.OrderedDict(
x=[[100, 101], [200, 201]],
y=[8.0, 9.0],
z=['e', 'f'],
),
}
TEST_CLIENT_DATA = from_tensor_slices_client_data.TestClientData(TEST_DATA)
def _make_transform_expanded(client_id):
index_str = tf.strings.split(client_id, sep='_', maxsplit=1)[0]
index = tf.cast(tf.strings.to_number(index_str), tf.int32)
def fn(data):
return collections.OrderedDict(
[('x', data['x'] + 10 * index), ('y', data['y']), ('z', data['z'])]
)
return fn
def _make_transform_raw(client_id):
del client_id
def fn(data):
data['x'] = data['x'] + 10
return data
return fn
NUM_EXPANDED_CLIENTS = 3
def test_expand_client_id(client_id):
return [str(i) + '_' + client_id for i in range(NUM_EXPANDED_CLIENTS)]
def test_reduce_client_id(client_id):
return tf.strings.split(client_id, sep='_')[1]
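# For illustration (added comment): with NUM_EXPANDED_CLIENTS = 3,
# test_expand_client_id('CLIENT A') yields ['0_CLIENT A', '1_CLIENT A', '2_CLIENT A'],
# and test_reduce_client_id maps any of those back to 'CLIENT A' (as a tf string tensor).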
TRANSFORMED_CLIENT_DATA = transforming_client_data.TransformingClientData(
TEST_CLIENT_DATA,
_make_transform_expanded,
test_expand_client_id,
test_reduce_client_id,
)
class TransformingClientDataTest(tf.test.TestCase):
def test_client_ids_property(self):
num_transformed_clients = len(TEST_DATA) * NUM_EXPANDED_CLIENTS
client_ids = TRANSFORMED_CLIENT_DATA.client_ids
self.assertLen(client_ids, num_transformed_clients)
for client_id in client_ids:
self.assertIsInstance(client_id, str)
self.assertListEqual(client_ids, sorted(client_ids))
def test_default_num_transformed_clients(self):
transformed_client_data = transforming_client_data.TransformingClientData(
TEST_CLIENT_DATA, _make_transform_raw
)
client_ids = transformed_client_data.client_ids
self.assertCountEqual(client_ids, TEST_DATA.keys())
def METHOD_NAME(self):
# The following three should be valid.
TRANSFORMED_CLIENT_DATA.create_tf_dataset_for_client('0_CLIENT A')
TRANSFORMED_CLIENT_DATA.create_tf_dataset_for_client('1_CLIENT B')
TRANSFORMED_CLIENT_DATA.create_tf_dataset_for_client('0_CLIENT C')
# This should not be valid: no prefix.
with self.assertRaisesRegex(
ValueError, 'is not a client in this ClientData'
):
TRANSFORMED_CLIENT_DATA.create_tf_dataset_for_client('CLIENT A')
# This should not be valid: no corresponding client.
with self.assertRaisesRegex(
ValueError, 'is not a client in this ClientData'
):
TRANSFORMED_CLIENT_DATA.create_tf_dataset_for_client('0_CLIENT D')
# This should not be valid: index out of range.
with self.assertRaisesRegex(
ValueError, 'is not a client in this ClientData'
):
TRANSFORMED_CLIENT_DATA.create_tf_dataset_for_client('3_CLIENT B')
def test_dataset_computation(self):
for client_id in TRANSFORMED_CLIENT_DATA.client_ids:
actual_dataset = TRANSFORMED_CLIENT_DATA.dataset_computation(client_id)
self.assertIsInstance(actual_dataset, tf.data.Dataset)
pattern = r'^(\d*)_(.*)$'
match = re.search(pattern, client_id)
client = match.group(2)
base_client_dataset = TEST_CLIENT_DATA.create_tf_dataset_for_client(
client
)
expected_dataset = base_client_dataset.map(
_make_transform_expanded(client_id)
)
for actual_client_data, expected_client_data in zip(
actual_dataset.as_numpy_iterator(),
expected_dataset.as_numpy_iterator(),
):
for actual_datum, expected_datum in zip(
actual_client_data, expected_client_data
):
self.assertEqual(actual_datum, expected_datum)
def test_create_tf_dataset_from_all_clients(self):
# Expands `CLIENT {N}` into N clients which add range(N) to the feature.
def expand_client_id(client_id):
return [client_id + '-' + str(i) for i in range(int(client_id[-1]))]
def make_transform_fn(client_id):
split_client_id = tf.strings.split(client_id, '-')
index = tf.cast(tf.strings.to_number(split_client_id[1]), tf.int32)
return lambda x: x + index
reduce_client_id = lambda client_id: tf.strings.split(client_id, sep='-')[0]
raw_data = {
'CLIENT 1': [0], # expanded to [0]
'CLIENT 2': [1, 3, 5], # expanded to [1, 3, 5], [2, 4, 6]
'CLIENT 3': [7, 10] # expanded to [7, 10], [8, 11], [9, 12]
} # pyformat: disable
client_data = from_tensor_slices_client_data.TestClientData(raw_data)
transformed_client_data = transforming_client_data.TransformingClientData(
client_data, make_transform_fn, expand_client_id, reduce_client_id
)
flat_data = transformed_client_data.create_tf_dataset_from_all_clients()
self.assertIsInstance(flat_data, tf.data.Dataset)
all_features = [batch.numpy() for batch in flat_data]
self.assertCountEqual(all_features, range(13))
if __name__ == '__main__':
execution_contexts.set_sync_local_cpp_execution_context()
tf.test.main()
|
2,719 |
load feature
|
# The learn_kmeans.py uses code from Fairseq:
# https://github.com/pytorch/fairseq/blob/master/examples/hubert/simple_kmeans/learn_kmeans.py
#
# Thanks to Abdelrahman Mohamed and Wei-Ning Hsu for their help with this implementation.
# Their original HuBERT work is in:
# Paper: https://arxiv.org/pdf/2106.07447.pdf
# Code in Fairseq: https://github.com/pytorch/fairseq/tree/master/examples/hubert
import argparse
import logging
import os
import random
import sys
import joblib
import numpy as np
from sklearn.cluster import MiniBatchKMeans
from espnet.utils.cli_readers import file_reader_helper
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("learn_kmeans")
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument("--km_path", type=str, required=True)
parser.add_argument("--n_clusters", type=int, required=True)
parser.add_argument("--seed", default=0, type=int)
parser.add_argument(
"--percent", default=-1, type=float, help="sample a subset; -1 for all"
)
parser.add_argument("--init", default="k-means++")
parser.add_argument("--max_iter", default=100, type=int)
parser.add_argument("--batch_size", default=10000, type=int)
parser.add_argument("--tol", default=0.0, type=float)
parser.add_argument("--max_no_improvement", default=100, type=int)
parser.add_argument("--n_init", default=20, type=int)
parser.add_argument("--reassignment_ratio", default=0.0, type=float)
parser.add_argument(
"--in_filetype",
type=str,
default="sound",
choices=["mat", "hdf5", "sound.hdf5", "sound"],
help="Specify the file format for the rspecifier. "
'"mat" is the matrix format in kaldi',
)
parser.add_argument(
"rspecifier",
type=str,
nargs="+",
help="Read specifier for feats. e.g. ark:some.ark",
)
return parser
def get_km_model(
n_clusters,
init,
max_iter,
batch_size,
tol,
max_no_improvement,
n_init,
reassignment_ratio,
):
return MiniBatchKMeans(
n_clusters=n_clusters,
init=init,
max_iter=max_iter,
batch_size=batch_size,
verbose=1,
compute_labels=False,
tol=tol,
max_no_improvement=max_no_improvement,
init_size=None,
n_init=n_init,
reassignment_ratio=reassignment_ratio,
)
def load_feature_shard(rspecifier, in_filetype, percent):
feats = []
for utt, feat in file_reader_helper(rspecifier, in_filetype):
feats.append(feat)
if percent < 0:
return np.concatenate(feats, axis=0)
else:
nsample = int(np.ceil(len(feats) * percent))
sampled_feat = random.sample(feats, nsample)
sampled_feat = np.concatenate(
sampled_feat,
axis=0,
)
logger.info(
(
f"sampled {nsample} utterances, {len(sampled_feat)} frames "
f"from rspecifier {rspecifier}"
)
)
return sampled_feat
def METHOD_NAME(rspecifiers, in_filetype, percent):
assert percent <= 1.0
if not isinstance(rspecifiers, list):
rspecifiers = [rspecifiers]
feat = np.concatenate(
[
load_feature_shard(rspecifier, in_filetype, percent)
for rspecifier in rspecifiers
],
axis=0,
)
logging.info(f"loaded feature with dimension {feat.shape}")
return feat
def learn_kmeans(
rspecifier,
in_filetype,
km_path,
n_clusters,
seed,
percent,
init,
max_iter,
batch_size,
tol,
n_init,
reassignment_ratio,
max_no_improvement,
):
np.random.seed(seed)
feat = METHOD_NAME(rspecifier, in_filetype, percent)
km_model = get_km_model(
n_clusters,
init,
max_iter,
batch_size,
tol,
max_no_improvement,
n_init,
reassignment_ratio,
)
km_model.fit(feat)
joblib.dump(km_model, km_path)
inertia = -km_model.score(feat) / len(feat)
logger.info("total intertia: %.5f", inertia)
logger.info("finished successfully")
if __name__ == "__main__":
parser = get_parser()
args = parser.parse_args()
logging.info(str(args))
learn_kmeans(**vars(args))
|
2,720 |
test create web dirs
|
import os
import shutil
from pathlib import Path
import pytest
from cobbler.cexceptions import CX
from cobbler.utils import filesystem_helpers
from tests.conftest import does_not_raise
@pytest.mark.parametrize(
"test_src,is_symlink,test_dst,expected_result",
[
# ("", "", False), --> This has a task in utils.py
("/tmp/not-safe-file", True, "/tmp/dst", False),
("/tmp/safe-file", False, "/tmp/dst", True),
],
)
def test_is_safe_to_hardlink(
cobbler_api, test_src, is_symlink, test_dst, expected_result
):
# Arrange
if is_symlink and test_src:
os.symlink("/foobar/test", test_src)
elif test_src:
open(test_src, "w").close()
# Act
result = filesystem_helpers.is_safe_to_hardlink(test_src, test_dst, cobbler_api)
# Cleanup
os.remove(test_src)
# Assert
assert expected_result == result
@pytest.mark.skip("This calls a lot of os-specific stuff. Let's fix this test later.")
def test_hashfile():
# Arrange
# TODO: Create testfile
testfilepath = "/dev/shm/bigtestfile"
expected_hash = ""
# Act
result = filesystem_helpers.hashfile(testfilepath)
# Assert
assert expected_hash == result
@pytest.mark.skip("This calls a lot of os-specific stuff. Let's fix this test later.")
def test_cachefile():
# Arrange
cache_src = ""
cache_dst = ""
api = None
# Act
filesystem_helpers.cachefile(cache_src, cache_dst)
# Assert
# TODO: Check .link_cache folder exists and the link cache file in it
# TODO: Assert file exists in the cache destination
assert False
@pytest.mark.skip("This calls a lot of os-specific stuff. Let's fix this test later.")
def test_linkfile(cobbler_api):
# Arrange
test_source = ""
test_destination = ""
# Act
filesystem_helpers.linkfile(cobbler_api, test_source, test_destination)
# Assert
assert False
@pytest.mark.skip("This calls a lot of os-specific stuff. Let's fix this test later.")
def test_copyfile():
# Arrange
test_source = ""
test_destination = ""
# Act
filesystem_helpers.copyfile(test_source, test_destination)
# Assert
assert False
@pytest.mark.skip("This calls a lot of os-specific stuff. Let's fix this test later.")
def test_copyremotefile():
# Arrange
test_source = ""
test_destination = ""
# Act
filesystem_helpers.copyremotefile(test_source, test_destination, None)
# Assert
assert False
@pytest.mark.skip("This calls a lot of os-specific stuff. Let's fix this test later.")
def test_copyfile_pattern():
# Arrange
test_pattern = ""
test_destination = ""
# Act
filesystem_helpers.copyfile_pattern(test_pattern, test_destination)
# Assert
assert False
def test_rmfile(tmpdir: Path):
# Arrange
tfile = tmpdir.join("testfile")
# Act
filesystem_helpers.rmfile(tfile)
# Assert
assert not os.path.exists(tfile)
def test_rmglob_files(tmpdir: Path):
# Arrange
tfile1 = tmpdir.join("file1.tfile")
tfile2 = tmpdir.join("file2.tfile")
# Act
filesystem_helpers.rmglob_files(tmpdir, "*.tfile")
# Assert
assert not os.path.exists(tfile1)
assert not os.path.exists(tfile2)
def test_rmtree_contents():
# Arrange
testfolder = "/dev/shm/"
testfiles = ["test1", "blafile", "testremove"]
for file in testfiles:
Path(os.path.join(testfolder, file)).touch()
# Act
filesystem_helpers.rmtree_contents(testfolder)
# Assert
assert len(os.listdir(testfolder)) == 0
def test_rmtree():
# Arrange
testtree = "/dev/shm/testtree"
os.mkdir(testtree)
# Pre assert to check the creation succeeded.
assert os.path.exists(testtree)
# Act
filesystem_helpers.rmtree(testtree)
# Assert
assert not os.path.exists(testtree)
def test_mkdir():
# TODO: Check how already existing folder is handled.
# Arrange
testfolder = "/dev/shm/testfoldercreation"
testmode = 0o600
try:
shutil.rmtree(testfolder)
except OSError:
pass
# Pre assert to check that this actually does something
assert not os.path.exists(testfolder)
# Act
filesystem_helpers.mkdir(testfolder, testmode)
# Assert
assert os.path.exists(testfolder)
@pytest.mark.parametrize(
"test_first_path,test_second_path,expected_result",
[("/tmp/test/a", "/tmp/test/a/b/c", "/b/c"), ("/tmp/test/a", "/opt/test/a", "")],
)
def test_path_tail(test_first_path, test_second_path, expected_result):
# Arrange
# TODO: Check if this actually makes sense...
# Act
result = filesystem_helpers.path_tail(test_first_path, test_second_path)
# Assert
assert expected_result == result
@pytest.mark.parametrize(
"test_input,expected_exception",
[
("Test", does_not_raise()),
("Test;Test", pytest.raises(CX)),
("Test..Test", pytest.raises(CX)),
],
)
def test_safe_filter(test_input, expected_exception):
# Arrange, Act & Assert
with expected_exception:
assert filesystem_helpers.safe_filter(test_input) is None
def METHOD_NAME(mocker, cobbler_api):
# Arrange
settings_mock = mocker.MagicMock()
settings_mock.webdir = "/my/custom/webdir"
mocker.patch.object(cobbler_api, "settings", return_value=settings_mock)
mock_mkdir = mocker.patch("cobbler.utils.filesystem_helpers.mkdir")
mock_copyfile = mocker.patch("cobbler.utils.filesystem_helpers.copyfile")
# Act
filesystem_helpers.create_web_dirs(cobbler_api)
# Assert
assert mock_mkdir.call_count == 9
assert mock_copyfile.call_count == 2
def test_create_tftpboot_dirs(mocker, cobbler_api):
# Arrange
settings_mock = mocker.MagicMock()
settings_mock.tftpboot_location = "/srv/tftpboot"
mocker.patch.object(cobbler_api, "settings", return_value=settings_mock)
mock_mkdir = mocker.patch("cobbler.utils.filesystem_helpers.mkdir")
mock_path_symlink_to = mocker.patch("pathlib.Path.symlink_to")
mocker.patch("pathlib.Path.exists", return_value=False)
# Act
filesystem_helpers.create_tftpboot_dirs(cobbler_api)
# Assert
assert mock_mkdir.call_count == 13
assert mock_path_symlink_to.call_count == 3
def test_create_trigger_dirs(mocker):
# Arrange
mock_mkdir = mocker.patch("cobbler.utils.filesystem_helpers.mkdir")
mocker.patch("pathlib.Path.exists", return_value=False)
# Act
filesystem_helpers.create_trigger_dirs(None)
# Assert
assert mock_mkdir.call_count == 84
def test_create_json_database_dirs(mocker):
# Arrange
mock_mkdir = mocker.patch("cobbler.utils.filesystem_helpers.mkdir")
mocker.patch("pathlib.Path.exists", return_value=False)
# Act
filesystem_helpers.create_json_database_dirs(None)
# Assert
mock_mkdir.assert_any_call("/var/lib/cobbler/collections")
# 1 collections parent directory + (1 child directory per item type * 9 item types atm)
assert mock_mkdir.call_count == 10
|
2,721 |
test simple
|
import os
from pathlib import Path
import time
from ansys.tools.path import find_ansys
import numpy as np
import pytest
from ansys.mapdl.core import LocalMapdlPool, examples
from ansys.mapdl.core.errors import VersionError
from conftest import skip_if_not_local
# skip entire module unless HAS_GRPC
pytestmark = pytest.mark.skip_grpc
skip_if_ignore_pool = pytest.mark.skipif(
os.environ.get("IGNORE_POOL", "").upper() == "TRUE",
reason="Ignoring Pool tests.",
)
MAPDL194PATH = "/usr/ansys_inc/v194/ansys/bin/mapdl"
skip_requires_194 = pytest.mark.skipif(
not os.path.isfile(MAPDL194PATH), reason="Requires MAPDL 194"
)
TWAIT = 90
LAUNCH_SWITCHES = "-smp -m 100 -db 100"
NPROC = 1
@pytest.fixture(scope="module")
def pool(tmpdir_factory):
EXEC_FILE = find_ansys()[0]
run_path = str(tmpdir_factory.mktemp("ansys_pool"))
mapdl_pool = LocalMapdlPool(
4,
license_server_check=False,
run_location=run_path,
port=50056,
start_timeout=30,
exec_file=EXEC_FILE,
additional_switches=LAUNCH_SWITCHES,
nproc=NPROC,
)
yield mapdl_pool
##########################################################################
# test exit
mapdl_pool.exit()
timeout = time.time() + TWAIT
while len(mapdl_pool) != 0:
time.sleep(0.1)
if time.time() > timeout:
raise TimeoutError(f"Failed to restart instance in {TWAIT} seconds")
assert len(mapdl_pool) == 0
# check it's been cleaned up
if mapdl_pool[0] is not None:
pth = mapdl_pool[0].directory
if mapdl_pool._spawn_kwargs["remove_temp_files"]:
assert not list(Path(pth).rglob("*.page*"))
@skip_requires_194
def test_invalid_exec():
with pytest.raises(VersionError):
LocalMapdlPool(
4,
nproc=NPROC,
exec_file="/usr/ansys_inc/v194/ansys/bin/mapdl",
additional_switches=LAUNCH_SWITCHES,
)
@skip_if_not_local
def test_heal(pool):
pool_sz = len(pool)
pool[0].exit()
pool[1].exit()
pool[2].exit()
time.sleep(1) # wait for shutdown
timeout = time.time() + TWAIT
while len(pool) < pool_sz:
time.sleep(0.1)
if time.time() > timeout:
raise TimeoutError(f"Failed to restart instance in {TWAIT} seconds")
assert len(pool) == pool_sz
pool._verify_unique_ports()
@skip_if_not_local
@skip_if_ignore_pool
def test_simple_map(pool):
pool_sz = len(pool)
_ = pool.map(lambda mapdl: mapdl.prep7())
assert len(pool) == pool_sz
@skip_if_not_local
@skip_if_ignore_pool
def test_map_timeout(pool):
pool_sz = len(pool)
def func(mapdl, tsleep):
mapdl.clear()
mapdl.prep7()
time.sleep(tsleep)
mapdl.post1()
return tsleep
timeout = 2
times = np.array([0, 1, 3, 4])
output = pool.map(func, times, timeout=timeout)
assert len(output) == (times < timeout).sum()
# wait for the pool to heal before continuing
timeout = time.time() + TWAIT
while len(pool) < pool_sz:
time.sleep(0.1)
if time.time() > timeout:
raise TimeoutError(f"Failed to restart instance in {TWAIT} seconds")
assert len(pool) == pool_sz
@skip_if_not_local
@skip_if_ignore_pool
def METHOD_NAME(pool):
pool_sz = len(pool)
def func(mapdl):
mapdl.clear()
outs = pool.map(func)
assert len(outs) == len(pool)
assert len(pool) == pool_sz
# fails intermittently
@skip_if_not_local
@skip_if_ignore_pool
def test_batch(pool):
input_files = [examples.vmfiles["vm%d" % i] for i in range(1, 11)]
outputs = pool.run_batch(input_files)
assert len(outputs) == len(input_files)
# fails intermittently
@skip_if_not_local
@skip_if_ignore_pool
def test_map(pool):
completed_indices = []
def func(mapdl, input_file, index):
# input_file, index = args
print(len(pool))
mapdl.clear()
output = mapdl.input(input_file)
completed_indices.append(index)
return mapdl.parameters.routine
inputs = [(examples.vmfiles["vm%d" % i], i) for i in range(1, 11)]
outputs = pool.map(func, inputs, progress_bar=True, wait=True)
assert len(outputs) == len(inputs)
@skip_if_not_local
@skip_if_ignore_pool
def test_abort(pool, tmpdir):
pool_sz = len(pool) # initial pool size
old_paths = [mapdl.directory for mapdl in pool]
tmp_file = str(tmpdir.join("woa.inp"))
with open(tmp_file, "w") as f:
f.write("EXIT")
input_files = [examples.vmfiles["vm%d" % i] for i in range(1, 11)]
input_files += [tmp_file]
outputs = pool.run_batch(input_files)
assert len(outputs) == len(input_files)
# ensure failed instance restarts
timeout = time.time() + TWAIT
while len(pool) < pool_sz:
time.sleep(0.1)
if time.time() > timeout:
raise TimeoutError(f"Failed to restart instance in {TWAIT} seconds")
assert len(pool) == pool_sz
    # at least one of the original instance directories should still exist
    # after the batch run (os.path.isdir() is True while a directory exists)
    for path in old_paths:
        path_still_exists = os.path.isdir(path)
        if path_still_exists:
            break
    assert path_still_exists
@skip_if_not_local
@skip_if_ignore_pool
def test_directory_names_default(pool):
dirs_path_pool = os.listdir(pool._root_dir)
assert "Instance_0" in dirs_path_pool
assert "Instance_1" in dirs_path_pool
assert "Instance_2" in dirs_path_pool
assert "Instance_3" in dirs_path_pool
@skip_if_not_local
@skip_if_ignore_pool
def test_directory_names_custom_string(tmpdir):
pool = LocalMapdlPool(
2,
run_location=tmpdir,
nproc=NPROC,
names="my_instance",
port=50056,
additional_switches=LAUNCH_SWITCHES,
)
dirs_path_pool = os.listdir(pool._root_dir)
assert "my_instance_0" in dirs_path_pool
assert "my_instance_1" in dirs_path_pool
pool.exit(block=True)
@skip_if_not_local
@skip_if_ignore_pool
def test_directory_names_function(tmpdir):
def myfun(i):
if i == 0:
return "instance_zero"
elif i == 1:
return "instance_one"
else:
return "Other_instance"
pool = LocalMapdlPool(
3,
nproc=NPROC,
names=myfun,
run_location=tmpdir,
additional_switches=LAUNCH_SWITCHES,
)
dirs_path_pool = os.listdir(pool._root_dir)
assert "instance_zero" in dirs_path_pool
assert "instance_one" in dirs_path_pool
assert "Other_instance" in dirs_path_pool
pool.exit(block=True)
|
2,722 |
tags
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetSubnetGroupResult',
'AwaitableGetSubnetGroupResult',
'get_subnet_group',
'get_subnet_group_output',
]
@pulumi.output_type
class GetSubnetGroupResult:
"""
A collection of values returned by getSubnetGroup.
"""
def __init__(__self__, arn=None, description=None, id=None, name=None, subnet_ids=None, METHOD_NAME=None):
if arn and not isinstance(arn, str):
raise TypeError("Expected argument 'arn' to be a str")
pulumi.set(__self__, "arn", arn)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if subnet_ids and not isinstance(subnet_ids, list):
raise TypeError("Expected argument 'subnet_ids' to be a list")
pulumi.set(__self__, "subnet_ids", subnet_ids)
if METHOD_NAME and not isinstance(METHOD_NAME, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", METHOD_NAME)
@property
@pulumi.getter
def arn(self) -> str:
"""
ARN of the subnet group.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter
def description(self) -> str:
"""
Description of the subnet group.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="subnetIds")
def subnet_ids(self) -> Sequence[str]:
"""
        Set of VPC Subnet IDs of the subnet group.
"""
return pulumi.get(self, "subnet_ids")
@property
@pulumi.getter
def METHOD_NAME(self) -> Optional[Mapping[str, str]]:
"""
Map of tags assigned to the subnet group.
"""
return pulumi.get(self, "tags")
class AwaitableGetSubnetGroupResult(GetSubnetGroupResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetSubnetGroupResult(
arn=self.arn,
description=self.description,
id=self.id,
name=self.name,
subnet_ids=self.subnet_ids,
METHOD_NAME=self.METHOD_NAME)
def get_subnet_group(name: Optional[str] = None,
METHOD_NAME: Optional[Mapping[str, str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSubnetGroupResult:
"""
    Provides information about an ElastiCache Subnet Group.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.elasticache.get_subnet_group(name="my-subnet-group")
```
:param str name: Name of the subnet group.
:param Mapping[str, str] tags: Map of tags assigned to the subnet group.
"""
__args__ = dict()
__args__['name'] = name
__args__['tags'] = METHOD_NAME
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('aws:elasticache/getSubnetGroup:getSubnetGroup', __args__, opts=opts, typ=GetSubnetGroupResult).value
return AwaitableGetSubnetGroupResult(
arn=pulumi.get(__ret__, 'arn'),
description=pulumi.get(__ret__, 'description'),
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
subnet_ids=pulumi.get(__ret__, 'subnet_ids'),
METHOD_NAME=pulumi.get(__ret__, 'tags'))
@_utilities.lift_output_func(get_subnet_group)
def get_subnet_group_output(name: Optional[pulumi.Input[str]] = None,
METHOD_NAME: Optional[pulumi.Input[Optional[Mapping[str, str]]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetSubnetGroupResult]:
"""
    Provides information about an ElastiCache Subnet Group.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.elasticache.get_subnet_group(name="my-subnet-group")
```
:param str name: Name of the subnet group.
:param Mapping[str, str] tags: Map of tags assigned to the subnet group.
"""
...
|
2,723 |
test get connections target models
|
# -*- coding: utf-8 -*-
#
# test_getconnections.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
GetConnections
"""
import unittest
import nest
nest.set_verbosity("M_ERROR")
@nest.ll_api.check_stack
class GetConnectionsTestCase(unittest.TestCase):
"""Find connections and test if values can be set."""
def test_GetConnections(self):
"""GetConnections"""
nest.ResetKernel()
a = nest.Create("iaf_psc_alpha", 3)
nest.Connect(a, a)
c1 = nest.GetConnections(a)
c2 = nest.GetConnections(a, synapse_model="static_synapse")
self.assertEqual(c1, c2)
weights = (2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0)
d1 = tuple({"weight": w} for w in weights)
c3 = nest.GetConnections(a, a)
nest.SetStatus(c3, d1)
s1 = nest.GetStatus(c3, "weight")
self.assertEqual(s1, weights)
c4 = nest.GetConnections()
self.assertEqual(c1, c4)
weights = (11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0)
d1 = tuple({"weight": w} for w in weights)
c5 = nest.GetConnections(a, a)
c5.set(d1)
s2 = c5.get("weight")
self.assertEqual(s2, list(weights))
c6 = nest.GetConnections()
self.assertEqual(c1, c6)
def METHOD_NAME(self):
"""GetConnections iterating models for target"""
for model in nest.node_models:
nest.ResetKernel()
alpha = nest.Create("iaf_psc_alpha")
try:
other = nest.Create(model)
nest.Connect(alpha, other)
except nest.kernel.NESTError:
# If we can't create a node with this model, or connect
# to a node of this model, we ignore it.
continue
for get_conn_args in [{"source": alpha, "target": other}, {"source": alpha}, {"target": other}]:
conns = nest.GetConnections(**get_conn_args)
self.assertEqual(
len(conns),
1,
"Failed to get connection with target model {} (specifying {})".format(
model, ", ".join(get_conn_args.keys())
),
)
def test_GetConnectionsSourceModels(self):
"""GetConnections iterating models for source"""
for model in nest.node_models:
nest.ResetKernel()
alpha = nest.Create("iaf_psc_alpha")
try:
other = nest.Create(model)
nest.Connect(other, alpha)
except nest.kernel.NESTError:
# If we can't create a node with this model, or connect
# to a node of this model, we ignore it.
continue
for get_conn_args in [{"source": other, "target": alpha}, {"source": other}, {"target": alpha}]:
conns = nest.GetConnections(**get_conn_args)
self.assertEqual(
len(conns),
1,
"Failed to get connection with source model {} (specifying {})".format(
model, ", ".join(get_conn_args.keys())
),
)
def test_GetConnectionsSynapseModel(self):
"""GetConnections using synapse_model as argument"""
num_src = 3
num_tgt = 5
for synapse_model in nest.synapse_models:
nest.ResetKernel()
src = nest.Create("iaf_psc_alpha", num_src)
tgt = nest.Create("iaf_psc_alpha", num_tgt)
# First create one connection with static_synapse
nest.Connect(src[0], tgt[0])
try:
# Connect with specified synapse
nest.Connect(src, tgt, syn_spec={"synapse_model": synapse_model})
except nest.kernel.NESTError:
# If we can't connect iaf_psc_alpha with the given synapse_model, we ignore it.
continue
reference_list = [synapse_model] * num_src * num_tgt
if synapse_model == "static_synapse":
reference_list += ["static_synapse"]
conns = nest.GetConnections(synapse_model=synapse_model)
self.assertEqual(reference_list, conns.synapse_model)
# Also test that it works if we specify source/target and synapse_model
conns = nest.GetConnections(source=src, target=tgt, synapse_model=synapse_model)
self.assertEqual(reference_list, conns.synapse_model)
conns = nest.GetConnections(source=src, synapse_model=synapse_model)
self.assertEqual(reference_list, conns.synapse_model)
conns = nest.GetConnections(target=tgt, synapse_model=synapse_model)
self.assertEqual(reference_list, conns.synapse_model)
def test_GetConnectionsSynapseLabel(self):
"""GetConnections using synapse_label as argument"""
label = 123
num_src = 3
num_tgt = 5
for synapse_model in [s for s in nest.synapse_models if s.endswith("_lbl")]:
nest.ResetKernel()
src = nest.Create("iaf_psc_alpha", num_src)
tgt = nest.Create("iaf_psc_alpha", num_tgt)
# First create one connection with static_synapse
nest.Connect(src[0], tgt[0])
try:
# Connect with specified synapse
nest.Connect(src, tgt, syn_spec={"synapse_model": synapse_model, "synapse_label": label})
except nest.kernel.NESTError:
# If we can't connect iaf_psc_alpha with the given synapse_model, we ignore it.
continue
reference_list = [synapse_model] * num_src * num_tgt
label_list = [label] * num_src * num_tgt
# Call GetConnections with specified synapse_label and test that connections with
# corresponding model are returned
conns = nest.GetConnections(synapse_label=label)
self.assertEqual(reference_list, conns.synapse_model)
self.assertEqual(label_list, conns.synapse_label)
# Also test that it works if we specify source/target and synapse_label
conns = nest.GetConnections(source=src, target=tgt, synapse_label=label)
self.assertEqual(reference_list, conns.synapse_model)
conns = nest.GetConnections(source=src, synapse_label=label)
self.assertEqual(reference_list, conns.synapse_model)
conns = nest.GetConnections(target=tgt, synapse_label=label)
self.assertEqual(reference_list, conns.synapse_model)
def suite():
suite = unittest.makeSuite(GetConnectionsTestCase, "test")
return suite
def run():
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
if __name__ == "__main__":
run()
|
2,724 |
has camera
|
#!/usr/bin/env python
from __future__ import print_function
import os, sys
import subprocess
import datetime
# There is something like a set of gphoto2 python bindings, at
# http://magiclantern.wikia.com/wiki/Remote_control_with_PTP_and_Python
# but it's not widely available and you'd have to compile it for the RPi.
camera_name = None
def METHOD_NAME():
'''Is there a gphoto2-compatible camera installed?
gphoto2 --auto-detect doesn't do anything helpful like,
say, exiting with a different status if there's no camera.
So we have to parse the output.
'''
try:
output = subprocess.check_output(["/usr/bin/gphoto2",
"--auto-detect"]).split('\n')
seen_separator = False
for line in output:
if seen_separator:
if len(line) > 1 and not line[0].isspace():
global camera_name
camera_name = line.strip()
return camera_name
elif line.startswith("---------------"):
seen_separator = True
return False
except:
return False
class Gphoto :
def __init__(self, res=None, verbose=False):
'''May raise CalledProcessError or NotImplementedError
if there's no compatible camera connected and switched on.
XXX res is ignored for now.
'''
self.verbose = verbose
def check_config(self):
'''This routine tends to fail -- gphoto2 prints
"** Error (-1: 'Unspecified error') ***" --
even when the camera can capture just fine. So skip it.
'''
# Do we have a real camera attached using PTP so gphoto2 can talk to it?
has_capture = False
# For some reason gphoto2 --list-config ALWAYS exits with nonzero
# and "*** Error (-1: 'Unspecified error') ***"
# so alas we have to ignore error returns.
# gphoto2 --set-config capture=1 --list-config is the right way.
try:
args = [ "/usr/bin/gphoto2",
# "--debug", "--debug-logfile=/tmp/log.txt",
"--list-config",
"capture=on"]
config = subprocess.check_output(args)
except subprocess.CalledProcessError as e:
print("list-config exited with status", e.returncode)
config = e.output
print("output was: <START>", end=' ')
print(config)
print("<END>")
for line in config.split('\n'):
if line.startswith('/main/settings/capture'):
has_capture = True
break
else: print(line, "isn't capture")
if not has_capture:
raise NotImplementedError
def take_still(self, outfile=None, res=None, zoom=None):
if res:
print("Warning: gphoto wrapper ignoring resolution", res)
# gphoto2 can only take photos to files on the same filesystem
# as the current working directory.
print()
# So if outfile points to another filesystem, we need to
# change directory to that filesystem.
if outfile and outfile != '-':
if outfile[0] == '/':
outdir = os.path.split(outfile)[0]
if os.stat(outdir).st_dev != os.stat(os.getcwd()).st_dev:
if self.verbose:
print(outfile, "is not on the same filesystem as", \
os.getcwd())
print("Changing directory to", outdir)
os.chdir(outdir)
# XXX should we change back afterward?
# gphoto2 will also prompt if the target file already exists,
# so we have to rename or remove it.
print("Checking whether", outfile, "exists already")
if os.path.exists(outfile):
if self.verbose:
print("Renaming", outfile, "to", outfile + ".bak")
os.rename(outfile, outfile + ".bak")
else:
print("Not checking, outfile =", outfile)
if not outfile:
now = datetime.datetime.now()
outfile = "snap-%04d-%02d-%02d-%02d-%02d-%02d.jpg" % \
(now.year, now.month, now.day, now.hour, now.minute, now.second)
# gphoto2 can handle date formatting, but in that case
# we'd have no idea what the actual filename was
# so we couldn't do anything with it later.
print("outfile is now", outfile)
args = [ "/usr/bin/gphoto2", "--set-config", "syncdatetime=1",
"--set-config", "capturetarget=sdram" ]
if zoom:
args.append("--set-config")
args.append("zoom=%s" % str(zoom))
# The capture image command and filename have to come last:
args.append("--capture-image-and-download")
args.append("--filename")
args.append(outfile)
if self.verbose:
print("Calling:", args)
rv = subprocess.call(args)
if __name__ == '__main__':
gphoto = Gphoto(verbose=True)
if not gphoto.METHOD_NAME():
print("No camera connected!")
sys.exit(0)
gphoto.take_still()
gphoto.take_still(zoom=10)
|
2,725 |
get assessment output
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetAssessmentResult',
'AwaitableGetAssessmentResult',
'get_assessment',
'get_assessment_output',
]
@pulumi.output_type
class GetAssessmentResult:
"""
Security assessment on a resource - response format
"""
def __init__(__self__, additional_data=None, display_name=None, id=None, links=None, metadata=None, name=None, partners_data=None, resource_details=None, status=None, type=None):
if additional_data and not isinstance(additional_data, dict):
raise TypeError("Expected argument 'additional_data' to be a dict")
pulumi.set(__self__, "additional_data", additional_data)
if display_name and not isinstance(display_name, str):
raise TypeError("Expected argument 'display_name' to be a str")
pulumi.set(__self__, "display_name", display_name)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if links and not isinstance(links, dict):
raise TypeError("Expected argument 'links' to be a dict")
pulumi.set(__self__, "links", links)
if metadata and not isinstance(metadata, dict):
raise TypeError("Expected argument 'metadata' to be a dict")
pulumi.set(__self__, "metadata", metadata)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if partners_data and not isinstance(partners_data, dict):
raise TypeError("Expected argument 'partners_data' to be a dict")
pulumi.set(__self__, "partners_data", partners_data)
if resource_details and not isinstance(resource_details, dict):
raise TypeError("Expected argument 'resource_details' to be a dict")
pulumi.set(__self__, "resource_details", resource_details)
if status and not isinstance(status, dict):
raise TypeError("Expected argument 'status' to be a dict")
pulumi.set(__self__, "status", status)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="additionalData")
def additional_data(self) -> Optional[Mapping[str, str]]:
"""
Additional data regarding the assessment
"""
return pulumi.get(self, "additional_data")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> str:
"""
User friendly display name of the assessment
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource Id
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def links(self) -> 'outputs.AssessmentLinksResponse':
"""
Links relevant to the assessment
"""
return pulumi.get(self, "links")
@property
@pulumi.getter
def metadata(self) -> Optional['outputs.SecurityAssessmentMetadataPropertiesResponse']:
"""
Describes properties of an assessment metadata.
"""
return pulumi.get(self, "metadata")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="partnersData")
def partners_data(self) -> Optional['outputs.SecurityAssessmentPartnerDataResponse']:
"""
Data regarding 3rd party partner integration
"""
return pulumi.get(self, "partners_data")
@property
@pulumi.getter(name="resourceDetails")
def resource_details(self) -> Any:
"""
Details of the resource that was assessed
"""
return pulumi.get(self, "resource_details")
@property
@pulumi.getter
def status(self) -> 'outputs.AssessmentStatusResponseResponse':
"""
The result of the assessment
"""
return pulumi.get(self, "status")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type
"""
return pulumi.get(self, "type")
class AwaitableGetAssessmentResult(GetAssessmentResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetAssessmentResult(
additional_data=self.additional_data,
display_name=self.display_name,
id=self.id,
links=self.links,
metadata=self.metadata,
name=self.name,
partners_data=self.partners_data,
resource_details=self.resource_details,
status=self.status,
type=self.type)
def get_assessment(assessment_name: Optional[str] = None,
expand: Optional[str] = None,
resource_id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAssessmentResult:
"""
Get a security assessment on your scanned resource
:param str assessment_name: The Assessment Key - Unique key for the assessment type
:param str expand: OData expand. Optional.
:param str resource_id: The identifier of the resource.
"""
__args__ = dict()
__args__['assessmentName'] = assessment_name
__args__['expand'] = expand
__args__['resourceId'] = resource_id
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:security/v20210601:getAssessment', __args__, opts=opts, typ=GetAssessmentResult).value
return AwaitableGetAssessmentResult(
additional_data=pulumi.get(__ret__, 'additional_data'),
display_name=pulumi.get(__ret__, 'display_name'),
id=pulumi.get(__ret__, 'id'),
links=pulumi.get(__ret__, 'links'),
metadata=pulumi.get(__ret__, 'metadata'),
name=pulumi.get(__ret__, 'name'),
partners_data=pulumi.get(__ret__, 'partners_data'),
resource_details=pulumi.get(__ret__, 'resource_details'),
status=pulumi.get(__ret__, 'status'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_assessment)
def METHOD_NAME(assessment_name: Optional[pulumi.Input[str]] = None,
expand: Optional[pulumi.Input[Optional[str]]] = None,
resource_id: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetAssessmentResult]:
"""
Get a security assessment on your scanned resource
:param str assessment_name: The Assessment Key - Unique key for the assessment type
:param str expand: OData expand. Optional.
:param str resource_id: The identifier of the resource.
"""
...
|
2,726 |
timings
|
# encoding: utf-8
"""
Utilities for timing code execution.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import time
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
# If possible (Unix), use the resource module instead of time.clock()
try:
import resource
except ImportError:
resource = None
# Some implementations (like jupyterlite) don't have getrusage
if resource is not None and hasattr(resource, "getrusage"):
def clocku():
"""clocku() -> floating point number
Return the *USER* CPU time in seconds since the start of the process.
This is done via a call to resource.getrusage, so it avoids the
wraparound problems in time.clock()."""
return resource.getrusage(resource.RUSAGE_SELF)[0]
def clocks():
"""clocks() -> floating point number
Return the *SYSTEM* CPU time in seconds since the start of the process.
This is done via a call to resource.getrusage, so it avoids the
wraparound problems in time.clock()."""
return resource.getrusage(resource.RUSAGE_SELF)[1]
def clock():
"""clock() -> floating point number
Return the *TOTAL USER+SYSTEM* CPU time in seconds since the start of
the process. This is done via a call to resource.getrusage, so it
avoids the wraparound problems in time.clock()."""
u,s = resource.getrusage(resource.RUSAGE_SELF)[:2]
return u+s
def clock2():
"""clock2() -> (t_user,t_system)
Similar to clock(), but return a tuple of user/system times."""
return resource.getrusage(resource.RUSAGE_SELF)[:2]
else:
# There is no distinction of user/system time under windows, so we just use
# time.process_time() for everything...
clocku = clocks = clock = time.process_time
def clock2():
"""Under windows, system CPU time can't be measured.
This just returns process_time() and zero."""
return time.process_time(), 0.0
def timings_out(reps,func,*args,**kw):
"""timings_out(reps,func,*args,**kw) -> (t_total,t_per_call,output)
Execute a function reps times, return a tuple with the elapsed total
CPU time in seconds, the time per call and the function's output.
Under Unix, the return value is the sum of user+system time consumed by
the process, computed via the resource module. This prevents problems
related to the wraparound effect which the time.clock() function has.
Under Windows the return value is in wall clock seconds. See the
documentation for the time module for more details."""
reps = int(reps)
assert reps >=1, 'reps must be >= 1'
if reps==1:
start = clock()
out = func(*args,**kw)
tot_time = clock()-start
else:
rng = range(reps-1) # the last time is executed separately to store output
start = clock()
for dummy in rng: func(*args,**kw)
out = func(*args,**kw) # one last time
tot_time = clock()-start
av_time = tot_time / reps
return tot_time,av_time,out
def METHOD_NAME(reps,func,*args,**kw):
"""timings(reps,func,*args,**kw) -> (t_total,t_per_call)
Execute a function reps times, return a tuple with the elapsed total CPU
time in seconds and the time per call. These are just the first two values
in timings_out()."""
return timings_out(reps,func,*args,**kw)[0:2]
def timing(func,*args,**kw):
"""timing(func,*args,**kw) -> t_total
Execute a function once, return the elapsed total CPU time in
seconds. This is just the first value in timings_out()."""
return timings_out(1,func,*args,**kw)[0]
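# --- Usage sketch (added for illustration; not part of the original module) ---
# timings_out() is the primitive the other helpers wrap: it returns the total
# CPU time, the time per call, and the function's output.  The demo below is a
# minimal sketch using the built-in sum().
if __name__ == "__main__":
    tot_time, per_call, result = timings_out(100, sum, range(10000))
    print("timings_out: total=%.6fs per_call=%.6fs result=%d" % (tot_time, per_call, result))
    print("timing (single call): %.6fs" % timing(sum, range(10000)))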
|
2,727 |
get module constant
|
import sys
import marshal
import contextlib
import dis
from setuptools.extern.packaging import version
from ._imp import find_module, PY_COMPILED, PY_FROZEN, PY_SOURCE
from . import _imp
__all__ = [
'Require', 'find_module', 'get_module_constant', 'extract_constant'
]
class Require:
"""A prerequisite to building or installing a distribution"""
def __init__(
self, name, requested_version, module, homepage='',
attribute=None, format=None):
if format is None and requested_version is not None:
format = version.Version
if format is not None:
requested_version = format(requested_version)
if attribute is None:
attribute = '__version__'
self.__dict__.update(locals())
del self.self
def full_name(self):
"""Return full package/distribution name, w/version"""
if self.requested_version is not None:
return '%s-%s' % (self.name, self.requested_version)
return self.name
def version_ok(self, version):
"""Is 'version' sufficiently up-to-date?"""
return self.attribute is None or self.format is None or \
str(version) != "unknown" and self.format(version) >= self.requested_version
def get_version(self, paths=None, default="unknown"):
"""Get version number of installed module, 'None', or 'default'
Search 'paths' for module. If not found, return 'None'. If found,
return the extracted version attribute, or 'default' if no version
attribute was specified, or the value cannot be determined without
importing the module. The version is formatted according to the
requirement's version format (if any), unless it is 'None' or the
supplied 'default'.
"""
if self.attribute is None:
try:
f, p, i = find_module(self.module, paths)
if f:
f.close()
return default
except ImportError:
return None
v = METHOD_NAME(self.module, self.attribute, default, paths)
if v is not None and v is not default and self.format is not None:
return self.format(v)
return v
def is_present(self, paths=None):
"""Return true if dependency is present on 'paths'"""
return self.get_version(paths) is not None
def is_current(self, paths=None):
"""Return true if dependency is present and up-to-date on 'paths'"""
version = self.get_version(paths)
if version is None:
return False
return self.version_ok(str(version))
def maybe_close(f):
@contextlib.contextmanager
def empty():
yield
return
if not f:
return empty()
return contextlib.closing(f)
def METHOD_NAME(module, symbol, default=-1, paths=None):
"""Find 'module' by searching 'paths', and extract 'symbol'
Return 'None' if 'module' does not exist on 'paths', or it does not define
'symbol'. If the module defines 'symbol' as a constant, return the
constant. Otherwise, return 'default'."""
try:
f, path, (suffix, mode, kind) = info = find_module(module, paths)
except ImportError:
# Module doesn't exist
return None
with maybe_close(f):
if kind == PY_COMPILED:
f.read(8) # skip magic & date
code = marshal.load(f)
elif kind == PY_FROZEN:
code = _imp.get_frozen_object(module, paths)
elif kind == PY_SOURCE:
code = compile(f.read(), path, 'exec')
else:
# Not something we can parse; we'll have to import it. :(
imported = _imp.get_module(module, paths, info)
return getattr(imported, symbol, None)
return extract_constant(code, symbol, default)
def extract_constant(code, symbol, default=-1):
"""Extract the constant value of 'symbol' from 'code'
If the name 'symbol' is bound to a constant value by the Python code
object 'code', return that value. If 'symbol' is bound to an expression,
return 'default'. Otherwise, return 'None'.
Return value is based on the first assignment to 'symbol'. 'symbol' must
be a global, or at least a non-"fast" local in the code block. That is,
only 'STORE_NAME' and 'STORE_GLOBAL' opcodes are checked, and 'symbol'
must be present in 'code.co_names'.
"""
if symbol not in code.co_names:
# name's not there, can't possibly be an assignment
return None
name_idx = list(code.co_names).index(symbol)
STORE_NAME = 90
STORE_GLOBAL = 97
LOAD_CONST = 100
const = default
for byte_code in dis.Bytecode(code):
op = byte_code.opcode
arg = byte_code.arg
if op == LOAD_CONST:
const = code.co_consts[arg]
elif arg == name_idx and (op == STORE_NAME or op == STORE_GLOBAL):
return const
else:
const = default
def _update_globals():
"""
Patch the globals to remove the objects not available on some platforms.
XXX it'd be better to test assertions about bytecode instead.
"""
if not sys.platform.startswith('java') and sys.platform != 'cli':
return
incompatible = 'extract_constant', 'get_module_constant'
for name in incompatible:
del globals()[name]
__all__.remove(name)
_update_globals()
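# --- Usage sketch (added for illustration; not part of the original module; it
# assumes a CPython version whose opcode numbers match the constants hard-coded
# in extract_constant above). ---
# extract_constant() pulls the first constant bound to a global name straight
# from a code object, without importing the module that defines it.
if __name__ == "__main__":
    _example = compile("version = '1.2.3'\n", "<example>", "exec")
    assert extract_constant(_example, "version") == "1.2.3"  # constant assignment
    assert extract_constant(_example, "missing") is None      # name never assigned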
|
2,728 |
get params
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model script to test TF-TensorRT integration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tf2tensorrt.wrap_py_utils import get_linked_tensorrt_version
from tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
def _GraphFn(x, add_quantization_nodes):
def _Quantize(x, r):
if add_quantization_nodes:
x = gen_array_ops.fake_quant_with_min_max_vars(x, -r, r)
return x
x = _Quantize(x, 10.0)
x = x + 5
x = _Quantize(x, 15.0)
x = x - 5
x = _Quantize(x, 10.0)
x = x * 0.1
x = _Quantize(x, 1.0)
w = constant_op.constant(np.ones((8, 1)), dtype=dtypes.float32)
x = math_ops.matmul(x, w)
x = _Quantize(x, 10.0)
return array_ops.identity(x, name="output_0")
def METHOD_NAME(self):
return self.BuildParams(self.GraphFn, dtypes.float32, [[8, 8]], [[8, 1]])
class QuantizationMissingAllRangesTest(trt_test.TfTrtIntegrationTestBase):
"""Create a graph containing single segment with no quantization ranges."""
def GraphFn(self, x):
return _GraphFn(x, add_quantization_nodes=False)
def GetParams(self):
return METHOD_NAME(self)
def ShouldRunTest(self, run_params):
if get_linked_tensorrt_version()[0] < 5:
return False
# Only test static engine mode, with or without calibration.
return (trt_test.IsQuantizationMode(run_params.precision_mode) and
not run_params.convert_online and not run_params.dynamic_engine)
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
# In static engine mode with calibration, it should build a calibration
# engine.
# In static engine mode without calibration, the engine building will
# succeed but fall back to non-quantized ops.
return ["TRTEngineOp_0"]
class QuantizationWithRangesTest(trt_test.TfTrtIntegrationTestBase):
"""Create a graph containing single segment with no quantization ranges."""
def GraphFn(self, x):
return _GraphFn(x, add_quantization_nodes=True)
def GetParams(self):
return METHOD_NAME(self)
def ShouldRunTest(self, run_params):
if get_linked_tensorrt_version()[0] < 5:
return False
# Test static/dynamic engine with/without calibration.
return (trt_test.IsQuantizationMode(run_params.precision_mode) and
not run_params.convert_online)
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return ["TRTEngineOp_0"]
def ExpectedAbsoluteTolerance(self, run_params):
"""The absolute tolerance to compare floating point results."""
return 1.e-05 if run_params.precision_mode == "FP32" else 1.e-01
def ExpectedRelativeTolerance(self, run_params):
"""The relative tolerance to compare floating point results."""
return 1.e-05 if run_params.precision_mode == "FP32" else 1.e-01
class NonQuantizedPrecisionsWithRangesTest(trt_test.TfTrtIntegrationTestBase):
"""Create a graph containing single segment with no quantization ranges."""
def GraphFn(self, x):
return _GraphFn(x, add_quantization_nodes=True)
def GetParams(self):
return METHOD_NAME(self)
def ShouldRunTest(self, run_params):
# Only test FP32/FP16 mode.
return not trt_test.IsQuantizationMode(run_params.precision_mode)
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
# The fake quant ops are not supported in FP32/FP16 mode, and will split the
    # graph into four TRT segments, matching the four engines listed below.
return ["TRTEngineOp_0", "TRTEngineOp_1", "TRTEngineOp_2", "TRTEngineOp_3"]
def ExpectedAbsoluteTolerance(self, run_params):
"""The absolute tolerance to compare floating point results."""
return 1.e-05 if run_params.precision_mode == "FP32" else 1.e-01
def ExpectedRelativeTolerance(self, run_params):
"""The relative tolerance to compare floating point results."""
return 1.e-05 if run_params.precision_mode == "FP32" else 1.e-01
if __name__ == "__main__":
test.main()
|
2,729 |
parse
|
# Copyright (C) Donald Stufft and individual contributors
# SPDX-License-Identifier: BSD-2-Clause
"""
This file was partially copied from the packaging.version module before the
LegacyVersion class was removed to continue to support version parsing in
a backward-compatible way where PEP 440 support can't be used.
Copyright (c) Donald Stufft and individual contributors.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import re
from typing import Iterator, List, Tuple, Union
from packaging.version import InvalidVersion, Version, _BaseVersion
LegacyCmpKey = Tuple[int, Tuple[str, ...]]
def METHOD_NAME(version: str) -> Union["_LegacyVersion", "Version"]:
"""
Parse the given version string and return either a :class:`Version` object
or a :class:`_LegacyVersion` object depending on if the given version is
a valid PEP 440 version or a legacy version.
"""
try:
return Version(version)
except InvalidVersion:
return _LegacyVersion(version)
class _LegacyVersion(_BaseVersion):
def __init__(self, version: str) -> None:
self._version = str(version)
self._key = _legacy_cmpkey(self._version)
def __str__(self) -> str:
return self._version
def __repr__(self) -> str:
return f"<_LegacyVersion('{self}')>"
@property
def public(self) -> str:
return self._version
@property
def base_version(self) -> str:
return self._version
@property
def epoch(self) -> int:
return -1
@property
def release(self) -> None:
return None
@property
def pre(self) -> None:
return None
@property
def post(self) -> None:
return None
@property
def dev(self) -> None:
return None
@property
def local(self) -> None:
return None
@property
def is_prerelease(self) -> bool:
return False
@property
def is_postrelease(self) -> bool:
return False
@property
def is_devrelease(self) -> bool:
return False
_legacy_version_component_re = re.compile(r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE)
_legacy_version_replacement_map = {
"pre": "c",
"preview": "c",
"-": "final-",
"rc": "c",
"dev": "@",
}
def _parse_version_parts(s: str) -> Iterator[str]:
for part in _legacy_version_component_re.split(s):
part = _legacy_version_replacement_map.get(part, part)
if not part or part == ".":
continue
if part[:1] in "0123456789":
# pad for numeric comparison
yield part.zfill(8)
else:
yield "*" + part
# ensure that alpha/beta/candidate are before final
yield "*final"
def _legacy_cmpkey(version: str) -> LegacyCmpKey:
    # We hardcode an epoch of -1 here. A PEP 440 version can only have an epoch
    # greater than or equal to 0. This effectively puts a LegacyVersion, which
    # uses the de facto standard originally implemented by setuptools, before
    # all PEP 440 versions.
epoch = -1
    # This scheme is taken from setuptools' pkg_resources.parse_version, prior to
    # its adoption of the packaging library.
parts: List[str] = []
for part in _parse_version_parts(version.lower()):
if part.startswith("*"):
# remove "-" before a prerelease tag
if part < "*final":
while parts and parts[-1] == "*final-":
parts.pop()
# remove trailing zeros from each series of numeric parts
while parts and parts[-1] == "00000000":
parts.pop()
parts.append(part)
return epoch, tuple(parts)
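# --- Usage sketch (added for illustration; not part of the original module) ---
# METHOD_NAME() falls back to _LegacyVersion for strings that are not valid
# PEP 440 versions; because of the hard-coded epoch of -1, legacy versions
# always sort before PEP 440 ones.
if __name__ == "__main__":
    assert isinstance(METHOD_NAME("1.2.3"), Version)           # valid PEP 440
    assert isinstance(METHOD_NAME("2014q1"), _LegacyVersion)   # legacy fallback
    assert METHOD_NAME("2014q1") < METHOD_NAME("0.0.1")        # epoch -1 sorts first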
|
2,730 |
suite
|
# -*- coding: utf-8 -*-
#
# test_connect_one_to_one.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import unittest
import connect_test_base
import nest
HAVE_OPENMP = nest.ll_api.sli_func("is_threaded")
@unittest.skipIf(not HAVE_OPENMP, "NEST was compiled without multi-threading")
@nest.ll_api.check_stack
class TestOneToOne(connect_test_base.ConnectTestBase):
# specify connection pattern
rule = "one_to_one"
conn_dict = {"rule": rule}
# sizes of populations
N = 6
N1 = N
N2 = N
N_array = 1000
def testConnectivity(self):
self.setUpNetwork(self.conn_dict)
# make sure all connections do exist
M = connect_test_base.get_connectivity_matrix(self.pop1, self.pop2)
connect_test_base.mpi_assert(M, np.identity(self.N), self)
# make sure no connections were drawn from the target to the source
# population
M = connect_test_base.get_connectivity_matrix(self.pop2, self.pop1)
connect_test_base.mpi_assert(M, np.zeros((self.N, self.N)), self)
def testSymmetricFlag(self):
conn_dict_symmetric = self.conn_dict.copy()
conn_dict_symmetric["make_symmetric"] = True
self.setUpNetwork(conn_dict_symmetric)
M1 = connect_test_base.get_connectivity_matrix(self.pop1, self.pop2)
M2 = connect_test_base.get_connectivity_matrix(self.pop2, self.pop1)
# test that connections were created in both directions
connect_test_base.mpi_assert(M1, np.transpose(connect_test_base.gather_data(M2)), self)
# test that no other connections were created
connect_test_base.mpi_assert(M1, np.zeros_like(M1) + np.identity(self.N), self)
def testInputArray(self):
syn_params = {}
for label in ["weight", "delay"]:
if label == "weight":
self.param_array = np.arange(self.N_array, dtype=float)
elif label == "delay":
self.param_array = np.arange(1, self.N_array + 1) * 0.1
syn_params[label] = self.param_array
nest.ResetKernel()
self.setUpNetwork(self.conn_dict, syn_params, N1=self.N_array, N2=self.N_array)
M_nest = connect_test_base.get_weighted_connectivity_matrix(self.pop1, self.pop2, label)
connect_test_base.mpi_assert(M_nest, np.diag(self.param_array), self)
def testInputArrayRPort(self):
syn_params = {}
neuron_model = "iaf_psc_exp_multisynapse"
neuron_dict = {"tau_syn": [0.1 + i for i in range(self.N1)]}
self.pop1 = nest.Create(neuron_model, self.N1, neuron_dict)
self.pop2 = nest.Create(neuron_model, self.N1, neuron_dict)
self.param_array = np.arange(1, self.N1 + 1, dtype=int)
syn_params["receptor_type"] = self.param_array
nest.Connect(self.pop1, self.pop2, self.conn_dict, syn_params)
M = connect_test_base.get_weighted_connectivity_matrix(self.pop1, self.pop2, "receptor")
connect_test_base.mpi_assert(M, np.diag(self.param_array), self)
def testInputArrayToStdpSynapse(self):
params = ["Wmax", "alpha", "lambda", "mu_minus", "mu_plus", "tau_plus"]
syn_params = {"synapse_model": "stdp_synapse"}
values = [np.arange(self.N1, dtype=float) for i in range(6)]
for i, param in enumerate(params):
syn_params[param] = values[i]
self.setUpNetwork(self.conn_dict, syn_params)
for i, param in enumerate(params):
a = connect_test_base.get_weighted_connectivity_matrix(self.pop1, self.pop2, param)
connect_test_base.mpi_assert(np.diag(a), values[i], self)
def METHOD_NAME():
METHOD_NAME = unittest.TestLoader().loadTestsFromTestCase(TestOneToOne)
return METHOD_NAME
def run():
runner = unittest.TextTestRunner(verbosity=2)
runner.run(METHOD_NAME())
if __name__ == "__main__":
run()
|
2,731 |
tf create global tensor
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2020-2023 Alibaba Group Holding Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import contextlib
import numpy as np
import lazy_import
from vineyard._C import ObjectMeta
from vineyard.core import context
from vineyard.core.resolver import default_resolver_context
from vineyard.core.resolver import resolver_context
from vineyard.data.utils import build_numpy_buffer
from vineyard.data.utils import from_json
from vineyard.data.utils import normalize_dtype
from vineyard.data.utils import to_json
tf = lazy_import.lazy_module("tensorflow")
def tf_tensor_builder(client, value, **kw):
meta = ObjectMeta()
meta['typename'] = 'vineyard::Tensor'
meta['num'] = to_json(len(value))
meta['partition_index_'] = to_json(kw.get('partition_index', []))
    data = value.batch(len(value))
for i in data:
meta.add_member('buffer_data_', build_numpy_buffer(client, i[0].numpy()))
meta.add_member('buffer_label_', build_numpy_buffer(client, i[1].numpy()))
meta['data_shape_'] = to_json(i[0].numpy().shape)
meta['label_shape_'] = to_json(i[1].numpy().shape)
meta['data_type_'] = i[0].numpy().dtype.name
meta['label_type_'] = i[1].numpy().dtype.name
meta['data_type_meta_'] = i[0].numpy().dtype.str
meta['label_type_meta_'] = i[1].numpy().dtype.str
return client.create_metadata(meta)
def tf_dataframe_builder(client, value, builder, **kw):
meta = ObjectMeta()
meta['typename'] = 'vineyard::DataFrame'
for feat, labels in value.take(1):
cols = list(feat.keys())
cols.append('label')
meta['columns_'] = to_json(cols)
for i, col in enumerate(cols):
ls = []
for feat, labels in value.take(len(value)):
if col == 'label':
ls.append(labels.numpy())
else:
ls.append(feat[col].numpy())
meta['__values_-key-%d' % i] = to_json(col)
meta.add_member('__values_-value-%d' % i, builder.run(client, ls))
meta['__values_-size'] = len(cols)
meta['partition_index_row_'] = kw.get('partition_index', [-1, -1])[0]
meta['partition_index_column_'] = kw.get('partition_index', [-1, -1])[1]
meta['row_batch_index_'] = kw.get('row_batch_index', 0)
return client.create_metadata(meta)
def tf_builder(client, value, builder, **kw):
for x, _y in value.take(1):
if isinstance(x, dict):
return tf_dataframe_builder(client, value, builder, **kw)
else:
return tf_tensor_builder(client, value, **kw)
def METHOD_NAME(
client, value, builder, **kw
): # pylint: disable=unused-argument
# TODO
pass
def tf_create_global_dataframe(
client, value, builder, **kw
): # pylint: disable=unused-argument
# TODO
pass
def tf_tensor_resolver(obj):
meta = obj.meta
_num = from_json(meta['num']) # noqa: F841
data_shape = from_json(meta['data_shape_'])
label_shape = from_json(meta['label_shape_'])
data_name = meta['data_type_']
label_name = meta['label_type_']
    data_type = normalize_dtype(data_name, meta.get('data_type_meta_', None))
    label_type = normalize_dtype(label_name, meta.get('label_type_meta_', None))
data = np.frombuffer(
memoryview(obj.member('buffer_data_')), dtype=data_type
).reshape(data_shape)
label = np.frombuffer(
memoryview(obj.member('buffer_label_')), dtype=label_type
).reshape(label_shape)
data = tf.data.Dataset.from_tensor_slices((data, label))
return data
def tf_dataframe_resolver(obj, **kw):
with resolver_context(base=default_resolver_context) as resolver:
df = resolver(obj, **kw)
labels = df.pop(kw.get('label', 'label'))
if 'data' in kw:
return tf.data.Dataset.from_tensor_slices(
(np.stack(df[kw['data']], axis=0), labels)
)
return tf.data.Dataset.from_tensor_slices((dict(df), labels))
def tf_record_batch_resolver(obj, **kw):
with resolver_context(base=default_resolver_context) as resolver:
records = resolver(obj, **kw)
records = records.to_pandas()
labels = records.pop('label')
return tf.data.Dataset.from_tensor_slices((dict(records), labels))
def tf_table_resolver(obj, resolver):
meta = obj.meta
batches = []
for idx in range(int(meta['__batches_-size'])):
batches.append(resolver(obj.member('__batches_-%d' % idx)))
tf_data = batches[0]
for i in range(1, len(batches)):
tf_data = tf_data.concatenate(batches[i])
return tf_data
def tf_global_tensor_resolver(obj, resolver, **_kw):
meta = obj.meta
num = int(meta['partitions_-size'])
data = []
for i in range(num):
if meta[f'partitions_-{i}'].islocal:
data.append(resolver.run(obj.member(f'partitions_-{i}')))
tf_data = data[0]
for i in range(1, len(data)):
tf_data = tf_data.concatenate(data[i])
return tf_data
def tf_global_dataframe_resolver(obj, resolver, **kw):
meta = obj.meta
num = int(meta['partitions_-size'])
data = []
for i in range(num):
if meta[f'partitions_-{i}'].islocal:
data.append(resolver(obj.member(f'partitions_-{i}'), **kw))
tf_data = data[0]
for i in range(1, len(data)):
tf_data = tf_data.concatenate(data[i])
return tf_data
def register_tensorflow_types(builder_ctx, resolver_ctx):
if builder_ctx is not None:
builder_ctx.register(tf.data.Dataset, tf_builder)
if resolver_ctx is not None:
resolver_ctx.register('vineyard::Tensor', tf_tensor_resolver)
resolver_ctx.register('vineyard::DataFrame', tf_dataframe_resolver)
resolver_ctx.register('vineyard::RecordBatch', tf_record_batch_resolver)
resolver_ctx.register('vineyard::Table', tf_table_resolver)
resolver_ctx.register('vineyard::GlobalTensor', tf_global_tensor_resolver)
resolver_ctx.register('vineyard::GlobalDataFrame', tf_global_dataframe_resolver)
@contextlib.contextmanager
def tensorflow_context():
with context() as (builder_ctx, resolver_ctx):
with contextlib.suppress(ImportError):
register_tensorflow_types(builder_ctx, resolver_ctx)
yield builder_ctx, resolver_ctx
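# --- Usage sketch (added for illustration; not part of the original module).
# The client details below (socket path, put/get round-trip) are assumptions;
# within the context manager the TensorFlow builders/resolvers registered
# above are in effect.
#
#   import numpy as np
#   import tensorflow as tf
#   import vineyard
#
#   client = vineyard.connect("/var/run/vineyard.sock")   # hypothetical socket path
#   dataset = tf.data.Dataset.from_tensor_slices(
#       (np.ones((8, 4), dtype="float32"), np.zeros(8, dtype="int64")))
#   with tensorflow_context():
#       object_id = client.put(dataset)    # built with tf_builder
#       roundtrip = client.get(object_id)  # resolved back to a tf.data.Dataset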
|
2,732 |
test getattribute
|
# third party
import numpy as np
# syft absolute
from syft.service.action.action_object import ActionObject
from syft.service.action.plan import planify
from syft.types.twin_object import TwinObject
def test_eager_permissions(worker, guest_client):
root_domain_client = worker.root_client
input_obj = TwinObject(
private_obj=np.array([[3, 3, 3], [3, 3, 3]]),
mock_obj=np.array([[1, 1, 1], [1, 1, 1]]),
)
input_ptr = root_domain_client.api.services.action.set(input_obj)
pointer = guest_client.api.services.action.get_pointer(input_ptr.id)
input_ptr = root_domain_client.api.services.action.set(input_obj)
pointer = guest_client.api.services.action.get_pointer(input_ptr.id)
flat_ptr = pointer.flatten()
res_guest = guest_client.api.services.action.get(flat_ptr.id)
assert not isinstance(res_guest, ActionObject)
res_root = root_domain_client.api.services.action.get(flat_ptr.id)
assert all(res_root == [3, 3, 3, 3, 3, 3])
def test_plan(worker):
root_domain_client = worker.root_client
guest_client = worker.guest_client
@planify
def my_plan(x=np.array([[2, 2, 2], [2, 2, 2]])): # noqa: B008
y = x.flatten()
z = y.prod()
return z
plan_ptr = my_plan.send(guest_client)
input_obj = TwinObject(
private_obj=np.array([[3, 3, 3], [3, 3, 3]]),
mock_obj=np.array([[1, 1, 1], [1, 1, 1]]),
)
input_obj = root_domain_client.api.services.action.set(input_obj)
pointer = guest_client.api.services.action.get_pointer(input_obj.id)
res_ptr = plan_ptr(x=pointer)
# guest cannot access result
assert not isinstance(
guest_client.api.services.action.get(res_ptr.id), ActionObject
)
# root can access result
assert (
root_domain_client.api.services.action.get(res_ptr.id)
== np.array([[3, 3, 3], [3, 3, 3]]).flatten().prod()
)
# guest can request result
res_ptr.request(guest_client)
# root approves result
root_domain_client.api.services.request[-1].approve_with_client(root_domain_client)
assert res_ptr.get_from(guest_client) == 729
def test_plan_with_function_call(worker, guest_client):
root_domain_client = worker.root_client
guest_client = worker.guest_client
@planify
def my_plan(x=np.array([[2, 2, 2], [2, 2, 2]])): # noqa: B008
y = x.flatten()
w = guest_client.api.lib.numpy.sum(y)
return w
plan_ptr = my_plan.send(guest_client)
input_obj = TwinObject(
private_obj=np.array([[3, 3, 3], [3, 3, 3]]),
mock_obj=np.array([[1, 1, 1], [1, 1, 1]]),
)
input_obj = root_domain_client.api.services.action.set(input_obj)
pointer = guest_client.api.services.action.get_pointer(input_obj.id)
res_ptr = plan_ptr(x=pointer)
assert root_domain_client.api.services.action.get(res_ptr.id) == 18
def test_plan_with_object_instantiation(worker, guest_client):
@planify
def my_plan(x=np.array([1, 2, 3, 4, 5, 6])): # noqa: B008
return x + 1
root_domain_client = worker.root_client
plan_ptr = my_plan.send(guest_client)
input_obj = TwinObject(
private_obj=np.array([1, 2, 3, 4, 5, 6]), mock_obj=np.array([1, 1, 1, 1, 1, 1])
)
_id = root_domain_client.api.services.action.set(input_obj).id
pointer = guest_client.api.services.action.get_pointer(_id)
res_ptr = plan_ptr(x=pointer)
assert all(
root_domain_client.api.services.action.get(res_ptr.id).syft_action_data
== np.array([2, 3, 4, 5, 6, 7])
)
def test_setattribute(worker, guest_client):
root_domain_client = worker.root_client
private_data, mock_data = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), np.array(
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]
)
obj = TwinObject(private_obj=private_data, mock_obj=mock_data)
assert private_data.dtype != np.int32
obj_pointer = root_domain_client.api.services.action.set(obj)
obj_pointer = guest_client.api.services.action.get_pointer(obj_pointer.id)
original_id = obj_pointer.id
# note that this messes up the data and the shape of the array
obj_pointer.dtype = np.int32
# local object is updated
assert obj_pointer.id.id in worker.action_store.data
assert obj_pointer.id != original_id
res = root_domain_client.api.services.action.get(obj_pointer.id)
# check if updated
assert res.dtype == np.int32
private_data.dtype = np.int32
mock_data.dtype = np.int32
assert (res == private_data).all()
assert (obj_pointer.syft_action_data == mock_data).all()
assert not (obj_pointer.syft_action_data == private_data).all()
def METHOD_NAME(worker, guest_client):
root_domain_client = worker.root_client
obj = TwinObject(
private_obj=np.array([[1, 2, 3], [4, 5, 6]]),
mock_obj=np.array([[1, 1, 1], [1, 1, 1]]),
)
obj_pointer = root_domain_client.api.services.action.set(obj)
obj_pointer = guest_client.api.services.action.get_pointer(obj_pointer.id)
size_pointer = obj_pointer.size
# check result
assert size_pointer.id.id in worker.action_store.data
assert root_domain_client.api.services.action.get(size_pointer.id) == 6
def test_eager_method(worker, guest_client):
root_domain_client = worker.root_client
obj = TwinObject(
private_obj=np.array([[1, 2, 3], [4, 5, 6]]),
mock_obj=np.array([[1, 1, 1], [1, 1, 1]]),
)
obj_pointer = root_domain_client.api.services.action.set(obj)
obj_pointer = guest_client.api.services.action.get_pointer(obj_pointer.id)
flat_pointer = obj_pointer.flatten()
assert flat_pointer.id.id in worker.action_store.data
# check result
assert all(
root_domain_client.api.services.action.get(flat_pointer.id)
== np.array([1, 2, 3, 4, 5, 6])
)
def test_eager_dunder_method(worker, guest_client):
root_domain_client = worker.root_client
obj = TwinObject(
private_obj=np.array([[1, 2, 3], [4, 5, 6]]),
mock_obj=np.array([[1, 1, 1], [1, 1, 1]]),
)
obj_pointer = root_domain_client.api.services.action.set(obj)
obj_pointer = guest_client.api.services.action.get_pointer(obj_pointer.id)
first_row_pointer = obj_pointer[0]
assert first_row_pointer.id.id in worker.action_store.data
# check result
assert all(
root_domain_client.api.services.action.get(first_row_pointer.id)
== np.array([1, 2, 3])
)
|
2,733 |
build stdlib
|
# Copyright 2019 The Bazel Go Rules Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load(
"//go/private:common.bzl",
"COVERAGE_OPTIONS_DENYLIST",
)
load(
"//go/private:providers.bzl",
"GoStdLib",
)
load(
"//go/private:mode.bzl",
"LINKMODE_NORMAL",
"extldflags_from_cc_toolchain",
"link_mode_args",
)
load("//go/private:sdk.bzl", "parse_version")
load("//go/private/actions:utils.bzl", "quote_opts")
def emit_stdlib(go):
"""Returns a standard library for the target configuration.
If the precompiled standard library is suitable, it will be returned.
Otherwise, the standard library will be compiled for the target.
Returns:
A list of providers containing GoLibrary and GoSource. GoSource.stdlib
will point to a new GoStdLib.
"""
library = go.new_library(go, resolver = _stdlib_library_to_source)
source = go.library_to_source(go, {}, library, False)
return [source, library]
def _stdlib_library_to_source(go, _attr, source, _merge):
if _should_use_sdk_stdlib(go):
source["stdlib"] = _sdk_stdlib(go)
else:
source["stdlib"] = METHOD_NAME(go)
def _should_use_sdk_stdlib(go):
version = parse_version(go.sdk.version)
if version and version[0] <= 1 and version[1] <= 19 and go.sdk.experiments:
# The precompiled stdlib shipped with 1.19 or below doesn't have experiments
return False
return (go.sdk.libs and # go.sdk.libs is non-empty if sdk ships with precompiled .a files
go.mode.goos == go.sdk.goos and
go.mode.goarch == go.sdk.goarch and
not go.mode.race and # TODO(jayconrod): use precompiled race
not go.mode.msan and
not go.mode.pure and
not go.mode.gc_goopts and
go.mode.link == LINKMODE_NORMAL)
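# For illustration (hypothetical configuration): if the SDK was downloaded for
# linux/amd64 but go.mode targets linux/arm64, the goos/goarch checks above fail,
# so the stdlib is rebuilt by METHOD_NAME(go) instead of reusing the precompiled
# go.sdk.libs.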
def _build_stdlib_list_json(go):
out = go.declare_file(go, "stdlib.pkg.json")
cache_dir = go.declare_directory(go, "gocache")
args = go.builder_args(go, "stdliblist")
args.add("-sdk", go.sdk.root_file.dirname)
args.add("-out", out)
args.add("-cache", cache_dir.path)
inputs = go.sdk_files
if not go.mode.pure:
inputs += go.crosstool
go.actions.run(
inputs = inputs,
outputs = [out, cache_dir],
mnemonic = "GoStdlibList",
executable = go.toolchain._builder,
arguments = [args],
env = _build_env(go),
)
return out
def _build_env(go):
env = go.env
if go.mode.pure:
env.update({"CGO_ENABLED": "0"})
return env
# NOTE(#2545): avoid unnecessary dynamic link
# go std library doesn't use C++, so should not have -lstdc++
# Also drop coverage flags as nothing in the stdlib is compiled with
# coverage - we disable it for all CGo code anyway.
# NOTE(#3590): avoid forcing static linking.
ldflags = [
option
for option in extldflags_from_cc_toolchain(go)
if option not in ("-lstdc++", "-lc++", "-static") and option not in COVERAGE_OPTIONS_DENYLIST
]
env.update({
"CGO_ENABLED": "1",
"CC": go.cgo_tools.c_compiler_path,
"CGO_CFLAGS": " ".join(go.cgo_tools.c_compile_options),
"CGO_LDFLAGS": " ".join(ldflags),
})
return env
def _sdk_stdlib(go):
return GoStdLib(
_list_json = _build_stdlib_list_json(go),
libs = go.sdk.libs,
root_file = go.sdk.root_file,
)
def METHOD_NAME(go):
pkg = go.declare_directory(go, path = "pkg")
args = go.builder_args(go, "stdlib")
args.add("-out", pkg.dirname)
if go.mode.race:
args.add("-race")
args.add("-package", "std")
if not go.mode.pure:
args.add("-package", "runtime/cgo")
args.add_all(link_mode_args(go.mode))
args.add("-gcflags", quote_opts(go.mode.gc_goopts))
inputs = (go.sdk.srcs +
go.sdk.headers +
go.sdk.tools +
[go.sdk.go, go.sdk.package_list, go.sdk.root_file] +
go.crosstool)
if go.mode.pgoprofile:
args.add("-pgoprofile", go.mode.pgoprofile)
inputs.append(go.mode.pgoprofile)
outputs = [pkg]
go.actions.run(
inputs = inputs,
outputs = outputs,
mnemonic = "GoStdlib",
executable = go.toolchain._builder,
arguments = [args],
env = _build_env(go),
)
return GoStdLib(
_list_json = _build_stdlib_list_json(go),
libs = [pkg],
root_file = pkg,
)
|
2,734 |
key func 0
|
"""Index entries adapters for sphinx.environment."""
from __future__ import annotations
import re
import unicodedata
from itertools import groupby
from typing import TYPE_CHECKING, Any, Literal
from sphinx.errors import NoUri
from sphinx.locale import _, __
from sphinx.util import logging
from sphinx.util.index_entries import _split_into
if TYPE_CHECKING:
from sphinx.builders import Builder
from sphinx.environment import BuildEnvironment
logger = logging.getLogger(__name__)
class IndexEntries:
def __init__(self, env: BuildEnvironment) -> None:
self.env = env
self.builder: Builder
def create_index(self, builder: Builder, group_entries: bool = True,
_fixre: re.Pattern = re.compile(r'(.*) ([(][^()]*[)])'),
) -> list[tuple[str, list[tuple[str, Any]]]]:
"""Create the real index from the collected index entries."""
new: dict[str, list] = {}
rel_uri: str | Literal[False]
index_domain = self.env.domains['index']
for docname, entries in index_domain.entries.items():
try:
rel_uri = builder.get_relative_uri('genindex', docname)
except NoUri:
rel_uri = False
# new entry types must be listed in directives/other.py!
for entry_type, value, target_id, main, category_key in entries:
uri = rel_uri is not False and f'{rel_uri}#{target_id}'
try:
if entry_type == 'single':
try:
entry, sub_entry = _split_into(2, 'single', value)
except ValueError:
entry, = _split_into(1, 'single', value)
sub_entry = ''
_add_entry(entry, sub_entry, main,
dic=new, link=uri, key=category_key)
elif entry_type == 'pair':
first, second = _split_into(2, 'pair', value)
_add_entry(first, second, main,
dic=new, link=uri, key=category_key)
_add_entry(second, first, main,
dic=new, link=uri, key=category_key)
elif entry_type == 'triple':
first, second, third = _split_into(3, 'triple', value)
_add_entry(first, second + ' ' + third, main,
dic=new, link=uri, key=category_key)
_add_entry(second, third + ', ' + first, main,
dic=new, link=uri, key=category_key)
_add_entry(third, first + ' ' + second, main,
dic=new, link=uri, key=category_key)
elif entry_type == 'see':
first, second = _split_into(2, 'see', value)
_add_entry(first, _('see %s') % second, None,
dic=new, link=False, key=category_key)
elif entry_type == 'seealso':
first, second = _split_into(2, 'see', value)
_add_entry(first, _('see also %s') % second, None,
dic=new, link=False, key=category_key)
else:
logger.warning(__('unknown index entry type %r'), entry_type,
location=docname)
except ValueError as err:
logger.warning(str(err), location=docname)
for (targets, sub_items, _category_key) in new.values():
targets.sort(key=METHOD_NAME)
for (sub_targets, _0, _sub_category_key) in sub_items.values():
sub_targets.sort(key=METHOD_NAME)
new_list = sorted(new.items(), key=_key_func_1)
if group_entries:
# fixup entries: transform
# func() (in module foo)
# func() (in module bar)
# into
# func()
# (in module foo)
# (in module bar)
old_key = ''
old_sub_items: dict[str, list] = {}
i = 0
while i < len(new_list):
key, (targets, sub_items, category_key) = new_list[i]
# cannot move if it has sub_items; structure gets too complex
if not sub_items:
m = _fixre.match(key)
if m:
if old_key == m.group(1):
# prefixes match: add entry as subitem of the
# previous entry
old_sub_items.setdefault(
m.group(2), [[], {}, category_key])[0].extend(targets)
del new_list[i]
continue
old_key = m.group(1)
else:
old_key = key
old_sub_items = sub_items
i += 1
return [(key_, list(group))
for (key_, group) in groupby(new_list, _key_func_3)]
def _add_entry(word: str, subword: str, main: str | None, *,
dic: dict[str, list], link: str | Literal[False], key: str | None) -> None:
entry = dic.setdefault(word, [[], {}, key])
if subword:
entry = entry[1].setdefault(subword, [[], {}, key])
if link:
entry[0].append((main, link))
def METHOD_NAME(entry: tuple[str, str]) -> tuple[bool, str]:
"""sort the index entries for same keyword."""
main, uri = entry
    return not main, uri  # show main entries first
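# For illustration: sorted([(None, '#a'), ('main', '#b')], key=METHOD_NAME) yields
# [('main', '#b'), (None, '#a')], because a "main" entry gets the key (False, uri)
# and False sorts before True.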
def _key_func_1(entry: tuple[str, list]) -> tuple[tuple[int, str], str]:
"""Sort the index entries"""
key, (_targets, _sub_items, category_key) = entry
if category_key:
# using the specified category key to sort
key = category_key
lc_key = unicodedata.normalize('NFD', key.lower())
if lc_key.startswith('\N{RIGHT-TO-LEFT MARK}'):
lc_key = lc_key[1:]
if not lc_key[0:1].isalpha() and not lc_key.startswith('_'):
# put symbols at the front of the index (0)
group = 0
else:
# put non-symbol characters at the following group (1)
group = 1
# ensure a deterministic order *within* letters by also sorting on
# the entry itself
return (group, lc_key), entry[0]
def _key_func_2(entry: tuple[str, list]) -> str:
"""sort the sub-index entries"""
key = unicodedata.normalize('NFD', entry[0].lower())
if key.startswith('\N{RIGHT-TO-LEFT MARK}'):
key = key[1:]
if key[0:1].isalpha() or key.startswith('_'):
key = chr(127) + key
return key
def _key_func_3(entry: tuple[str, list]) -> str:
"""Group the entries by letter"""
key, (targets, sub_items, category_key) = entry
# hack: mutating the sub_items dicts to a list in the key_func
entry[1][1] = sorted(((sub_key, sub_targets)
for (sub_key, (sub_targets, _0, _sub_category_key))
in sub_items.items()), key=_key_func_2)
if category_key is not None:
return category_key
# now calculate the key
if key.startswith('\N{RIGHT-TO-LEFT MARK}'):
key = key[1:]
letter = unicodedata.normalize('NFD', key[0])[0].upper()
if letter.isalpha() or letter == '_':
return letter
# get all other symbols under one heading
return _('Symbols')
|
2,735 |
time stamp
|
"""
Copyright (C) 2014 David Boddie <[email protected]>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import struct, time
# Find the number of centiseconds between 1900 and 1970.
between_epochs = ((365 * 70) + 17) * 24 * 360000L
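# Worked out: (365 * 70) + 17 = 25567 days (17 leap days occur between 1900 and 1970),
# and 25567 * 24 * 360000 = 220898880000 centiseconds.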
class DiskError(Exception):
pass
class Utilities:
# Little endian reading
def _read_signed_word(self, s):
return struct.unpack("<i", s)[0]
def _read_unsigned_word(self, s):
return struct.unpack("<I", s)[0]
def _read_signed_byte(self, s):
return struct.unpack("<b", s)[0]
def _read_unsigned_byte(self, s):
return struct.unpack("<B", s)[0]
def _read_unsigned_half_word(self, s):
return struct.unpack("<H", s)[0]
def _read_signed_half_word(self, s):
return struct.unpack("<h", s)[0]
def _read(self, offset, length = 1):
self.file.seek(offset, 0)
return self.file.read(length)
def _write_unsigned_word(self, v):
return struct.pack("<I", v)
def _write_unsigned_half_word(self, v):
return struct.pack("<H", v)
def _write_unsigned_byte(self, v):
return struct.pack("<B", v)
def _write(self, offset, data):
self.file.seek(offset, 0)
self.file.write(data)
def _str2num(self, s):
i = 0
n = 0
while i < len(s):
n = n | (ord(s[i]) << (i*8))
i = i + 1
return n
def _num2str(self, size, n):
i = 0
s = ""
while i < size:
s += chr(n & 0xff)
n = n >> 8
i += 1
return s
def _binary(self, size, n):
new = ""
        while (n != 0) and (size > 0):
if (n & 1)==1:
new = "1" + new
else:
new = "0" + new
n = n >> 1
size = size - 1
if size > 0:
new = ("0"*size) + new
return new
def _safe(self, s, with_space = 0):
new = ""
if with_space == 1:
lower = 31
else:
lower = 32
for c in s:
if ord(c) >= 128:
i = ord(c) ^ 128
c = chr(i)
if ord(c) <= lower:
break
new = new + c
return new
def _pad(self, s, length, ch):
s = s[:length]
if len(s) < length:
s += (length - len(s)) * ch
return s
class Directory:
"""directory = Directory(name, address)
The directory created contains name and files attributes containing the
directory name and the objects it contains.
"""
def __init__(self, name, files):
self.name = name
self.files = files
def __repr__(self):
return '<%s instance, "%s", at %x>' % (self.__class__, self.name, id(self))
class File:
"""file = File(name, data, load_address, execution_address, length)
"""
def __init__(self, name, data, load_address, execution_address, length,
locked = False, disk_address = 0):
self.name = name
self.data = data
self.load_address = load_address
self.execution_address = execution_address
self.length = length
self.locked = locked
self.disk_address = disk_address
def __repr__(self):
return '<%s instance, "%s", at %x>' % (self.__class__, self.name, id(self))
def has_filetype(self):
"""Returns True if the file's meta-data contains filetype information."""
return self.load_address & 0xfff00000 == 0xfff00000
def filetype(self):
"""Returns the meta-data containing the filetype information.
Note that a filetype can be obtained for all files, though it may not
necessarily be valid. Use has_filetype() to determine whether the file
is likely to have a valid filetype."""
return "%03x" % ((self.load_address >> 8) & 0xfff)
def METHOD_NAME(self):
"""Returns the time stamp for the file as a tuple of values containing
the local time, or an empty tuple if the file does not have a time stamp."""
# RISC OS time is given as a five byte block containing the
# number of centiseconds since 1900 (presumably 1st January 1900).
# Convert the time to the time elapsed since the Epoch (assuming
# 1970 for this value).
date_num = struct.unpack("<Q",
struct.pack("<IBxxx", self.execution_address, self.load_address & 0xff))[0]
centiseconds = date_num - between_epochs
# Convert this to a value in seconds and return a time tuple.
try:
return time.localtime(centiseconds / 100.0)
except ValueError:
return ()
|
2,736 |
set up
|
# Copyright 2020 KMEE INFORMATICA LTDA
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo.tests.common import TransactionCase
from odoo.addons.l10n_br_fiscal.constants.fiscal import (
PROCESSADOR_NENHUM,
PROCESSADOR_OCA,
)
from ..models.document import filter_processador_edoc_nfse
class TestFiscalDocumentNFSeCommon(TransactionCase):
def METHOD_NAME(self):
super(TestFiscalDocumentNFSeCommon, self).METHOD_NAME()
self.nfse_same_state = self.env.ref("l10n_br_fiscal.demo_nfse_same_state")
self.company = self.env.ref("l10n_br_base.empresa_simples_nacional")
self.company.processador_edoc = PROCESSADOR_OCA
self.company.partner_id.inscr_mun = "35172"
self.company.partner_id.inscr_est = ""
self.company.partner_id.state_id = self.env.ref("base.state_br_mg")
self.company.partner_id.city_id = self.env.ref("l10n_br_base.city_3132404")
self.company.icms_regulation_id = self.env.ref(
"l10n_br_fiscal.tax_icms_regulation"
).id
self.company.city_taxation_code_id = self.env.ref(
"l10n_br_fiscal.city_taxation_code_itajuba"
)
self.company.document_type_id = self.env.ref("l10n_br_fiscal.document_SE")
self.nfse_same_state.company_id = self.company.id
def test_certified_nfse_same_state_(self):
"""Test Certified NFSe same state."""
self.nfse_same_state._onchange_document_serie_id()
self.nfse_same_state._onchange_fiscal_operation_id()
# RPS Number
self.assertEqual(
self.nfse_same_state.rps_number,
"50",
"Error to mappping RPS Number 50"
" for Venda de Serviço de Contribuinte Dentro do Estado.",
)
# RPS Type
self.assertEqual(
self.nfse_same_state.rps_type,
"1",
"Error to mappping RPS Type 1"
" for Venda de Serviço de Contribuinte Dentro do Estado.",
)
# Operation Nature
self.assertEqual(
self.nfse_same_state.operation_nature,
"1",
"Error to mappping Operation Nature 1"
" for Venda de Serviço de Contribuinte Dentro do Estado.",
)
# Taxation Special Regime
self.assertEqual(
self.nfse_same_state.taxation_special_regime,
"1",
"Error to mappping Taxation Special Regime 1"
" for Venda de Serviço de Contribuinte Dentro do Estado.",
)
# IBGE Code
self.assertEqual(
str(self.company.prepare_company_servico().get("codigo_municipio")),
"3132404",
"Error to mappping IBGE Code 3132404"
" for Venda de Serviço de Contribuinte Dentro do Estado.",
)
# test _prepare_dados_servico()
self.assertEqual(
str(self.nfse_same_state._prepare_dados_servico().get("codigo_municipio")),
"3132404",
"Error to mappping IBGE Code 3132404"
" for Venda de Serviço de Contribuinte Dentro do Estado.",
)
# test _prepare_dados_tomador()
self.assertEqual(
str(self.nfse_same_state._prepare_dados_tomador().get("codigo_municipio")),
"3550308",
"Error to mappping IBGE Code 3550308"
" for Venda de Serviço de Contribuinte Dentro do Estado.",
)
# Test with Processador OCA
self.assertTrue(filter_processador_edoc_nfse(self.nfse_same_state))
# test without Processador
self.company.processador_edoc = PROCESSADOR_NENHUM
self.assertFalse(filter_processador_edoc_nfse(self.nfse_same_state))
# Test res.partner.prepare_partner_tomador
self.assertEqual(
str(
self.nfse_same_state.partner_id.prepare_partner_tomador(
self.company.country_id.id
).get("codigo_municipio")
),
"3550308",
"Error to mappping IBGE Code 3550308"
" for Venda de Serviço de Contribuinte Dentro do Estado.",
)
# Test res.partner.prepare_partner_tomador (Exterior)
self.assertEqual(
str(
self.nfse_same_state.partner_id.prepare_partner_tomador(1).get(
"codigo_municipio"
)
),
"9999999",
"Error to mappping IBGE Code 9999999"
" for Venda de Serviço de Contribuinte Dentro do Estado.",
)
for line in self.nfse_same_state.fiscal_line_ids:
line._onchange_product_id_fiscal()
line._onchange_commercial_quantity()
line._onchange_ncm_id()
line._onchange_fiscal_operation_id()
line._onchange_fiscal_operation_line_id()
line._onchange_fiscal_taxes()
# prepare_line_servico()
self.assertEqual(
line.prepare_line_servico().get("codigo_tributacao_municipio"),
"6311900",
"Error to mappping City Taxation Code 6311900"
" for Venda de Serviço de Contribuinte Dentro do Estado.",
)
# Fiscal Deductions Value
self.assertEqual(
line.fiscal_deductions_value,
0.0,
"Error to mappping Fiscal Deductions Value 0.0"
" for Venda de Serviço de Contribuinte Dentro do Estado.",
)
# City Taxation Code
self.assertEqual(
line.city_taxation_code_id.code,
"6311900",
"Error to mappping City Taxation Code 6311900"
" for Venda de Serviço de Contribuinte Dentro do Estado.",
)
# Fiscal Deductions Value
line.product_id.fiscal_deductions_value = 10
line._onchange_product_id_fiscal()
self.assertEqual(
line.fiscal_deductions_value,
10.0,
"Error to mappping Fiscal Deductions Value 10.0"
" for Venda de Serviço de Contribuinte Dentro do Estado.",
)
|
2,737 |
get obj temp
|
# coding=utf-8
#
# From https://github.com/CRImier/python-MLX90614
#
# MIT License
#
# Copyright (c) 2016 Arsenijs
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import copy
from mycodo.inputs.base_input import AbstractInput
# Measurements
measurements_dict = {
0: {
'measurement': 'temperature',
'unit': 'C',
'name': 'Ambient'
},
1: {
'measurement': 'temperature',
'unit': 'C',
'name': 'Object'
}
}
# Input information
INPUT_INFORMATION = {
'input_name_unique': 'MLX90614',
'input_manufacturer': 'Melexis',
'input_name': 'MLX90614',
'input_library': 'smbus2',
'measurements_name': 'Temperature (Ambient/Object)',
'measurements_dict': measurements_dict,
'url_manufacturer': 'https://www.melexis.com/en/product/MLX90614/Digital-Plug-Play-Infrared-Thermometer-TO-Can',
'url_datasheet': 'https://www.melexis.com/-/media/files/documents/datasheets/mlx90614-datasheet-melexis.pdf',
'url_product_purchase': 'https://www.sparkfun.com/products/9570',
'options_enabled': [
'i2c_location',
'measurements_select',
'period',
'pre_output'
],
'options_disabled': ['interface'],
'dependencies_module': [
('pip-pypi', 'smbus2', 'smbus2==0.4.1')
],
'interfaces': ['I2C'],
'i2c_location': ['0x5a'],
'i2c_address_editable': True,
}
class InputModule(AbstractInput):
"""A sensor support class that measure the MLX90614's ambient and object temperature."""
def __init__(self, input_dev, testing=False):
super().__init__(input_dev, testing=testing, name=__name__)
self.MLX90614_RAWIR1=0x04
self.MLX90614_RAWIR2=0x05
self.MLX90614_TA=0x06
self.MLX90614_TOBJ1=0x07
self.MLX90614_TOBJ2=0x08
self.MLX90614_TOMAX=0x20
self.MLX90614_TOMIN=0x21
self.MLX90614_PWMCTRL=0x22
self.MLX90614_TARANGE=0x23
self.MLX90614_EMISS=0x24
self.MLX90614_CONFIG=0x25
self.MLX90614_ADDR=0x0E
self.MLX90614_ID1=0x3C
self.MLX90614_ID2=0x3D
self.MLX90614_ID3=0x3E
self.MLX90614_ID4=0x3F
if not testing:
self.try_initialize()
def initialize(self):
from smbus2 import SMBus
self.i2c_address = int(str(self.input_dev.i2c_location), 16)
self.bus = SMBus(self.input_dev.i2c_bus)
def read_reg(self, reg_addr):
return self.bus.read_word_data(self.i2c_address, reg_addr)
@staticmethod
def data_to_temp(data):
temp = (data*0.02) - 273.15
return temp
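    # Worked example for the conversion above (values are illustrative): the raw
    # reading is in units of 0.02 K, so 14660 * 0.02 = 293.2 K, roughly 20.05 C.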
def get_amb_temp(self):
data = self.read_reg(self.MLX90614_TA)
return self.data_to_temp(data)
def METHOD_NAME(self):
data = self.read_reg(self.MLX90614_TOBJ1)
return self.data_to_temp(data)
def get_measurement(self):
"""Gets the ambient (ch0) and object (ch1) temperatures."""
self.return_dict = copy.deepcopy(measurements_dict)
if self.is_enabled(0):
self.value_set(0, self.get_amb_temp())
if self.is_enabled(1):
self.value_set(1, self.METHOD_NAME())
return self.return_dict
|
2,738 |
find entity
|
# Copyright (c) 2021 Baidu.com, Inc. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import codecs
import json
import os
import re
import zipfile
import numpy as np
def METHOD_NAME(text_raw, id_, predictions, tok_to_orig_start_index, tok_to_orig_end_index):
"""
    Retrieve entity mentions under a given predicate id for a certain prediction.
    This is called by the "decoding" func.
"""
entity_list = []
for i in range(len(predictions)):
if [id_] in predictions[i]:
j = 0
while i + j + 1 < len(predictions):
if [1] in predictions[i + j + 1]:
j += 1
else:
break
entity = "".join(text_raw[tok_to_orig_start_index[i] : tok_to_orig_end_index[i + j] + 1])
entity_list.append(entity)
return list(set(entity_list))
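# For illustration (hypothetical prediction): with id_ == 5 and predictions like
# [[[5]], [[1]], []], tokens 0 and 1 form one mention, and its surface text is sliced
# from text_raw via tok_to_orig_start_index[0] and tok_to_orig_end_index[1].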
def decoding(
example_batch, id2spo, logits_batch, seq_len_batch, tok_to_orig_start_index_batch, tok_to_orig_end_index_batch
):
"""
model output logits -> formatted spo (as in data set file)
"""
formatted_outputs = []
for (i, (example, logits, seq_len, tok_to_orig_start_index, tok_to_orig_end_index)) in enumerate(
zip(example_batch, logits_batch, seq_len_batch, tok_to_orig_start_index_batch, tok_to_orig_end_index_batch)
):
logits = logits[1 : seq_len + 1] # slice between [CLS] and [SEP] to get valid logits
logits[logits >= 0.5] = 1
logits[logits < 0.5] = 0
tok_to_orig_start_index = tok_to_orig_start_index[1 : seq_len + 1]
tok_to_orig_end_index = tok_to_orig_end_index[1 : seq_len + 1]
predictions = []
for token in logits:
predictions.append(np.argwhere(token == 1).tolist())
# format predictions into example-style output
formatted_instance = {}
text_raw = example["text"]
complex_relation_label = [8, 10, 26, 32, 46]
complex_relation_affi_label = [9, 11, 27, 28, 29, 33, 47]
        # flatten predictions, then retrieve all valid subject ids
flatten_predictions = []
for layer_1 in predictions:
for layer_2 in layer_1:
flatten_predictions.append(layer_2[0])
subject_id_list = []
for cls_label in list(set(flatten_predictions)):
if 1 < cls_label <= 56 and (cls_label + 55) in flatten_predictions:
subject_id_list.append(cls_label)
subject_id_list = list(set(subject_id_list))
# fetch all valid spo by subject id
spo_list = []
for id_ in subject_id_list:
if id_ in complex_relation_affi_label:
continue # do this in the next "else" branch
if id_ not in complex_relation_label:
subjects = METHOD_NAME(text_raw, id_, predictions, tok_to_orig_start_index, tok_to_orig_end_index)
objects = METHOD_NAME(text_raw, id_ + 55, predictions, tok_to_orig_start_index, tok_to_orig_end_index)
for subject_ in subjects:
for object_ in objects:
spo_list.append(
{
"predicate": id2spo["predicate"][id_],
"object_type": {"@value": id2spo["object_type"][id_]},
"subject_type": id2spo["subject_type"][id_],
"object": {"@value": object_},
"subject": subject_,
}
)
else:
                # traverse all complex relations and look through their corresponding affiliated objects
subjects = METHOD_NAME(text_raw, id_, predictions, tok_to_orig_start_index, tok_to_orig_end_index)
objects = METHOD_NAME(text_raw, id_ + 55, predictions, tok_to_orig_start_index, tok_to_orig_end_index)
for subject_ in subjects:
for object_ in objects:
object_dict = {"@value": object_}
object_type_dict = {"@value": id2spo["object_type"][id_].split("_")[0]}
if id_ in [8, 10, 32, 46] and id_ + 1 in subject_id_list:
id_affi = id_ + 1
object_dict[id2spo["object_type"][id_affi].split("_")[1]] = METHOD_NAME(
text_raw, id_affi + 55, predictions, tok_to_orig_start_index, tok_to_orig_end_index
)[0]
object_type_dict[id2spo["object_type"][id_affi].split("_")[1]] = id2spo["object_type"][
id_affi
].split("_")[0]
elif id_ == 26:
for id_affi in [27, 28, 29]:
if id_affi in subject_id_list:
object_dict[id2spo["object_type"][id_affi].split("_")[1]] = METHOD_NAME(
text_raw,
id_affi + 55,
predictions,
tok_to_orig_start_index,
tok_to_orig_end_index,
)[0]
object_type_dict[id2spo["object_type"][id_affi].split("_")[1]] = id2spo[
"object_type"
][id_affi].split("_")[0]
spo_list.append(
{
"predicate": id2spo["predicate"][id_],
"object_type": object_type_dict,
"subject_type": id2spo["subject_type"][id_],
"object": object_dict,
"subject": subject_,
}
)
formatted_instance["text"] = example["text"]
formatted_instance["spo_list"] = spo_list
formatted_outputs.append(formatted_instance)
return formatted_outputs
def write_prediction_results(formatted_outputs, file_path):
"""write the prediction results"""
with codecs.open(file_path, "w", "utf-8") as f:
for formatted_instance in formatted_outputs:
json_str = json.dumps(formatted_instance, ensure_ascii=False)
f.write(json_str)
f.write("\n")
zipfile_path = file_path + ".zip"
f = zipfile.ZipFile(zipfile_path, "w", zipfile.ZIP_DEFLATED)
f.write(file_path)
return zipfile_path
def get_precision_recall_f1(golden_file, predict_file):
r = os.popen(
"python3 ./re_official_evaluation.py --golden_file={} --predict_file={}".format(golden_file, predict_file)
)
result = r.read()
r.close()
precision = float(
re.search('"precision", "value":.*?}', result).group(0).lstrip('"precision", "value":').rstrip("}")
)
recall = float(re.search('"recall", "value":.*?}', result).group(0).lstrip('"recall", "value":').rstrip("}"))
f1 = float(re.search('"f1-score", "value":.*?}', result).group(0).lstrip('"f1-score", "value":').rstrip("}"))
return precision, recall, f1
|
2,739 |
parse http url
|
##################################################################
# Copyright 2018 Open Source Geospatial Foundation and others #
# licensed under MIT, Please consult LICENSE.txt for details #
##################################################################
"""
XML tools
"""
import logging
from typing import Tuple
from werkzeug.wrappers import Response
import pywps.configuration as config
LOGGER = logging.getLogger('PYWPS')
def get_xpath_ns(version):
"""Get xpath namespace for specified WPS version.
Versions 1.0.0 or 2.0.0 are supported.
"""
def xpath_ns(ele, path):
"""Function, which will return xpath namespace for given
element and xpath
"""
if version == "1.0.0":
from pywps import namespaces100
nsp = namespaces100
elif version == "2.0.0":
from pywps import namespaces200
nsp = namespaces200
else:
raise NotImplementedError(version)
return ele.xpath(path, namespaces=nsp)
return xpath_ns
def make_response(doc, content_type):
"""Response serializer."""
if not content_type:
content_type = get_default_response_mimetype()
response = Response(doc, content_type=content_type)
response.status_percentage = 100
return response
def get_default_response_mimetype():
default_mimetype = config.get_config_value('server', 'default_mimetype')
return default_mimetype
def get_json_indent():
json_ident = int(config.get_config_value('server', 'json_indent'))
return json_ident if json_ident >= 0 else None
def get_response_type(accept_mimetypes, default_mimetype) -> Tuple[bool, str]:
"""
    This function determines whether the response should be JSON or XML based on
the accepted mimetypes of the request and the default mimetype provided,
which will be used in case both are equally accepted.
    :param accept_mimetypes: the mimetypes accepted by the request
:param default_mimetype: "text/xml", "application/json"
:return: Tuple[bool, str] -
bool - True: The response type is JSON, False: Otherwise - XML
str - The output mimetype
"""
accept_json = \
accept_mimetypes.accept_json or \
accept_mimetypes.best is None or \
'json' in accept_mimetypes.best.lower()
accept_xhtml = \
accept_mimetypes.accept_xhtml or \
accept_mimetypes.best is None or \
'xml' in accept_mimetypes.best.lower()
if not default_mimetype:
default_mimetype = get_default_response_mimetype()
json_is_default = 'json' in default_mimetype or '*' in default_mimetype
json_response = (accept_json and (not accept_xhtml or json_is_default)) or \
(json_is_default and accept_json == accept_xhtml)
mimetype = 'application/json' if json_response else 'text/xml' if accept_xhtml else ''
return json_response, mimetype
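# For illustration: if the request accepts only 'application/json' and the default
# mimetype is 'text/xml', this returns (True, 'application/json'); when both JSON and
# XHTML are acceptable, the default mimetype breaks the tie as described above.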
def METHOD_NAME(http_request) -> dict:
"""
This function parses the request URL and extracts the following:
default operation, process identifier, output_ids, default mimetype
    Info that cannot be determined from the URL will be None (default).
    The URL is expected to be in the following format; all the levels are optional.
[base_url]/[identifier]/[output_ids]
:param http_request: the request URL
:return: dict with the extracted info listed:
base_url - [wps|processes|jobs|api/api_level]
        default_mimetype - determined by the base_url part:
XML - if the base url == 'wps',
JSON - if the base URL in ['api'|'jobs'|'processes']
        operation - also determined by the base_url part:
['api'|'jobs'] -> 'execute'
processes -> 'describeprocess' or 'getcapabilities'
'describeprocess' if identifier is present as the next item, 'getcapabilities' otherwise
api - api level, only expected if base_url=='api'
identifier - the process identifier
output_ids - if exist then it selects raw output with the name output_ids
"""
operation = api = identifier = output_ids = default_mimetype = base_url = None
if http_request:
parts = str(http_request.path[1:]).split('/')
i = 0
if len(parts) > i:
base_url = parts[i].lower()
if base_url == 'wps':
default_mimetype = 'xml'
elif base_url in ['api', 'processes', 'jobs']:
default_mimetype = 'json'
i += 1
if base_url == 'api':
api = parts[i]
i += 1
if len(parts) > i:
identifier = parts[i]
i += 1
if len(parts) > i:
output_ids = parts[i]
if not output_ids:
output_ids = None
if base_url in ['jobs', 'api']:
operation = 'execute'
elif base_url == 'processes':
operation = 'describeprocess' if identifier else 'getcapabilities'
d = {}
if operation:
d['operation'] = operation
if identifier:
d['identifier'] = identifier
if output_ids:
d['output_ids'] = output_ids
if default_mimetype:
d['default_mimetype'] = default_mimetype
if api:
d['api'] = api
if base_url:
d['base_url'] = base_url
return d
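# For illustration (hypothetical request path): '/processes/foo/output1' yields
# {'operation': 'describeprocess', 'identifier': 'foo', 'output_ids': 'output1',
#  'default_mimetype': 'json', 'base_url': 'processes'}.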
|
2,740 |
export sources
|
from conan import ConanFile
from conan.tools.build import check_min_cppstd
from conan.tools.cmake import CMake, CMakeToolchain, cmake_layout
from conan.tools.files import apply_conandata_patches, collect_libs, copy, export_conandata_patches, get, replace_in_file, rm, rmdir
import os
required_conan_version = ">=1.53.0"
class RTTRConan(ConanFile):
name = "rttr"
description = "Run Time Type Reflection library"
topics = ("reflection",)
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://github.com/rttrorg/rttr"
license = "MIT"
package_type = "library"
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
"with_rtti": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
"with_rtti": False,
}
def METHOD_NAME(self):
export_conandata_patches(self)
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
if self.options.shared:
self.options.rm_safe("fPIC")
def layout(self):
cmake_layout(self, src_folder="src")
def validate(self):
if self.settings.compiler.get_safe("cppstd"):
check_min_cppstd(self, 11)
def source(self):
get(self, **self.conan_data["sources"][self.version], strip_root=True)
def generate(self):
tc = CMakeToolchain(self)
tc.variables["BUILD_DOCUMENTATION"] = False
tc.variables["BUILD_EXAMPLES"] = False
tc.variables["BUILD_UNIT_TESTS"] = False
tc.variables["BUILD_WITH_RTTI"] = self.options.with_rtti
tc.variables["BUILD_PACKAGE"] = False
tc.variables["BUILD_RTTR_DYNAMIC"] = self.options.shared
tc.variables["BUILD_STATIC"] = not self.options.shared
tc.generate()
def _patch_sources(self):
apply_conandata_patches(self)
# No warnings as errors
for target in ["rttr_core", "rttr_core_lib", "rttr_core_s", "rttr_core_lib_s"]:
replace_in_file(
self,
os.path.join(self.source_folder, "src", "rttr", "CMakeLists.txt"),
f"set_compiler_warnings({target})",
"",
)
def build(self):
self._patch_sources()
cmake = CMake(self)
cmake.configure()
cmake.build()
def package(self):
copy(self, "LICENSE.txt", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses"))
cmake = CMake(self)
cmake.install()
rmdir(self, os.path.join(self.package_folder, "cmake"))
rmdir(self, os.path.join(self.package_folder, "share"))
rm(self, "*.pdb", os.path.join(self.package_folder, "bin"))
def package_info(self):
cmake_target = "Core" if self.options.shared else "Core_Lib"
self.cpp_info.set_property("cmake_file_name", "rttr")
self.cpp_info.set_property("cmake_target_name", f"RTTR::{cmake_target}")
# TODO: back to global scope in conan v2 once cmake_find_package* generators removed
self.cpp_info.components["_rttr"].libs = collect_libs(self)
if self.settings.os in ["Linux", "FreeBSD"]:
self.cpp_info.components["_rttr"].system_libs = ["dl", "pthread"]
if self.options.shared:
self.cpp_info.components["_rttr"].defines = ["RTTR_DLL"]
# TODO: to remove in conan v2 once cmake_find_package* generators removed
self.cpp_info.filenames["cmake_find_package"] = "rttr"
self.cpp_info.filenames["cmake_find_package_multi"] = "rttr"
self.cpp_info.names["cmake_find_package"] = "RTTR"
self.cpp_info.names["cmake_find_package_multi"] = "RTTR"
self.cpp_info.components["_rttr"].names["cmake_find_package"] = cmake_target
self.cpp_info.components["_rttr"].names["cmake_find_package_multi"] = cmake_target
self.cpp_info.components["_rttr"].set_property("cmake_target_name", f"RTTR::{cmake_target}")
|
2,741 |
test batch download matched set report download
|
# Use fixtures from features, indexing workbook
import pytest
from encoded.tests.features.conftest import app, app_settings, index_workbook
from encoded.batch_download import lookup_column_value
from encoded.batch_download import format_row
from encoded.batch_download import _convert_camel_to_snake
pytestmark = [
pytest.mark.indexing,
pytest.mark.usefixtures('index_workbook'),
]
def test_format_row():
columns = ['col1', 'col2', 'col3']
expected = b'col1\tcol2\tcol3\r\n'
target = format_row(columns)
assert expected == target
def test_convert_camel_to_snake_with_two_words():
expected = 'camel_case'
target = _convert_camel_to_snake('CamelCase')
assert expected == target
def test_convert_camel_to_snake_with_one_words():
expected = 'camel'
target = _convert_camel_to_snake('Camel')
assert expected == target
def test_batch_download_report_download(testapp, index_workbook, threadlocals):
res = testapp.get('/report.tsv?type=Experiment&sort=accession')
assert res.headers['content-type'] == 'text/tsv; charset=UTF-8'
disposition = res.headers['content-disposition']
assert disposition.startswith('attachment;filename="experiment_report') and disposition.endswith('.tsv"')
lines = res.body.splitlines()
assert b'/report/' in lines[0]
assert lines[1].split(b'\t') == [
b'ID', b'Accession', b'Assay name', b'Assay title', b'Biosample classification', b'Target', b'Target of assay',
b'Target gene symbol', b'Biosample summary', b'Biosample term name', b'Dbxrefs', b'Description', b'Lab',
b'Project', b'Status', b'Files', b'Related series', b'Biosample accession', b'Biological replicate',
b'Technical replicate', b'Linked antibody', b'Organism', b'Life stage', b'Biosample age',
b'Biosample treatment', b'Biosample treatment ontology ID', b'Biosample treatment amount', b'Biosample treatment amount units',
b'Biosample treatment duration', b'Biosample treatment duration units', b'Synchronization',
b'Post-synchronization time', b'Post-synchronization time units',
b'Biosample modification site target organism', b'Biosample modification site introduced gene organism',
b'Replicates', b'Mixed biosamples', b'Cellular component', b'Library construction platform', b'Library construction method',
]
assert len(lines) == 93
def test_batch_download_report_download_with_cart(testapp, index_workbook, threadlocals):
res = testapp.get('/report.tsv?type=Experiment&cart=/carts/ali-mortazavi-first-cart/')
assert res.headers['content-type'] == 'text/tsv; charset=UTF-8'
disposition = res.headers['content-disposition']
assert disposition.startswith('attachment;filename="experiment_report') and disposition.endswith('.tsv"')
lines = res.body.splitlines()
assert b'/cart-report/' in lines[0]
assert lines[1].split(b'\t') == [
b'ID', b'Accession', b'Assay name', b'Assay title', b'Biosample classification', b'Target', b'Target of assay',
b'Target gene symbol', b'Biosample summary', b'Biosample term name', b'Dbxrefs', b'Description', b'Lab',
b'Project', b'Status', b'Files', b'Related series', b'Biosample accession', b'Biological replicate',
b'Technical replicate', b'Linked antibody', b'Organism', b'Life stage', b'Biosample age',
b'Biosample treatment', b'Biosample treatment ontology ID', b'Biosample treatment amount', b'Biosample treatment amount units',
b'Biosample treatment duration', b'Biosample treatment duration units', b'Synchronization',
b'Post-synchronization time', b'Post-synchronization time units',
b'Biosample modification site target organism', b'Biosample modification site introduced gene organism',
b'Replicates', b'Mixed biosamples', b'Cellular component', b'Library construction platform', b'Library construction method',
]
assert len(lines) == 7
def METHOD_NAME(testapp, index_workbook):
res = testapp.get('/report.tsv?type=MatchedSet&sort=accession')
disposition = res.headers['content-disposition']
assert disposition.startswith('attachment;filename="matched_set_report') and disposition.endswith('.tsv"')
res = testapp.get('/report.tsv?type=matched_set&sort=accession')
disposition = res.headers['content-disposition']
assert disposition.startswith('attachment;filename="matched_set_report') and disposition.endswith('.tsv"')
def test_batch_download_lookup_column_value(lookup_column_value_item, lookup_column_value_validate):
for path in lookup_column_value_validate.keys():
assert lookup_column_value_validate[path] == lookup_column_value(lookup_column_value_item, path)
def test_batch_download_is_cart_search(dummy_request):
from encoded.batch_download import is_cart_search
dummy_request.environ['QUERY_STRING'] = (
'type=Experiment&limit=all'
)
assert not is_cart_search(dummy_request)
dummy_request.environ['QUERY_STRING'] = (
'type=Experiment&cart=/carts/abc/&field=@id'
)
assert is_cart_search(dummy_request)
def test_batch_download_get_report_search_generator(dummy_request):
from encoded.batch_download import get_report_search_generator
dummy_request.environ['QUERY_STRING'] = (
'type=Experiment&limit=all'
)
assert get_report_search_generator(dummy_request).__name__ == 'search_generator'
dummy_request.environ['QUERY_STRING'] = (
'type=Experiment&limit=all&cart=1234'
)
assert get_report_search_generator(dummy_request).__name__ == 'cart_search_generator'
|
2,742 |
id
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetGCPDataConnectorResult',
'AwaitableGetGCPDataConnectorResult',
'get_gcp_data_connector',
'get_gcp_data_connector_output',
]
@pulumi.output_type
class GetGCPDataConnectorResult:
"""
Represents Google Cloud Platform data connector.
"""
def __init__(__self__, auth=None, connector_definition_name=None, dcr_config=None, etag=None, METHOD_NAME=None, kind=None, name=None, request=None, system_data=None, type=None):
if auth and not isinstance(auth, dict):
raise TypeError("Expected argument 'auth' to be a dict")
pulumi.set(__self__, "auth", auth)
if connector_definition_name and not isinstance(connector_definition_name, str):
raise TypeError("Expected argument 'connector_definition_name' to be a str")
pulumi.set(__self__, "connector_definition_name", connector_definition_name)
if dcr_config and not isinstance(dcr_config, dict):
raise TypeError("Expected argument 'dcr_config' to be a dict")
pulumi.set(__self__, "dcr_config", dcr_config)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", METHOD_NAME)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if request and not isinstance(request, dict):
raise TypeError("Expected argument 'request' to be a dict")
pulumi.set(__self__, "request", request)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def auth(self) -> 'outputs.GCPAuthPropertiesResponse':
"""
The auth section of the connector.
"""
return pulumi.get(self, "auth")
@property
@pulumi.getter(name="connectorDefinitionName")
def connector_definition_name(self) -> str:
"""
The name of the connector definition that represents the UI config.
"""
return pulumi.get(self, "connector_definition_name")
@property
@pulumi.getter(name="dcrConfig")
def dcr_config(self) -> Optional['outputs.DCRConfigurationResponse']:
"""
The configuration of the destination of the data.
"""
return pulumi.get(self, "dcr_config")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
Etag of the azure resource
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def kind(self) -> str:
"""
The kind of the data connector
Expected value is 'GCP'.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def request(self) -> 'outputs.GCPRequestPropertiesResponse':
"""
The request section of the connector.
"""
return pulumi.get(self, "request")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Azure Resource Manager metadata containing createdBy and modifiedBy information.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetGCPDataConnectorResult(GetGCPDataConnectorResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetGCPDataConnectorResult(
auth=self.auth,
connector_definition_name=self.connector_definition_name,
dcr_config=self.dcr_config,
etag=self.etag,
METHOD_NAME=self.METHOD_NAME,
kind=self.kind,
name=self.name,
request=self.request,
system_data=self.system_data,
type=self.type)
def get_gcp_data_connector(data_connector_id: Optional[str] = None,
resource_group_name: Optional[str] = None,
workspace_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetGCPDataConnectorResult:
"""
Gets a data connector.
:param str data_connector_id: Connector ID
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str workspace_name: The name of the workspace.
"""
__args__ = dict()
__args__['dataConnectorId'] = data_connector_id
__args__['resourceGroupName'] = resource_group_name
__args__['workspaceName'] = workspace_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:securityinsights/v20230601preview:getGCPDataConnector', __args__, opts=opts, typ=GetGCPDataConnectorResult).value
return AwaitableGetGCPDataConnectorResult(
auth=pulumi.get(__ret__, 'auth'),
connector_definition_name=pulumi.get(__ret__, 'connector_definition_name'),
dcr_config=pulumi.get(__ret__, 'dcr_config'),
etag=pulumi.get(__ret__, 'etag'),
METHOD_NAME=pulumi.get(__ret__, 'id'),
kind=pulumi.get(__ret__, 'kind'),
name=pulumi.get(__ret__, 'name'),
request=pulumi.get(__ret__, 'request'),
system_data=pulumi.get(__ret__, 'system_data'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_gcp_data_connector)
def get_gcp_data_connector_output(data_connector_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetGCPDataConnectorResult]:
"""
Gets a data connector.
:param str data_connector_id: Connector ID
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str workspace_name: The name of the workspace.
"""
...
|
2,743 |
url
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"sentinel incident relation show",
is_experimental=True,
)
class Show(AAZCommand):
"""Get an incident relation.
"""
_aaz_info = {
"version": "2022-06-01-preview",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.operationalinsights/workspaces/{}/providers/microsoft.securityinsights/incidents/{}/relations/{}", "2022-06-01-preview"],
]
}
def _handler(self, command_args):
super()._handler(command_args)
self._execute_operations()
return self._output()
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.incident_id = AAZStrArg(
options=["--incident-id"],
help="Incident ID",
required=True,
id_part="child_name_1",
)
_args_schema.relation_name = AAZStrArg(
options=["-n", "--name", "--relation-name"],
help="Relation Name",
required=True,
id_part="child_name_2",
)
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
_args_schema.workspace_name = AAZStrArg(
options=["-w", "--workspace-name"],
help="The name of the workspace.",
required=True,
is_experimental=True,
id_part="name",
)
return cls._args_schema
def _execute_operations(self):
self.IncidentRelationsGet(ctx=self.ctx)()
def _output(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
return result
class IncidentRelationsGet(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [200]:
return self.on_200(session)
return self.on_error(session.http_response)
@property
def METHOD_NAME(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/incidents/{incidentId}/relations/{relationName}",
**self.url_parameters
)
@property
def method(self):
return "GET"
@property
def error_format(self):
return "ODataV4Format"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"incidentId", self.ctx.args.incident_id,
required=True,
),
**self.serialize_url_param(
"relationName", self.ctx.args.relation_name,
required=True,
),
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
**self.serialize_url_param(
"workspaceName", self.ctx.args.workspace_name,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2022-06-01-preview",
required=True,
),
}
return parameters
@property
def header_parameters(self):
parameters = {
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
def on_200(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
)
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZObjectType()
_schema_on_200 = cls._schema_on_200
_schema_on_200.etag = AAZStrType()
_schema_on_200.id = AAZStrType(
flags={"read_only": True},
)
_schema_on_200.name = AAZStrType(
flags={"read_only": True},
)
_schema_on_200.properties = AAZObjectType(
flags={"client_flatten": True},
)
_schema_on_200.system_data = AAZObjectType(
serialized_name="systemData",
flags={"read_only": True},
)
_schema_on_200.type = AAZStrType(
flags={"read_only": True},
)
properties = cls._schema_on_200.properties
properties.related_resource_id = AAZStrType(
serialized_name="relatedResourceId",
flags={"required": True},
)
properties.related_resource_kind = AAZStrType(
serialized_name="relatedResourceKind",
flags={"read_only": True},
)
properties.related_resource_name = AAZStrType(
serialized_name="relatedResourceName",
flags={"read_only": True},
)
properties.related_resource_type = AAZStrType(
serialized_name="relatedResourceType",
flags={"read_only": True},
)
system_data = cls._schema_on_200.system_data
system_data.created_at = AAZStrType(
serialized_name="createdAt",
flags={"read_only": True},
)
system_data.created_by = AAZStrType(
serialized_name="createdBy",
flags={"read_only": True},
)
system_data.created_by_type = AAZStrType(
serialized_name="createdByType",
flags={"read_only": True},
)
system_data.last_modified_at = AAZStrType(
serialized_name="lastModifiedAt",
flags={"read_only": True},
)
system_data.last_modified_by = AAZStrType(
serialized_name="lastModifiedBy",
flags={"read_only": True},
)
system_data.last_modified_by_type = AAZStrType(
serialized_name="lastModifiedByType",
flags={"read_only": True},
)
return cls._schema_on_200
__all__ = ["Show"]
|
2,744 |
p pruning
|
# Copyright (c) Alibaba, Inc. and its affiliates.
from typing import Any, Dict, Union
import hdbscan
import numpy as np
import scipy
import sklearn
import umap
from sklearn.cluster._kmeans import k_means
from modelscope.metainfo import Models
from modelscope.models import MODELS, TorchModel
from modelscope.utils.constant import Tasks
class SpectralCluster:
r"""A spectral clustering mehtod using unnormalized Laplacian of affinity matrix.
This implementation is adapted from https://github.com/speechbrain/speechbrain.
"""
def __init__(self, min_num_spks=1, max_num_spks=15, pval=0.022):
self.min_num_spks = min_num_spks
self.max_num_spks = max_num_spks
self.pval = pval
def __call__(self, X, oracle_num=None):
# Similarity matrix computation
sim_mat = self.get_sim_mat(X)
# Refining similarity matrix with pval
pruned_sim_mat = self.METHOD_NAME(sim_mat)
# Symmetrization
sym_pruned_sim_mat = 0.5 * (pruned_sim_mat + pruned_sim_mat.T)
# Laplacian calculation
laplacian = self.get_laplacian(sym_pruned_sim_mat)
# Get Spectral Embeddings
emb, num_of_spk = self.get_spec_embs(laplacian, oracle_num)
# Perform clustering
labels = self.cluster_embs(emb, num_of_spk)
return labels
def get_sim_mat(self, X):
# Cosine similarities
M = sklearn.metrics.pairwise.cosine_similarity(X, X)
return M
def METHOD_NAME(self, A):
if A.shape[0] * self.pval < 6:
pval = 6. / A.shape[0]
else:
pval = self.pval
n_elems = int((1 - pval) * A.shape[0])
# For each row in the affinity matrix
for i in range(A.shape[0]):
low_indexes = np.argsort(A[i, :])
low_indexes = low_indexes[0:n_elems]
# Replace smaller similarity values by 0s
A[i, low_indexes] = 0
return A
def get_laplacian(self, M):
M[np.diag_indices(M.shape[0])] = 0
D = np.sum(np.abs(M), axis=1)
D = np.diag(D)
L = D - M
return L
def get_spec_embs(self, L, k_oracle=None):
lambdas, eig_vecs = scipy.linalg.eigh(L)
if k_oracle is not None:
num_of_spk = k_oracle
else:
lambda_gap_list = self.getEigenGaps(
lambdas[self.min_num_spks - 1:self.max_num_spks + 1])
num_of_spk = np.argmax(lambda_gap_list) + self.min_num_spks
emb = eig_vecs[:, :num_of_spk]
return emb, num_of_spk
def cluster_embs(self, emb, k):
_, labels, _ = k_means(emb, k)
return labels
def getEigenGaps(self, eig_vals):
eig_vals_gap_list = []
for i in range(len(eig_vals) - 1):
gap = float(eig_vals[i + 1]) - float(eig_vals[i])
eig_vals_gap_list.append(gap)
return eig_vals_gap_list
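def _demo_spectral_cluster():
    # Illustrative usage sketch; the random embeddings below are assumed
    # purely for demonstration. In practice X would be an [N, D] array of
    # speaker embeddings extracted from audio segments.
    embeddings = np.random.randn(50, 192)  # 50 segments, 192-dim embeddings
    cluster = SpectralCluster(min_num_spks=1, max_num_spks=15, pval=0.022)
    labels = cluster(embeddings)  # speaker count estimated via the eigen-gap
    labels_oracle = cluster(embeddings, oracle_num=3)  # known speaker count
    return labels, labels_oracle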
class UmapHdbscan:
r"""
Reference:
- Siqi Zheng, Hongbin Suo. Reformulating Speaker Diarization as Community Detection With
Emphasis On Topological Structure. ICASSP2022
"""
def __init__(self,
n_neighbors=20,
n_components=60,
min_samples=10,
min_cluster_size=10,
metric='cosine'):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.min_samples = min_samples
self.min_cluster_size = min_cluster_size
self.metric = metric
def __call__(self, X):
umap_X = umap.UMAP(
n_neighbors=self.n_neighbors,
min_dist=0.0,
n_components=min(self.n_components, X.shape[0] - 2),
metric=self.metric,
).fit_transform(X)
labels = hdbscan.HDBSCAN(
min_samples=self.min_samples,
min_cluster_size=self.min_cluster_size,
allow_single_cluster=True).fit_predict(umap_X)
return labels
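def _demo_umap_hdbscan():
    # Illustrative usage sketch with assumed random data, showing the
    # UMAP + HDBSCAN path typically used for large numbers of segments.
    embeddings = np.random.randn(3000, 192)
    cluster = UmapHdbscan(n_neighbors=20, n_components=60, metric='cosine')
    labels = cluster(embeddings)  # HDBSCAN labels; -1 marks noise points
    return labels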
@MODELS.register_module(
Tasks.speaker_diarization, module_name=Models.cluster_backend)
class ClusterBackend(TorchModel):
r"""Perfom clustering for input embeddings and output the labels.
Args:
model_dir: The model directory.
model_config: The model configuration dict.
"""
def __init__(self, model_dir, model_config: Dict[str, Any], *args,
**kwargs):
super().__init__(model_dir, model_config, *args, **kwargs)
self.model_config = model_config
self.other_config = kwargs
self.spectral_cluster = SpectralCluster()
self.umap_hdbscan_cluster = UmapHdbscan()
def forward(self, X, **params):
# clustering and return the labels
k = params['oracle_num'] if 'oracle_num' in params else None
assert len(
X.shape
) == 2, 'modelscope error: the shape of input should be [N, C]'
if X.shape[0] < 20:
return np.zeros(X.shape[0], dtype='int')
if X.shape[0] < 2048 or k is not None:
labels = self.spectral_cluster(X, k)
else:
labels = self.umap_hdbscan_cluster(X)
if k is None and 'merge_thr' in self.model_config:
labels = self.merge_by_cos(labels, X,
self.model_config['merge_thr'])
return labels
def merge_by_cos(self, labels, embs, cos_thr):
# merge the similar speakers by cosine similarity
assert cos_thr > 0 and cos_thr <= 1
while True:
spk_num = labels.max() + 1
if spk_num == 1:
break
spk_center = []
for i in range(spk_num):
spk_emb = embs[labels == i].mean(0)
spk_center.append(spk_emb)
assert len(spk_center) > 0
spk_center = np.stack(spk_center, axis=0)
norm_spk_center = spk_center / np.linalg.norm(
spk_center, axis=1, keepdims=True)
affinity = np.matmul(norm_spk_center, norm_spk_center.T)
affinity = np.triu(affinity, 1)
spks = np.unravel_index(np.argmax(affinity), affinity.shape)
if affinity[spks] < cos_thr:
break
for i in range(len(labels)):
if labels[i] == spks[1]:
labels[i] = spks[0]
elif labels[i] > spks[1]:
labels[i] -= 1
return labels
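# Dispatch summary for ClusterBackend.forward (restating the logic above):
#   N < 20                       -> every segment assigned to one speaker
#   N < 2048 or oracle_num given -> SpectralCluster
#   otherwise                    -> UmapHdbscan
# followed by an optional cosine-similarity merge of close speaker centroids
# when 'merge_thr' is present in the model config.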
|
2,745 |
output
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"voice-service test-line wait",
)
class Wait(AAZWaitCommand):
"""Place the CLI in a waiting state until a condition is met.
"""
_aaz_info = {
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.voiceservices/communicationsgateways/{}/testlines/{}", "2023-01-31"],
]
}
def _handler(self, command_args):
super()._handler(command_args)
self._execute_operations()
return self.METHOD_NAME()
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.gateway_name = AAZStrArg(
options=["--gateway-name"],
help="Unique identifier for this deployment",
required=True,
id_part="name",
fmt=AAZStrArgFormat(
pattern="^[a-zA-Z0-9-]{3,24}$",
),
)
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
_args_schema.test_line_name = AAZStrArg(
options=["-n", "--name", "--test-line-name"],
help="Unique identifier for this test line",
required=True,
id_part="child_name_1",
fmt=AAZStrArgFormat(
pattern="^[a-zA-Z0-9-]{3,24}$",
),
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
self.TestLinesGet(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
def METHOD_NAME(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance, client_flatten=False)
return result
class TestLinesGet(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [200]:
return self.on_200(session)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.VoiceServices/communicationsGateways/{communicationsGatewayName}/testLines/{testLineName}",
**self.url_parameters
)
@property
def method(self):
return "GET"
@property
def error_format(self):
return "MgmtErrorFormat"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"communicationsGatewayName", self.ctx.args.gateway_name,
required=True,
),
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
**self.serialize_url_param(
"testLineName", self.ctx.args.test_line_name,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2023-01-31",
required=True,
),
}
return parameters
@property
def header_parameters(self):
parameters = {
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
def on_200(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
)
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZObjectType()
_schema_on_200 = cls._schema_on_200
_schema_on_200.id = AAZStrType(
flags={"read_only": True},
)
_schema_on_200.location = AAZStrType(
flags={"required": True},
)
_schema_on_200.name = AAZStrType(
flags={"read_only": True},
)
_schema_on_200.properties = AAZObjectType(
flags={"client_flatten": True},
)
_schema_on_200.system_data = AAZObjectType(
serialized_name="systemData",
flags={"read_only": True},
)
_schema_on_200.tags = AAZDictType()
_schema_on_200.type = AAZStrType(
flags={"read_only": True},
)
properties = cls._schema_on_200.properties
properties.phone_number = AAZStrType(
serialized_name="phoneNumber",
flags={"required": True},
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
)
properties.purpose = AAZStrType(
flags={"required": True},
)
system_data = cls._schema_on_200.system_data
system_data.created_at = AAZStrType(
serialized_name="createdAt",
)
system_data.created_by = AAZStrType(
serialized_name="createdBy",
)
system_data.created_by_type = AAZStrType(
serialized_name="createdByType",
)
system_data.last_modified_at = AAZStrType(
serialized_name="lastModifiedAt",
)
system_data.last_modified_by = AAZStrType(
serialized_name="lastModifiedBy",
)
system_data.last_modified_by_type = AAZStrType(
serialized_name="lastModifiedByType",
)
tags = cls._schema_on_200.tags
tags.Element = AAZStrType()
return cls._schema_on_200
class _WaitHelper:
"""Helper class for Wait"""
__all__ = ["Wait"]
|
2,746 |
get processor
|
# Copyright 2021 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A helper for jobs that have been created on the Quantum Engine."""
import abc
from typing import Dict, Iterator, List, Optional, overload, Sequence, Tuple, TYPE_CHECKING
import duet
import cirq
from cirq_google.cloud import quantum
from cirq_google.engine.engine_result import EngineResult
if TYPE_CHECKING:
import datetime
import cirq_google.engine.calibration as calibration
import cirq_google.engine.calibration_result as calibration_result
import cirq_google.engine.abstract_engine as abstract_engine
import cirq_google.engine.abstract_processor as abstract_processor
import cirq_google.engine.abstract_program as abstract_program
class AbstractJob(abc.ABC):
"""An abstract object representing a quantum job execution.
This represents the state of a possibly asynchronous Job being
executed by a simulator, the cloud Engine service, or other means.
This is an abstract interface that implementers of services or mocks
should implement. It generally represents the execution of a circuit
using a set of parameters called a sweep. It can also represent the
execution of a batch job (a list of circuit/sweep pairs) or the
execution of a calibration request.
This job may be in a variety of states. It may be scheduling, it may be
executing on a machine, or it may have entered a terminal state
(either succeeding or failing).
`AbstractJob`s can be iterated over, returning `Result`s. These
`Result`s can also be accessed by index. Note that this will block
until the results are returned.
"""
@abc.abstractmethod
def engine(self) -> 'abstract_engine.AbstractEngine':
"""Returns the parent `AbstractEngine` object."""
@abc.abstractmethod
def id(self) -> str:
"""Returns the id of this job."""
@abc.abstractmethod
def program(self) -> 'abstract_program.AbstractProgram':
"""Returns the parent `AbstractProgram`object."""
@abc.abstractmethod
def create_time(self) -> 'datetime.datetime':
"""Returns when the job was created."""
@abc.abstractmethod
def update_time(self) -> 'datetime.datetime':
"""Returns when the job was last updated."""
@abc.abstractmethod
def description(self) -> str:
"""Returns the description of the job."""
@abc.abstractmethod
def set_description(self, description: str) -> 'AbstractJob':
"""Sets the description of the job.
Params:
description: The new description for the job.
Returns:
This `AbstractJob`.
"""
@abc.abstractmethod
def labels(self) -> Dict[str, str]:
"""Returns the labels of the job."""
@abc.abstractmethod
def set_labels(self, labels: Dict[str, str]) -> 'AbstractJob':
"""Sets (overwriting) the labels for a previously created quantum job.
Params:
labels: The entire set of new job labels.
Returns:
This `AbstractJob`.
"""
@abc.abstractmethod
def add_labels(self, labels: Dict[str, str]) -> 'AbstractJob':
"""Adds new labels to a previously created quantum job.
Params:
labels: New labels to add to the existing job labels.
Returns:
This `AbstractJob`.
"""
@abc.abstractmethod
def remove_labels(self, keys: List[str]) -> 'AbstractJob':
"""Removes labels with given keys.
Params:
keys: Label keys to remove from the existing job labels.
Returns:
This `AbstractJob`.
"""
@abc.abstractmethod
def processor_ids(self) -> List[str]:
"""Returns the processor ids provided when the job was created."""
@abc.abstractmethod
def execution_status(self) -> quantum.ExecutionStatus.State:
"""Return the execution status of the job."""
@abc.abstractmethod
def failure(self) -> Optional[Tuple[str, str]]:
"""Return failure code and message of the job if present."""
@abc.abstractmethod
def get_repetitions_and_sweeps(self) -> Tuple[int, List[cirq.Sweep]]:
"""Returns the repetitions and sweeps for the job.
Returns:
A tuple of the repetition count and list of sweeps.
"""
@abc.abstractmethod
def METHOD_NAME(self) -> Optional['abstract_processor.AbstractProcessor']:
"""Returns the AbstractProcessor for the processor the job is/was run on,
if available, else None."""
@abc.abstractmethod
def get_calibration(self) -> Optional['calibration.Calibration']:
"""Returns the recorded calibration at the time when the job was run, if
one was captured, else None."""
@abc.abstractmethod
def cancel(self) -> Optional[bool]:
"""Cancel the job."""
@abc.abstractmethod
def delete(self) -> Optional[bool]:
"""Deletes the job and result, if any."""
@abc.abstractmethod
async def batched_results_async(self) -> Sequence[Sequence[EngineResult]]:
"""Returns the job results, blocking until the job is complete.
This method is intended for batched jobs. Instead of flattening
results into a single list, this will return a List[Result]
for each circuit in the batch.
"""
batched_results = duet.sync(batched_results_async)
@abc.abstractmethod
async def results_async(self) -> Sequence[EngineResult]:
"""Returns the job results, blocking until the job is complete."""
results = duet.sync(results_async)
@abc.abstractmethod
async def calibration_results_async(self) -> Sequence['calibration_result.CalibrationResult']:
"""Returns the results of a run_calibration() call.
This function will fail if any other type of results were returned.
"""
calibration_results = duet.sync(calibration_results_async)
def __iter__(self) -> Iterator[cirq.Result]:
yield from self.results()
# pylint: disable=function-redefined
@overload
def __getitem__(self, item: int) -> cirq.Result:
pass
@overload
def __getitem__(self, item: slice) -> Sequence[cirq.Result]:
pass
def __getitem__(self, item):
return self.results()[item]
# pylint: enable=function-redefined
def __len__(self) -> int:
return len(self.results())
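# Illustrative usage sketch, assuming `job` is an instance of a concrete
# AbstractJob implementation:
#
#     for result in job:   # blocks until the job reaches a terminal state
#         print(result)
#     first = job[0]       # results can also be indexed or sliced
#     n_results = len(job)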
|
2,747 |
test remove basic
|
from copy import deepcopy
import numpy
import pytest
from openff.toolkit import Molecule, Topology
from openff.units import unit
from openff.interchange import Interchange
from openff.interchange._tests import _BaseTest, needs_gmx
from openff.interchange.components.mdconfig import get_intermol_defaults
from openff.interchange.drivers.gromacs import _process, _run_gmx_energy
from openff.interchange.smirnoff._gromacs import _convert
@pytest.fixture()
def molecule1():
molecule = Molecule.from_smiles(
"[H][O][c]1[c]([H])[c]([O][H])[c]([H])[c]([O][H])[c]1[H]",
)
molecule.generate_conformers(n_conformers=1)
molecule.name = "MOL1"
return molecule
@pytest.fixture()
def molecule2():
molecule = Molecule.from_smiles("C1=C(C=C(C=C1C(=O)O)C(=O)O)C(=O)O")
molecule.generate_conformers(n_conformers=1)
molecule.name = "MOL2"
molecule.conformers[0] += numpy.array([5, 0, 0]) * unit.angstrom
return molecule
@pytest.fixture()
def system1(molecule1, sage):
box = 5 * numpy.eye(3) * unit.nanometer
return _convert(Interchange.from_smirnoff(sage, [molecule1], box=box))
@pytest.fixture()
def system2(molecule2, sage):
box = 5 * numpy.eye(3) * unit.nanometer
return _convert(Interchange.from_smirnoff(sage, [molecule2], box=box))
@pytest.fixture()
def combined_system(molecule1, molecule2, sage):
box = 5 * numpy.eye(3) * unit.nanometer
return _convert(
Interchange.from_smirnoff(
sage,
Topology.from_molecules([molecule1, molecule2]),
box=box,
),
)
@pytest.mark.slow()
class TestAddRemoveMoleculeType(_BaseTest):
@needs_gmx
@pytest.mark.parametrize("molecule_name", ["MOL1", "MOL2"])
def METHOD_NAME(self, combined_system, molecule_name):
combined_system.remove_molecule_type(molecule_name)
# Just a sanity check
combined_system.to_files(prefix=molecule_name, decimal=8)
get_intermol_defaults(periodic=True).write_mdp_file("tmp.mdp")
_process(
_run_gmx_energy(f"{molecule_name}.top", f"{molecule_name}.gro", "tmp.mdp"),
)
@pytest.mark.slow()
@pytest.mark.parametrize("molecule_name", ["MOL1", "MOL2"])
def test_add_existing_molecule_type(self, combined_system, molecule_name):
with pytest.raises(
ValueError,
match=f"The molecule type {molecule_name} is already present in this system.",
):
combined_system.add_molecule_type(
combined_system.molecule_types[molecule_name],
1,
)
@needs_gmx
def test_different_force_field_different_energies(
self,
combined_system,
system1,
molecule2,
sage,
parsley,
):
box = 5 * numpy.eye(3) * unit.nanometer
parsley_system = deepcopy(system1)
sage_system = deepcopy(system1)
molecule2_parsley = _convert(
Interchange.from_smirnoff(parsley, [molecule2], box=box),
)
molecule2_sage = _convert(Interchange.from_smirnoff(sage, [molecule2], box=box))
parsley_system.add_molecule_type(molecule2_parsley.molecule_types["MOL2"], 1)
sage_system.add_molecule_type(molecule2_sage.molecule_types["MOL2"], 1)
parsley_system.positions = combined_system.positions
sage_system.positions = combined_system.positions
parsley_system.to_files(prefix="parsley", decimal=8)
sage_system.to_files(prefix="sage", decimal=8)
get_intermol_defaults(periodic=True).write_mdp_file("tmp.mdp")
_parsley_energy = _process(
_run_gmx_energy("parsley.top", "parsley.gro", "tmp.mdp"),
)
_sage_energy = _process(_run_gmx_energy("sage.top", "sage.gro", "tmp.mdp"))
assert _parsley_energy != _sage_energy
@needs_gmx
def test_molecule_order_independent(self, system1, system2):
positions1 = numpy.vstack([system1.positions, system2.positions])
positions2 = numpy.vstack([system2.positions, system1.positions])
system1.add_molecule_type(system2.molecule_types["MOL2"], 1)
system1.positions = positions1
system2.add_molecule_type(system1.molecule_types["MOL1"], 1)
system2.positions = positions2
system1.to_files(prefix="order1", decimal=8)
system2.to_files(prefix="order2", decimal=8)
get_intermol_defaults(periodic=True).write_mdp_file("tmp.mdp")
_process(_run_gmx_energy("order1.top", "order1.gro", "tmp.mdp")).compare(
_process(_run_gmx_energy("order2.top", "order2.gro", "tmp.mdp")),
)
@pytest.mark.slow()
def test_clashing_atom_types(self, combined_system, system1, system2):
with pytest.raises(
ValueError,
match="The molecule type MOL1 is already present in this system.",
):
combined_system.add_molecule_type(system1.molecule_types["MOL1"], 1)
with pytest.raises(
ValueError,
match="The molecule type MOL2 is already present in this system.",
):
combined_system.add_molecule_type(system2.molecule_types["MOL2"], 1)
with pytest.raises(
ValueError,
match="The molecule type MOL1 is already present in this system.",
):
system1.add_molecule_type(system1.molecule_types["MOL1"], 1)
with pytest.raises(
ValueError,
match="The molecule type MOL2 is already present in this system.",
):
system2.add_molecule_type(system2.molecule_types["MOL2"], 1)
class TestToFiles(_BaseTest):
@needs_gmx
def test_identical_outputs(self, system1):
system1.to_files(prefix="1", decimal=8)
system1.to_top("2.top")
system1.to_gro("2.gro", decimal=8)
get_intermol_defaults(periodic=True).write_mdp_file("tmp.mdp")
_process(_run_gmx_energy("1.top", "1.gro", "tmp.mdp")).compare(
_process(_run_gmx_energy("2.top", "2.gro", "tmp.mdp")),
)
|
2,748 |
start reader threads
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Utilities for handling side inputs."""
# pytype: skip-file
import logging
import queue
import threading
import traceback
from collections import abc
from apache_beam.coders import observable
from apache_beam.io import iobase
from apache_beam.runners.worker import opcounters
from apache_beam.transforms import window
from apache_beam.utils.sentinel import Sentinel
# Maximum number of reader threads for reading side input sources, per side
# input.
MAX_SOURCE_READER_THREADS = 15
# Number of slots for elements in side input element queue. Note that this
# value is intentionally smaller than MAX_SOURCE_READER_THREADS so as to reduce
# memory pressure of holding potentially-large elements in memory. Note that
# the number of pending elements in memory is equal to the sum of
# MAX_SOURCE_READER_THREADS and ELEMENT_QUEUE_SIZE.
ELEMENT_QUEUE_SIZE = 10
# Special element value sentinel for signaling reader state.
READER_THREAD_IS_DONE_SENTINEL = Sentinel.sentinel
# Used to efficiently window the values of non-windowed side inputs.
_globally_windowed = window.GlobalWindows.windowed_value(None).with_value
_LOGGER = logging.getLogger(__name__)
class PrefetchingSourceSetIterable(object):
"""Value iterator that reads concurrently from a set of sources."""
def __init__(
self,
sources,
max_reader_threads=MAX_SOURCE_READER_THREADS,
read_counter=None,
element_counter=None):
self.sources = sources
self.num_reader_threads = min(max_reader_threads, len(self.sources))
# Queue for sources that are to be read.
self.sources_queue = queue.Queue()
for source in sources:
self.sources_queue.put(source)
# Queue for elements that have been read.
self.element_queue = queue.Queue(ELEMENT_QUEUE_SIZE)
# Queue for exceptions encountered in reader threads; to be rethrown.
self.reader_exceptions = queue.Queue()
# Whether we have already iterated; this iterable can only be used once.
self.already_iterated = False
# Whether an error was encountered in any source reader.
self.has_errored = False
self.read_counter = read_counter or opcounters.NoOpTransformIOCounter()
self.element_counter = element_counter
self.reader_threads = []
self.METHOD_NAME()
def add_byte_counter(self, reader):
"""Adds byte counter observer to a side input reader.
Args:
reader: A reader that should inherit from ObservableMixin to have
bytes tracked.
"""
def update_bytes_read(record_size, is_record_size=False, **kwargs):
# Let the reader report block size.
if is_record_size:
self.read_counter.add_bytes_read(record_size)
if isinstance(reader, observable.ObservableMixin):
reader.register_observer(update_bytes_read)
def METHOD_NAME(self):
for _ in range(0, self.num_reader_threads):
t = threading.Thread(target=self._reader_thread)
t.daemon = True
t.start()
self.reader_threads.append(t)
def _reader_thread(self):
# pylint: disable=too-many-nested-blocks
try:
while True:
try:
source = self.sources_queue.get_nowait()
if isinstance(source, iobase.BoundedSource):
for value in source.read(source.get_range_tracker(None, None)):
if self.has_errored:
# If any reader has errored, just return.
return
if isinstance(value, window.WindowedValue):
self.element_queue.put(value)
else:
self.element_queue.put(_globally_windowed(value))
else:
# Native dataflow source.
with source.reader() as reader:
# The tracking of time spent reading and bytes read from side
# inputs is kept behind an experiment flag to test performance
# impact.
self.add_byte_counter(reader)
returns_windowed_values = reader.returns_windowed_values
for value in reader:
if self.has_errored:
# If any reader has errored, just return.
return
if returns_windowed_values:
self.element_queue.put(value)
else:
self.element_queue.put(_globally_windowed(value))
except queue.Empty:
return
except Exception as e: # pylint: disable=broad-except
_LOGGER.error(
'Encountered exception in PrefetchingSourceSetIterable '
'reader thread: %s',
traceback.format_exc())
self.reader_exceptions.put(e)
self.has_errored = True
finally:
self.element_queue.put(READER_THREAD_IS_DONE_SENTINEL)
def __iter__(self):
# pylint: disable=too-many-nested-blocks
if self.already_iterated:
raise RuntimeError(
'Can only iterate once over PrefetchingSourceSetIterable instance.')
self.already_iterated = True
# The invariants during execution are:
# 1) A worker thread always posts the sentinel as the last thing it does
# before exiting.
# 2) We always wait for all sentinels and then join all threads.
num_readers_finished = 0
try:
while True:
try:
with self.read_counter:
element = self.element_queue.get()
if element is READER_THREAD_IS_DONE_SENTINEL:
num_readers_finished += 1
if num_readers_finished == self.num_reader_threads:
return
else:
if self.element_counter:
self.element_counter.update_from(element)
yield element
self.element_counter.update_collect()
else:
yield element
finally:
if self.has_errored:
raise self.reader_exceptions.get()
except GeneratorExit:
self.has_errored = True
raise
finally:
while num_readers_finished < self.num_reader_threads:
element = self.element_queue.get()
if element is READER_THREAD_IS_DONE_SENTINEL:
num_readers_finished += 1
for t in self.reader_threads:
t.join()
def get_iterator_fn_for_sources(
sources,
max_reader_threads=MAX_SOURCE_READER_THREADS,
read_counter=None,
element_counter=None):
"""Returns callable that returns iterator over elements for given sources."""
def _inner():
return iter(
PrefetchingSourceSetIterable(
sources,
max_reader_threads=max_reader_threads,
read_counter=read_counter,
element_counter=element_counter))
return _inner
class EmulatedIterable(abc.Iterable):
"""Emulates an iterable for a side input."""
def __init__(self, iterator_fn):
self.iterator_fn = iterator_fn
def __iter__(self):
return self.iterator_fn()
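def _demo_emulated_iterable():
    # Illustrative sketch: EmulatedIterable re-invokes the supplied factory
    # on every iteration, so side input values can be re-read lazily as many
    # times as needed.
    iterable = EmulatedIterable(lambda: iter([1, 2, 3]))
    assert list(iterable) == [1, 2, 3]
    assert list(iterable) == [1, 2, 3]  # a fresh iterator each time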
|
2,749 |
main
|
# SPDX-FileCopyrightText: Red Hat, Inc.
# SPDX-License-Identifier: GPL-2.0-or-later
"""
Stress tests for switching the master storage domain.
Usage:
1. Have two storage domains: one is the current master domain,
and the other will be the new master domain.
2. Run this on the SPM host:
python3 switch_master.py --pool-id XXX --new-master YYY --old-master ZZZ
If there were no errors, the master role is switched to the specified
new-master storage domain.
In order to run the script:
- Stop the engine
- Run the switch-master script, specifying the pool-id, old-master,
and new-master parameters:
python3 switch_master.py --pool-id 11d54412-9232-43af-a51c-74078e7d03ce
--new-master d165c4d9-eae1-44cc-ad16-07ea595c383f
--old-master c5fabee4-b350-4393-8964-8437278ff70f
- Edit the engine DB by setting the storage_domain_type for the new master
to '0' (Master), the old master to '1' (Data), and update the pool's
master-version:
$ sudo -u postgres psql -d engine
# update storage_domain_static set storage_domain_type = 0
where id = 'new-master';
# update storage_domain_static set storage_domain_type = 1
where id = 'old-master';
# update storage_pool set master_domain_version = master-version
where id = 'pool-id';
- Start the engine and verify 'reconstructMaster' isn't being called,
and the new master is now the master.
WARNING: The script runs only on the SPM.
Once the SPM stops, the script fails with a "Not SPM" message.
NOTE: While the switchMaster command is being performed, if the master domain
is not synced between the engine's DB and VDSM, the engine will reconstruct
the master domain and overwrite the masterVersion.
See https://bugzilla.redhat.com/1576923
"""
import argparse
import logging
import time
from contextlib import closing
from vdsm import client
log = logging.getLogger()
def METHOD_NAME():
args = parse_args()
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s %(levelname)-7s (%(threadName)s) %(message)s")
old_master = args.old_master
new_master = args.new_master
cli = client.connect("localhost", 54321)
with closing(cli):
if args.master_version:
master_ver = args.master_version
else:
pool_info = cli.StoragePool.getInfo(storagepoolID=args.pool_id)
master_ver = int(pool_info['info']['master_ver']) + 1
for i in range(1, args.iterations + 1):
log.info("Cycle %s/%s, switching master from %s to %s version %s",
i, args.iterations, old_master, new_master, master_ver)
task_id = cli.StoragePool.switchMaster(
storagepoolID=args.pool_id,
oldMasterUUID=old_master,
newMasterUUID=new_master,
masterVersion=master_ver)
log.info("Task id: %s", task_id)
# Example Task.getStatus response:
# {'taskID': '5e7b6cd0-d9d7-4e48-b525-7f1f0a612ff7',
# 'taskState': 'running', 'taskResult': '', 'code': 0,
# 'message': 'running job 1 of 1'}
while True:
time.sleep(5)
status = cli.Task.getStatus(taskID=task_id)
log.debug("Task status: %s", status)
if status["taskState"] != "running":
break
log.debug("Clearing task %s", task_id)
cli.Task.clear(taskID=task_id)
if status["code"] != 0:
raise RuntimeError("Task failed: %s", status["message"])
pool_info = cli.StoragePool.getInfo(storagepoolID=args.pool_id)
if pool_info['info']['master_ver'] != master_ver:
raise RuntimeError(
"Unexpected master_ver value: expecting: {} actual: {}"
.format(master_ver, pool_info['info']['master_ver']))
if pool_info['info']['master_uuid'] != new_master:
raise RuntimeError(
"Unexpected master_uuid value: expecting: {} actual: {}"
.format(new_master, pool_info['info']['master_uuid']))
new_master_info = cli.StorageDomain.getInfo(
storagedomainID=new_master)
if new_master_info['role'] != "Master":
raise RuntimeError(
"Role for new master domain didn't change to Master")
old_master_info = cli.StorageDomain.getInfo(
storagedomainID=old_master)
if old_master_info['role'] != "Regular":
raise RuntimeError(
"Role for old master domain didn't change to Regular")
log.info("Master switched successfully")
new_master, old_master = old_master, new_master
master_ver += 1
def parse_args():
p = argparse.ArgumentParser('Switch master domain from the command line')
p.add_argument(
"--pool-id",
type=str,
required=True,
help="The storage pool associated with the storage domains")
p.add_argument(
"--old-master",
type=str,
required=True,
help="The current master storage domain UUID")
p.add_argument(
"--new-master",
type=str,
required=True,
help="The new master storage domain UUID")
p.add_argument(
"--master-version",
type=int,
help="The new master's version (default is current version + 1)")
p.add_argument(
"--iterations",
default=1,
type=int,
help="The iterations number for switching the master (default is 1)")
return p.parse_args()
if __name__ == '__main__':
METHOD_NAME()
|
2,750 |
skip config
|
import logging
import pytest
from ipaddress import ip_interface
from constants import ENI, VM_VNI, VNET1_VNI, VNET2_VNI, REMOTE_CA_IP, LOCAL_CA_IP, REMOTE_ENI_MAC,\
LOCAL_ENI_MAC, REMOTE_CA_PREFIX, LOOPBACK_IP, DUT_MAC, LOCAL_PA_IP, LOCAL_PTF_INTF, LOCAL_PTF_MAC,\
REMOTE_PA_IP, REMOTE_PTF_INTF, REMOTE_PTF_MAC, REMOTE_PA_PREFIX, VNET1_NAME, VNET2_NAME, ROUTING_ACTION, \
ROUTING_ACTION_TYPE, LOOKUP_OVERLAY_IP
from dash_utils import render_template_to_host, apply_swssconfig_file
logger = logging.getLogger(__name__)
def pytest_addoption(parser):
"""
Adds pytest options that are used by DASH tests
"""
parser.addoption(
"--skip_config",
action="store_true",
help="Don't apply configurations on DUT"
)
parser.addoption(
"--config_only",
action="store_true",
help="Apply new configurations on DUT without running tests"
)
parser.addoption(
"--skip_cleanup",
action="store_true",
help="Skip config cleanup after test"
)
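# Illustrative invocations; the test file name below is a placeholder:
#   pytest <dash_test_file> --config_only   # apply DUT config, skip the tests
#   pytest <dash_test_file> --skip_config   # reuse existing DUT config
#   pytest <dash_test_file> --skip_cleanup  # keep the applied config afterwards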
@pytest.fixture(scope="module")
def config_only(request):
return request.config.getoption("--config_only")
@pytest.fixture(scope="module")
def METHOD_NAME(request):
return request.config.getoption("--skip_config")
@pytest.fixture(scope="module")
def skip_cleanup(request):
return request.config.getoption("--skip_cleanup")
@pytest.fixture(scope="module")
def config_facts(duthost):
return duthost.config_facts(host=duthost.hostname, source="running")['ansible_facts']
@pytest.fixture(scope="module")
def minigraph_facts(duthosts, rand_one_dut_hostname, tbinfo):
"""
Fixture to get minigraph facts
Args:
duthost: DUT host object
Returns:
Dictionary containing minigraph information
"""
duthost = duthosts[rand_one_dut_hostname]
return duthost.get_extended_minigraph_facts(tbinfo)
def get_intf_from_ip(local_ip, config_facts):
for intf, config in list(config_facts["INTERFACE"].items()):
for ip in config:
intf_ip = ip_interface(ip)
if str(intf_ip.ip) == local_ip:
return intf, intf_ip
@pytest.fixture(params=["no-underlay-route", "with-underlay-route"])
def use_underlay_route(request):
if request.param == "with-underlay-route":
pytest.skip("Underlay route not supported yet")
return request.param == "with-underlay-route"
@pytest.fixture(scope="function")
def dash_config_info(duthost, config_facts, minigraph_facts):
dash_info = {
ENI: "F4939FEFC47E",
VM_VNI: 4321,
VNET1_VNI: 1000,
VNET1_NAME: "Vnet1",
VNET2_VNI: 2000,
VNET2_NAME: "Vnet2",
REMOTE_CA_IP: "20.2.2.2",
LOCAL_CA_IP: "11.1.1.1",
REMOTE_ENI_MAC: "F9:22:83:99:22:A2",
LOCAL_ENI_MAC: "F4:93:9F:EF:C4:7E",
REMOTE_CA_PREFIX: "20.2.2.0/24",
}
loopback_intf_ip = ip_interface(list(list(config_facts["LOOPBACK_INTERFACE"].values())[0].keys())[0])
dash_info[LOOPBACK_IP] = str(loopback_intf_ip.ip)
dash_info[DUT_MAC] = config_facts["DEVICE_METADATA"]["localhost"]["mac"]
neigh_table = duthost.switch_arptable()['ansible_facts']['arptable']
for neigh_ip, config in list(config_facts["BGP_NEIGHBOR"].items()):
# Pick the first two BGP neighbor IPs since these should already be learned on the DUT
if ip_interface(neigh_ip).version == 4:
if LOCAL_PA_IP not in dash_info:
dash_info[LOCAL_PA_IP] = neigh_ip
intf, _ = get_intf_from_ip(config['local_addr'], config_facts)
dash_info[LOCAL_PTF_INTF] = minigraph_facts["minigraph_ptf_indices"][intf]
dash_info[LOCAL_PTF_MAC] = neigh_table["v4"][neigh_ip]["macaddress"]
elif REMOTE_PA_IP not in dash_info:
dash_info[REMOTE_PA_IP] = neigh_ip
intf, intf_ip = get_intf_from_ip(config['local_addr'], config_facts)
dash_info[REMOTE_PTF_INTF] = minigraph_facts["minigraph_ptf_indices"][intf]
dash_info[REMOTE_PTF_MAC] = neigh_table["v4"][neigh_ip]["macaddress"]
dash_info[REMOTE_PA_PREFIX] = str(intf_ip.network)
break
return dash_info
@pytest.fixture(scope="function")
def apply_config(duthost, METHOD_NAME, skip_cleanup):
configs = []
op = "SET"
def _apply_config(config_info):
if METHOD_NAME:
return
if config_info not in configs:
configs.append(config_info)
config = "dash_basic_config"
template_name = "{}.j2".format(config)
dest_path = "/tmp/{}.json".format(config)
render_template_to_host(template_name, duthost, dest_path, config_info, op=op)
apply_swssconfig_file(duthost, dest_path)
yield _apply_config
op = "DEL"
if not skip_cleanup:
for config_info in reversed(configs):
_apply_config(config_info)
@pytest.fixture(scope="function")
def dash_inbound_configs(dash_config_info, use_underlay_route, minigraph_facts):
if use_underlay_route:
dash_config_info[LOCAL_PA_IP] = u"30.30.30.30"
dash_config_info[LOCAL_PTF_INTF] = list(minigraph_facts["minigraph_ptf_indices"].values())
else:
dash_config_info[LOCAL_PTF_INTF] = [dash_config_info[LOCAL_PTF_INTF]]
logger.info("Testing with config {}".format(dash_config_info))
return dash_config_info
@pytest.fixture(scope="function")
def apply_inbound_configs(dash_inbound_configs, apply_config):
dash_inbound_configs[ROUTING_ACTION] = "vnet"
apply_config(dash_inbound_configs)
@pytest.fixture(scope="function")
def dash_outbound_configs(dash_config_info, use_underlay_route, minigraph_facts):
if use_underlay_route:
dash_config_info[REMOTE_PA_IP] = u"30.30.30.30"
dash_config_info[REMOTE_PA_PREFIX] = "30.30.30.30/32"
dash_config_info[REMOTE_PTF_INTF] = list(minigraph_facts["minigraph_ptf_indices"].values())
else:
dash_config_info[REMOTE_PTF_INTF] = [dash_config_info[REMOTE_PTF_INTF]]
logger.info("Testing with config {}".format(dash_config_info))
return dash_config_info
@pytest.fixture(scope="function")
def apply_vnet_configs(dash_outbound_configs, apply_config):
dash_outbound_configs[ROUTING_ACTION] = "vnet"
apply_config(dash_outbound_configs)
@pytest.fixture(scope="function")
def apply_vnet_direct_configs(dash_outbound_configs, apply_config):
dash_outbound_configs[ROUTING_ACTION] = "vnet_direct"
dash_outbound_configs[ROUTING_ACTION_TYPE] = "maprouting"
dash_outbound_configs[LOOKUP_OVERLAY_IP] = "1.1.1.1"
apply_config(dash_outbound_configs)
@pytest.fixture(scope="function")
def apply_direct_configs(dash_outbound_configs, apply_config):
dash_outbound_configs[ROUTING_ACTION] = "direct"
del dash_outbound_configs[VNET2_NAME]
apply_config(dash_outbound_configs)
|
2,751 |
deregister all callbacks
|
import sys
from qtpy import QtWidgets, QtCore, QtGui
from openpype.tools.utils import host_tools
from openpype.style import load_stylesheet
from openpype.lib import register_event_callback
from openpype.hosts.fusion.scripts import (
duplicate_with_inputs,
)
from openpype.hosts.fusion.api.lib import (
set_asset_framerange,
set_asset_resolution,
)
from openpype.pipeline import get_current_asset_name
from openpype.resources import get_openpype_icon_filepath
from .pipeline import FusionEventHandler
from .pulse import FusionPulse
self = sys.modules[__name__]
self.menu = None
class OpenPypeMenu(QtWidgets.QWidget):
def __init__(self, *args, **kwargs):
super(OpenPypeMenu, self).__init__(*args, **kwargs)
self.setObjectName("OpenPypeMenu")
icon_path = get_openpype_icon_filepath()
icon = QtGui.QIcon(icon_path)
self.setWindowIcon(icon)
self.setWindowFlags(
QtCore.Qt.Window
| QtCore.Qt.CustomizeWindowHint
| QtCore.Qt.WindowTitleHint
| QtCore.Qt.WindowMinimizeButtonHint
| QtCore.Qt.WindowCloseButtonHint
| QtCore.Qt.WindowStaysOnTopHint
)
self.render_mode_widget = None
self.setWindowTitle("OpenPype")
asset_label = QtWidgets.QLabel("Context", self)
asset_label.setStyleSheet(
"""QLabel {
font-size: 14px;
font-weight: 600;
color: #5f9fb8;
}"""
)
asset_label.setAlignment(QtCore.Qt.AlignHCenter)
workfiles_btn = QtWidgets.QPushButton("Workfiles...", self)
create_btn = QtWidgets.QPushButton("Create...", self)
load_btn = QtWidgets.QPushButton("Load...", self)
publish_btn = QtWidgets.QPushButton("Publish...", self)
manager_btn = QtWidgets.QPushButton("Manage...", self)
libload_btn = QtWidgets.QPushButton("Library...", self)
set_framerange_btn = QtWidgets.QPushButton("Set Frame Range", self)
set_resolution_btn = QtWidgets.QPushButton("Set Resolution", self)
duplicate_with_inputs_btn = QtWidgets.QPushButton(
"Duplicate with input connections", self
)
layout = QtWidgets.QVBoxLayout(self)
layout.setContentsMargins(10, 20, 10, 20)
layout.addWidget(asset_label)
layout.addSpacing(20)
layout.addWidget(workfiles_btn)
layout.addSpacing(20)
layout.addWidget(create_btn)
layout.addWidget(load_btn)
layout.addWidget(publish_btn)
layout.addWidget(manager_btn)
layout.addSpacing(20)
layout.addWidget(libload_btn)
layout.addSpacing(20)
layout.addWidget(set_framerange_btn)
layout.addWidget(set_resolution_btn)
layout.addSpacing(20)
layout.addWidget(duplicate_with_inputs_btn)
self.setLayout(layout)
# Store reference so we can update the label
self.asset_label = asset_label
workfiles_btn.clicked.connect(self.on_workfile_clicked)
create_btn.clicked.connect(self.on_create_clicked)
publish_btn.clicked.connect(self.on_publish_clicked)
load_btn.clicked.connect(self.on_load_clicked)
manager_btn.clicked.connect(self.on_manager_clicked)
libload_btn.clicked.connect(self.on_libload_clicked)
duplicate_with_inputs_btn.clicked.connect(
self.on_duplicate_with_inputs_clicked
)
set_resolution_btn.clicked.connect(self.on_set_resolution_clicked)
set_framerange_btn.clicked.connect(self.on_set_framerange_clicked)
self._callbacks = []
self.register_callback("taskChanged", self.on_task_changed)
self.on_task_changed()
# Force close current process if Fusion is closed
self._pulse = FusionPulse(parent=self)
self._pulse.start()
# Detect Fusion events as OpenPype events
self._event_handler = FusionEventHandler(parent=self)
self._event_handler.start()
def on_task_changed(self):
# Update current context label
label = get_current_asset_name()
self.asset_label.setText(label)
def register_callback(self, name, fn):
# Create a wrapper callback that we only store
# for as long as we want it to persist as a callback
def _callback(*args):
fn()
self._callbacks.append(_callback)
register_event_callback(name, _callback)
def METHOD_NAME(self):
self._callbacks[:] = []
def on_workfile_clicked(self):
host_tools.show_workfiles()
def on_create_clicked(self):
host_tools.show_publisher(tab="create")
def on_publish_clicked(self):
host_tools.show_publisher(tab="publish")
def on_load_clicked(self):
host_tools.show_loader(use_context=True)
def on_manager_clicked(self):
host_tools.show_scene_inventory()
def on_libload_clicked(self):
host_tools.show_library_loader()
def on_duplicate_with_inputs_clicked(self):
duplicate_with_inputs.duplicate_with_input_connections()
def on_set_resolution_clicked(self):
set_asset_resolution()
def on_set_framerange_clicked(self):
set_asset_framerange()
def launch_openpype_menu():
app = QtWidgets.QApplication(sys.argv)
pype_menu = OpenPypeMenu()
stylesheet = load_stylesheet()
pype_menu.setStyleSheet(stylesheet)
pype_menu.show()
self.menu = pype_menu
result = app.exec_()
print("Shutting down..")
sys.exit(result)
|
2,752 |
simple tag func
|
#!/usr/bin/env python
"""
This example shows how to use the parametrization
feature of the `@testcase` decorator.
"""
import sys
from testplan.testing.multitest import MultiTest, testsuite, testcase
from testplan import test_plan
from testplan.report.testing.styles import Style
@testsuite
class SimpleTest:
# This will generate 4 new testcase methods, using a tuple for each one.
@testcase(
parameters=((5, 5, 10), (3, 2, 5), (0, 0, 0), ("foo", "bar", "foobar"))
)
def addition(self, env, result, a, b, expected):
result.equal(a + b, expected)
# Parametrization context for the generated testcases will be:
# result.equal(5 + 5, 10)
# result.equal(3 + 2, 5)
# result.equal(0 + 0, 0)
# result.equal('foo' + 'bar', 'foobar')
# Combinatorial parametrization example
# Commutativity check of the addition operation (a + b = b + a)
# This will generate 25 (5 x 5) methods.
@testcase(
parameters={
"a": [1, 10, -5, -3.2, 3e12],
"b": [0, 42, 4.2, -0.231, 5.5e5],
}
)
def addition_associativity(self, env, result, a, b):
# It's a good practice to generate a description
# with the parametrized arguments as well.
# So that you can have more context when you inspect the test report.
result.equal(
actual=a + b,
expected=b + a,
description="{a} + {b} == {b} + {a}".format(a=a, b=b),
)
# Generated testcases will have the following contexts:
# result.equal(1 + 0, 0 + 1, ...)
# result.equal(10 + 0, 0 + 10, ...)
# result.equal(-5 + 0, 0 + -5, ...)
# ...
# ...
# result.equal(3e12 + -0.231, -0.231 + 3e12, ...)
# result.equal(3e12 + 5.5e5, 5.5e5 + 3e12, ...)
# Shortcut notation that uses single values
# for single argument parametrization
# Assigns 2, 4, 6, 8 to `value` for each generated test case
# Verbose notation would be
# `parameters=((2,), (4,), (6,), (8,))` which is not that readable.
@testcase(
parameters=(
2, # first testcase
4, # second testcase
6, # third testcase
8, # fourth testcase
)
)
def is_even(self, env, result, value):
result.equal(value % 2, 0)
# The example below makes use of a custom name
# generation function for parametrization.
# This way we can come up with more readable testcase
# method names on the test reports.
# If we didn't use a custom name function, we'd end up with method name
# like `func_raises_error <func=.., error=...>`, but instead, the custom
# function will give us names like `func_raises_error__ValueError`.
def custom_error_name_func(func_name, kwargs):
"""Disregard `func` argument, use the error only."""
return "{func_name}__{error_type}".format(
func_name=func_name, error_type=kwargs["error"].__name__
)
@testsuite
class ErrorTest:
# The lambda functions in the parameters below try to
# execute invalid Python code that raises certain errors.
# The parametrized test method checks if the function
# raises the expected error when it is run.
# This will generate 5 methods, for each item in the tuple.
@testcase(
parameters=(
# tuple notation, using default error value (TypeError)
(lambda: "foo" + 5,),
(lambda: object().b, AttributeError),
(lambda: {"a": 5}["b"], KeyError),
(lambda: int("a"), ValueError),
(lambda: 10 / 0, ZeroDivisionError),
),
# comment out the line below to see how
# Testplan falls back to simple method names with integer suffixes
name_func=custom_error_name_func,
)
def func_raises_error(self, env, result, func, error=TypeError):
with result.raises(error):
func()
# This function returns the value of the product directly
# which will be interpreted as a simple tag.
def METHOD_NAME(kwargs):
return kwargs["product"].title()
# This function returns a dictionary that is interpreted as a named tag.
def named_tag_func(kwargs):
return {"product": kwargs["product"].title()}
@testsuite
class ProductTest:
"""Sample testsuite that demonstrates how `tag_func` works."""
@testcase(
tags={"category": "CategoryA"},
parameters=((2, 3, "productA"), (3, 4, "productB")),
tag_func=METHOD_NAME,
)
def simple_tag_func_test(self, env, result, a, b, product):
result.true(True)
@testcase(
tags={"category": "CategoryB"},
parameters=((2, 3, "productA"), (3, 4, "productB")),
tag_func=named_tag_func,
)
def named_tag_func_test(self, env, result, a, b, product):
result.true(True)
# Append the stringified kwargs to the original docstring
def kwargs_to_string(docstring, kwargs):
return "\n".join([docstring, str(kwargs)])
# Use the original docstring, formatting
# it using kwargs via string interpolation.
# e.g. `foo: {foo}, bar: {bar}`.format(foo=2, bar=5)` -> 'foo: 2, bar: 5'
def interpolate_docstring(docstring, kwargs):
return docstring.format(**kwargs)
@testsuite
class DocStringTest:
@testcase(
parameters=((2, 3, 5), (5, 10, 15)), docstring_func=kwargs_to_string
)
def addition_one(self, env, result, first, second, expected):
"""Test addition of two numbers."""
return result.equal(first + second, expected)
@testcase(
parameters=((2, 3, 5), (5, 10, 15)),
docstring_func=interpolate_docstring,
)
def addition_two(self, env, result, first, second, expected):
"""
Testing addition with: {first} + {second}
Expected value: {expected}
"""
return result.equal(first + second, expected)
@test_plan(
name="Parametrization Example",
# Using detailed assertions so we can
# see testcase context for generated testcases
stdout_style=Style("assertion-detail", "assertion-detail"),
)
def main(plan):
plan.add(
MultiTest(
name="Primary",
suites=[SimpleTest(), ErrorTest(), ProductTest(), DocStringTest()],
)
)
if __name__ == "__main__":
sys.exit(not main())
|
2,753 |
test arccosh
|
# Owner(s): ["module: dynamo"]
# this file is autogenerated via gen_ufuncs.py
# do not edit manually!
import numpy as np
from torch._numpy._ufuncs import * # noqa: F403
from torch._numpy.testing import assert_allclose
def test_absolute():
assert_allclose(np.absolute(0.5), absolute(0.5), atol=1e-14, check_dtype=False)
def test_arccos():
assert_allclose(np.arccos(0.5), arccos(0.5), atol=1e-14, check_dtype=False)
def METHOD_NAME():
assert_allclose(np.arccosh(1.5), arccosh(1.5), atol=1e-14, check_dtype=False)
def test_arcsin():
assert_allclose(np.arcsin(0.5), arcsin(0.5), atol=1e-14, check_dtype=False)
def test_arcsinh():
assert_allclose(np.arcsinh(0.5), arcsinh(0.5), atol=1e-14, check_dtype=False)
def test_arctan():
assert_allclose(np.arctan(0.5), arctan(0.5), atol=1e-14, check_dtype=False)
def test_arctanh():
assert_allclose(np.arctanh(0.5), arctanh(0.5), atol=1e-14, check_dtype=False)
def test_cbrt():
assert_allclose(np.cbrt(0.5), cbrt(0.5), atol=1e-14, check_dtype=False)
def test_ceil():
assert_allclose(np.ceil(0.5), ceil(0.5), atol=1e-14, check_dtype=False)
def test_conjugate():
assert_allclose(np.conjugate(0.5), conjugate(0.5), atol=1e-14, check_dtype=False)
def test_cos():
assert_allclose(np.cos(0.5), cos(0.5), atol=1e-14, check_dtype=False)
def test_cosh():
assert_allclose(np.cosh(0.5), cosh(0.5), atol=1e-14, check_dtype=False)
def test_deg2rad():
assert_allclose(np.deg2rad(0.5), deg2rad(0.5), atol=1e-14, check_dtype=False)
def test_degrees():
assert_allclose(np.degrees(0.5), degrees(0.5), atol=1e-14, check_dtype=False)
def test_exp():
assert_allclose(np.exp(0.5), exp(0.5), atol=1e-14, check_dtype=False)
def test_exp2():
assert_allclose(np.exp2(0.5), exp2(0.5), atol=1e-14, check_dtype=False)
def test_expm1():
assert_allclose(np.expm1(0.5), expm1(0.5), atol=1e-14, check_dtype=False)
def test_fabs():
assert_allclose(np.fabs(0.5), fabs(0.5), atol=1e-14, check_dtype=False)
def test_floor():
assert_allclose(np.floor(0.5), floor(0.5), atol=1e-14, check_dtype=False)
def test_isfinite():
assert_allclose(np.isfinite(0.5), isfinite(0.5), atol=1e-14, check_dtype=False)
def test_isinf():
assert_allclose(np.isinf(0.5), isinf(0.5), atol=1e-14, check_dtype=False)
def test_isnan():
assert_allclose(np.isnan(0.5), isnan(0.5), atol=1e-14, check_dtype=False)
def test_log():
assert_allclose(np.log(0.5), log(0.5), atol=1e-14, check_dtype=False)
def test_log10():
assert_allclose(np.log10(0.5), log10(0.5), atol=1e-14, check_dtype=False)
def test_log1p():
assert_allclose(np.log1p(0.5), log1p(0.5), atol=1e-14, check_dtype=False)
def test_log2():
assert_allclose(np.log2(0.5), log2(0.5), atol=1e-14, check_dtype=False)
def test_logical_not():
assert_allclose(
np.logical_not(0.5), logical_not(0.5), atol=1e-14, check_dtype=False
)
def test_negative():
assert_allclose(np.negative(0.5), negative(0.5), atol=1e-14, check_dtype=False)
def test_positive():
assert_allclose(np.positive(0.5), positive(0.5), atol=1e-14, check_dtype=False)
def test_rad2deg():
assert_allclose(np.rad2deg(0.5), rad2deg(0.5), atol=1e-14, check_dtype=False)
def test_radians():
assert_allclose(np.radians(0.5), radians(0.5), atol=1e-14, check_dtype=False)
def test_reciprocal():
assert_allclose(np.reciprocal(0.5), reciprocal(0.5), atol=1e-14, check_dtype=False)
def test_rint():
assert_allclose(np.rint(0.5), rint(0.5), atol=1e-14, check_dtype=False)
def test_sign():
assert_allclose(np.sign(0.5), sign(0.5), atol=1e-14, check_dtype=False)
def test_signbit():
assert_allclose(np.signbit(0.5), signbit(0.5), atol=1e-14, check_dtype=False)
def test_sin():
assert_allclose(np.sin(0.5), sin(0.5), atol=1e-14, check_dtype=False)
def test_sinh():
assert_allclose(np.sinh(0.5), sinh(0.5), atol=1e-14, check_dtype=False)
def test_sqrt():
assert_allclose(np.sqrt(0.5), sqrt(0.5), atol=1e-14, check_dtype=False)
def test_square():
assert_allclose(np.square(0.5), square(0.5), atol=1e-14, check_dtype=False)
def test_tan():
assert_allclose(np.tan(0.5), tan(0.5), atol=1e-14, check_dtype=False)
def test_tanh():
assert_allclose(np.tanh(0.5), tanh(0.5), atol=1e-14, check_dtype=False)
def test_trunc():
assert_allclose(np.trunc(0.5), trunc(0.5), atol=1e-14, check_dtype=False)
|
2,754 |
start
|
"""
Riak Salt Module
"""
import salt.utils.path
def __virtual__():
"""
Only available on systems with Riak installed.
"""
if salt.utils.path.which("riak"):
return True
return (
False,
"The riak execution module failed to load: the riak binary is not in the path.",
)
def __execute_cmd(name, cmd):
"""
Execute Riak commands
"""
return __salt__["cmd.run_all"]("{} {}".format(salt.utils.path.which(name), cmd))
def METHOD_NAME():
"""
Start Riak
CLI Example:
.. code-block:: bash
salt '*' riak.start
"""
ret = {"comment": "", "success": False}
cmd = __execute_cmd("riak", "start")
if cmd["retcode"] != 0:
ret["comment"] = cmd["stderr"]
else:
ret["comment"] = cmd["stdout"]
ret["success"] = True
return ret
def stop():
"""
Stop Riak
.. versionchanged:: 2015.8.0
CLI Example:
.. code-block:: bash
salt '*' riak.stop
"""
ret = {"comment": "", "success": False}
cmd = __execute_cmd("riak", "stop")
if cmd["retcode"] != 0:
ret["comment"] = cmd["stderr"]
else:
ret["comment"] = cmd["stdout"]
ret["success"] = True
return ret
def cluster_join(username, hostname):
"""
Join a Riak cluster
.. versionchanged:: 2015.8.0
CLI Example:
.. code-block:: bash
salt '*' riak.cluster_join <user> <host>
username - The riak username to join the cluster
hostname - The riak hostname you are connecting to
"""
ret = {"comment": "", "success": False}
cmd = __execute_cmd("riak-admin", "cluster join {}@{}".format(username, hostname))
if cmd["retcode"] != 0:
ret["comment"] = cmd["stdout"]
else:
ret["comment"] = cmd["stdout"]
ret["success"] = True
return ret
def cluster_leave(username, hostname):
"""
Leave a Riak cluster
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt '*' riak.cluster_leave <username> <host>
username - The riak username to join the cluster
hostname - The riak hostname you are connecting to
"""
ret = {"comment": "", "success": False}
cmd = __execute_cmd("riak-admin", "cluster leave {}@{}".format(username, hostname))
if cmd["retcode"] != 0:
ret["comment"] = cmd["stdout"]
else:
ret["comment"] = cmd["stdout"]
ret["success"] = True
return ret
def cluster_plan():
"""
Review Cluster Plan
.. versionchanged:: 2015.8.0
CLI Example:
.. code-block:: bash
salt '*' riak.cluster_plan
"""
cmd = __execute_cmd("riak-admin", "cluster plan")
if cmd["retcode"] != 0:
return False
return True
def cluster_commit():
"""
Commit Cluster Changes
.. versionchanged:: 2015.8.0
CLI Example:
.. code-block:: bash
salt '*' riak.cluster_commit
"""
ret = {"comment": "", "success": False}
cmd = __execute_cmd("riak-admin", "cluster commit")
if cmd["retcode"] != 0:
ret["comment"] = cmd["stdout"]
else:
ret["comment"] = cmd["stdout"]
ret["success"] = True
return ret
def member_status():
"""
Get cluster member status
.. versionchanged:: 2015.8.0
CLI Example:
.. code-block:: bash
salt '*' riak.member_status
"""
ret = {
"membership": {},
"summary": {"Valid": 0, "Leaving": 0, "Exiting": 0, "Joining": 0, "Down": 0},
}
out = __execute_cmd("riak-admin", "member-status")["stdout"].splitlines()
for line in out:
if line.startswith(("=", "-", "Status")):
continue
if "/" in line:
# We're in the summary line
for item in line.split("/"):
key, val = item.split(":")
ret["summary"][key.strip()] = val.strip()
if len(line.split()) == 4:
# We're on a node status line
(status, ring, pending, node) = line.split()
ret["membership"][node] = {
"Status": status,
"Ring": ring,
"Pending": pending,
}
return ret
def status():
"""
Current node status
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt '*' riak.status
"""
ret = {}
cmd = __execute_cmd("riak-admin", "status")
for i in cmd["stdout"].splitlines():
if ":" in i:
(name, val) = i.split(":", 1)
ret[name.strip()] = val.strip()
return ret
def test():
"""
Runs a test of a few standard Riak operations
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt '*' riak.test
"""
ret = {"comment": "", "success": False}
cmd = __execute_cmd("riak-admin", "test")
if cmd["retcode"] != 0:
ret["comment"] = cmd["stdout"]
else:
ret["comment"] = cmd["stdout"]
ret["success"] = True
return ret
def services():
"""
List available services on a node
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt '*' riak.services
"""
cmd = __execute_cmd("riak-admin", "services")
return cmd["stdout"][1:-1].split(",")
|
2,755 |
test assign none to enum or none
|
# -*- coding: UTF-8 -*-
# pylint: disable=missing-docstring, too-few-public-methods
"""
Test the trait-type ``UseEnum``.
"""
import unittest
import enum
from ipython_genutils.py3compat import string_types
from traitlets import HasTraits, TraitError, UseEnum
# -----------------------------------------------------------------------------
# TEST SUPPORT:
# -----------------------------------------------------------------------------
class Color(enum.Enum):
red = 1
green = 2
blue = 3
yellow = 4
class OtherColor(enum.Enum):
red = 0
green = 1
# -----------------------------------------------------------------------------
# TESTSUITE:
# -----------------------------------------------------------------------------
class TestUseEnum(unittest.TestCase):
# pylint: disable=invalid-name
class Example(HasTraits):
color = UseEnum(Color, help="Color enum")
def test_assign_enum_value(self):
example = self.Example()
example.color = Color.green
self.assertEqual(example.color, Color.green)
def test_assign_all_enum_values(self):
# pylint: disable=no-member
enum_values = [value for value in Color.__members__.values()]
for value in enum_values:
self.assertIsInstance(value, Color)
example = self.Example()
example.color = value
self.assertEqual(example.color, value)
self.assertIsInstance(value, Color)
def test_assign_enum_value__with_other_enum_raises_error(self):
example = self.Example()
with self.assertRaises(TraitError):
example.color = OtherColor.green
def test_assign_enum_name_1(self):
# -- CONVERT: string => Enum value (item)
example = self.Example()
example.color = "red"
self.assertEqual(example.color, Color.red)
def test_assign_enum_value_name(self):
# -- CONVERT: string => Enum value (item)
# pylint: disable=no-member
enum_names = [enum_val.name for enum_val in Color.__members__.values()]
for value in enum_names:
self.assertIsInstance(value, string_types)
example = self.Example()
enum_value = Color.__members__.get(value)
example.color = value
self.assertIs(example.color, enum_value)
self.assertEqual(example.color.name, value)
def test_assign_scoped_enum_value_name(self):
# -- CONVERT: string => Enum value (item)
scoped_names = ["Color.red", "Color.green", "Color.blue", "Color.yellow"]
for value in scoped_names:
example = self.Example()
example.color = value
self.assertIsInstance(example.color, Color)
self.assertEqual(str(example.color), value)
def test_assign_bad_enum_value_name__raises_error(self):
# -- CONVERT: string => Enum value (item)
bad_enum_names = ["UNKNOWN_COLOR", "RED", "Green", "blue2"]
for value in bad_enum_names:
example = self.Example()
with self.assertRaises(TraitError):
example.color = value
def test_assign_enum_value_number_1(self):
# -- CONVERT: number => Enum value (item)
example = self.Example()
example.color = 1 # == Color.red.value
example.color = Color.red.value
self.assertEqual(example.color, Color.red)
def test_assign_enum_value_number(self):
# -- CONVERT: number => Enum value (item)
# pylint: disable=no-member
enum_numbers = [enum_val.value
for enum_val in Color.__members__.values()]
for value in enum_numbers:
self.assertIsInstance(value, int)
example = self.Example()
example.color = value
self.assertIsInstance(example.color, Color)
self.assertEqual(example.color.value, value)
def test_assign_bad_enum_value_number__raises_error(self):
# -- CONVERT: number => Enum value (item)
bad_numbers = [-1, 0, 5]
for value in bad_numbers:
self.assertIsInstance(value, int)
assert UseEnum(Color).select_by_number(value, None) is None
example = self.Example()
with self.assertRaises(TraitError):
example.color = value
def test_ctor_without_default_value(self):
# -- IMPLICIT: default_value = Color.red (first enum-value)
class Example2(HasTraits):
color = UseEnum(Color)
example = Example2()
self.assertEqual(example.color, Color.red)
def test_ctor_with_default_value_as_enum_value(self):
# -- CONVERT: number => Enum value (item)
class Example2(HasTraits):
color = UseEnum(Color, default_value=Color.green)
example = Example2()
self.assertEqual(example.color, Color.green)
def test_ctor_with_default_value_none_and_not_allow_none(self):
# -- IMPLICIT: default_value = Color.red (first enum-value)
class Example2(HasTraits):
color1 = UseEnum(Color, default_value=None, allow_none=False)
color2 = UseEnum(Color, default_value=None)
example = Example2()
self.assertEqual(example.color1, Color.red)
self.assertEqual(example.color2, Color.red)
def test_ctor_with_default_value_none_and_allow_none(self):
class Example2(HasTraits):
color1 = UseEnum(Color, default_value=None, allow_none=True)
color2 = UseEnum(Color, allow_none=True)
example = Example2()
self.assertIs(example.color1, None)
self.assertIs(example.color2, None)
def test_assign_none_without_allow_none_resets_to_default_value(self):
class Example2(HasTraits):
color1 = UseEnum(Color, allow_none=False)
color2 = UseEnum(Color)
example = Example2()
example.color1 = None
example.color2 = None
self.assertIs(example.color1, Color.red)
self.assertIs(example.color2, Color.red)
def METHOD_NAME(self):
class Example2(HasTraits):
color = UseEnum(Color, allow_none=True)
example = Example2()
example.color = None
self.assertIs(example.color, None)
def test_assign_bad_value_with_to_enum_or_none(self):
class Example2(HasTraits):
color = UseEnum(Color, allow_none=True)
example = Example2()
with self.assertRaises(TraitError):
example.color = "BAD_VALUE"
|
2,756 |
base64 encode
|
#
# Copyright (c) 2022, Neptune Labs Sp. z o.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__all__ = [
"replace_patch_version",
"verify_type",
"is_stream",
"is_bool",
"is_int",
"is_float",
"is_string",
"is_float_like",
"is_dict_like",
"is_string_like",
"is_stringify_value",
"verify_collection_type",
"is_collection",
"base64_encode",
"base64_decode",
"get_absolute_paths",
"get_common_root",
"does_paths_share_common_drive",
"is_ipython",
"as_list",
]
import base64
import logging
import os
from glob import glob
from io import IOBase
from typing import (
Iterable,
List,
Mapping,
Optional,
Set,
TypeVar,
Union,
)
from neptune.internal.types.stringify_value import StringifyValue
T = TypeVar("T")
_logger = logging.getLogger(__name__)
def replace_patch_version(version: str):
return version[: version.index(".", version.index(".") + 1)] + ".0"
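# Sketch of the intended behaviour (assumed from the slicing above): the patch component of a
# semver-like string is zeroed, e.g. a version of "1.2.3" would come back as "1.2.0".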
def verify_type(var_name: str, var, expected_type: Union[type, tuple]):
try:
if isinstance(expected_type, tuple):
type_name = " or ".join(get_type_name(t) for t in expected_type)
else:
type_name = get_type_name(expected_type)
except Exception as e:
# Just to be sure that nothing weird will be raised here
raise TypeError("Incorrect type of {}".format(var_name)) from e
if not isinstance(var, expected_type):
raise TypeError("{} must be a {} (was {})".format(var_name, type_name, type(var)))
if isinstance(var, IOBase) and not hasattr(var, "read"):
raise TypeError("{} is a stream, which does not implement read method".format(var_name))
def is_stream(var):
return isinstance(var, IOBase) and hasattr(var, "read")
def is_bool(var):
return isinstance(var, bool)
def is_int(var):
return isinstance(var, int)
def is_float(var):
return isinstance(var, (float, int))
def is_string(var):
return isinstance(var, str)
def is_float_like(var):
try:
_ = float(var)
return True
except (ValueError, TypeError):
return False
def is_dict_like(var):
return isinstance(var, (dict, Mapping))
def is_string_like(var):
try:
_ = str(var)
return True
except ValueError:
return False
def is_stringify_value(var):
return isinstance(var, StringifyValue)
def get_type_name(_type: Union[type, tuple]):
return _type.__name__ if hasattr(_type, "__name__") else str(_type)
def verify_collection_type(var_name: str, var, expected_type: Union[type, tuple]):
verify_type(var_name, var, (list, set, tuple))
for value in var:
verify_type("elements of collection '{}'".format(var_name), value, expected_type)
def is_collection(var) -> bool:
return isinstance(var, (list, set, tuple))
def METHOD_NAME(data: bytes) -> str:
return base64.b64encode(data).decode("utf-8")
def base64_decode(data: str) -> bytes:
return base64.b64decode(data.encode("utf-8"))
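# Round-trip sketch (standard library behaviour; values shown purely for illustration): encoding
# b"data" yields "ZGF0YQ==" and base64_decode("ZGF0YQ==") returns b"data" again.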
def get_absolute_paths(file_globs: Iterable[str]) -> List[str]:
expanded_paths: Set[str] = set()
for file_glob in file_globs:
expanded_paths |= set(glob(file_glob, recursive=True))
return list(os.path.abspath(expanded_file) for expanded_file in expanded_paths)
def get_common_root(absolute_paths: List[str]) -> Optional[str]:
try:
common_root = os.path.commonpath(absolute_paths)
if os.path.isfile(common_root):
common_root = os.path.dirname(common_root)
if common_root.startswith(os.getcwd() + os.sep):
common_root = os.getcwd()
return common_root
except ValueError:
return None
def does_paths_share_common_drive(paths: List[str]) -> bool:
return len(set(map(lambda path: os.path.splitdrive(path)[0], paths))) == 1
def is_ipython() -> bool:
try:
import IPython
ipython = IPython.core.getipython.get_ipython()
return ipython is not None
except ImportError:
return False
def as_list(name: str, value: Optional[Union[str, Iterable[str]]]) -> Optional[Iterable[str]]:
verify_type(name, value, (type(None), str, Iterable))
if value is None:
return []
if isinstance(value, str):
return [value]
verify_collection_type(name, value, str)
return value
|
2,757 |
monitoring cleanup
|
from pysys.basetest import BaseTest
import time
import re
import json
"""
Validate tedge-mapper-collectd messages that are published
on tedge/measurements
Given a configured system
When we start collectd with sudo in the background
When we start the tedge-mapper-collectd with sudo in the background
When we start tedge sub with sudo in the background
Wait for a couple of seconds to publish a couple of batches of messages
Then we kill tedge sub with sudo as it is running with a different user account
Then we validate the messages in the output of tedge sub.
"""
class MonitoringWithCollectd(BaseTest):
def setup(self):
self.js_msg = ""
self.cpu_cnt = 0
self.memory_cnt = 0
self.time_cnt = 0
self.disk_cnt = 0
self.tedge = "/usr/bin/tedge"
self.sudo = "/usr/bin/sudo"
collectd = self.startProcess(
command=self.sudo,
arguments=["systemctl", "start", "collectd"],
stdouterr="collectd",
)
collectd_mapper = self.startProcess(
command=self.sudo,
arguments=["systemctl", "start", "tedge-mapper-collectd"],
stdouterr="collectd_mapper",
)
self.addCleanupFunction(self.METHOD_NAME)
def execute(self):
time.sleep(0.1)
sub = self.startProcess(
command=self.sudo,
arguments=[self.tedge, "mqtt", "sub", "--no-topic", "tedge/#"],
stdouterr="tedge_sub",
background=True,
)
sub = self.startProcess(
command=self.sudo,
arguments=[self.tedge, "mqtt", "sub", "collectd/#"],
stdouterr="collectd_sub",
background=True,
)
# Wait for a small amount of time to give tedge sub time
# to initialize and capture a couple of batches of messages
# that are published by tedge-mapper-collectd.
time.sleep(12)
# Kill the subscriber process explicitly with sudo as PySys does
# not have the rights to do it
kill = self.startProcess(
command=self.sudo,
arguments=["killall", "tedge"],
stdouterr="kill_out",
)
def validate(self):
self.assertGrep("tedge_sub.out", r"time|cpu|memory|df-root")
self.assertThat(
"collectd_msg_validation_result == expected_result",
collectd_msg_validation_result=self.validate_json(),
expected_result=True,
)
def validate_json(self):
f = open(self.output + "/tedge_sub.out", "r")
lines = f.readlines()
for line in lines:
self.js_msg = json.loads(line)
if not self.validate_cpu():
reason = "cpu stat validation failed in message: " + str(line)
self.abort(False, reason)
if not self.validate_time():
reason = "time validation failed in message: " + str(line)
self.abort(False, reason)
if not self.validate_memory():
reason = "memory stat validation failed in message: " + str(line)
self.abort(False, reason)
# validate disk stats if the entries are present, as the disk stats collection window is bigger
if "df-root" in self.js_msg:
if not self.validate_disk():
reason = "disk stat validation failed in message: " + str(line)
self.abort(False, reason)
if (
self.time_cnt == self.cpu_cnt == self.memory_cnt
and self.disk_cnt > 0
and self.disk_cnt <= 3
):
return True
else:
return False
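# Illustrative message shape assumed by the validators below (field values are hypothetical;
# the "df-root" entry only appears in some messages):
#   {"time": 1623846100.0, "cpu": {"percent-active": 0.5},
#    "memory": {"percent-used": 20.1}, "df-root": {"percent_bytes-used": 42.0}}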
def validate_cpu(self):
if self.js_msg["cpu"]:
if "percent-active" in self.js_msg["cpu"]:
self.cpu_cnt += 1
return True
else:
return False
else:
return False
def validate_time(self):
if self.js_msg["time"]:
self.time_cnt += 1
return True
else:
return False
def validate_memory(self):
if self.js_msg["memory"]:
if "percent-used" in self.js_msg["memory"]:
self.memory_cnt += 1
return True
else:
return False
else:
return False
def validate_disk(self):
if "percent_bytes-used" in self.js_msg["df-root"]:
self.disk_cnt += 1
return True
else:
return False
def METHOD_NAME(self):
self.log.info("monitoring_cleanup")
collectd = self.startProcess(
command=self.sudo,
arguments=["systemctl", "stop", "tedge-mapper-collectd"],
stdouterr="collectd_mapper",
)
collectd = self.startProcess(
command=self.sudo,
arguments=["systemctl", "stop", "collectd"],
stdouterr="collectd",
)
|
2,758 |
test excluded user repo
|
"""Test cases for MagicLink."""
from .. import util
class TestMagicLinkShortner(util.MdCase):
"""Test cases for repo link shortening."""
extension = [
'pymdownx.magiclink',
]
extension_configs = {
'pymdownx.magiclink': {
'repo_url_shortener': True
}
}
def test_user(self):
"""Test user shortening."""
# Test #! original syntax
self.check_markdown(
r'https://github.com/facelessuser',
r'<p><a class="magiclink magiclink-github magiclink-mention" href="https://github.com/facelessuser" title="GitHub User: facelessuser">@facelessuser</a></p>' # noqa: E501
)
def test_repo(self):
"""Test repository shortening."""
# Test #! original syntax
self.check_markdown(
r'https://github.com/facelessuser/pymdown-extensions',
r'<p><a class="magiclink magiclink-github magiclink-repository" href="https://github.com/facelessuser/pymdown-extensions" title="GitHub Repository: facelessuser/pymdown-extensions">facelessuser/pymdown-extensions</a></p>' # noqa: E501
)
def test_no_social(self):
"""Test that social shortening does not happen."""
self.check_markdown(
r'https://twitter.com/someuser',
r'<p><a href="https://twitter.com/someuser">https://twitter.com/someuser</a></p>'
)
def test_excluded_user(self):
"""Test excluded."""
self.check_markdown(
r'https://github.com/support',
r'<p><a href="https://github.com/support">https://github.com/support</a></p>'
)
def METHOD_NAME(self):
"""Test excluded."""
self.check_markdown(
r'https://github.com/support/repo',
r'<p><a href="https://github.com/support/repo">https://github.com/support/repo</a></p>'
)
def test_discuss(self):
"""Test discuss."""
self.check_markdown(
r'https://github.com/facelessuser/pymdown-extensions/discussions/1173',
r'<p><a class="magiclink magiclink-github magiclink-discussion" href="https://github.com/facelessuser/pymdown-extensions/discussions/1173" title="GitHub Discussion: facelessuser/pymdown-extensions #1173">facelessuser/pymdown-extensions?1173</a></p>' # noqa: E501
)
class TestMagicLinkShorthand(util.MdCase):
"""Test cases for repo link shortening."""
extension = [
'pymdownx.magiclink',
]
extension_configs = {
'pymdownx.magiclink': {
'repo_url_shorthand': True,
'user': 'facelessuser',
'repo': 'pymdown-extensions'
}
}
def test_discuss(self):
"""Test discuss."""
self.check_markdown(
r'?1173',
r'<p><a class="magiclink magiclink-github magiclink-discussion" href="https://github.com/facelessuser/pymdown-extensions/discussions/1173" title="GitHub Discussion: facelessuser/pymdown-extensions #1173">?1173</a></p>' # noqa: E501
)
def test_bad_discuss(self):
"""Test repo that doesn't support discussions."""
self.check_markdown(
r'gitlab:user/repo?1173',
r'<p>gitlab:user/repo?1173</p>'
)
class TestMagicLinkExternalShorthand(util.MdCase):
"""Test cases for repo link shortening."""
extension = [
'pymdownx.magiclink',
]
extension_configs = {
'pymdownx.magiclink': {
'repo_url_shorthand': True,
'user': 'facelessuser',
'repo': 'pymdown-extensions',
'provider': 'gitlab'
}
}
def test_bad_discuss(self):
"""Test repo that doesn't support discussions."""
self.check_markdown(
r'?1173',
r'<p>?1173</p>'
)
class TestMagicLinkShortnerSocial(util.MdCase):
"""Test cases for social link shortener."""
extension = [
'pymdownx.magiclink',
]
extension_configs = {
'pymdownx.magiclink': {
'social_url_shortener': True
}
}
def test_user(self):
"""Test user shortening."""
# Test #! original syntax
self.check_markdown(
r'https://twitter.com/someuser',
r'<p><a class="magiclink magiclink-twitter magiclink-mention" href="https://twitter.com/someuser" title="Twitter User: someuser">@someuser</a></p>' # noqa: E501
)
def test_no_repo(self):
"""Test that repository shortening does not happen."""
self.check_markdown(
r'https://github.com/facelessuser',
r'<p><a href="https://github.com/facelessuser">https://github.com/facelessuser</a></p>'
)
def test_excluded(self):
"""Test excluded user."""
self.check_markdown(
r'https://twitter.com/home',
r'<p><a href="https://twitter.com/home">https://twitter.com/home</a></p>'
)
|
2,759 |
prepare request
|
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(**kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2021-07-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/providers/Microsoft.ContainerService/operations")
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class Operations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.containerservice.v2021_07_01.ContainerServiceClient`'s
:attr:`operations` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version")
@distributed_trace
def list(self, **kwargs: Any) -> Iterable["_models.OperationValue"]:
"""Gets a list of operations.
Gets a list of operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either OperationValue or the result of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2021_07_01.models.OperationValue]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2021-07-01"))
cls: ClsType[_models.OperationListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def METHOD_NAME(next_link=None):
if not next_link:
request = build_list_request(
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("OperationListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return None, iter(list_of_elem)
def get_next(next_link=None):
request = METHOD_NAME(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list.metadata = {"url": "/providers/Microsoft.ContainerService/operations"}
|
2,760 |
set value
|
# BSD 3-Clause License
#
# Copyright (c) 2019, Elasticsearch BV
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import typing
import uuid
from opentelemetry.context import Context
from opentelemetry.trace.propagation import _SPAN_KEY
from elasticapm.contrib.opentelemetry.span import Span as OtelSpan
from elasticapm.traces import Transaction, execution_context
logger = logging.getLogger("elasticapm.otel")
def create_key(keyname: str) -> str:
"""To allow cross-cutting concern to control access to their local state,
the RuntimeContext API provides a function which takes a keyname as input,
and returns a unique key.
Args:
keyname: The key name is for debugging purposes and is not required to be unique.
Returns:
A unique string representing the newly created key.
"""
return keyname + "-" + str(uuid.uuid4())
def get_value(key: str, context: typing.Optional[Context] = None) -> "object":
"""To access the local state of a concern, the RuntimeContext API
provides a function which takes a context and a key as input,
and returns a value.
Args:
key: The key of the value to retrieve.
context: The context from which to retrieve the value, if None, the current context is used.
Returns:
The value associated with the key.
"""
return context.get(key) if context is not None else get_current().get(key)
def METHOD_NAME(key: str, value: "object", context: typing.Optional[Context] = None) -> Context:
"""To record the local state of a cross-cutting concern, the
RuntimeContext API provides a function which takes a context, a
key, and a value as input, and returns an updated context
which contains the new value.
Args:
key: The key of the entry to set.
value: The value of the entry to set.
context: The context to copy, if None, the current context is used.
Returns:
A new `Context` containing the value set.
"""
if context is None:
context = get_current()
new_values = context.copy()
new_values[key] = value
return Context(new_values)
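# Hedged usage sketch (hypothetical key and value): calling the setter above with ("my-key", 42)
# returns a copy of the current context containing that entry, so get_value("my-key", context=new_ctx)
# would yield 42 while the original context is left untouched.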
def get_current() -> Context:
"""To access the context associated with program execution,
the Context API provides a function which takes no arguments
and returns a Context.
Returns:
The current `Context` object.
"""
span = execution_context.get_span()
if not span:
span = execution_context.get_transaction()
if not span:
return Context()
otel_span = getattr(span, "otel_wrapper", OtelSpan(span.name, span))
context = otel_span.otel_context
return context
def attach(context: Context) -> object:
"""Associates a Context with the caller's current execution unit.
Due to limitations in the Elastic APM context management, a token is not
returned by this method, nor required to detach() a Context later.
Note that a Context will not be attached if it doesn't have an OtelSpan at _SPAN_KEY
Args:
context: The Context to set as current.
Returns:
None
"""
span = context.get(_SPAN_KEY)
if not span:
logger.error("Attempted to attach a context without a valid OtelSpan")
return None
span.otel_context = context
elastic_span = span.elastic_span
if isinstance(elastic_span, Transaction):
execution_context.set_transaction(elastic_span)
else:
execution_context.set_span(elastic_span)
return None
def detach(token: typing.Optional[object] = None) -> None:
"""Resets the Context associated with the caller's current execution unit
to the value it had before attaching a specified Context.
Due to limitations in the Elastic APM context management, a token is not
returned by attach(), nor required to detach() a Context later.
Args:
token: Tokens are not supported in this bridge, this argument is unused
"""
if execution_context.get_span():
execution_context.unset_span()
else:
logger.warning("Can't detach a running transaction. Please end the transaction instead.")
|
2,761 |
plot corr
|
import json
import plotly.subplots
from dash import html
from dash import dcc
from dash.dependencies import Input, Output, State
from NuRadioReco.eventbrowser.app import app
from NuRadioReco.utilities import units
from NuRadioReco.eventbrowser.default_layout import default_layout
from NuRadioReco.framework.parameters import stationParameters as stnp
import NuRadioReco.eventbrowser.dataprovider
provider = NuRadioReco.eventbrowser.dataprovider.DataProvider()
layout = [
html.Div([
html.Div([
dcc.RadioItems(
id='xcorrelation-event-type',
options=[
{'label': 'Neutrino', 'value': 'nu'},
{'label': 'Cosmic Ray', 'value': 'cr'}
],
value='nu'
)
], style={'flex': 'none', 'padding-right': '20px'}),
html.Div([
dcc.Dropdown(
id='cr-xcorrelation-dropdown',
options=[]
)
], style={'flex': '1'})
], style={'display': 'flex'}),
html.Div([
html.Div([
dcc.Graph(id='cr-xcorrelation'),
], style={'flex': '1'}),
html.Div([
dcc.Graph(id='cr-xcorrelation-amplitude'),
], style={'flex': '1'})
], style={'display': 'flex'})
]
cr_xcorr_options = [
{'label': 'maximum cr x-corr all channels', 'value': 'cr_max_xcorr'},
{'label': 'maximum of avg cr x-corr in parallel cr channels', 'value': 'cr_avg_xcorr_parallel_crchannels'},
{'label': 'maximum cr x-corr cr channels', 'value': 'cr_max_xcorr_crchannels'},
{'label': 'average cr x-corr cr channels', 'value': 'cr_avg_xcorr_crchannels'},
]
nu_xcorr_options = [
{'label': 'maximum nu x-corr all channels', 'value': 'nu_max_xcorr'},
{'label': 'maximum of avg nu x-corr in parallel nu channels', 'value': 'nu_avg_xcorr_parallel_nuchannels'},
{'label': 'maximum nu x-corr nu channels', 'value': 'nu_max_xcorr_nuchannels'},
{'label': 'average nu x-corr nu channels', 'value': 'nu_avg_xcorr_nuchannels'}
]
@app.callback(Output('cr-xcorrelation-dropdown', 'options'),
[Input('xcorrelation-event-type', 'value')])
def set_xcorrelation_options(event_type):
if event_type == 'nu':
return nu_xcorr_options
else:
return cr_xcorr_options
@app.callback(Output('cr-xcorrelation', 'figure'),
[Input('cr-xcorrelation-dropdown', 'value'),
Input('filename', 'value'),
Input('event-ids', 'children'),
Input('station-id-dropdown', 'value'),
Input('xcorrelation-event-type', 'value')],
[State('user_id', 'children')])
def METHOD_NAME(xcorr_type, filename, jcurrent_selection, station_id, event_type, juser_id):
if filename is None or station_id is None or xcorr_type is None:
return {}
user_id = json.loads(juser_id)
nurio = provider.get_file_handler(user_id, filename)
fig = plotly.subplots.make_subplots(rows=1, cols=1)
keys = nurio.get_header()[station_id].keys()
if event_type == 'nu':
if stnp.nu_xcorrelations not in keys:
return {}
xcorrs = nurio.get_header()[station_id][stnp.nu_xcorrelations]
else:
if stnp.cr_xcorrelations not in keys:
return {}
xcorrs = nurio.get_header()[station_id][stnp.cr_xcorrelations]
if stnp.station_time in keys:
times = []
for time in nurio.get_header()[station_id][stnp.station_time]:
times.append(time.value)
current_selection = json.loads(jcurrent_selection)
fig.append_trace(plotly.graph_objs.Scatter(
x=times,
y=[xcorrs[i][xcorr_type] for i in range(len(xcorrs))],
text=[str(x) for x in nurio.get_event_ids()],
customdata=[x for x in range(nurio.get_n_events())],
mode='markers',
opacity=1,
selectedpoints=current_selection
), 1, 1)
else:
return {}
fig['layout'].update(default_layout)
fig['layout']['yaxis'].update({'title': xcorr_type, 'range': [0, 1]})
fig['layout']['hovermode'] = 'closest'
return fig
@app.callback(Output('cr-xcorrelation-amplitude', 'figure'),
[Input('cr-xcorrelation-dropdown', 'value'),
Input('filename', 'value'),
Input('event-ids', 'children'),
Input('xcorrelation-event-type', 'value'),
Input('station-id-dropdown', 'value')],
[State('user_id', 'children')])
def plot_corr_amplitude(xcorr_type, filename, jcurrent_selection, event_type, station_id, juser_id):
if filename is None or station_id is None or xcorr_type is None:
return {}
user_id = json.loads(juser_id)
nurio = provider.get_file_handler(user_id, filename)
fig = plotly.subplots.make_subplots(rows=1, cols=1)
keys = nurio.get_header()[station_id].keys()
if event_type == 'nu':
if stnp.nu_xcorrelations not in keys:
return {}
xcorrs = nurio.get_header()[station_id][stnp.nu_xcorrelations]
else:
if stnp.cr_xcorrelations not in keys:
return {}
xcorrs = nurio.get_header()[station_id][stnp.cr_xcorrelations]
if stnp.channels_max_amplitude in keys:
current_selection = json.loads(jcurrent_selection)
fig.append_trace(plotly.graph_objs.Scatter(
x=nurio.get_header()[station_id][stnp.channels_max_amplitude] / units.mV,
y=[xcorrs[i][xcorr_type] for i in range(len(xcorrs))],
text=[str(x) for x in nurio.get_event_ids()],
customdata=[x for x in range(nurio.get_n_events())],
mode='markers',
opacity=1,
selectedpoints=current_selection
), 1, 1)
else:
return {}
fig['layout'].update(default_layout)
fig['layout']['xaxis'].update({'type': 'log', 'title': 'maximum amplitude [mV]'})
fig['layout']['yaxis'].update({'title': xcorr_type, 'range': [0, 1]})
fig['layout']['hovermode'] = 'closest'
return fig
|
2,762 |
run command
|
# -*- coding: utf-8 -*-
# (c) 2020, Alexei Znamensky <[email protected]>
# Copyright (c) 2020, Ansible Project
# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
# SPDX-License-Identifier: BSD-2-Clause
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from functools import partial
class ArgFormat(object):
"""
Argument formatter for use as a command line parameter. Used in CmdMixin.
"""
BOOLEAN = 0
PRINTF = 1
FORMAT = 2
BOOLEAN_NOT = 3
@staticmethod
def stars_deco(num):
if num == 1:
def deco(f):
return lambda v: f(*v)
return deco
elif num == 2:
def deco(f):
return lambda v: f(**v)
return deco
return lambda f: f
def __init__(self, name, fmt=None, style=FORMAT, stars=0):
"""
THIS CLASS IS BEING DEPRECATED.
It was never meant to be used outside the scope of CmdMixin, and CmdMixin is being deprecated.
See the deprecation notice in ``CmdMixin.__init__()`` below.
Creates a CLI-formatter for one specific argument. The argument may be a module parameter or just a named parameter for
the CLI command execution.
:param name: Name of the argument to be formatted
:param fmt: Either a str to be formatted (using or not printf-style) or a callable that does that
:param style: Whether fmt (as str) should use printf-style formatting.
Ignored if fmt is None or not a str (should be callable).
:param stars: An int with value 0, 1 or 2, indicating whether to format the value as: value, *value or **value
"""
def printf_fmt(_fmt, v):
try:
return [_fmt % v]
except TypeError as e:
if e.args[0] != 'not all arguments converted during string formatting':
raise
return [_fmt]
_fmts = {
ArgFormat.BOOLEAN: lambda _fmt, v: ([_fmt] if bool(v) else []),
ArgFormat.BOOLEAN_NOT: lambda _fmt, v: ([] if bool(v) else [_fmt]),
ArgFormat.PRINTF: printf_fmt,
ArgFormat.FORMAT: lambda _fmt, v: [_fmt.format(v)],
}
self.name = name
self.stars = stars
self.style = style
if fmt is None:
fmt = "{0}"
style = ArgFormat.FORMAT
if isinstance(fmt, str):
func = _fmts[style]
self.arg_format = partial(func, fmt)
elif isinstance(fmt, list) or isinstance(fmt, tuple):
self.arg_format = lambda v: [_fmts[style](f, v)[0] for f in fmt]
elif hasattr(fmt, '__call__'):
self.arg_format = fmt
else:
raise TypeError('Parameter fmt must be either: a string, a list/tuple of '
'strings or a function: type={0}, value={1}'.format(type(fmt), fmt))
if stars:
self.arg_format = (self.stars_deco(stars))(self.arg_format)
def to_text(self, value):
if value is None and self.style != ArgFormat.BOOLEAN_NOT:
return []
func = self.arg_format
return [str(p) for p in func(value)]
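# Illustrative formatting examples (hypothetical argument names, derived from the styles above):
#   ArgFormat("force", fmt="--force", style=ArgFormat.BOOLEAN).to_text(True)   -> ["--force"]
#   ArgFormat("level", fmt="--level=%s", style=ArgFormat.PRINTF).to_text(3)    -> ["--level=3"]
#   ArgFormat("name", fmt="--name={0}").to_text("x")                           -> ["--name=x"]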
class CmdMixin(object):
"""
THIS CLASS IS BEING DEPRECATED.
See the deprecation notice in ``CmdMixin.__init__()`` below.
Mixin for mapping module options to running a CLI command with its arguments.
"""
command = None
command_args_formats = {}
run_command_fixed_options = {}
check_rc = False
force_lang = "C"
@property
def module_formats(self):
result = {}
for param in self.module.params.keys():
result[param] = ArgFormat(param)
return result
@property
def custom_formats(self):
result = {}
for param, fmt_spec in self.command_args_formats.items():
result[param] = ArgFormat(param, **fmt_spec)
return result
def __init__(self, *args, **kwargs):
super(CmdMixin, self).__init__(*args, **kwargs)
self.module.deprecate(
'The CmdMixin used in classes CmdModuleHelper and CmdStateModuleHelper is being deprecated. '
'Modules should use community.general.plugins.module_utils.cmd_runner.CmdRunner instead.',
version='8.0.0',
collection_name='community.general',
)
def _calculate_args(self, extra_params=None, params=None):
def add_arg_formatted_param(_cmd_args, arg_format, _value):
args = list(arg_format.to_text(_value))
return _cmd_args + args
def find_format(_param):
return self.custom_formats.get(_param, self.module_formats.get(_param))
extra_params = extra_params or dict()
cmd_args = list([self.command]) if isinstance(self.command, str) else list(self.command)
try:
cmd_args[0] = self.module.get_bin_path(cmd_args[0], required=True)
except ValueError:
pass
param_list = params if params else self.vars.keys()
for param in param_list:
if isinstance(param, dict):
if len(param) != 1:
self.do_raise("run_command parameter as a dict must contain only one key: {0}".format(param))
_param = list(param.keys())[0]
fmt = find_format(_param)
value = param[_param]
elif isinstance(param, str):
if param in self.vars.keys():
fmt = find_format(param)
value = self.vars[param]
elif param in extra_params:
fmt = find_format(param)
value = extra_params[param]
else:
self.do_raise('Cannot determine value for parameter: {0}'.format(param))
else:
self.do_raise("run_command parameter must be either a str or a dict: {0}".format(param))
cmd_args = add_arg_formatted_param(cmd_args, fmt, value)
return cmd_args
def process_command_output(self, rc, out, err):
return rc, out, err
def METHOD_NAME(self,
extra_params=None,
params=None,
process_output=None,
publish_rc=True,
publish_out=True,
publish_err=True,
publish_cmd=True,
*args, **kwargs):
cmd_args = self._calculate_args(extra_params, params)
options = dict(self.run_command_fixed_options)
options['check_rc'] = options.get('check_rc', self.check_rc)
options.update(kwargs)
env_update = dict(options.get('environ_update', {}))
if self.force_lang:
env_update.update({
'LANGUAGE': self.force_lang,
'LC_ALL': self.force_lang,
})
self.update_output(force_lang=self.force_lang)
options['environ_update'] = env_update
rc, out, err = self.module.METHOD_NAME(cmd_args, *args, **options)
if publish_rc:
self.update_output(rc=rc)
if publish_out:
self.update_output(stdout=out)
if publish_err:
self.update_output(stderr=err)
if publish_cmd:
self.update_output(cmd_args=cmd_args)
if process_output is None:
_process = self.process_command_output
else:
_process = process_output
return _process(rc, out, err)
|
2,763 |
set up
|
#
# @file TestL3Parameter.py
# @brief L3 Parameter unit tests
#
# @author Akiya Jouraku (Python conversion)
# @author Sarah Keating
#
# ====== WARNING ===== WARNING ===== WARNING ===== WARNING ===== WARNING ======
#
# DO NOT EDIT THIS FILE.
#
# This file was generated automatically by converting the file located at
# src/sbml/test/TestL3Parameter.c
# using the conversion program dev/utilities/translateTests/translateTests.pl.
# Any changes made here will be lost the next time the file is regenerated.
#
# -----------------------------------------------------------------------------
# This file is part of libSBML. Please visit http://sbml.org for more
# information about SBML, and the latest version of libSBML.
#
# Copyright 2005-2010 California Institute of Technology.
# Copyright 2002-2005 California Institute of Technology and
# Japan Science and Technology Corporation.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation. A copy of the license agreement is provided
# in the file named "LICENSE.txt" included with this software distribution
# and also available online as http://sbml.org/software/libsbml/license.html
# -----------------------------------------------------------------------------
import sys
import unittest
import libsbml
def isnan(x):
return (x != x)
pass
class TestL3Parameter(unittest.TestCase):
global P
P = None
def METHOD_NAME(self):
self.P = libsbml.Parameter(3,1)
if (self.P == None):
pass
pass
def tearDown(self):
_dummyList = [ self.P ]; _dummyList[:] = []; del _dummyList
pass
def test_L3_Parameter_NS(self):
self.assertTrue( self.P.getNamespaces() != None )
self.assertTrue( self.P.getNamespaces().getLength() == 1 )
self.assertTrue(( "http://www.sbml.org/sbml/level3/version1/core" == self.P.getNamespaces().getURI(0) ))
pass
def test_L3_Parameter_constant(self):
self.assertTrue( self.P.isSetConstant() == False )
self.P.setConstant(True)
self.assertTrue( self.P.getConstant() == True )
self.assertTrue( self.P.isSetConstant() == True )
self.P.setConstant(False)
self.assertTrue( self.P.getConstant() == False )
self.assertTrue( self.P.isSetConstant() == True )
pass
def test_L3_Parameter_create(self):
self.assertTrue( self.P.getTypeCode() == libsbml.SBML_PARAMETER )
self.assertTrue( self.P.getMetaId() == "" )
self.assertTrue( self.P.getNotes() == None )
self.assertTrue( self.P.getAnnotation() == None )
self.assertTrue( self.P.getId() == "" )
self.assertTrue( self.P.getName() == "" )
self.assertTrue( self.P.getUnits() == "" )
self.assertEqual( True, isnan(self.P.getValue()) )
self.assertTrue( self.P.getConstant() == True )
self.assertEqual( False, self.P.isSetId() )
self.assertEqual( False, self.P.isSetName() )
self.assertEqual( False, self.P.isSetValue() )
self.assertEqual( False, self.P.isSetUnits() )
self.assertEqual( False, self.P.isSetConstant() )
pass
def test_L3_Parameter_createWithNS(self):
xmlns = libsbml.XMLNamespaces()
xmlns.add( "http://www.sbml.org", "testsbml")
sbmlns = libsbml.SBMLNamespaces(3,1)
sbmlns.addNamespaces(xmlns)
p = libsbml.Parameter(sbmlns)
self.assertTrue( p.getTypeCode() == libsbml.SBML_PARAMETER )
self.assertTrue( p.getMetaId() == "" )
self.assertTrue( p.getNotes() == None )
self.assertTrue( p.getAnnotation() == None )
self.assertTrue( p.getLevel() == 3 )
self.assertTrue( p.getVersion() == 1 )
self.assertTrue( p.getNamespaces() != None )
self.assertTrue( p.getNamespaces().getLength() == 2 )
self.assertTrue( p.getId() == "" )
self.assertTrue( p.getName() == "" )
self.assertTrue( p.getUnits() == "" )
self.assertEqual( True, isnan(p.getValue()) )
self.assertTrue( p.getConstant() == True )
self.assertEqual( False, p.isSetId() )
self.assertEqual( False, p.isSetName() )
self.assertEqual( False, p.isSetValue() )
self.assertEqual( False, p.isSetUnits() )
self.assertEqual( False, p.isSetConstant() )
_dummyList = [ p ]; _dummyList[:] = []; del _dummyList
pass
def test_L3_Parameter_free_NULL(self):
_dummyList = [ None ]; _dummyList[:] = []; del _dummyList
pass
def test_L3_Parameter_hasRequiredAttributes(self):
p = libsbml.Parameter(3,1)
self.assertEqual( False, p.hasRequiredAttributes() )
p.setId( "id")
self.assertEqual( False, p.hasRequiredAttributes() )
p.setConstant(False)
self.assertEqual( True, p.hasRequiredAttributes() )
_dummyList = [ p ]; _dummyList[:] = []; del _dummyList
pass
def test_L3_Parameter_id(self):
id = "mitochondria";
self.assertEqual( False, self.P.isSetId() )
self.P.setId(id)
self.assertTrue(( id == self.P.getId() ))
self.assertEqual( True, self.P.isSetId() )
if (self.P.getId() == id):
pass
pass
def test_L3_Parameter_name(self):
name = "My_Favorite_Factory";
self.assertEqual( False, self.P.isSetName() )
self.P.setName(name)
self.assertTrue(( name == self.P.getName() ))
self.assertEqual( True, self.P.isSetName() )
if (self.P.getName() == name):
pass
self.P.unsetName()
self.assertEqual( False, self.P.isSetName() )
if (self.P.getName() != None):
pass
pass
def test_L3_Parameter_units(self):
units = "volume";
self.assertEqual( False, self.P.isSetUnits() )
self.P.setUnits(units)
self.assertTrue(( units == self.P.getUnits() ))
self.assertEqual( True, self.P.isSetUnits() )
if (self.P.getUnits() == units):
pass
self.P.unsetUnits()
self.assertEqual( False, self.P.isSetUnits() )
if (self.P.getUnits() != None):
pass
pass
def test_L3_Parameter_value(self):
self.assertEqual( False, self.P.isSetValue() )
self.assertEqual( True, isnan(self.P.getValue()) )
self.P.setValue(1.5)
self.assertEqual( True, self.P.isSetValue() )
self.assertTrue( self.P.getValue() == 1.5 )
self.P.unsetValue()
self.assertEqual( False, self.P.isSetValue() )
self.assertEqual( True, isnan(self.P.getValue()) )
pass
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestL3Parameter))
return suite
if __name__ == "__main__":
if unittest.TextTestRunner(verbosity=1).run(suite()).wasSuccessful() :
sys.exit(0)
else:
sys.exit(1)
|
2,764 |
package info
|
from conan import ConanFile
from conan.errors import ConanInvalidConfiguration
from conan.tools.build import check_min_cppstd, stdcpp_library
from conan.tools.cmake import CMake, CMakeToolchain, cmake_layout
from conan.tools.files import apply_conandata_patches, copy, export_conandata_patches, get
from conan.tools.microsoft import is_msvc
from conan.tools.scm import Version
import os
required_conan_version = ">=1.54.0"
class VkBootstrapConan(ConanFile):
name = "vk-bootstrap"
description = "Vulkan bootstraping library."
license = "MIT"
topics = ("vulkan", "bootstrap", "setup")
homepage = "https://github.com/charles-lunarg/vk-bootstrap"
url = "https://github.com/conan-io/conan-center-index"
package_type = "library"
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
}
@property
def _min_cppstd(self):
return "14"
@property
def _compilers_minimum_version(self):
return {
"gcc": "5",
"Visual Studio": "15",
"msvc": "191",
"clang": "3.7" if stdcpp_library(self) == "stdc++" else "6",
"apple-clang": "10",
}
def export_sources(self):
export_conandata_patches(self)
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
if self.options.shared:
self.options.rm_safe("fPIC")
def layout(self):
cmake_layout(self, src_folder="src")
def requirements(self):
if Version(self.version) < "0.7":
self.requires("vulkan-headers/1.3.236.0", transitive_headers=True)
else:
self.requires("vulkan-headers/1.3.239.0", transitive_headers=True)
def validate(self):
if self.settings.compiler.get_safe("cppstd"):
check_min_cppstd(self, self._min_cppstd)
def loose_lt_semver(v1, v2):
lv1 = [int(v) for v in v1.split(".")]
lv2 = [int(v) for v in v2.split(".")]
min_length = min(len(lv1), len(lv2))
return lv1[:min_length] < lv2[:min_length]
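# Sketch of the comparison above (hypothetical inputs): loose_lt_semver("3.7", "3.10") compares
# [3, 7] < [3, 10] numerically and returns True, whereas a plain string comparison would not.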
minimum_version = self._compilers_minimum_version.get(str(self.settings.compiler), False)
if minimum_version and loose_lt_semver(str(self.settings.compiler.version), minimum_version):
raise ConanInvalidConfiguration(
f"{self.ref} requires C++{self._min_cppstd}, which your compiler does not support.",
)
if is_msvc(self) and self.options.shared:
raise ConanInvalidConfiguration(f"{self.ref} shared not supported with Visual Studio")
def source(self):
get(self, **self.conan_data["sources"][self.version], strip_root=True)
def generate(self):
tc = CMakeToolchain(self)
tc.variables["VK_BOOTSTRAP_TEST"] = False
vulkan_headers = self.dependencies["vulkan-headers"]
includedirs = ";".join(
[os.path.join(vulkan_headers.package_folder, includedir).replace("\\", "/")
for includedir in vulkan_headers.cpp_info.includedirs],
)
if Version(self.version) < "0.3.0":
tc.variables["Vulkan_INCLUDE_DIR"] = includedirs
else:
tc.variables["VK_BOOTSTRAP_VULKAN_HEADER_DIR"] = includedirs
if Version(self.version) >= "0.4.0":
tc.variables["VK_BOOTSTRAP_WERROR"] = False
tc.generate()
def build(self):
apply_conandata_patches(self)
cmake = CMake(self)
cmake.configure()
cmake.build()
def package(self):
copy(self, "LICENSE.txt", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses"))
cmake = CMake(self)
cmake.install()
def METHOD_NAME(self):
self.cpp_info.libs = ["vk-bootstrap"]
if self.settings.os in ["Linux", "FreeBSD"]:
self.cpp_info.system_libs = ["dl"]
|
2,765 |
test prepare twice raises
|
from pathlib import Path
import pytest
import qcodes as qc
from qcodes.dataset import connect, load_by_guid, load_or_create_experiment
from qcodes.dataset.data_set_in_memory import DataSetInMem
from qcodes.dataset.descriptions.dependencies import InterDependencies_
from qcodes.dataset.descriptions.param_spec import ParamSpecBase
def test_create_dataset_in_memory_explicit_db(empty_temp_db) -> None:
default_db_location = qc.config["core"]["db_location"]
extra_db_location = str(Path(default_db_location).parent / "extra.db")
load_or_create_experiment(
conn=connect(extra_db_location), experiment_name="myexp", sample_name="mysample"
)
ds = DataSetInMem._create_new_run(name="foo", path_to_db=str(extra_db_location))
assert ds.path_to_db == extra_db_location
assert default_db_location != extra_db_location
def test_empty_ds_parameters(experiment) -> None:
ds = DataSetInMem._create_new_run(name="foo")
assert ds._parameters is None
ds._perform_start_actions()
assert ds._parameters is None
ds.mark_completed()
assert ds._parameters is None
def test_write_metadata_to_explicit_db(empty_temp_db) -> None:
default_db_location = qc.config["core"]["db_location"]
extra_db_location = str(Path(default_db_location).parent / "extra.db")
load_or_create_experiment(experiment_name="myexp", sample_name="mysample")
load_or_create_experiment(
conn=connect(extra_db_location), experiment_name="myexp", sample_name="mysample"
)
ds = DataSetInMem._create_new_run(name="foo")
assert ds._parameters is None
assert ds.path_to_db == default_db_location
ds.export("netcdf")
ds.write_metadata_to_db(path_to_db=extra_db_location)
loaded_ds = load_by_guid(ds.guid, conn=connect(extra_db_location))
ds.the_same_dataset_as(loaded_ds)
def test_no_interdeps_raises_in_prepare(experiment) -> None:
ds = DataSetInMem._create_new_run(name="foo")
with pytest.raises(RuntimeError, match="No parameters supplied"):
ds.prepare(interdeps=InterDependencies_(), snapshot={})
def METHOD_NAME(experiment) -> None:
ds = DataSetInMem._create_new_run(name="foo")
pss: list[ParamSpecBase] = []
for n in range(3):
pss.append(ParamSpecBase(f"ps{n}", paramtype="numeric"))
idps = InterDependencies_(dependencies={pss[0]: (pss[1], pss[2])})
ds.prepare(interdeps=idps, snapshot={})
with pytest.raises(
RuntimeError, match="Cannot prepare a dataset that is not pristine."
):
ds.prepare(interdeps=idps, snapshot={})
def test_timestamps(experiment) -> None:
ds = DataSetInMem._create_new_run(name="foo")
assert ds.run_timestamp() is None
assert ds.run_timestamp_raw is None
assert ds.completed_timestamp() is None
assert ds.completed_timestamp_raw is None
pss: list[ParamSpecBase] = []
for n in range(3):
pss.append(ParamSpecBase(f"ps{n}", paramtype="numeric"))
idps = InterDependencies_(dependencies={pss[0]: (pss[1], pss[2])})
ds.prepare(interdeps=idps, snapshot={})
assert ds.run_timestamp() is not None
assert ds.run_timestamp_raw is not None
assert ds.completed_timestamp() is None
assert ds.completed_timestamp_raw is None
ds.mark_completed()
assert ds.run_timestamp() is not None
assert ds.run_timestamp_raw is not None
assert ds.completed_timestamp() is not None
assert ds.completed_timestamp_raw is not None
ds.mark_completed()
def test_mark_pristine_completed_raises(experiment) -> None:
ds = DataSetInMem._create_new_run(name="foo")
with pytest.raises(
RuntimeError, match="Can not mark a dataset as complete before it"
):
ds.mark_completed()
def test_load_from_non_existing_guid(experiment) -> None:
guid = "This is not a guid"
with pytest.raises(
RuntimeError, match="Could not find the requested run with GUID"
):
_ = DataSetInMem._load_from_db(conn=experiment.conn, guid=guid)
|
2,766 |
logout
|
# Copyright (c) 2020, 2022, Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
"""Provide Module Description
"""
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
__author__ = ["Andrew Hopkinson (Oracle Cloud Solutions A-Team)"]
__version__ = "1.0.0"
__module__ = "__init__"
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
import os
from flask import Flask
from flask import send_from_directory
# HERE
from flask import Response, session, redirect, url_for, render_template
from authlib.integrations.flask_client import OAuth
import base64, secrets, socket, urllib
from common.okitCommon import getOkitHome
from common.okitLogging import getLogger
# Configure logging
logger = getLogger()
def create_local_app(test_config=None):
# Create and Configure OKIT Web Designer App
app = Flask(__name__, instance_relative_config=True, instance_path=f'{getOkitHome()}/instance')
# Load Config
if test_config is None:
app.config.from_pyfile('config.py', silent=True)
else:
app.config.from_mapping(test_config)
# Ensure the instance folder exists
try:
os.makedirs(app.instance_path)
except OSError:
pass
# Add Upload location
app.config['UPLOADS_FOLDER'] = f'{getOkitHome()}/uploads'
# Set local
app.config['LOCAL'] = True
# Redirect / to designer page
@app.route('/')
def base():
return redirect("/okit/designer")
@app.route('/favicon.ico')
def favicon():
return send_from_directory(os.path.join(app.root_path, 'static'),
'favicon.ico', mimetype='image/vnd.microsoft.icon')
from . import okitWebDesigner
app.register_blueprint(okitWebDesigner.bp)
try:
from . import okitPricing
app.register_blueprint(okitPricing.bp)
except Exception as e:
logger.debug(e)
from . import okitOci
app.register_blueprint(okitOci.bp)
from . import okitImport
app.register_blueprint(okitImport.bp)
from . import okitExport
app.register_blueprint(okitExport.bp)
from . import okitPca
app.register_blueprint(okitPca.bp)
return app
def create_authenticated_app(test_config=None):
# Create and Configure OKIT Web Designer App
app = Flask(__name__, instance_relative_config=True, instance_path=f'{getOkitHome()}/instance')
# Load Config
if test_config is None:
app.config.from_pyfile('config.py', silent=True)
else:
app.config.from_mapping(test_config)
# Ensure the instance folder exists
try:
os.makedirs(app.instance_path)
except OSError:
pass
# Add Upload location
app.config['UPLOADS_FOLDER'] = f'{getOkitHome()}/uploads'
# The secret key must be static to be the same for all gunicorn workers
app.secret_key = '8980ffsd675747jjjh'
idcs_metadata_url = app.config['IDCS_API_BASE_URL'] + '/.well-known/openid-configuration'
oauth = OAuth(app)
idcs = oauth.register(name='idcs', server_metadata_url=idcs_metadata_url, client_kwargs={'scope':'openid email profile'})
if 'OKIT_SERVER_BASE' not in app.config:
app.config['OKIT_SERVER_BASE'] = 'http://' + socket.getfqdn()
@app.route('/favicon.ico')
def favicon():
return send_from_directory(os.path.join(app.root_path, 'static'),
'favicon.ico', mimetype='image/vnd.microsoft.icon')
from . import okitWebDesigner
# Login Step 1 - Redirect to IDCS
@okitWebDesigner.bp.route('/login', methods=(['GET', 'POST']))
def login():
return idcs.authorize_redirect(app.config['OKIT_SERVER_BASE'] + url_for('okit.postlogin'))
# Login Step 2 - Local local token handling
@okitWebDesigner.bp.route('/postlogin', methods=(['GET', 'POST']))
def postlogin():
token = idcs.authorize_access_token()
userinfo = idcs.parse_id_token(token)
session['username'] = userinfo['user_displayname']
session['home_region'] = app.config['OCI_HOME_REGION']
session['tenant'] = app.config['OCI_TENANT']
logger.info(f"App Config {app.config}")
end_session_endpoint = idcs.server_metadata['end_session_endpoint']
logout_redirect_url = {
'post_logout_redirect_url' : app.config['OKIT_SERVER_BASE'] + url_for('okit.postlogout'),
'id_token_hint' : token['id_token']
}
logout_url = end_session_endpoint + '?post_logout_redirect_url=' + str(logout_redirect_url['post_logout_redirect_url']) + '&id_token_hint=' + str(logout_redirect_url['id_token_hint'])
session['logout'] = logout_url
return redirect(url_for('okit.designer'), code=302)
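# Flow sketch (assumed from the handlers above, not an addition to the app): /okit/login redirects
# the browser to IDCS, IDCS calls back into /okit/postlogin with an authorization code, the token is
# exchanged there, and the user details plus a pre-built logout URL are stored in the Flask session.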
# Logout Step 1 - Handled by IDCS
# Logout Step 2 - Local cleanup
@okitWebDesigner.bp.route('/logout', methods=(['GET', 'POST']))
def METHOD_NAME():
session.pop('username', None)
session.pop('logout', None)
session.pop('tenant', None)
session.pop('home_region', None)
return Response(status=200)
# Logout Step 3 - Local redirect to home page
@okitWebDesigner.bp.route('/postlogout', methods=(['GET', 'POST']))
def postlogout():
session.pop('username', None)
session.pop('logout', None)
session.pop('tenant', None)
session.pop('home_region', None)
return redirect(url_for('okit.designer'), code=302)
app.register_blueprint(okitWebDesigner.bp)
from . import okitPricing
app.register_blueprint(okitPricing.bp)
from . import okitOci
app.register_blueprint(okitOci.bp)
from . import okitImport
app.register_blueprint(okitImport.bp)
from . import okitExport
app.register_blueprint(okitExport.bp)
@app.route('/')
def index():
return login()
    return app
|
2,767 |
list
|
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._usages_operations import build_list_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class UsagesOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.webpubsub.aio.WebPubSubManagementClient`'s
:attr:`usages` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = METHOD_NAME(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def METHOD_NAME(self, location: str, **kwargs: Any) -> AsyncIterable["_models.SignalRServiceUsage"]:
"""List resource usage quotas by location.
:param location: the location like "eastus". Required.
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SignalRServiceUsage or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.webpubsub.models.SignalRServiceUsage]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.SignalRServiceUsageList] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
location=location,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.METHOD_NAME.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("SignalRServiceUsageList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
METHOD_NAME.metadata = {
"url": "/subscriptions/{subscriptionId}/providers/Microsoft.SignalRService/locations/{location}/usages"
}
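# Hedged usage sketch (not part of the generated SDK module): assumes an already
# configured aio ``WebPubSubManagementClient`` whose ``usages`` attribute is an
# instance of the class above; the location value is illustrative.
async def _demo_collect_usages(client: Any, location: str = "eastus"):
    collected = []
    # The paged operation returns an AsyncItemPaged; no request is sent until
    # iteration begins, and continuation links are followed automatically.
    async for usage in client.usages.METHOD_NAME(location):
        collected.append(usage)
    return collected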
|
2,768 |
test500x500
|
# ----------------------------------------------------------------------------
# CLASSES: nightly
#
# Test Case: molecule.py
#
# Tests: mesh - 3D points
# plots - Molecule
# operators - CreateBonds, Replicate
#
# Programmer: Kathleen Biagas
# Date: June 15, 2021
#
# Modifications:
#
# Kathleen Biagas, Tue Jul 13 09:51:58 PDT 2021
# Changed retrieval of renAtts from 'RenderingAttributes' to
# 'GetRenderingAttributes' when turning off specular highlighting. This
# fixes a bug in scalable,parallel,icet mode where molecule_04 test would
# fail to plot.
#
# ----------------------------------------------------------------------------
def SetGradientBackground():
annot = GetAnnotationAttributes()
annot.backgroundMode = annot.Gradient
annot.gradientBackgroundStyle = annot.Radial
annot.gradientColor1 = (102, 102, 153, 255)
annot.gradientColor2 = (0, 0, 0, 255)
annot.backgroundColor = (0, 0, 0, 255)
annot.foregroundColor = (255, 255, 255, 255)
SetAnnotationAttributes(annot)
def SetWhiteBackground():
annot = GetAnnotationAttributes()
annot.backgroundMode = annot.Solid
annot.foregroundColor = (0, 0, 0, 255)
annot.backgroundColor = (255, 255, 255, 255)
SetAnnotationAttributes(annot)
def METHOD_NAME(name):
# Save these images somewhat larger than a regular test case image
# to better see the molecules
backup = GetSaveWindowAttributes()
swa = SaveWindowAttributes()
swa.width = 500
swa.height = 500
swa.screenCapture = 1
Test(name, swa)
SetSaveWindowAttributes(backup)
def MoleculeOnly():
# images similar to those in Molecule Plot docs
SetGradientBackground()
# add specular highlighting
renAtts = GetRenderingAttributes()
renAtts.specularFlag = 1
SetRenderingAttributes(renAtts)
OpenDatabase(data_path("ProteinDataBank_test_data/crotamine.pdb"))
# color by element, Covalent radius, no bonds
AddPlot("Molecule", "element")
mol = MoleculeAttributes()
mol.drawAtomsAs = mol.SphereAtoms
mol.scaleRadiusBy = mol.Covalent
mol.drawBondsAs = mol.NoBonds
SetPlotOptions(mol)
DrawPlots()
v3d = GetView3D()
v3d.viewNormal = (0.784142, -0.592494, -0.184587)
v3d.viewUp = (-0.554863, -0.536159, -0.636129)
v3d.imageZoom = 1.77156
SetView3D(v3d)
METHOD_NAME("molecule_01")
# color by residue, cylinder bonds, radius proportional to covalent radius
ChangeActivePlotsVar("restype")
mol.drawBondsAs = mol.CylinderBonds
mol.radiusScaleFactor = 0.5
SetPlotOptions(mol)
DrawPlots()
METHOD_NAME("molecule_02")
# color by scalar (x-coord), no bonds
DefineScalarExpression("x", "coord(mesh)[0]")
ChangeActivePlotsVar("x")
mol.drawAtomsAs = mol.NoAtoms
SetPlotOptions(mol)
DrawPlots()
METHOD_NAME("molecule_03")
# Create a blue-purple color table
ccpl = ColorControlPointList()
ccpl.discreteFlag=1
ccpl.smoothing=ccpl.NONE
ccpl.equalSpacingFlag=1
p1 = ColorControlPoint()
p1.colors = (51, 51, 153, 255)
p1.position = 0
ccpl.AddControlPoints(p1)
p2 = ColorControlPoint()
p2.colors = (204,153, 255, 255)
p2.position = 1
ccpl.AddControlPoints(p2)
AddColorTable("BluePurp2", ccpl)
# color by backbone, atom and cylinder-bonds same fixed radius
# special color table
ChangeActivePlotsVar("backbone")
mol.continuousColorTable="BluePurp2"
mol.drawAtomsAs = mol.SphereAtoms
mol.drawBondsAs = mol.CylinderBonds
mol.scaleRadiusBy = mol.Fixed
mol.radiusFixed = 0.2
mol.bondRadius = 0.2
SetPlotOptions(mol)
DrawPlots()
# turn off specular highlighting
renAtts = GetRenderingAttributes()
renAtts.specularFlag = 0
SetRenderingAttributes(renAtts)
METHOD_NAME("molecule_04")
DeleteAllPlots()
# ensure engine/viewer doesn't crash when using ImposterAtoms and scaleRadiusBy option is changed
AddPlot("Molecule", "element")
# get Fresh atts
mol = MoleculeAttributes()
# set up sphere imposters
mol.drawAtomsAs = mol.ImposterAtoms
SetPlotOptions(mol)
DrawPlots()
METHOD_NAME("molecule_05")
# now change scaleRadiusBy
mol.scaleRadiusBy = mol.Covalent
mol.radiusScaleFactor=4
SetPlotOptions(mol)
DrawPlots()
METHOD_NAME("molecule_06")
# changing radiusScaleFactor while using Imposter atoms used to crash the viewer
mol.radiusScaleFactor=0.002
SetPlotOptions(mol)
DrawPlots()
METHOD_NAME("molecule_07")
DeleteAllPlots()
CloseDatabase(data_path("ProteinDataBank_test_data/crotamine.pdb"))
def ReplicateAddBonds():
TestSection("Replicate and CreateBonds operators with Molecule plot")
SetWhiteBackground()
OpenDatabase(data_path("vasp_test_data", "GaO40W12", "OUTCAR"))
AddPlot("Mesh", "unitCell")
AddPlot("Molecule", "element")
mol = MoleculeAttributes()
mol.radiusFixed=0.5
SetPlotOptions(mol)
DrawPlots()
ResetView()
v3D = GetView3D()
v3D.viewNormal = (0.0378647, -0.776117, -0.62945) #(-0.465303, -0.758273, 0.456634)
v3D.viewUp = (0.839533, -0.366352, -0.401212) #(-0.0844518, 0.55156, 0.829849)
v3D.imageZoom=2.177156
v3D.imagePan=(0.0217552, 0.0165363)
SetView3D(v3D)
METHOD_NAME("mol_rep_bonds_01")
# Add replicate operator
SetActivePlots((0,1))
AddOperator("Replicate", 1)
repl = ReplicateAttributes()
repl.useUnitCellVectors = 1
repl.mergeResults = 1
# replicate along unit cell boundaries
repl.replicateUnitCellAtoms = 1
SetOperatorOptions(repl, 0, 1)
DrawPlots()
METHOD_NAME("mol_rep_bonds_02")
# create a replication along y
repl.yReplications=2
SetOperatorOptions(repl, 1, 1)
DrawPlots()
METHOD_NAME("mol_rep_bonds_03")
# Create bonds
SetActivePlots(1)
AddOperator("CreateBonds")
cb = CreateBondsAttributes()
cb.atomicNumber1 = (31, 74)
cb.atomicNumber2 = (8, 8)
cb.minDist = (0.4, 0.4)
cb.maxDist = (1.9, 2.4)
SetOperatorOptions(cb)
DrawPlots()
METHOD_NAME("mol_rep_bonds_04")
# have the CreateBonds operator create periodic bonds
cb.addPeriodicBonds = 1
cb.useUnitCellVectors = 1
SetOperatorOptions(cb)
DrawPlots()
METHOD_NAME("mol_rep_bonds_05")
# Change bond style to lines
mol.drawBondsAs = mol.LineBonds
mol.bondLineWidth = 5
SetPlotOptions(mol)
DrawPlots()
METHOD_NAME("mol_rep_bonds_06")
# Change atom radius
mol.drawBondsAs = mol.CylinderBonds
mol.scaleRadiusBy = mol.Atomic
mol.radiusScaleFactor = 0.7
SetPlotOptions(mol)
DrawPlots()
METHOD_NAME("mol_rep_bonds_07")
DeleteAllPlots()
CloseDatabase(data_path("vasp_test_data", "GaO40W12", "OUTCAR"))
MoleculeOnly()
ReplicateAddBonds()
Exit()
|
2,769 |
test attribute not member type
|
import pytest
from vyper.exceptions import (
ArrayIndexException,
InvalidOperation,
InvalidReference,
TypeMismatch,
UndeclaredDefinition,
UnknownAttribute,
)
from vyper.semantics.analysis.base import VarInfo
from vyper.semantics.analysis.utils import get_possible_types_from_node
from vyper.semantics.types import AddressT, BoolT, DArrayT, SArrayT
from vyper.semantics.types.shortcuts import INT128_T
INTEGER_LITERALS = [(42, 31337), (-1, 1), (69, 2**128)]
DECIMAL_LITERALS = [("4.2", "-1.337")]
BOOL_LITERALS = [(True, False), (True, True), (False, False)]
STRING_LITERALS = [("'hi'", "'there'"), ("'foo'", "'bar'"), ("'longer'", "'short'")]
def test_attribute(build_node, namespace):
node = build_node("self.foo")
with namespace.enter_scope():
namespace["self"].typ.add_member("foo", INT128_T)
assert get_possible_types_from_node(node) == [INT128_T]
def test_attribute_missing_self(build_node, namespace):
node = build_node("foo")
with namespace.enter_scope():
namespace["self"].typ.add_member("foo", INT128_T)
with pytest.raises(InvalidReference):
get_possible_types_from_node(node)
def test_attribute_not_in_self(build_node, namespace):
node = build_node("self.foo")
with namespace.enter_scope():
namespace["foo"] = INT128_T
with pytest.raises(InvalidReference):
get_possible_types_from_node(node)
def test_attribute_unknown(build_node, namespace):
node = build_node("foo.bar")
with namespace.enter_scope():
namespace["foo"] = AddressT()
with pytest.raises(UnknownAttribute):
get_possible_types_from_node(node)
def METHOD_NAME(build_node, namespace):
node = build_node("foo.bar")
with namespace.enter_scope():
namespace["foo"] = INT128_T
with pytest.raises(UnknownAttribute):
get_possible_types_from_node(node)
@pytest.mark.parametrize("op", "+-*/%")
@pytest.mark.parametrize("left,right", INTEGER_LITERALS + DECIMAL_LITERALS)
def test_binop(build_node, namespace, op, left, right):
node = build_node(f"{left}{op}{right}")
with namespace.enter_scope():
get_possible_types_from_node(node)
@pytest.mark.parametrize("op", "+-*/%")
@pytest.mark.parametrize("left,right", [(42, "2.3"), (-1, 2**255)])
def test_binop_type_mismatch(build_node, namespace, op, left, right):
node = build_node(f"{left}{op}{right}")
with namespace.enter_scope():
with pytest.raises(TypeMismatch):
get_possible_types_from_node(node)
def test_binop_invalid_decimal_pow(build_node, namespace):
node = build_node("2.1 ** 2.1")
with namespace.enter_scope():
with pytest.raises(InvalidOperation):
get_possible_types_from_node(node)
@pytest.mark.parametrize("left, right", STRING_LITERALS + BOOL_LITERALS)
@pytest.mark.parametrize("op", "+-*/%")
def test_binop_invalid_op(build_node, namespace, op, left, right):
node = build_node(f"{left} {op} {right}")
with namespace.enter_scope():
with pytest.raises(InvalidOperation):
get_possible_types_from_node(node)
@pytest.mark.parametrize("left, right", BOOL_LITERALS)
@pytest.mark.parametrize("op", ["and", "or"])
def test_boolop(build_node, namespace, op, left, right):
node = build_node(f"{left} {op} {right}")
with namespace.enter_scope():
types_list = get_possible_types_from_node(node)
assert types_list == [BoolT()]
@pytest.mark.parametrize("left, right", INTEGER_LITERALS + DECIMAL_LITERALS + STRING_LITERALS)
@pytest.mark.parametrize("op", ["and", "or"])
def test_boolop_invalid_op(build_node, namespace, op, left, right):
node = build_node(f"{left} {op} {right}")
with namespace.enter_scope():
with pytest.raises(InvalidOperation):
get_possible_types_from_node(node)
@pytest.mark.parametrize("left, right", INTEGER_LITERALS + DECIMAL_LITERALS)
@pytest.mark.parametrize("op", ["<", "<=", ">", ">="])
def test_compare_lt_gt(build_node, namespace, op, left, right):
node = build_node(f"{left} {op} {right}")
with namespace.enter_scope():
types_list = get_possible_types_from_node(node)
assert types_list == [BoolT()]
@pytest.mark.parametrize(
"left, right", INTEGER_LITERALS + DECIMAL_LITERALS + BOOL_LITERALS + STRING_LITERALS
)
@pytest.mark.parametrize("op", ["==", "!="])
def test_compare_eq_ne(build_node, namespace, op, left, right):
node = build_node(f"{left} {op} {right}")
with namespace.enter_scope():
types_list = get_possible_types_from_node(node)
assert types_list == [BoolT()]
@pytest.mark.parametrize("left, right", BOOL_LITERALS + STRING_LITERALS)
@pytest.mark.parametrize("op", ["<", "<=", ">", ">="])
def test_compare_invalid_op(build_node, namespace, op, left, right):
node = build_node(f"{left} {op} {right}")
with namespace.enter_scope():
with pytest.raises(InvalidOperation):
get_possible_types_from_node(node)
def test_name(build_node, namespace):
node = build_node("foo")
type_def = INT128_T
namespace["foo"] = VarInfo(type_def)
assert get_possible_types_from_node(node) == [type_def]
def test_name_unknown(build_node, namespace):
node = build_node("foo")
with pytest.raises(UndeclaredDefinition):
get_possible_types_from_node(node)
@pytest.mark.parametrize("left, right", INTEGER_LITERALS + DECIMAL_LITERALS + BOOL_LITERALS)
def test_list(build_node, namespace, left, right):
node = build_node(f"[{left}, {right}]")
with namespace.enter_scope():
types_list = get_possible_types_from_node(node)
assert types_list
for item in types_list:
assert isinstance(item, (DArrayT, SArrayT))
def test_subscript(build_node, namespace):
node = build_node("foo[1]")
type_ = INT128_T
namespace["foo"] = VarInfo(SArrayT(type_, 3))
assert get_possible_types_from_node(node) == [type_]
def test_subscript_out_of_bounds(build_node, namespace):
node = build_node("foo[5]")
type_def = INT128_T
namespace["foo"] = VarInfo(SArrayT(type_def, 3))
with pytest.raises(ArrayIndexException):
get_possible_types_from_node(node)
def test_subscript_negative(build_node, namespace):
node = build_node("foo[-1]")
type_def = INT128_T
namespace["foo"] = VarInfo(SArrayT(type_def, 3))
with pytest.raises(ArrayIndexException):
get_possible_types_from_node(node)
def test_tuple(build_node, namespace):
node = build_node("(foo, bar)")
namespace["foo"] = VarInfo(INT128_T)
namespace["bar"] = VarInfo(AddressT())
types_list = get_possible_types_from_node(node)
assert types_list[0].member_types == [namespace["foo"].typ, namespace["bar"].typ]
def test_tuple_subscript(build_node, namespace):
node = build_node("(foo, bar)[1]")
namespace["foo"] = VarInfo(INT128_T)
namespace["bar"] = VarInfo(AddressT())
types_list = get_possible_types_from_node(node)
assert types_list == [namespace["bar"].typ]
|
2,770 |
firebase auth user
|
from random import randint
from typing import Any, Dict, Generator
import pydash
import pytest
from firebase_admin import auth
from firebase_admin.auth import UserNotFoundError
from firebase_admin.exceptions import FirebaseError
from sqlalchemy.orm import Session
from fides.api.cryptography import cryptographic_util
from fides.api.db import session
from fides.api.models.connectionconfig import (
AccessLevel,
ConnectionConfig,
ConnectionType,
)
from fides.api.models.datasetconfig import DatasetConfig
from fides.api.service.saas_request.override_implementations.firebase_auth_request_overrides import (
initialize_firebase,
)
from fides.api.util.saas_util import (
load_config_with_replacement,
load_dataset_with_replacement,
)
from tests.ops.test_helpers.vault_client import get_secrets
secrets = get_secrets("firebase_auth")
from fides.api.models.sql_models import Dataset as CtlDataset
@pytest.fixture
def firebase_auth_config() -> Dict[str, Any]:
return load_config_with_replacement(
"data/saas/config/firebase_auth_config.yml",
"<instance_fides_key>",
"firebase_auth_instance",
)
@pytest.fixture
def firebase_auth_dataset() -> Dict[str, Any]:
return load_dataset_with_replacement(
"data/saas/dataset/firebase_auth_dataset.yml",
"<instance_fides_key>",
"firebase_auth_instance",
)[0]
@pytest.fixture(scope="session")
def firebase_auth_secrets(saas_config):
return {
"domain": pydash.get(saas_config, "firebase_auth.domain") or secrets["domain"],
"type": pydash.get(saas_config, "firebase_auth.type") or secrets["type"],
"project_id": pydash.get(saas_config, "firebase_auth.project_id")
or secrets["project_id"],
"private_key_id": pydash.get(saas_config, "firebase_auth.private_key_id")
or secrets["private_key_id"],
"private_key": pydash.get(saas_config, "firebase_auth.private_key")
or secrets["private_key"],
"client_email": pydash.get(saas_config, "firebase_auth.client_email")
or secrets["client_email"],
"client_id": pydash.get(saas_config, "firebase_auth.client_id")
or secrets["client_id"],
"auth_uri": pydash.get(saas_config, "firebase_auth.auth_uri")
or secrets["auth_uri"],
"token_uri": pydash.get(saas_config, "firebase_auth.token_uri")
or secrets["token_uri"],
"auth_provider_x509_cert_url": pydash.get(
saas_config, "firebase_auth.auth_provider_x509_cert_url"
)
or secrets["auth_provider_x509_cert_url"],
"client_x509_cert_url": pydash.get(
saas_config, "firebase_auth.client_x509_cert_url"
)
or secrets["client_x509_cert_url"],
}
@pytest.fixture(scope="function")
def METHOD_NAME(firebase_auth_secrets) -> Generator:
app = initialize_firebase(firebase_auth_secrets)
# create a user provider
uid = cryptographic_util.generate_secure_random_string(28)
email = f"{cryptographic_util.generate_secure_random_string(13)}@email.com"
provider_id = "facebook.com"
display_name = "John Doe #1"
photo_url = "http://www.facebook.com/12345678/photo.png"
up1 = auth.UserProvider(
uid,
email=email,
provider_id=provider_id,
display_name=display_name,
photo_url=photo_url,
)
# create another user provider
uid = cryptographic_util.generate_secure_random_string(28)
email = f"{cryptographic_util.generate_secure_random_string(13)}@email.com"
provider_id = "google.com"
display_name = "John Doe #2"
up2 = auth.UserProvider(
uid,
email=email,
provider_id=provider_id,
display_name=display_name,
)
# create the user
email = f"{cryptographic_util.generate_secure_random_string(13)}@email.com"
uid = cryptographic_util.generate_secure_random_string(28)
user = auth.ImportUserRecord(
uid=uid,
email=email,
email_verified=False,
display_name="John Doe",
photo_url="http://www.example.com/12345678/photo.png",
phone_number="+1" + str(randint(1000000000, 9999999999)),
disabled=False,
provider_data=[up1, up2],
)
auth.import_users([user], app=app)
yield user
try:
auth.delete_user(user.uid, app=app)
except FirebaseError as e:
# user may have already been deleted, so catch the possible exception
if not isinstance(e, UserNotFoundError):
raise e
@pytest.fixture(scope="function")
def firebase_auth_connection_config(
db: session, firebase_auth_config, firebase_auth_secrets
) -> Generator:
fides_key = firebase_auth_config["fides_key"]
connection_config = ConnectionConfig.create(
db=db,
data={
"key": fides_key,
"name": fides_key,
"connection_type": ConnectionType.saas,
"access": AccessLevel.write,
"secrets": firebase_auth_secrets,
"saas_config": firebase_auth_config,
},
)
yield connection_config
connection_config.delete(db)
@pytest.fixture
def firebase_auth_dataset_config(
db: Session,
firebase_auth_connection_config: ConnectionConfig,
firebase_auth_dataset: Dict[str, Any],
) -> Generator:
fides_key = firebase_auth_dataset["fides_key"]
firebase_auth_connection_config.name = fides_key
firebase_auth_connection_config.key = fides_key
firebase_auth_connection_config.save(db=db)
ctl_dataset = CtlDataset.create_from_dataset_dict(db, firebase_auth_dataset)
dataset = DatasetConfig.create(
db=db,
data={
"connection_config_id": firebase_auth_connection_config.id,
"fides_key": fides_key,
"ctl_dataset_id": ctl_dataset.id,
},
)
yield dataset
dataset.delete(db=db)
ctl_dataset.delete(db=db)
|
2,771 |
compute expected freqs
|
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.utilities.prints import rank_zero_warn
def _nominal_input_validation(nan_strategy: str, nan_replace_value: Optional[Union[int, float]]) -> None:
if nan_strategy not in ["replace", "drop"]:
raise ValueError(
f"Argument `nan_strategy` is expected to be one of `['replace', 'drop']`, but got {nan_strategy}"
)
if nan_strategy == "replace" and not isinstance(nan_replace_value, (int, float)):
raise ValueError(
"Argument `nan_replace` is expected to be of a type `int` or `float` when `nan_strategy = 'replace`, "
f"but got {nan_replace_value}"
)
def METHOD_NAME(confmat: Tensor) -> Tensor:
"""Compute the expected frequenceis from the provided confusion matrix."""
margin_sum_rows, margin_sum_cols = confmat.sum(1), confmat.sum(0)
return torch.einsum("r, c -> rc", margin_sum_rows, margin_sum_cols) / confmat.sum()
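# Hedged, illustrative helper (not part of the upstream module): a tiny
# hand-checkable example of the expected-frequency formula
# expected[r, c] = row_sum[r] * col_sum[c] / total.
def _demo_expected_freqs() -> Tensor:
    confmat = torch.tensor([[10.0, 20.0], [30.0, 40.0]])
    # Row sums are (30, 70), column sums are (40, 60), total is 100, so for
    # example expected[0, 0] = 30 * 40 / 100 = 12.0.
    return METHOD_NAME(confmat)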
def _compute_chi_squared(confmat: Tensor, bias_correction: bool) -> Tensor:
"""Chi-square test of independenc of variables in a confusion matrix table.
Adapted from: https://github.com/scipy/scipy/blob/v1.9.2/scipy/stats/contingency.py.
"""
expected_freqs = METHOD_NAME(confmat)
# Get degrees of freedom
df = expected_freqs.numel() - sum(expected_freqs.shape) + expected_freqs.ndim - 1
if df == 0:
return torch.tensor(0.0, device=confmat.device)
if df == 1 and bias_correction:
diff = expected_freqs - confmat
direction = diff.sign()
confmat += direction * torch.minimum(0.5 * torch.ones_like(direction), direction.abs())
return torch.sum((confmat - expected_freqs) ** 2 / expected_freqs)
def _drop_empty_rows_and_cols(confmat: Tensor) -> Tensor:
"""Drop all rows and columns containing only zeros.
Example:
>>> import torch
>>> from torchmetrics.functional.nominal.utils import _drop_empty_rows_and_cols
>>> _ = torch.manual_seed(22)
>>> matrix = torch.randint(10, size=(3, 3))
>>> matrix[1, :] = matrix[:, 1] = 0
>>> matrix
tensor([[9, 0, 6],
[0, 0, 0],
[2, 0, 8]])
>>> _drop_empty_rows_and_cols(matrix)
tensor([[9, 6],
[2, 8]])
"""
confmat = confmat[confmat.sum(1) != 0]
return confmat[:, confmat.sum(0) != 0]
def _compute_phi_squared_corrected(
phi_squared: Tensor,
n_rows: int,
n_cols: int,
confmat_sum: Tensor,
) -> Tensor:
"""Compute bias-corrected Phi Squared."""
return torch.max(
torch.tensor(0.0, device=phi_squared.device), phi_squared - ((n_rows - 1) * (n_cols - 1)) / (confmat_sum - 1)
)
def _compute_rows_and_cols_corrected(n_rows: int, n_cols: int, confmat_sum: Tensor) -> Tuple[Tensor, Tensor]:
"""Compute bias-corrected number of rows and columns."""
rows_corrected = n_rows - (n_rows - 1) ** 2 / (confmat_sum - 1)
cols_corrected = n_cols - (n_cols - 1) ** 2 / (confmat_sum - 1)
return rows_corrected, cols_corrected
def _compute_bias_corrected_values(
phi_squared: Tensor, n_rows: int, n_cols: int, confmat_sum: Tensor
) -> Tuple[Tensor, Tensor, Tensor]:
"""Compute bias-corrected Phi Squared and number of rows and columns."""
phi_squared_corrected = _compute_phi_squared_corrected(phi_squared, n_rows, n_cols, confmat_sum)
rows_corrected, cols_corrected = _compute_rows_and_cols_corrected(n_rows, n_cols, confmat_sum)
return phi_squared_corrected, rows_corrected, cols_corrected
def _handle_nan_in_data(
preds: Tensor,
target: Tensor,
nan_strategy: Literal["replace", "drop"] = "replace",
nan_replace_value: Optional[float] = 0.0,
) -> Tuple[Tensor, Tensor]:
"""Handle ``NaN`` values in input data.
If ``nan_strategy = 'replace'``, all ``NaN`` values are replaced with ``nan_replace_value``.
If ``nan_strategy = 'drop'``, all rows containing ``NaN`` in any of two vectors are dropped.
Args:
preds: 1D tensor of categorical (nominal) data
target: 1D tensor of categorical (nominal) data
nan_strategy: Indication of whether to replace or drop ``NaN`` values
        nan_replace_value: Value to replace ``NaN``s when ``nan_strategy = 'replace'``
Returns:
        Updated ``preds`` and ``target`` tensors which contain no ``NaN`` values
Raises:
ValueError: If ``nan_strategy`` is not from ``['replace', 'drop']``.
ValueError: If ``nan_strategy = replace`` and ``nan_replace_value`` is not of a type ``int`` or ``float``.
"""
if nan_strategy == "replace":
return preds.nan_to_num(nan_replace_value), target.nan_to_num(nan_replace_value)
rows_contain_nan = torch.logical_or(preds.isnan(), target.isnan())
return preds[~rows_contain_nan], target[~rows_contain_nan]
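# Hedged, illustrative helper (not part of the upstream module): demonstrates the
# two ``nan_strategy`` modes on tiny tensors; the helper name is introduced here
# purely for illustration.
def _demo_handle_nan_in_data() -> None:
    preds = torch.tensor([0.0, float("nan"), 2.0])
    target = torch.tensor([1.0, 1.0, float("nan")])
    # 'replace' keeps every row and substitutes nan_replace_value for NaNs.
    preds_rep, target_rep = _handle_nan_in_data(preds, target, "replace", 0.0)
    assert not preds_rep.isnan().any() and not target_rep.isnan().any()
    # 'drop' removes each row where either tensor holds a NaN, leaving one row here.
    preds_drop, target_drop = _handle_nan_in_data(preds, target, "drop")
    assert preds_drop.numel() == target_drop.numel() == 1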
def _unable_to_use_bias_correction_warning(metric_name: str) -> None:
rank_zero_warn(
f"Unable to compute {metric_name} using bias correction. Please consider to set `bias_correction=False`."
)
|
2,772 |
add unit to tree
|
#
# Copyright 2008-2010 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
from translate.storage import xliff
class XPathTree:
def __init__(self, unit=None):
self.unit = unit
self.children = {}
def __eq__(self, other):
return (
isinstance(other, XPathTree)
and self.unit == other.unit
and self.children == other.children
)
def _split_xpath_component(xpath_component):
"""Split an xpath component into a tag-index tuple.
>>> split_xpath_component('{urn:oasis:names:tc:opendocument:xmlns:office:1.0}document-content[0]')
('{urn:oasis:names:tc:opendocument:xmlns:office:1.0}document-content', 0).
"""
lbrac = xpath_component.rfind("[")
rbrac = xpath_component.rfind("]")
tag = xpath_component[:lbrac]
index = int(xpath_component[lbrac + 1 : rbrac])
return tag, index
def _split_xpath(xpath):
"""Split an 'xpath' string separated by / into a reversed list of its components. Thus:
>>> split_xpath('document-content[1]/body[2]/text[3]/p[4]')
[('p', 4), ('text', 3), ('body', 2), ('document-content', 1)]
The list is reversed so that it can be used as a stack, where the top of the stack is
the first component.
"""
if xliff.ID_SEPARATOR in xpath:
xpath = xpath.split(xliff.ID_SEPARATOR)[-1]
components = xpath.split("/")
components = [_split_xpath_component(component) for component in components]
return list(reversed(components))
def METHOD_NAME(node, xpath_components, unit):
"""Walk down the tree rooted a node, and follow nodes which correspond to the
components of xpath_components. When reaching the end of xpath_components,
set the reference of the node to unit.
With reference to the tree diagram in build_unit_tree::
add_unit_to_tree(node, [('p', 2), ('text', 3), ('body', 2), ('document-content', 1)], unit)
would begin by popping ('document-content', 1) from the path and
following the node marked ``('document-content', 1)`` in the tree.
Likewise, will descend down the nodes marked ``('body', 2)``
and ``('text', 3)``.
Since the node marked ``('text', 3)`` has no child node marked
``('p', 2)``, this node is created. Then the ``add_unit_to_tree``
descends down this node. When this happens, there are no xpath
components left to pop. Thus, ``node.unit = unit`` is executed.
"""
if len(xpath_components) > 0:
component = (
xpath_components.pop()
) # pop the stack; is a component such as ('p', 4)
# if the current node does not have any children indexed by
# the current component, add such a child
if component not in node.children:
node.children[component] = XPathTree()
METHOD_NAME(node.children[component], xpath_components, unit)
else:
node.unit = unit
def build_unit_tree(store, filename=None):
"""Enumerate a translation store and build a tree with XPath components as nodes
and where a node contains a unit if a path from the root of the tree to the node
containing the unit, is equal to the XPath of the unit.
The tree looks something like this::
root
`- ('document-content', 1)
`- ('body', 2)
|- ('text', 1)
| `- ('p', 1)
| `- <reference to a unit>
|- ('text', 2)
| `- ('p', 1)
| `- <reference to a unit>
`- ('text', 3)
`- ('p', 1)
`- <reference to a unit>
"""
tree = XPathTree()
is_xliff = isinstance(store, xliff.xlifffile)
for unit in store.units:
if unit.source and not unit.isfuzzy():
if is_xliff:
locations = [unit.getid()]
else:
locations = unit.getlocations()
if filename is not None and len(locations) > 1 and filename != locations[1]:
# Skip units that don't come from the filename we are currently
# trying to get units for.
# This is not used for ODF, right now only for IDML.
continue
location = _split_xpath(locations[0])
METHOD_NAME(tree, location, unit)
return tree
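# Hedged, illustrative helper (not part of the upstream module): shows how the
# unit-adding routine threads a value into an XPathTree; the string "unit"
# stands in for a real translation unit object.
def _demo_add_unit_to_tree():
    root = XPathTree()
    xpath = 'document-content[1]/body[2]/text[3]/p[2]'
    METHOD_NAME(root, _split_xpath(xpath), "unit")
    # Walking the same components in document order reaches the stored value.
    node = root
    for component in reversed(_split_xpath(xpath)):
        node = node.children[component]
    assert node.unit == "unit"
    return root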
|
2,773 |
test when install does not error installed
|
"""
:codeauthor: Jayesh Kariya <[email protected]>
"""
import pytest
import salt.modules.npm as npmmod
import salt.states.npm as npm
from salt.exceptions import CommandExecutionError
from tests.support.mock import MagicMock, create_autospec, patch
@pytest.fixture
def configure_loader_modules():
return {npm: {"__opts__": {"test": False}}}
@pytest.fixture(params=["", {}, []])
def fake_install(request):
fake_install = create_autospec(npmmod.install, return_value=request.param)
with patch.dict(
npm.__salt__,
{
"npm.list": create_autospec(npmmod.list_, return_value={}),
"npm.install": fake_install,
},
):
yield fake_install
def METHOD_NAME(fake_install):
ret = npm.installed("fnord")
assert ret["result"] is True
def test_installed():
"""
Test to verify that the given package is installed
and is at the correct version.
"""
name = "coffee-script"
ret = {"name": name, "result": False, "comment": "", "changes": {}}
mock_err = MagicMock(side_effect=CommandExecutionError)
mock_dict = MagicMock(return_value={name: {"version": "1.2"}})
with patch.dict(npm.__salt__, {"npm.list": mock_err}):
comt = "Error looking up 'coffee-script': "
ret.update({"comment": comt})
assert npm.installed(name) == ret
with patch.dict(npm.__salt__, {"npm.list": mock_dict, "npm.install": mock_err}):
with patch.dict(npm.__opts__, {"test": True}):
comt = "Package(s) 'coffee-script' satisfied by [email protected]"
ret.update({"comment": comt, "result": True})
assert npm.installed(name) == ret
with patch.dict(npm.__opts__, {"test": False}):
comt = "Package(s) 'coffee-script' satisfied by [email protected]"
ret.update({"comment": comt, "result": True})
assert npm.installed(name) == ret
comt = "Error installing 'n, p, m': "
ret.update({"comment": comt, "result": False})
assert npm.installed(name, "npm") == ret
with patch.dict(npm.__salt__, {"npm.install": mock_dict}):
comt = "Package(s) 'n, p, m' successfully installed"
ret.update(
{
"comment": comt,
"result": True,
"changes": {"new": ["n", "p", "m"], "old": []},
}
)
assert npm.installed(name, "npm") == ret
def test_removed():
"""
Test to verify that the given package is not installed.
"""
name = "coffee-script"
ret = {"name": name, "result": False, "comment": "", "changes": {}}
mock_err = MagicMock(
side_effect=[CommandExecutionError, {}, {name: ""}, {name: ""}]
)
mock_t = MagicMock(return_value=True)
with patch.dict(npm.__salt__, {"npm.list": mock_err, "npm.uninstall": mock_t}):
comt = "Error uninstalling 'coffee-script': "
ret.update({"comment": comt})
assert npm.removed(name) == ret
comt = "Package 'coffee-script' is not installed"
ret.update({"comment": comt, "result": True})
assert npm.removed(name) == ret
with patch.dict(npm.__opts__, {"test": True}):
comt = "Package 'coffee-script' is set to be removed"
ret.update({"comment": comt, "result": None})
assert npm.removed(name) == ret
with patch.dict(npm.__opts__, {"test": False}):
comt = "Package 'coffee-script' was successfully removed"
ret.update({"comment": comt, "result": True, "changes": {name: "Removed"}})
assert npm.removed(name) == ret
def test_bootstrap():
"""
Test to bootstraps a node.js application.
"""
name = "coffee-script"
ret = {"name": name, "result": False, "comment": "", "changes": {}}
mock_err = MagicMock(side_effect=[CommandExecutionError, False, True])
with patch.dict(npm.__salt__, {"npm.install": mock_err}):
comt = "Error Bootstrapping 'coffee-script': "
ret.update({"comment": comt})
assert npm.bootstrap(name) == ret
comt = "Directory is already bootstrapped"
ret.update({"comment": comt, "result": True})
assert npm.bootstrap(name) == ret
comt = "Directory was successfully bootstrapped"
ret.update({"comment": comt, "result": True, "changes": {name: "Bootstrapped"}})
assert npm.bootstrap(name) == ret
def test_cache_cleaned():
"""
Test to verify that the npm cache is cleaned.
"""
name = "coffee-script"
pkg_ret = {"name": name, "result": False, "comment": "", "changes": {}}
ret = {"name": None, "result": False, "comment": "", "changes": {}}
mock_list = MagicMock(return_value=["~/.npm", "~/.npm/{}/".format(name)])
mock_cache_clean_success = MagicMock(return_value=True)
mock_cache_clean_failure = MagicMock(return_value=False)
mock_err = MagicMock(side_effect=CommandExecutionError)
with patch.dict(npm.__salt__, {"npm.cache_list": mock_err}):
comt = "Error looking up cached packages: "
ret.update({"comment": comt})
assert npm.cache_cleaned() == ret
with patch.dict(npm.__salt__, {"npm.cache_list": mock_err}):
comt = "Error looking up cached {}: ".format(name)
pkg_ret.update({"comment": comt})
assert npm.cache_cleaned(name) == pkg_ret
mock_data = {"npm.cache_list": mock_list, "npm.cache_clean": MagicMock()}
with patch.dict(npm.__salt__, mock_data):
non_cached_pkg = "salt"
comt = "Package {} is not in the cache".format(non_cached_pkg)
pkg_ret.update({"name": non_cached_pkg, "result": True, "comment": comt})
assert npm.cache_cleaned(non_cached_pkg) == pkg_ret
pkg_ret.update({"name": name})
with patch.dict(npm.__opts__, {"test": True}):
comt = "Cached packages set to be removed"
ret.update({"result": None, "comment": comt})
assert npm.cache_cleaned() == ret
with patch.dict(npm.__opts__, {"test": True}):
comt = "Cached {} set to be removed".format(name)
pkg_ret.update({"result": None, "comment": comt})
assert npm.cache_cleaned(name) == pkg_ret
with patch.dict(npm.__opts__, {"test": False}):
comt = "Cached packages successfully removed"
ret.update(
{"result": True, "comment": comt, "changes": {"cache": "Removed"}}
)
assert npm.cache_cleaned() == ret
with patch.dict(npm.__opts__, {"test": False}):
comt = "Cached {} successfully removed".format(name)
pkg_ret.update(
{"result": True, "comment": comt, "changes": {name: "Removed"}}
)
assert npm.cache_cleaned(name) == pkg_ret
mock_data = {
"npm.cache_list": mock_list,
"npm.cache_clean": MagicMock(return_value=False),
}
with patch.dict(npm.__salt__, mock_data):
with patch.dict(npm.__opts__, {"test": False}):
comt = "Error cleaning cached packages"
ret.update({"result": False, "comment": comt})
ret["changes"] = {}
assert npm.cache_cleaned() == ret
with patch.dict(npm.__opts__, {"test": False}):
comt = "Error cleaning cached {}".format(name)
pkg_ret.update({"result": False, "comment": comt})
pkg_ret["changes"] = {}
assert npm.cache_cleaned(name) == pkg_ret
|
2,774 |
doit
|
"""Sort performance test.
See main() for command line syntax.
See tabulate() for output format.
"""
import sys
import time
import random
import marshal
import tempfile
import os
td = tempfile.gettempdir()
def randfloats(n):
"""Return a list of n random floats in [0, 1)."""
# Generating floats is expensive, so this writes them out to a file in
# a temp directory. If the file already exists, it just reads them
# back in and shuffles them a bit.
fn = os.path.join(td, "rr%06d" % n)
try:
fp = open(fn, "rb")
except IOError:
r = random.random
result = [r() for i in xrange(n)]
try:
try:
fp = open(fn, "wb")
marshal.dump(result, fp)
fp.close()
fp = None
finally:
if fp:
try:
os.unlink(fn)
except os.error:
pass
except IOError, msg:
print "can't write", fn, ":", msg
else:
result = marshal.load(fp)
fp.close()
# Shuffle it a bit...
for i in range(10):
i = random.randrange(n)
temp = result[:i]
del result[:i]
temp.reverse()
result.extend(temp)
del temp
assert len(result) == n
return result
def flush():
sys.stdout.flush()
def METHOD_NAME(L):
t0 = time.clock()
L.sort()
t1 = time.clock()
print "%6.2f" % (t1-t0),
flush()
def tabulate(r):
"""Tabulate sort speed for lists of various sizes.
The sizes are 2**i for i in r (the argument, a list).
The output displays i, 2**i, and the time to sort arrays of 2**i
floating point numbers with the following properties:
*sort: random data
\sort: descending data
/sort: ascending data
3sort: ascending, then 3 random exchanges
+sort: ascending, then 10 random at the end
%sort: ascending, then randomly replace 1% of the elements w/ random values
~sort: many duplicates
=sort: all equal
!sort: worst case scenario
"""
cases = tuple([ch + "sort" for ch in r"*\/3+%~=!"])
fmt = ("%2s %7s" + " %6s"*len(cases))
print fmt % (("i", "2**i") + cases)
for i in r:
n = 1 << i
L = randfloats(n)
print "%2d %7d" % (i, n),
flush()
METHOD_NAME(L) # *sort
L.reverse()
METHOD_NAME(L) # \sort
METHOD_NAME(L) # /sort
# Do 3 random exchanges.
for dummy in range(3):
i1 = random.randrange(n)
i2 = random.randrange(n)
L[i1], L[i2] = L[i2], L[i1]
METHOD_NAME(L) # 3sort
# Replace the last 10 with random floats.
if n >= 10:
L[-10:] = [random.random() for dummy in range(10)]
METHOD_NAME(L) # +sort
# Replace 1% of the elements at random.
for dummy in xrange(n // 100):
L[random.randrange(n)] = random.random()
METHOD_NAME(L) # %sort
# Arrange for lots of duplicates.
if n > 4:
del L[4:]
L = L * (n // 4)
# Force the elements to be distinct objects, else timings can be
# artificially low.
L = map(lambda x: --x, L)
METHOD_NAME(L) # ~sort
del L
# All equal. Again, force the elements to be distinct objects.
L = map(abs, [-0.5] * n)
METHOD_NAME(L) # =sort
del L
# This one looks like [3, 2, 1, 0, 0, 1, 2, 3]. It was a bad case
# for an older implementation of quicksort, which used the median
# of the first, last and middle elements as the pivot.
half = n // 2
L = range(half - 1, -1, -1)
L.extend(range(half))
# Force to float, so that the timings are comparable. This is
# significantly faster if we leave tham as ints.
L = map(float, L)
METHOD_NAME(L) # !sort
print
def main():
"""Main program when invoked as a script.
One argument: tabulate a single row.
Two arguments: tabulate a range (inclusive).
Extra arguments are used to seed the random generator.
"""
# default range (inclusive)
k1 = 15
k2 = 20
if sys.argv[1:]:
# one argument: single point
k1 = k2 = int(sys.argv[1])
if sys.argv[2:]:
# two arguments: specify range
k2 = int(sys.argv[2])
if sys.argv[3:]:
# derive random seed from remaining arguments
x = 1
for a in sys.argv[3:]:
x = 69069 * x + hash(a)
random.seed(x)
r = range(k1, k2+1) # include the end point
tabulate(r)
if __name__ == '__main__':
main()
|
2,775 |
dereference
|
from __future__ import annotations
from typing import TYPE_CHECKING, Any, ClassVar, List, Optional, Union
import attrs
from .. import errors, settings
from ..exception import FrictionlessException
from ..metadata import Metadata
from .dataset import Dataset
from .factory import Factory
if TYPE_CHECKING:
from .. import types
from ..dialect import Control
@attrs.define(kw_only=True, repr=False)
class Catalog(Metadata, metaclass=Factory):
"""Catalog representation"""
source: Optional[Any] = attrs.field(default=None, kw_only=False)
"""
# TODO: add docs
"""
control: Optional[Control] = None
"""
# TODO: add docs
"""
name: Optional[str] = None
"""
A short url-usable (and preferably human-readable) name.
This MUST be lower-case and contain only alphanumeric characters
along with “.”, “_” or “-” characters.
"""
type: ClassVar[Union[str, None]] = None
"""
Type of the object
"""
title: Optional[str] = None
"""
A Catalog title according to the specs. It should be a
human-oriented title of the resource.
"""
description: Optional[str] = None
"""
A Catalog description according to the specs. It should be a
human-oriented description of the resource.
"""
datasets: List[Dataset] = attrs.field(factory=list)
"""
    A list of datasets. Each item in the list is a Dataset.
"""
basepath: Optional[str] = None
"""
A basepath of the catalog. The normpath of the resource is joined
`basepath` and `/path`
"""
def __attrs_post_init__(self):
for dataset in self.datasets:
dataset.catalog = self
dataset.package.dataset = dataset
super().__attrs_post_init__()
# Datasets
@property
def dataset_names(self) -> List[str]:
"""Return names of datasets"""
return [dataset.name for dataset in self.datasets]
def add_dataset(self, dataset: Union[Dataset, str]) -> Dataset:
"""Add new dataset to the catalog"""
if isinstance(dataset, str):
dataset = Dataset.from_descriptor(dataset, basepath=self.basepath)
self.datasets.append(dataset)
dataset.catalog = self
return dataset
def has_dataset(self, name: str) -> bool:
"""Check if a dataset is present"""
for dataset in self.datasets:
if dataset.name == name:
return True
return False
def get_dataset(self, name: str) -> Dataset:
"""Get dataset by name"""
for dataset in self.datasets:
if dataset.name == name:
return dataset
error = errors.CatalogError(note=f'dataset "{name}" does not exist')
raise FrictionlessException(error)
def set_dataset(self, dataset: Dataset) -> Optional[Dataset]:
"""Set dataset by name"""
assert dataset.name
if self.has_dataset(dataset.name):
prev_dataset = self.get_dataset(dataset.name)
index = self.datasets.index(prev_dataset)
self.datasets[index] = dataset
            dataset.catalog = self
return prev_dataset
self.add_dataset(dataset)
def remove_dataset(self, name: str) -> Dataset:
"""Remove dataset by name"""
dataset = self.get_dataset(name)
self.datasets.remove(dataset)
return dataset
def clear_datasets(self):
"""Remove all the datasets"""
self.datasets = []
def deduplicate_datasets(self):
if len(self.dataset_names) != len(set(self.dataset_names)):
seen_names: List[str] = []
for index, dataset in enumerate(self.datasets):
name = dataset.name
count = seen_names.count(name) + 1
if count > 1:
self.datasets[index].name = "%s%s" % (name, count)
seen_names.append(name)
# Infer
def infer(self, *, stats: bool = False):
"""Infer catalog's metadata
Parameters:
stats? (bool): stream files completely and infer stats
"""
for dataset in self.datasets:
dataset.infer(stats=stats)
# Dereference
def METHOD_NAME(self):
"""Dereference underlaying metadata
If some of underlaying metadata is provided as a string
it will replace it by the metadata object
"""
for dataset in self.datasets:
dataset.METHOD_NAME()
# Convert
def to_copy(self, **options: Any):
"""Create a copy of the catalog"""
return super().to_copy(
basepath=self.basepath,
**options,
)
# Metadata
metadata_type = "catalog"
metadata_Error = errors.CatalogError
metadata_profile = {
"type": "object",
"required": ["datasets"],
"properties": {
"name": {"type": "string", "pattern": settings.NAME_PATTERN},
"type": {"type": "string", "pattern": settings.TYPE_PATTERN},
"title": {"type": "string"},
"description": {"type": "string"},
"datasets": {
"type": "array",
"items": {"type": "object"},
},
},
}
@classmethod
def metadata_select_property_class(cls, name: str):
if name == "datasets":
return Dataset
@classmethod
def metadata_import(cls, descriptor: types.IDescriptor, **options: Any): # type: ignore
return super().metadata_import(
descriptor=descriptor,
with_basepath=True,
**options,
)
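# Hedged usage sketch (not part of the upstream module): assumes `dataset` is an
# already constructed Dataset with a non-empty name; it only exercises the
# catalog-management helpers defined above.
def _demo_catalog_management(catalog: Catalog, dataset: Dataset) -> Dataset:
    assert dataset.name, "this sketch assumes a named dataset"
    catalog.add_dataset(dataset)
    assert catalog.has_dataset(dataset.name)
    assert catalog.get_dataset(dataset.name) is dataset
    return catalog.remove_dataset(dataset.name)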
|
2,776 |
remote setup
|
# coding=utf-8
"""flask views that deal with user authentication."""
import json
import logging
import flask_login
from flask import jsonify
from flask import redirect
from flask import render_template
from flask import request
from flask import url_for
from flask.blueprints import Blueprint
from mycodo.databases.models import DisplayOrder
from mycodo.databases.models import Input
from mycodo.databases.models import Remote
from mycodo.mycodo_flask.forms import forms_authentication
from mycodo.mycodo_flask.routes_static import inject_variables
from mycodo.mycodo_flask.utils import utils_general
from mycodo.mycodo_flask.utils import utils_remote_host
from mycodo.mycodo_flask.utils.utils_remote_host import remote_host_page
from mycodo.mycodo_flask.utils.utils_remote_host import remote_log_in
blueprint = Blueprint(
'routes_remote_admin',
__name__,
static_folder='../static',
template_folder='../templates'
)
logger = logging.getLogger(__name__)
@blueprint.context_processor
@flask_login.login_required
def inject_dictionary():
return inject_variables()
@blueprint.route('/remote/input', methods=('GET', 'POST'))
@flask_login.login_required
def remote_input():
"""Returns input information for remote administration."""
if not utils_general.user_has_permission('edit_settings'):
return redirect(url_for('routes_general.home'))
remote_hosts = Remote.query.all()
display_order_unsplit = DisplayOrder.query.first().remote_host
if display_order_unsplit:
display_order = display_order_unsplit.split(",")
else:
display_order = []
host_auth = {}
host_inputs = {}
for each_host in remote_hosts:
# Return input information about each host
headers = remote_log_in(
each_host.host, each_host.username, each_host.password_hash)
_, host_inputs[each_host.host] = remote_host_page(
each_host.host, headers, 'remote_get_inputs')
host_inputs[each_host.host] = json.loads(host_inputs[each_host.host])
return render_template('remote/input.html',
display_order=display_order,
remote_hosts=remote_hosts,
host_auth=host_auth,
host_inputs=host_inputs)
@blueprint.route('/remote/setup', methods=('GET', 'POST'))
@flask_login.login_required
def METHOD_NAME():
"""Return pages for remote administration."""
if not utils_general.user_has_permission('edit_settings'):
return redirect(url_for('routes_general.home'))
remote_hosts = Remote.query.all()
display_order_unsplit = DisplayOrder.query.first().remote_host
if display_order_unsplit:
display_order = display_order_unsplit.split(",")
else:
display_order = []
form_setup = forms_authentication.RemoteSetup()
if request.method == 'POST':
if form_setup.add.data:
utils_remote_host.remote_host_add(form_setup,
display_order)
elif form_setup.delete.data:
utils_remote_host.remote_host_del(form_setup)
return redirect('/remote/setup')
host_auth = {}
for each_host in remote_hosts:
headers = remote_log_in(
each_host.host, each_host.username, each_host.password_hash)
_, host_auth[each_host.host] = remote_host_page(
each_host.host, headers, 'auth')
return render_template('remote/setup.html',
form_setup=form_setup,
display_order=display_order,
remote_hosts=remote_hosts,
host_auth=host_auth)
@blueprint.route('/remote_get_inputs/')
@flask_login.login_required
def remote_get_inputs():
"""Checks authentication for remote admin."""
inputs = Input.query.all()
return_inputs = {}
for each_input in inputs:
return_inputs[each_input.id] = {}
return_inputs[each_input.id]['name'] = each_input.name
return_inputs[each_input.id]['device'] = each_input.device
return_inputs[each_input.id]['is_activated'] = each_input.is_activated
return jsonify(return_inputs)
|
2,777 |
fill cache
|
# Copyright (C) 2011 by Calvin Spealman ([email protected])
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Other authors:
# Calvin Spealman [email protected] @ironfroggy http://github.com/ironfroggy
# Dustin Lacewell [email protected] @dustinlacewell http://github.com/dustinlacewell
# Erik Youngren [email protected] http://artanis00.blogspot.com/ http://github.com/Artanis
# From: https://github.com/ironfroggy/straight.plugin
# Commit: be4c0113629557e02602f9720adb07634eb9d274
# Only ModuleLoader is here since we only care about it.
"""Facility to load plugins."""
from __future__ import print_function
import sys
import os
from importlib import import_module
from kwiver.vital import vital_logging
logger = vital_logging.getLogger(__name__)
class Loader(object):
def __init__(self, *args, **kwargs):
self._cache = []
def load(self, namespace):
self.METHOD_NAME(namespace)
self._post_fill()
self._order()
return self._cache
def _meta(self, plugin):
meta = getattr(plugin, '__plugin__', None)
return meta
def _post_fill(self):
for plugin in self._cache:
meta = self._meta(plugin)
if not getattr(meta, 'load', True):
self._cache.remove(plugin)
for implied_namespace in getattr(meta, 'imply_plugins', []):
plugins = self._cache
self._cache = self.load(implied_namespace)
self._post_fill()
self._cache = plugins + self._cache
def _order(self):
self._cache.sort(key=self._plugin_priority, reverse=True)
def _plugin_priority(self, plugin):
meta = self._meta(plugin)
return getattr(meta, 'priority', 0.0)
class ModuleLoader(Loader):
"""Performs the work of locating and loading straight plugins.
This looks for plugins in every location in the import path.
"""
def _isPackage(self, path):
pkg_init = os.path.join(path, '__init__.py')
if os.path.exists(pkg_init):
return True
return False
def _findPluginFilePaths(self, namespace):
"""
        Searches for modules in `namespace` that are reachable from the
        directories on `sys.path`.
Args:
namespace (str): the importable name of a python module or package
Yields:
str: mod_rel_path - the paths (relative to PYTHONPATH) of
the modules in the namespace.
"""
already_seen = set()
py_exts = ['.py', '.pyc', '.pyo']
for ext in py_exts:
if namespace.endswith(ext):
logger.warn(('do not specify .py extension for the {} '
'sprokit python module').format(namespace))
namespace = namespace[:-len(ext)]
namespace_rel_path = namespace.replace('.', os.path.sep)
# Look in each location in the path
for path in sys.path:
# Within this, we want to look for a package for the namespace
namespace_path = os.path.join(path, namespace_rel_path)
if os.path.isdir(namespace_path):
# Find all top-level modules in the namespace package
for possible in os.listdir(namespace_path):
poss_path = os.path.join(namespace_path, possible)
if os.path.isdir(poss_path):
if not self._isPackage(poss_path):
continue
base = possible
else:
base, ext = os.path.splitext(possible)
if base == '__init__' or ext != '.py':
continue
if base not in already_seen:
already_seen.add(base)
mod_rel_path = os.path.join(namespace_rel_path,
possible)
yield mod_rel_path
else:
# namespace was not a package, check if it was a pyfile
base = namespace_path
if base not in already_seen:
for ext in py_exts:
mod_fpath = base + ext
if os.path.isfile(mod_fpath):
already_seen.add(base)
mod_rel_path = namespace_rel_path + ext
yield mod_rel_path
# Dont test remaining pyc / pyo extensions.
break
def _findPluginModules(self, namespace):
for filepath in self._findPluginFilePaths(namespace):
path_segments = list(filepath.split(os.path.sep))
path_segments = [p for p in path_segments if p]
path_segments[-1] = os.path.splitext(path_segments[-1])[0]
module_name = '.'.join(path_segments)
try:
module = import_module(module_name)
except ImportError as e:
logger.warn('Could not import: {}, Reason: {}'.format(module_name, e))
import traceback
exc_info = sys.exc_info()
tbtext = ''.join(traceback.format_exception(*exc_info))
logger.debug(tbtext)
module = None
if module is not None:
yield module
def METHOD_NAME(self, namespace):
"""Load all modules found in a namespace"""
modules = self._findPluginModules(namespace)
self._cache = list(modules)
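# Hedged usage sketch (not part of the upstream module): "my_plugins" is an
# invented namespace; load() returns the imported plugin modules found for it
# on sys.path (an empty list if the namespace cannot be located).
def _demo_load_plugins(namespace="my_plugins"):
    loader = ModuleLoader()
    return loader.load(namespace)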
|
2,778 |
test urlfile iterable
|
import io
import pickle
import pytest
import responses
from cog.types import URLFile, get_filename
@responses.activate
def test_urlfile_acts_like_response():
responses.get(
"https://example.com/some/url",
json={"message": "hello world"},
status=200,
)
u = URLFile("https://example.com/some/url")
assert isinstance(u, io.IOBase)
assert u.read() == b'{"message": "hello world"}'
@responses.activate
def METHOD_NAME():
responses.get(
"https://example.com/some/url",
body="one\ntwo\nthree\n",
status=200,
)
u = URLFile("https://example.com/some/url")
result = list(u)
assert result == [b"one\n", b"two\n", b"three\n"]
@responses.activate
def test_urlfile_no_request_if_not_used():
# This test would be failed by responses if the request were actually made,
# as we've not registered the handler for it.
URLFile("https://example.com/some/url")
@responses.activate
def test_urlfile_can_be_pickled():
u = URLFile("https://example.com/some/url")
result = pickle.loads(pickle.dumps(u))
assert isinstance(result, URLFile)
@responses.activate
def test_urlfile_can_be_pickled_even_once_loaded():
responses.get(
"https://example.com/some/url",
json={"message": "hello world"},
status=200,
)
u = URLFile("https://example.com/some/url")
u.read()
result = pickle.loads(pickle.dumps(u))
assert isinstance(result, URLFile)
@pytest.mark.parametrize(
"url,filename",
[
# Simple URLs
("https://example.com/test", "test"),
("https://example.com/test.jpg", "test.jpg"),
(
"https://example.com/ហ_ត_អ_វ_ប_នជ_ក_រស_គតរបស_ព_រ_យ_ស_ម_នអ_ណ_ចម_ល_Why_Was_The_Death_Of_Jesus_So_Powerful_.m4a",
"ហ_ត_អ_វ_ប_នជ_ក_រស_គតរបស_ព_រ_យ_ស_ម_នអ_ណ_ចម_ល_Why_Was_The_Death_Of_Jesus_So_Powerful_.m4a",
),
# Data URIs
(
"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAUAAAAFCAYAAACNbyblAAAAHElEQVQI12P4//8/w38GIAXDIBKE0DHxgljNBAAO9TXL0Y4OHwAAAABJRU5ErkJggg==",
"file.png",
),
(
"data:text/plain,hello world",
"file.txt",
),
(
"data:application/data;base64,aGVsbG8gd29ybGQ=",
"file",
),
# URL-encoded filenames
(
"https://example.com/thing+with+spaces.m4a",
"thing with spaces.m4a",
),
(
"https://example.com/thing%20with%20spaces.m4a",
"thing with spaces.m4a",
),
(
"https://example.com/%E1%9E%A0_%E1%9E%8F_%E1%9E%A2_%E1%9E%9C_%E1%9E%94_%E1%9E%93%E1%9E%87_%E1%9E%80_%E1%9E%9A%E1%9E%9F_%E1%9E%82%E1%9E%8F%E1%9E%9A%E1%9E%94%E1%9E%9F_%E1%9E%96_%E1%9E%9A_%E1%9E%99_%E1%9E%9F_%E1%9E%98_%E1%9E%93%E1%9E%A2_%E1%9E%8E_%E1%9E%85%E1%9E%98_%E1%9E%9B_Why_Was_The_Death_Of_Jesus_So_Powerful_.m4a",
"ហ_ត_អ_វ_ប_នជ_ក_រស_គតរបស_ព_រ_យ_ស_ម_នអ_ណ_ចម_ល_Why_Was_The_Death_Of_Jesus_So_Powerful_.m4a",
),
# Illegal characters
("https://example.com/nulbytes\u0000.wav", "nulbytes_.wav"),
("https://example.com/nulbytes%00.wav", "nulbytes_.wav"),
("https://example.com/path%2Ftraversal.dat", "path_traversal.dat"),
# Long filenames
(
"https://example.com/some/path/Biden_Trump_sows_chaos_makes_things_worse_U_S_hits_more_than_six_million_COVID_cases_WAPO_Trump_health_advisor_is_pushing_herd_immunity_strategy_despite_warnings_from_Fauci_medical_officials_Biden_says_he_hopes_to_be_able_to_visit_Wisconsin_as_governor_tells_Trump_to_stay_home_.mp3",
"Biden_Trump_sows_chaos_makes_things_worse_U_S_hits_more_than_six_million_COVID_cases_WAPO_Trump_health_advisor_is_pushing_herd_immunity_strategy_despite_warnings_from_Fauci_medical_officials_Bide~.mp3",
),
(
"https://coppermerchants.example/complaints/𒀀𒈾𒂍𒀀𒈾𒍢𒅕𒆠𒉈𒈠𒌝𒈠𒈾𒀭𒉌𒈠𒀀𒉡𒌑𒈠𒋫𒀠𒇷𒆪𒆠𒀀𒄠𒋫𒀝𒁉𒄠𒌝𒈠𒀜𒋫𒀀𒈠𒄖𒁀𒊑𒁕𒄠𒆪𒁴𒀀𒈾𒄀𒅖𒀭𒂗𒍪𒀀𒈾𒀜𒁲𒅔𒋫𒀠𒇷𒅅𒈠𒋫𒀝𒁉𒀀𒄠.tablet",
"𒀀𒈾𒂍𒀀𒈾𒍢𒅕𒆠𒉈𒈠𒌝𒈠𒈾𒀭𒉌𒈠𒀀𒉡𒌑𒈠𒋫𒀠𒇷𒆪𒆠𒀀𒄠𒋫𒀝𒁉𒄠𒌝𒈠𒀜𒋫𒀀𒈠𒄖𒁀𒊑𒁕𒄠𒆪𒁴𒀀𒈾𒄀𒅖~.tablet",
),
],
)
def test_get_filename(url, filename):
assert get_filename(url) == filename
|
2,779 |
token
|
from mage_ai.api.errors import ApiError
from mage_ai.api.resources.DatabaseResource import DatabaseResource
from mage_ai.api.utils import get_access_for_roles
from mage_ai.authentication.oauth2 import encode_token, generate_access_token
from mage_ai.authentication.passwords import (
create_bcrypt_hash,
generate_salt,
verify_password,
)
from mage_ai.data_preparation.repo_manager import get_project_uuid
from mage_ai.orchestration.constants import Entity
from mage_ai.orchestration.db import safe_db_query
from mage_ai.orchestration.db.models.oauth import Permission, Role, User
from mage_ai.shared.hash import extract, ignore_keys
from mage_ai.usage_statistics.logger import UsageStatisticLogger
class UserResource(DatabaseResource):
model_class = User
def __init__(self, model, current_user, **kwargs):
super().__init__(model, current_user, **kwargs)
self.group = None
@classmethod
@safe_db_query
def collection(self, query_arg, meta, user, **kwargs):
results = (
User.
query.
order_by(User.username.asc())
)
if user and user.is_admin:
results = list(filter(lambda user: user.project_access & 3 == 0, results))
return results
@classmethod
@safe_db_query
async def create(self, payload, user, **kwargs):
email = payload.get('email')
password = payload.get('password')
password_confirmation = payload.get('password_confirmation')
username = payload.get('username')
error = ApiError.RESOURCE_INVALID.copy()
role_ids = payload.get('roles_new', [])
roles_new = self.check_roles(role_ids)
payload['roles_new'] = roles_new
missing_values = []
for key in ['email', 'password']:
if not payload.get(key):
missing_values.append(key)
if len(missing_values) >= 1:
error.update(
{'message': 'Missing required values: {}.'.format(', '.join(missing_values))})
raise ApiError(error)
if email:
user = User.query.filter(User.email == email).first()
if user:
error.update(
{'message': f'Account with same email is already taken: {email}.'})
raise ApiError(error)
if username:
user = User.query.filter(User.username == username).first()
if user:
error.update(
{'message': f'Account with same username is already taken: {username}.'})
raise ApiError(error)
if len(password) < 8:
error.update(
{'message': 'Password must be 8 characters or longer.'})
raise ApiError(error)
if password != password_confirmation:
error.update(
{'message': 'Password and password confirmation do not match.'})
raise ApiError(error)
password_salt = generate_salt()
payload['email'] = email
payload['password_hash'] = create_bcrypt_hash(password, password_salt)
payload['password_salt'] = password_salt
resource = super().create(extract(payload, [
'avatar',
'email',
'first_name',
'last_name',
'password_hash',
'password_salt',
'roles',
'roles_new',
'username',
]), user, **kwargs)
if 'oauth_client' in kwargs:
oauth_token = generate_access_token(
resource.model, kwargs['oauth_client'])
resource.model_options['oauth_token'] = oauth_token
async def _create_callback(resource):
await UsageStatisticLogger().users_impression()
self.on_create_callback = _create_callback
return resource
@safe_db_query
def update(self, payload, **kwargs):
error = ApiError.RESOURCE_INVALID.copy()
if 'roles_new' in payload:
role_ids = payload.get('roles_new', [])
roles_new = self.check_roles(role_ids)
payload['roles_new'] = roles_new
access = get_access_for_roles(
roles_new,
Entity.PROJECT,
get_project_uuid(),
)
if self.current_user.is_admin:
if self.owner:
error.update(
{'message': 'Admins cannot update users who are Owners.'})
raise ApiError(error)
elif self.is_admin and self.current_user.id != self.id:
error.update(
{'message': 'Admins cannot update users who are Admins.'})
raise ApiError(error)
elif access & Permission.Access.ADMIN != 0:
error.update(
{'message': 'Admins cannot make other users Admins.'})
raise ApiError(error)
elif access & Permission.Access.OWNER != 0:
error.update(
{'message': 'Admins cannot make other users Owners.'})
raise ApiError(error)
password = payload.get('password')
if password:
password_current = payload.get('password_current')
password_confirmation = payload.get('password_confirmation')
if self.current_user.id == self.id or \
(not self.current_user.owner and self.current_user.roles & 1 == 0):
if not password_current or not verify_password(
password_current,
self.password_hash,
):
error.update(
{'message': 'Current password is incorrect.'})
raise ApiError(error)
if len(password) < 8:
error.update(
{'message': 'Password must be 8 characters or longer.'})
raise ApiError(error)
if password != password_confirmation:
error.update(
{'message': 'Password and password confirmation do not match.'})
raise ApiError(error)
password_salt = generate_salt()
payload['password_hash'] = create_bcrypt_hash(password, password_salt)
payload['password_salt'] = password_salt
return super().update(ignore_keys(payload, [
'password',
'password_confirmation',
'password_current',
'owner',
'project_access',
'roles_display',
]), **kwargs)
@safe_db_query
def METHOD_NAME(self):
oauth_token = self.model_options.get('oauth_token')
if oauth_token:
return encode_token(oauth_token.METHOD_NAME, oauth_token.expires)
@classmethod
@safe_db_query
def check_roles(self, role_ids):
missing_ids = []
roles_new = []
for role_id in role_ids:
role = Role.query.get(int(role_id))
if role is None:
missing_ids.append(role_id)
else:
roles_new.append(role)
if len(missing_ids) > 0:
error = ApiError.RESOURCE_INVALID.copy()
error.update(
{'message': f'Roles with ids: {missing_ids} do not exist'})
raise ApiError(error)
return roles_new
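    # Illustrative payload (hypothetical values) that satisfies the checks in
    # create(): email and password present, password at least 8 characters and
    # matching its confirmation, and 'roles_new' holding ids of existing Roles:
    #   {'email': '[email protected]', 'username': 'new_user',
    #    'password': 'changeme123', 'password_confirmation': 'changeme123',
    #    'roles_new': [1]}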
|
2,780 |
get presences for realm
|
import datetime
import time
from collections import defaultdict
from typing import Any, Dict, Mapping, Optional, Sequence, Set
from django.conf import settings
from django.utils.timezone import now as timezone_now
from zerver.lib.timestamp import datetime_to_timestamp
from zerver.models import PushDeviceToken, Realm, UserPresence, UserProfile, query_for_ids
def get_presence_dicts_for_rows(
all_rows: Sequence[Mapping[str, Any]], mobile_user_ids: Set[int], slim_presence: bool
) -> Dict[str, Dict[str, Any]]:
if slim_presence:
# Stringify user_id here, since it's gonna be turned
# into a string anyway by JSON, and it keeps mypy happy.
get_user_key = lambda row: str(row["user_profile_id"])
get_user_presence_info = get_modern_user_presence_info
else:
get_user_key = lambda row: row["user_profile__email"]
get_user_presence_info = get_legacy_user_presence_info
user_statuses: Dict[str, Dict[str, Any]] = {}
for presence_row in all_rows:
user_key = get_user_key(presence_row)
last_active_time = user_presence_datetime_with_date_joined_default(
presence_row["last_active_time"], presence_row["user_profile__date_joined"]
)
last_connected_time = user_presence_datetime_with_date_joined_default(
presence_row["last_connected_time"], presence_row["user_profile__date_joined"]
)
info = get_user_presence_info(
last_active_time,
last_connected_time,
)
user_statuses[user_key] = info
return user_statuses
def user_presence_datetime_with_date_joined_default(
dt: Optional[datetime.datetime], date_joined: datetime.datetime
) -> datetime.datetime:
"""
    Our data models allow UserPresence objects to have None values
    for last_active_time/last_connected_time. The legacy API,
    however, has always sent timestamps, so for backward
    compatibility we cannot send such values through the API and need
    to default to a sane datetime.
    This helper function expects to take a last_active_time or
    last_connected_time value and the date_joined of the user, which
    will serve as the default value if the first argument is None.
"""
if dt is None:
return date_joined
return dt
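# Example (hypothetical values): for a user with no presence recorded yet, both
# timestamps fall back to the account creation time:
#   user_presence_datetime_with_date_joined_default(None, date_joined) -> date_joined
#   user_presence_datetime_with_date_joined_default(last_active, date_joined) -> last_active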
def get_modern_user_presence_info(
last_active_time: datetime.datetime, last_connected_time: datetime.datetime
) -> Dict[str, Any]:
# TODO: Do further bandwidth optimizations to this structure.
result = {}
result["active_timestamp"] = datetime_to_timestamp(last_active_time)
result["idle_timestamp"] = datetime_to_timestamp(last_connected_time)
return result
def get_legacy_user_presence_info(
last_active_time: datetime.datetime, last_connected_time: datetime.datetime
) -> Dict[str, Any]:
"""
Reformats the modern UserPresence data structure so that legacy
API clients can still access presence data.
We expect this code to remain mostly unchanged until we can delete it.
"""
# Now we put things together in the legacy presence format with
# one client + an `aggregated` field.
#
# TODO: Look at whether we can drop to just the "aggregated" field
# if no clients look at the rest.
most_recent_info = format_legacy_presence_dict(last_active_time, last_connected_time)
result = {}
# The word "aggregated" here is possibly misleading.
# It's really just the most recent client's info.
result["aggregated"] = dict(
client=most_recent_info["client"],
status=most_recent_info["status"],
timestamp=most_recent_info["timestamp"],
)
result["website"] = most_recent_info
return result
def format_legacy_presence_dict(
last_active_time: datetime.datetime, last_connected_time: datetime.datetime
) -> Dict[str, Any]:
"""
This function assumes it's being called right after the presence object was updated,
and is not meant to be used on old presence data.
"""
if (
last_active_time
+ datetime.timedelta(seconds=settings.PRESENCE_LEGACY_EVENT_OFFSET_FOR_ACTIVITY_SECONDS)
>= last_connected_time
):
status = UserPresence.LEGACY_STATUS_ACTIVE
timestamp = datetime_to_timestamp(last_active_time)
else:
status = UserPresence.LEGACY_STATUS_IDLE
timestamp = datetime_to_timestamp(last_connected_time)
# This field was never used by clients of the legacy API, so we
# just set it to a fixed value for API format compatibility.
pushable = False
return dict(client="website", status=status, timestamp=timestamp, pushable=pushable)
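# Example of the legacy shape produced above (illustrative timestamp): when
# last_active_time is within the configured offset of last_connected_time the
# result looks like
#   {"client": "website", "status": UserPresence.LEGACY_STATUS_ACTIVE,
#    "timestamp": 1690000000, "pushable": False}
# otherwise status is UserPresence.LEGACY_STATUS_IDLE and the timestamp comes
# from last_connected_time.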
def get_presence_for_user(
user_profile_id: int, slim_presence: bool = False
) -> Dict[str, Dict[str, Any]]:
query = UserPresence.objects.filter(user_profile_id=user_profile_id).values(
"last_active_time",
"last_connected_time",
"user_profile__email",
"user_profile_id",
"user_profile__enable_offline_push_notifications",
"user_profile__date_joined",
)
presence_rows = list(query)
mobile_user_ids: Set[int] = set()
if PushDeviceToken.objects.filter(user_id=user_profile_id).exists(): # nocoverage
# TODO: Add a test, though this is low priority, since we don't use mobile_user_ids yet.
mobile_user_ids.add(user_profile_id)
return get_presence_dicts_for_rows(presence_rows, mobile_user_ids, slim_presence)
def get_presence_dict_by_realm(
realm_id: int, slim_presence: bool = False
) -> Dict[str, Dict[str, Any]]:
two_weeks_ago = timezone_now() - datetime.timedelta(weeks=2)
query = UserPresence.objects.filter(
realm_id=realm_id,
last_connected_time__gte=two_weeks_ago,
user_profile__is_active=True,
user_profile__is_bot=False,
).values(
"last_active_time",
"last_connected_time",
"user_profile__email",
"user_profile_id",
"user_profile__enable_offline_push_notifications",
"user_profile__date_joined",
)
presence_rows = list(query)
mobile_query = PushDeviceToken.objects.distinct("user_id").values_list(
"user_id",
flat=True,
)
user_profile_ids = [presence_row["user_profile_id"] for presence_row in presence_rows]
if len(user_profile_ids) == 0:
# This conditional is necessary because query_for_ids
# throws an exception if passed an empty list.
#
# It's not clear this condition is actually possible,
# though, because it shouldn't be possible to end up with
# a realm with 0 active users.
return {}
mobile_query_ids = query_for_ids(
query=mobile_query,
user_ids=user_profile_ids,
field="user_id",
)
mobile_user_ids = set(mobile_query_ids)
return get_presence_dicts_for_rows(presence_rows, mobile_user_ids, slim_presence)
def METHOD_NAME(
realm: Realm, slim_presence: bool
) -> Dict[str, Dict[str, Dict[str, Any]]]:
if realm.presence_disabled:
# Return an empty dict if presence is disabled in this realm
return defaultdict(dict)
return get_presence_dict_by_realm(realm.id, slim_presence)
def get_presence_response(
requesting_user_profile: UserProfile, slim_presence: bool
) -> Dict[str, Any]:
realm = requesting_user_profile.realm
server_timestamp = time.time()
presences = METHOD_NAME(realm, slim_presence)
return dict(presences=presences, server_timestamp=server_timestamp)
|
2,781 |
should be trimmed
|
from typing import Optional
from robot.api.parsing import CommentSection, EmptyLine, Token
try:
from robot.api.parsing import Config # from RF 6.0
except ImportError:
Config = None
from robotidy.disablers import skip_section_if_disabled
from robotidy.skip import Skip
from robotidy.transformers import Transformer
from robotidy.utils import is_suite_templated
class NormalizeNewLines(Transformer):
"""
Normalize new lines.
Ensure that there is exactly:
- ``section_lines = 1`` empty lines between sections,
- ``test_case_lines = 1`` empty lines between test cases,
- ``keyword_lines = test_case_lines`` empty lines between keywords.
    Removes empty lines after a section header (and before any data) and appends 1 empty line at the end of the file.
    Consecutive empty lines inside settings, variables, keywords and test cases are also removed
    (configurable via ``consecutive_lines = 1``). If set to 0, all empty lines are removed.
    If the suite contains a Test Template, tests will not be separated by empty lines unless ``separate_templated_tests``
    is set to True.
"""
HANDLES_SKIP = frozenset({"skip_sections"})
WHITESPACE_TOKENS = {Token.EOL, Token.SEPARATOR}
def __init__(
self,
test_case_lines: int = 1,
keyword_lines: Optional[int] = None,
section_lines: int = 2,
separate_templated_tests: bool = False,
consecutive_lines: int = 1,
skip: Skip = None,
):
super().__init__(skip)
self.test_case_lines = test_case_lines
self.keyword_lines = keyword_lines if keyword_lines is not None else test_case_lines
self.section_lines = section_lines
self.separate_templated_tests = separate_templated_tests
self.consecutive_lines = consecutive_lines
self.last_section = None
self.last_test = None
self.last_keyword = None
self.templated = False
def visit_File(self, node): # noqa
self.templated = not self.separate_templated_tests and is_suite_templated(node)
self.last_section = node.sections[-1] if node.sections else None
return self.generic_visit(node)
def METHOD_NAME(self, node):
"""
        Check whether the given section should have its empty lines trimmed.
        A section should not be trimmed if it contains only the language marker and no more than
        the allowed number of section empty lines.
"""
if not isinstance(node, CommentSection) or not Config:
return True
language_marker_only = False
empty_lines = 0
for statement in node.body:
if isinstance(statement, Config):
language_marker_only = True
elif isinstance(statement, EmptyLine):
empty_lines += 1
if empty_lines > self.section_lines:
return True
else:
return True
return not language_marker_only
@skip_section_if_disabled
def visit_Section(self, node): # noqa
METHOD_NAME = self.METHOD_NAME(node)
if METHOD_NAME:
self.trim_empty_lines(node)
if node is self.last_section:
return self.generic_visit(node)
if METHOD_NAME:
empty_line = EmptyLine.from_params()
node.body.extend([empty_line] * self.section_lines)
return self.generic_visit(node)
def visit_TestCaseSection(self, node): # noqa
self.last_test = node.body[-1] if node.body else None
return self.visit_Section(node)
def visit_KeywordSection(self, node): # noqa
self.last_keyword = node.body[-1] if node.body else None
return self.visit_Section(node)
def visit_TestCase(self, node): # noqa
self.trim_empty_lines(node)
if node is not self.last_test and not self.templated:
node.body.extend([EmptyLine.from_params()] * self.test_case_lines)
return self.generic_visit(node)
def visit_Keyword(self, node): # noqa
self.trim_empty_lines(node)
if node is not self.last_keyword:
node.body.extend([EmptyLine.from_params()] * self.keyword_lines)
return self.generic_visit(node)
def visit_If(self, node): # noqa
self.trim_empty_lines(node)
return self.generic_visit(node)
visit_For = visit_While = visit_Try = visit_If
def visit_Statement(self, node): # noqa
tokens = []
cont = node.get_token(Token.CONTINUATION)
for line in node.lines:
if cont and all(token.type in self.WHITESPACE_TOKENS for token in line):
continue
if line[-1].type == Token.EOL:
line[-1].value = "\n"
tokens.extend(line)
node.tokens = tokens
return node
def trim_empty_lines(self, node):
self.trim_leading_empty_lines(node)
self.trim_trailing_empty_lines(node)
self.trim_consecutive_empty_lines(node)
@staticmethod
def trim_trailing_empty_lines(node):
if not hasattr(node, "body"):
return
while node.body and isinstance(node.body[-1], EmptyLine):
node.body.pop()
@staticmethod
def trim_leading_empty_lines(node):
while node.body and isinstance(node.body[0], EmptyLine):
node.body.pop(0)
def trim_consecutive_empty_lines(self, node):
empty_count = 0
nodes = []
for child in node.body:
if isinstance(child, EmptyLine):
empty_count += 1
else:
empty_count = 0
if empty_count <= self.consecutive_lines:
nodes.append(child)
node.body = nodes
|
2,782 |
test build gecos field
|
"""
:codeauthor: Jayesh Kariya <[email protected]>
"""
import pytest
import salt.modules.useradd as useradd
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import MagicMock, patch
from tests.support.unit import TestCase
try:
import pwd
HAS_PWD = True
except ImportError:
HAS_PWD = False
class UserAddTestCase(TestCase, LoaderModuleMockMixin):
"""
Test cases for salt.modules.useradd
"""
def setup_loader_modules(self):
return {useradd: {}}
@classmethod
def setUpClass(cls):
cls.mock_pwall = {
"gid": 0,
"groups": ["root"],
"home": "/root",
"name": "root",
"passwd": "x",
"shell": "/bin/bash",
"uid": 0,
"fullname": "root",
"roomnumber": "",
"workphone": "",
"homephone": "",
"other": "",
}
@classmethod
def tearDownClass(cls):
del cls.mock_pwall
# 'getent' function tests: 2
@pytest.mark.skipif(HAS_PWD is False, reason="The pwd module is not available")
def test_getent(self):
"""
Test if user.getent already have a value
"""
with patch("salt.modules.useradd.__context__", MagicMock(return_value="Salt")):
self.assertTrue(useradd.getent())
@pytest.mark.skipif(HAS_PWD is False, reason="The pwd module is not available")
def test_getent_user(self):
"""
Tests the return information on all users
"""
with patch("pwd.getpwall", MagicMock(return_value=[""])):
ret = [
{
"gid": 0,
"groups": ["root"],
"home": "/root",
"name": "root",
"passwd": "x",
"shell": "/bin/bash",
"uid": 0,
"fullname": "root",
"roomnumber": "",
"workphone": "",
"homephone": "",
"other": "",
}
]
with patch(
"salt.modules.useradd._format_info",
MagicMock(return_value=self.mock_pwall),
):
self.assertEqual(useradd.getent(), ret)
# 'info' function tests: 1
@pytest.mark.skipif(HAS_PWD is False, reason="The pwd module is not available")
def test_info(self):
"""
Test the user information
"""
self.assertEqual(useradd.info("username-that-does-not-exist"), {})
mock = MagicMock(
return_value=pwd.struct_passwd(
(
"_TEST_GROUP",
"*",
83,
83,
"AMaViS Daemon",
"/var/virusmails",
"/usr/bin/false",
)
)
)
with patch.object(pwd, "getpwnam", mock):
self.assertEqual(
useradd.info("username-that-does-not-exist")["name"], "_TEST_GROUP"
)
# 'list_groups' function tests: 1
def test_list_groups(self):
"""
Test if it return a list of groups the named user belongs to
"""
with patch("salt.utils.user.get_group_list", MagicMock(return_value="Salt")):
self.assertEqual(useradd.list_groups("name"), "Salt")
# 'list_users' function tests: 1
@pytest.mark.skipif(HAS_PWD is False, reason="The pwd module is not available")
def test_list_users(self):
"""
Test if it returns a list of all users
"""
self.assertTrue(useradd.list_users())
def METHOD_NAME(self):
"""
Test if gecos fields are built correctly (removing trailing commas)
"""
test_gecos = {
"fullname": "Testing",
"roomnumber": 1234,
"workphone": 22222,
"homephone": 99999,
}
expected_gecos_fields = "Testing,1234,22222,99999"
self.assertEqual(useradd._build_gecos(test_gecos), expected_gecos_fields)
test_gecos.pop("roomnumber")
test_gecos.pop("workphone")
expected_gecos_fields = "Testing,,,99999"
self.assertEqual(useradd._build_gecos(test_gecos), expected_gecos_fields)
test_gecos.pop("homephone")
expected_gecos_fields = "Testing"
self.assertEqual(useradd._build_gecos(test_gecos), expected_gecos_fields)
|
2,783 |
add image
|
"""
DICOMSeries outlines an instance of the DICOM series that can be found within a study
"""
from PySide6.QtCore import Qt
from src.Model.DICOM.DICOMWidgetItem import DICOMWidgetItem
class Series:
"""Holds a set of images within an individual series within a DICOM study"""
def __init__(self, series_uid):
"""
images: A dictionary of Image objects.
:param series_uid: SeriesInstanceUID in DICOM standard.
"""
self.series_uid = series_uid
self.series_description = None
self.images = {}
self.frame_of_reference_uid = ""
def METHOD_NAME(self, image):
"""
Adds an Image object to the patient's dictionary of images.
:param image: An Image object.
"""
self.images[image.image_uid] = image
def add_referenced_objects(self, dicom_file):
"""Adds referenced dicom file objects to Series"""
if "FrameOfReferenceUID" in dicom_file:
self.frame_of_reference_uid = dicom_file.FrameOfReferenceUID
if dicom_file.Modality == "RTSTRUCT":
self.add_referenced_image_series(dicom_file)
elif dicom_file.Modality == "RTPLAN":
self.add_referenced_rtstruct(dicom_file)
elif dicom_file.Modality == "RTDOSE":
self.add_referenced_rtstruct(dicom_file)
self.add_referenced_rtplan(dicom_file)
elif dicom_file.Modality == "SR":
self.referenced_frame_of_reference_uid = \
dicom_file.ReferencedFrameOfReferenceUID
def add_referenced_image_series(self, dicom_file):
"""adds referenced images to series """
if "ReferencedFrameOfReferenceSequence" in dicom_file:
ref_frame = dicom_file.ReferencedFrameOfReferenceSequence
if "RTReferencedStudySequence" in ref_frame[0]:
ref_study = ref_frame[0].RTReferencedStudySequence[0]
if "RTReferencedSeriesSequence" in ref_study:
if "SeriesInstanceUID" in \
ref_study.RTReferencedSeriesSequence[0]:
ref_series = ref_study.RTReferencedSeriesSequence[0]
self.ref_image_series_uid = \
ref_series.SeriesInstanceUID
else:
self.ref_image_series_uid = ''
def add_referenced_rtstruct(self, dicom_file):
"""adds referenced rtstruct to series"""
if "ReferencedStructureSetSequence" in dicom_file:
self.ref_rtstruct_instance_uid = \
dicom_file.ReferencedStructureSetSequence[
0].ReferencedSOPInstanceUID
else:
self.ref_rtstruct_instance_uid = ''
def add_referenced_rtplan(self, dicom_file):
"""adds referenced rtplan to series"""
if "ReferencedRTPlanSequence" in dicom_file:
self.ref_rtplan_instance_uid = \
dicom_file.ReferencedRTPlanSequence[
0].ReferencedSOPInstanceUID
else:
self.ref_rtplan_instance_uid = ''
def has_image(self, image_uid):
"""
:param image_uid: A SOPInstanceUID to check.
:return: True if images contains image_uid.
"""
return image_uid in self.images
def get_image(self, image_uid):
"""
:param image_uid: ImageID to check
:return: Image object if Image found.
"""
        if self.has_image(image_uid):
return self.images[image_uid]
return None
def get_files(self):
"""
:return: List of all filepaths in all images below this item in the
hierarchy.
"""
filepaths = []
for image_uid, image in self.images.items():
filepaths += [image.path]
return filepaths
def output_as_text(self):
"""
:return: Information about the object as a string
"""
return f"Series: {self.series_description} " \
f"({self.get_series_type()}, {len(self.images)} images)"
def get_series_type(self):
"""
:return: List of string or single string containing modalities of all
images in the series.
"""
series_types = set()
for image_uid, image in self.images.items():
series_types.add(image.modality)
return series_types if len(series_types) > 1 else series_types.pop()
def get_instance_uid(self):
"""
:return: List of string or single string containing instance uid of all
images in the series.
"""
instance_uid = []
for image_instance_uid, image in self.images.items():
instance_uid.append(image_instance_uid)
return instance_uid if len(instance_uid) > 1 else instance_uid.pop()
def get_widget_item(self):
"""
:return: DICOMWidgetItem to be used in a QTreeWidget.
"""
widget_item = DICOMWidgetItem(self.output_as_text(), self)
widget_item.setFlags(widget_item.flags() | Qt.ItemIsUserCheckable)
widget_item.setCheckState(0, Qt.Unchecked)
return widget_item
|
2,784 |
id
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetReportByBillingAccountResult',
'AwaitableGetReportByBillingAccountResult',
'get_report_by_billing_account',
'get_report_by_billing_account_output',
]
@pulumi.output_type
class GetReportByBillingAccountResult:
"""
A report resource.
"""
def __init__(__self__, definition=None, delivery_info=None, format=None, METHOD_NAME=None, name=None, schedule=None, tags=None, type=None):
if definition and not isinstance(definition, dict):
raise TypeError("Expected argument 'definition' to be a dict")
pulumi.set(__self__, "definition", definition)
if delivery_info and not isinstance(delivery_info, dict):
raise TypeError("Expected argument 'delivery_info' to be a dict")
pulumi.set(__self__, "delivery_info", delivery_info)
if format and not isinstance(format, str):
raise TypeError("Expected argument 'format' to be a str")
pulumi.set(__self__, "format", format)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", METHOD_NAME)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if schedule and not isinstance(schedule, dict):
raise TypeError("Expected argument 'schedule' to be a dict")
pulumi.set(__self__, "schedule", schedule)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def definition(self) -> 'outputs.ReportDefinitionResponse':
"""
Has definition for the report.
"""
return pulumi.get(self, "definition")
@property
@pulumi.getter(name="deliveryInfo")
def delivery_info(self) -> 'outputs.ReportDeliveryInfoResponse':
"""
Has delivery information for the report.
"""
return pulumi.get(self, "delivery_info")
@property
@pulumi.getter
def format(self) -> Optional[str]:
"""
The format of the report being delivered.
"""
return pulumi.get(self, "format")
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
Resource Id.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def schedule(self) -> Optional['outputs.ReportScheduleResponse']:
"""
Has schedule information for the report.
"""
return pulumi.get(self, "schedule")
@property
@pulumi.getter
def tags(self) -> Mapping[str, str]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetReportByBillingAccountResult(GetReportByBillingAccountResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetReportByBillingAccountResult(
definition=self.definition,
delivery_info=self.delivery_info,
format=self.format,
METHOD_NAME=self.METHOD_NAME,
name=self.name,
schedule=self.schedule,
tags=self.tags,
type=self.type)
def get_report_by_billing_account(billing_account_id: Optional[str] = None,
report_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetReportByBillingAccountResult:
"""
Gets the report for a billing account by report name.
:param str billing_account_id: BillingAccount ID
:param str report_name: Report Name.
"""
__args__ = dict()
__args__['billingAccountId'] = billing_account_id
__args__['reportName'] = report_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:costmanagement/v20180801preview:getReportByBillingAccount', __args__, opts=opts, typ=GetReportByBillingAccountResult).value
return AwaitableGetReportByBillingAccountResult(
definition=pulumi.get(__ret__, 'definition'),
delivery_info=pulumi.get(__ret__, 'delivery_info'),
format=pulumi.get(__ret__, 'format'),
METHOD_NAME=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
schedule=pulumi.get(__ret__, 'schedule'),
tags=pulumi.get(__ret__, 'tags'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_report_by_billing_account)
def get_report_by_billing_account_output(billing_account_id: Optional[pulumi.Input[str]] = None,
report_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetReportByBillingAccountResult]:
"""
Gets the report for a billing account by report name.
:param str billing_account_id: BillingAccount ID
:param str report_name: Report Name.
"""
...
|
2,785 |
unpack range
|
import numpy as np
import unittest
from numba.core.compiler import compile_isolated, Flags
from numba.core import errors, types
from numba import typeof
from numba.tests.support import TestCase, MemoryLeakMixin, tag
enable_pyobj_flags = Flags()
enable_pyobj_flags.enable_pyobject = True
force_pyobj_flags = Flags()
force_pyobj_flags.force_pyobject = True
no_pyobj_flags = Flags()
no_pyobj_flags.nrt = True
def unpack_list(l):
a, b, c = l
return (a, b, c)
def unpack_shape(a):
x, y, z = a.shape
return x + y + z
def METHOD_NAME():
a, b, c = range(3)
return a + b + c
def unpack_range_too_small():
a, b, c = range(2)
return a + b + c
def unpack_range_too_large():
a, b, c = range(4)
return a + b + c
def unpack_tuple():
a, b, c = (1, 2, 3)
return a + b + c
def unpack_tuple_too_small():
a, b, c = (1, 2)
return a + b + c
def unpack_tuple_too_large():
a, b, c = (1, 2, 3, 4)
return a + b + c
def unpack_heterogeneous_tuple_too_small():
a, b, c = (1, 2.5j)
return a + b + c
def unpack_heterogeneous_tuple_too_large():
a, b, c = (1, 2.5, 3j, 4)
return a + b + c
def unpack_heterogeneous_tuple():
a, b, c = (1, 2.5, 3j)
return a + b + c
def unpack_nested_heterogeneous_tuple():
a, (b, c) = (1, (2.5, 3j))
return a + b + c
def unpack_arbitrary(seq):
a, b = seq
return b, a
def unpack_nrt():
a = np.zeros(1)
b = np.zeros(2)
tup = b, a
alpha, beta = tup
return alpha, beta
def chained_unpack_assign1(x, y):
# Used to fail in object mode (issue #580)
a = (b, c) = (x, y)
(d, e) = a
return d + e + b + c
def conditional_swap(x, y):
# Used to produce invalid code (issue #977)
if x > 0:
x, y = y, x
return x, y
class TestUnpack(MemoryLeakMixin, TestCase):
def test_unpack_list(self):
pyfunc = unpack_list
cr = compile_isolated(pyfunc, (), flags=force_pyobj_flags)
cfunc = cr.entry_point
l = [1, 2, 3]
self.assertEqual(cfunc(l), pyfunc(l))
def test_unpack_shape(self, flags=force_pyobj_flags):
pyfunc = unpack_shape
cr = compile_isolated(pyfunc, [types.Array(dtype=types.int32,
ndim=3,
layout='C')],
flags=flags)
cfunc = cr.entry_point
a = np.zeros(shape=(1, 2, 3)).astype(np.int32)
self.assertPreciseEqual(cfunc(a), pyfunc(a))
def test_unpack_shape_npm(self):
self.test_unpack_shape(flags=no_pyobj_flags)
def test_unpack_range(self, flags=force_pyobj_flags):
self.run_nullary_func(METHOD_NAME, flags)
def test_unpack_range_npm(self):
self.test_unpack_range(flags=no_pyobj_flags)
def test_unpack_tuple(self, flags=force_pyobj_flags):
self.run_nullary_func(unpack_tuple, flags)
def test_unpack_tuple_npm(self):
self.test_unpack_tuple(flags=no_pyobj_flags)
def test_unpack_heterogeneous_tuple(self, flags=force_pyobj_flags):
self.run_nullary_func(unpack_heterogeneous_tuple, flags)
def test_unpack_heterogeneous_tuple_npm(self):
self.test_unpack_heterogeneous_tuple(flags=no_pyobj_flags)
def test_unpack_nested_heterogeneous_tuple(self, flags=force_pyobj_flags):
self.run_nullary_func(unpack_nested_heterogeneous_tuple, flags)
def test_unpack_nested_heterogeneous_tuple_npm(self):
self.test_unpack_nested_heterogeneous_tuple(flags=no_pyobj_flags)
def test_chained_unpack_assign(self, flags=force_pyobj_flags):
pyfunc = chained_unpack_assign1
cr = compile_isolated(pyfunc, [types.int32, types.int32],
flags=flags)
cfunc = cr.entry_point
args = (4, 5)
self.assertPreciseEqual(cfunc(*args), pyfunc(*args))
def test_chained_unpack_assign_npm(self):
self.test_chained_unpack_assign(flags=no_pyobj_flags)
def check_unpack_error(self, pyfunc, flags=force_pyobj_flags, exc=ValueError):
with self.assertRaises(exc):
cr = compile_isolated(pyfunc, (), flags=flags)
cfunc = cr.entry_point
cfunc()
def test_unpack_tuple_too_small(self):
self.check_unpack_error(unpack_tuple_too_small)
self.check_unpack_error(unpack_heterogeneous_tuple_too_small)
def test_unpack_tuple_too_small_npm(self):
self.check_unpack_error(unpack_tuple_too_small, no_pyobj_flags,
errors.TypingError)
self.check_unpack_error(unpack_heterogeneous_tuple_too_small,
no_pyobj_flags, errors.TypingError)
def test_unpack_tuple_too_large(self):
self.check_unpack_error(unpack_tuple_too_large)
self.check_unpack_error(unpack_heterogeneous_tuple_too_large)
def test_unpack_tuple_too_large_npm(self):
self.check_unpack_error(unpack_tuple_too_large, no_pyobj_flags,
errors.TypingError)
self.check_unpack_error(unpack_heterogeneous_tuple_too_large,
no_pyobj_flags, errors.TypingError)
def test_unpack_range_too_small(self):
self.check_unpack_error(unpack_range_too_small)
def test_unpack_range_too_small_npm(self):
self.check_unpack_error(unpack_range_too_small, no_pyobj_flags)
def test_unpack_range_too_large(self):
self.check_unpack_error(unpack_range_too_large)
def test_unpack_range_too_large_npm(self):
self.check_unpack_error(unpack_range_too_large, no_pyobj_flags)
def check_conditional_swap(self, flags=force_pyobj_flags):
cr = compile_isolated(conditional_swap, (types.int32, types.int32),
flags=flags)
cfunc = cr.entry_point
self.assertPreciseEqual(cfunc(4, 5), (5, 4))
self.assertPreciseEqual(cfunc(0, 5), (0, 5))
def test_conditional_swap(self):
self.check_conditional_swap()
def test_conditional_swap_npm(self):
self.check_conditional_swap(no_pyobj_flags)
def test_unpack_tuple_of_arrays(self):
tup = tuple(np.zeros(i + 1) for i in range(2))
tupty = typeof(tup)
pyfunc = unpack_arbitrary
cr = compile_isolated(pyfunc, (tupty,),
flags=no_pyobj_flags)
cfunc = cr.entry_point
self.assertPreciseEqual(cfunc(tup), pyfunc(tup))
def test_unpack_nrt(self):
pyfunc = unpack_nrt
cr = compile_isolated(pyfunc, (), flags=no_pyobj_flags)
cfunc = cr.entry_point
self.assertPreciseEqual(cfunc(), pyfunc())
def test_invalid_unpack(self):
pyfunc = unpack_arbitrary
with self.assertRaises(errors.TypingError) as raises:
compile_isolated(pyfunc, (types.int32,), flags=no_pyobj_flags)
self.assertIn("failed to unpack int32", str(raises.exception))
if __name__ == '__main__':
unittest.main()
|
2,786 |
diff lists
|
from django.utils.translation import gettext as _
from corehq.apps.fixtures.exceptions import FixtureUploadError
from corehq.apps.fixtures.upload.failure_messages import FAILURE_MESSAGES
from corehq.apps.fixtures.upload.workbook import get_workbook
from corehq.apps.fixtures.utils import get_fields_without_attributes
from corehq.util.workbook_json.excel import JSONReaderError, WorksheetNotFound
def validate_fixture_file_format(file_or_filename):
"""
Does basic validation on the uploaded file. Raises a FixtureUploadError if
something goes wrong.
"""
workbook = get_workbook(file_or_filename)
workbook.get_types_sheet()
error_messages = _validate_fixture_upload(workbook)
if error_messages:
raise FixtureUploadError(error_messages)
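# Typical usage (sketch; 'fixtures.xlsx' is a hypothetical upload):
#   try:
#       validate_fixture_file_format('fixtures.xlsx')
#   except FixtureUploadError as e:
#       errors = e.errors  # the collected validation messages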
def _validate_fixture_upload(workbook):
try:
type_sheets = workbook.get_all_type_sheets()
except FixtureUploadError as e:
return e.errors
except JSONReaderError as e:
return e.args
error_messages = []
for table_number, table_def in enumerate(type_sheets):
tag = table_def.table_id
fields = table_def.fields
item_attributes = table_def.item_attributes
try:
data_items = workbook.get_data_sheet(tag)
except WorksheetNotFound:
error_messages.append(_(FAILURE_MESSAGES['type_has_no_sheet']).format(type=tag))
continue
try:
data_item = next(iter(data_items))
except StopIteration:
continue
else:
# Check that type definitions in 'types' sheet vs corresponding columns in the item-sheet MATCH
item_fields_list = list(data_item['field']) if 'field' in data_item else []
not_in_sheet, not_in_types = METHOD_NAME(item_fields_list, get_fields_without_attributes(fields))
for missing_field in not_in_sheet:
error_messages.append(
_(FAILURE_MESSAGES["has_no_field_column"])
.format(tag=tag, field=missing_field))
for missing_field in not_in_types:
error_messages.append(
_(FAILURE_MESSAGES["has_extra_column"])
.format(tag=tag, field=missing_field))
# check that this item has all the properties listed in its 'types' definition
item_attributes_list = list(data_item['property']) if 'property' in data_item else []
not_in_sheet, not_in_types = METHOD_NAME(item_attributes_list, item_attributes)
for missing_field in not_in_sheet:
error_messages.append(
_(FAILURE_MESSAGES["has_no_field_column"])
.format(tag=tag, field=missing_field))
for missing_field in not_in_types:
error_messages.append(
_(FAILURE_MESSAGES["has_extra_column"])
.format(tag=tag, field=missing_field))
# check that properties in 'types' sheet vs item-sheet MATCH
for field in fields:
if len(field.properties) > 0:
sheet_props = data_item.get(field.field_name, {})
if not isinstance(sheet_props, dict):
error_messages.append(
_(FAILURE_MESSAGES["invalid_field_syntax"])
.format(tag=tag, field=field.field_name))
continue
sheet_props_list = list(sheet_props)
type_props = field.properties
not_in_sheet, not_in_types = METHOD_NAME(sheet_props_list, type_props)
for missing_property in not_in_sheet:
error_messages.append(
_(FAILURE_MESSAGES["sheet_has_no_property"])
.format(tag=tag, property=missing_property, field=field.field_name))
for missing_property in not_in_types:
error_messages.append(
_(FAILURE_MESSAGES["sheet_has_extra_property"])
.format(tag=tag, property=missing_property, field=field.field_name))
# check that fields with properties are numbered
if type(data_item.get('field', {}).get(field.field_name, None)) != list:
error_messages.append(
_(FAILURE_MESSAGES["invalid_field_with_property"])
.format(field=field.field_name))
try:
field_prop_len = len(data_item['field'][field.field_name])
except (TypeError, KeyError):
field_prop_len = None
for prop in sheet_props:
if type(sheet_props[prop]) != list:
error_messages.append(
_(FAILURE_MESSAGES["invalid_property"])
.format(field=field.field_name, prop=prop))
try:
props_len = len(sheet_props[prop])
except TypeError:
pass
else:
if field_prop_len is not None and props_len != field_prop_len:
error_messages.append(
_(FAILURE_MESSAGES["wrong_field_property_combos"])
.format(field=field.field_name, prop=prop))
return error_messages
def METHOD_NAME(list_a, list_b):
set_a = set(list_a)
set_b = set(list_b)
not_in_b = set_a.difference(set_b)
not_in_a = set_b.difference(set_a)
return sorted(not_in_a), sorted(not_in_b)
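# Example (illustrative values): comparing sheet columns against type fields,
#   METHOD_NAME(['name', 'age'], ['age', 'dob']) returns (['dob'], ['name'])
# i.e. first the entries missing from the first list, then those missing from the second.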
|
2,787 |
modify internal dictionary
|
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on Jul 09, 2015
@author: tompjame
"""
from __future__ import division, print_function, unicode_literals, absolute_import
import os
import re
import collections
class BISONMESHSCRIPTparser():
"""
Import Bison Mesh Script input, provide methods to add/change entries and print input back
"""
def __init__(self,inputFile):
"""
Open and read file content into an ordered dictionary
@ In, inputFile, File object, object with information about the template input file
@ Out, None
"""
self.printTag = 'BISONMESHSCRIPT_PARSER'
if not os.path.exists(inputFile.getAbsFile()):
raise IOError('Input file not found: '+inputFile.getAbsFile())
# Initialize file dictionary, storage order, and internal variables
self.AllVarDict = collections.OrderedDict()
self.fileOrderStorage = []
quote_comment = False
quote_comment_line = False
apostrophe_comment = False
apostrophe_comment_line = False
between_str = ''
# Open file
self.inputfile = inputFile.getAbsFile()
# self.keywordDictionary dictionary
for line in inputFile:
if '"""' in line or "'''" in line:
if '"""' in line and quote_comment == True:
quote_comment_line = True
splitline = line.split('"""')
between_str += splitline[0] + '"""'
line = splitline[1]
quote_comment = not quote_comment
elif '"""' in line and quote_comment == False:
quote_comment_line = True
splitline = line.split('"""')
line = splitline[0]
quote_comment = not quote_comment
elif "'''" in line and apostrophe_comment == True:
apostrophe_comment_line = True
splitline = line.split("'''")
between_str += splitline[0] + "'''"
line = splitline[1]
apostrophe_comment = not apostrophe_comment
elif "'''" in line and apostrophe_comment == False:
apostrophe_comment_line = True
splitline = line.split("'''")
line = splitline[0]
apostrophe_comment = not apostrophe_comment
# parse stuff that is left over on the line
if len(line) == 0:
between_str += line
elif line.isspace():
between_str += line
elif line.startswith('#'):
between_str += line
elif '{}' in line:
between_str += line
elif line.startswith('pellets'):
between_str += line
else:
# Append string of non-varying parts of input file to file storage and reset the collection string
if len(between_str) > 0:
self.fileOrderStorage.append(between_str)
between_str = ''
dictname, varname, varvalue = re.split(r"\['|'] = |'] =|']= ", line)
if dictname in self.AllVarDict.keys():
self.AllVarDict[dictname][varname] = varvalue.strip()
else:
self.fileOrderStorage.append([dictname])
self.AllVarDict[dictname] = collections.OrderedDict()
self.AllVarDict[dictname][varname] = varvalue.strip()
# Add comment contents to storage for ''' or """ that starts comment block after code on same line
if quote_comment_line == True and quote_comment == True:
between_str += '"""' + splitline[1]
elif apostrophe_comment_line == True and apostrophe_comment == True:
between_str += "'''" + splitline[1]
quote_comment_line = False
apostrophe_comment_line = False
else:
# Didn't find a comment block flag
if quote_comment == True or apostrophe_comment == True:
between_str += line
continue
else:
# Outside of comment block (in code)
if len(line) == 0:
between_str += line
elif line.isspace():
between_str += line
elif line.startswith('#'):
between_str += line
elif '{}' in line:
between_str += line
elif line.startswith('pellets'):
between_str += line
else:
# Append string of non-varying parts of input file to file storage and reset the collection string
if len(between_str) > 0:
self.fileOrderStorage.append(between_str)
between_str = ''
dictname, varname, varvalue = re.split(r"\['|'] = |'] =|']= ", line)
if dictname in self.AllVarDict.keys():
self.AllVarDict[dictname][varname] = varvalue.strip()
else:
self.fileOrderStorage.append([dictname])
self.AllVarDict[dictname] = collections.OrderedDict()
self.AllVarDict[dictname][varname] = varvalue.strip()
if len(between_str) > 0:
self.fileOrderStorage.append(between_str)
def METHOD_NAME(self,**inDictionary):
"""
Parse the input dictionary and replace matching keywords in internal dictionary.
@ In, inDictionary, dict, dictionary containing full longform name and raven sampled var value
@ Out, None
"""
for keyword, newvalue in inDictionary.items():
keyword1, keyword2 = keyword.split('@')[-1].split('|')
self.AllVarDict[keyword1][keyword2] = newvalue
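  # Example (hypothetical keyword, mirroring the '@'/'|' split above): an entry
  # such as {'Kwargs@pellets_dict|pellet_height': 0.012} updates
  # self.AllVarDict['pellets_dict']['pellet_height'] to 0.012.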
def writeNewInput(self,outfile=None):
"""
Using the fileOrderStorage list, reconstruct the template input with modified keywordDictionary
@ In, outfile, string, optional, output file name
@ Out, None
"""
if outfile==None:
outfile = self.inputfile
with open(outfile,'w') as IOfile:
for e, entry in enumerate(self.fileOrderStorage):
        if isinstance(entry, str):
          IOfile.writelines(entry)
        elif isinstance(entry, list):
DictBlockName = self.fileOrderStorage[e][0]
DictBlock = self.AllVarDict[DictBlockName]
for key, value in DictBlock.items():
IOfile.writelines(DictBlockName + "['" + key + "'] = " + str(value) + '\n')
|
2,788 |
is enabled
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from functools import partial
import logging
import aws.client
import azure.client
import desktop.lib.fs.gc.client
import desktop.lib.fs.ozone.client
from aws.conf import METHOD_NAME as is_s3_enabled, has_s3_access
from azure.conf import is_adls_enabled, is_abfs_enabled, has_adls_access, has_abfs_access
from desktop.conf import is_gs_enabled, has_gs_access, DEFAULT_USER, is_ofs_enabled, has_ofs_access, RAZ
from desktop.lib.fs.proxyfs import ProxyFS
from desktop.lib.python_util import current_ms_from_utc
from desktop.lib.idbroker import conf as conf_idbroker
from hadoop.cluster import get_hdfs, _make_filesystem
from hadoop.conf import has_hdfs_enabled
SUPPORTED_FS = ['hdfs', 's3a', 'adl', 'abfs', 'gs', 'ofs']
CLIENT_CACHE = None
_DEFAULT_USER = DEFAULT_USER.get()
# FIXME: Should we check hue principal for the default user?
# FIXME: Caching via username has issues when users get deleted. Need to switch to userid, but bigger change
def _get_cache_key(fs, identifier, user=_DEFAULT_USER):
return fs + ':' + identifier + ':' + str(user)
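# Example cache keys (illustrative values): _get_cache_key('s3a', 'default', 'alice')
# yields 's3a:default:alice'; without an explicit user the configured default user
# (commonly 'hue') is used instead.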
def clear_cache():
global CLIENT_CACHE
CLIENT_CACHE = None
def has_access(fs=None, user=None):
if fs == 'hdfs':
return True
elif fs == 'adl':
return has_adls_access(user)
elif fs == 's3a':
return has_s3_access(user)
elif fs == 'abfs':
return has_abfs_access(user)
elif fs == 'gs':
return has_gs_access(user)
elif fs == 'ofs':
return has_ofs_access(user)
def METHOD_NAME(fs):
if fs == 'hdfs':
return has_hdfs_enabled()
elif fs == 'adl':
return is_adls_enabled()
elif fs == 's3a':
return is_s3_enabled()
elif fs == 'abfs':
return is_abfs_enabled()
elif fs == 'gs':
return is_gs_enabled()
elif fs == 'ofs':
return is_ofs_enabled()
def is_enabled_and_has_access(fs=None, user=None):
return METHOD_NAME(fs) and has_access(fs, user)
def _make_client(fs, name, user):
if fs == 'hdfs':
return _make_filesystem(name)
elif fs == 's3a':
return aws.client._make_client(name, user)
elif fs == 'adl':
return azure.client._make_adls_client(name, user)
elif fs == 'abfs':
return azure.client._make_abfs_client(name, user)
elif fs == 'gs':
return desktop.lib.fs.gc.client._make_client(name, user)
elif fs == 'ofs':
return desktop.lib.fs.ozone.client._make_ofs_client(name, user)
return None
def _get_client(fs=None):
if fs == 'hdfs':
return get_hdfs
elif fs in ['s3a', 'adl', 'abfs', 'gs', 'ofs']:
return partial(_get_client_cached, fs)
return None
def _get_client_cached(fs, name, user):
global CLIENT_CACHE
if CLIENT_CACHE is None:
CLIENT_CACHE = {}
if (conf_idbroker.is_idbroker_enabled(fs) or RAZ.IS_ENABLED.get()):
cache_key = _get_cache_key(fs, name, user)
else:
    # By default, cache under the default Hue user key because there are no user-mapping scenarios like those in IDBroker or RAZ.
    # For FS like S3 and ABFS: the default case uses an access key and secret, which allows everyone to access everything.
    # For FS like HDFS and Ozone: user mapping is handled behind the scenes and we impersonate the user for them.
cache_key = _get_cache_key(fs, name)
client = CLIENT_CACHE.get(cache_key)
# Expiration from IDBroker returns java timestamp in MS
if client and (client.expiration is None or client.expiration > int(current_ms_from_utc())):
return client
else:
client = _make_client(fs, name, user)
CLIENT_CACHE[cache_key] = client
return client
def get_client(name='default', fs=None, user=_DEFAULT_USER):
fs_getter = _get_client(fs)
if fs_getter:
return fs_getter(name, user)
else:
logging.warn('Can not get filesystem called "%s" for "%s" schema' % (name, fs))
return None
def get_default_schema():
fs = [fs for fs in SUPPORTED_FS if METHOD_NAME(fs)]
return fs[0] if fs else None
def get_filesystem(name='default'):
"""
Return the filesystem with the given name.
If the filesystem is not defined, raises KeyError
"""
# Instead of taking a list of cached client, ProxyFS will now resolve the client based on scheme
# The method to resolve clients returns a cached results if possible.
pdict = {}
for fs in SUPPORTED_FS:
if METHOD_NAME(fs):
pdict[fs] = _get_client(fs)
return ProxyFS(pdict, get_default_schema(), name) if pdict else None
def get_filesystems(user):
return [fs for fs in SUPPORTED_FS if METHOD_NAME(fs) and has_access(fs, user)]
|
2,789 |
get roles and doctypes
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: MIT. See LICENSE
import frappe
import frappe.defaults
from frappe import _
from frappe.core.doctype.doctype.doctype import (
clear_permissions_cache,
validate_permissions_for_doctype,
)
from frappe.exceptions import DoesNotExistError
from frappe.modules.import_file import get_file_path, read_doc_from_file
from frappe.permissions import (
AUTOMATIC_ROLES,
add_permission,
get_all_perms,
get_linked_doctypes,
reset_perms,
setup_custom_perms,
update_permission_property,
)
from frappe.utils.user import get_users_with_role as _get_user_with_role
not_allowed_in_permission_manager = ["DocType", "Patch Log", "Module Def", "Transaction Log"]
@frappe.whitelist()
def METHOD_NAME():
frappe.only_for("System Manager")
active_domains = frappe.get_active_domains()
doctypes = frappe.get_all(
"DocType",
filters={
"istable": 0,
"name": ("not in", ",".join(not_allowed_in_permission_manager)),
},
or_filters={"ifnull(restrict_to_domain, '')": "", "restrict_to_domain": ("in", active_domains)},
fields=["name"],
)
restricted_roles = ["Administrator"]
if frappe.session.user != "Administrator":
custom_user_type_roles = frappe.get_all("User Type", filters={"is_standard": 0}, fields=["role"])
restricted_roles.extend(row.role for row in custom_user_type_roles)
restricted_roles.extend(AUTOMATIC_ROLES)
roles = frappe.get_all(
"Role",
filters={
"name": ("not in", restricted_roles),
"disabled": 0,
},
or_filters={"ifnull(restrict_to_domain, '')": "", "restrict_to_domain": ("in", active_domains)},
fields=["name"],
)
doctypes_list = [{"label": _(d.get("name")), "value": d.get("name")} for d in doctypes]
roles_list = [{"label": _(d.get("name")), "value": d.get("name")} for d in roles]
return {
"doctypes": sorted(doctypes_list, key=lambda d: d["label"].casefold()),
"roles": sorted(roles_list, key=lambda d: d["label"].casefold()),
}
@frappe.whitelist()
def get_permissions(doctype: str | None = None, role: str | None = None):
frappe.only_for("System Manager")
if role:
out = get_all_perms(role)
if doctype:
out = [p for p in out if p.parent == doctype]
else:
filters = {"parent": doctype}
if frappe.session.user != "Administrator":
custom_roles = frappe.get_all("Role", filters={"is_custom": 1}, pluck="name")
filters["role"] = ["not in", custom_roles]
out = frappe.get_all("Custom DocPerm", fields="*", filters=filters, order_by="permlevel")
if not out:
out = frappe.get_all("DocPerm", fields="*", filters=filters, order_by="permlevel")
linked_doctypes = {}
for d in out:
if d.parent not in linked_doctypes:
try:
linked_doctypes[d.parent] = get_linked_doctypes(d.parent)
except DoesNotExistError:
# exclude & continue if linked doctype is not found
frappe.clear_last_message()
continue
d.linked_doctypes = linked_doctypes[d.parent]
if meta := frappe.get_meta(d.parent):
d.is_submittable = meta.is_submittable
d.in_create = meta.in_create
return out
@frappe.whitelist()
def add(parent, role, permlevel):
frappe.only_for("System Manager")
add_permission(parent, role, permlevel)
@frappe.whitelist()
def update(doctype, role, permlevel, ptype, value=None):
"""Update role permission params
Args:
doctype (str): Name of the DocType to update params for
role (str): Role to be updated for, eg "Website Manager".
permlevel (int): perm level the provided rule applies to
ptype (str): permission type, example "read", "delete", etc.
value (None, optional): value for ptype, None indicates False
Returns:
str: Refresh flag is permission is updated successfully
"""
def clear_cache():
frappe.clear_cache(doctype=doctype)
frappe.only_for("System Manager")
out = update_permission_property(doctype, role, permlevel, ptype, value)
frappe.db.after_commit.add(clear_cache)
return "refresh" if out else None
@frappe.whitelist()
def remove(doctype, role, permlevel):
frappe.only_for("System Manager")
setup_custom_perms(doctype)
frappe.db.delete("Custom DocPerm", {"parent": doctype, "role": role, "permlevel": permlevel})
if not frappe.get_all("Custom DocPerm", {"parent": doctype}):
frappe.throw(_("There must be atleast one permission rule."), title=_("Cannot Remove"))
validate_permissions_for_doctype(doctype, for_remove=True, alert=True)
@frappe.whitelist()
def reset(doctype):
frappe.only_for("System Manager")
reset_perms(doctype)
clear_permissions_cache(doctype)
@frappe.whitelist()
def get_users_with_role(role):
frappe.only_for("System Manager")
return _get_user_with_role(role)
@frappe.whitelist()
def get_standard_permissions(doctype):
frappe.only_for("System Manager")
meta = frappe.get_meta(doctype)
if meta.custom:
doc = frappe.get_doc("DocType", doctype)
return [p.as_dict() for p in doc.permissions]
else:
# also used to setup permissions via patch
path = get_file_path(meta.module, "DocType", doctype)
return read_doc_from_file(path).get("permissions")
|
2,790 |
set status
|
# Copyright (C) 2012 Rocco Aliberti
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import logging
import operator
logger = logging.getLogger(__name__)
import os
from urllib.parse import urlparse
import http.client
import socket
import xml.etree.ElementTree as ETree
from xl import event, main, playlist, xdg
from xl.radio import RadioStation, RadioList, RadioItem
from xl.nls import gettext as _
from xlgui.panel import radio
STATION = None
def enable(exaile):
if exaile.loading:
event.add_callback(_enable, 'exaile_loaded')
else:
_enable(None, exaile, None)
def _enable(o1, exaile, o2):
global STATION
STATION = SomaFMRadioStation()
exaile.radio.add_station(STATION)
def disable(exaile):
global STATION
exaile.radio.remove_station(STATION)
STATION = None
def METHOD_NAME(message, timeout=0):
radio.METHOD_NAME(message, timeout)
class SomaFMRadioStation(RadioStation):
name = "somafm"
def __init__(self):
"""
Initializes the somafm radio station
"""
self.user_agent = main.exaile().get_user_agent_string('somafm')
self.somafm_url = 'https://somafm.com/'
self.channels_xml_url = self.somafm_url + 'channels.xml'
self.cache_file = os.path.join(xdg.get_cache_dir(), 'somafm.cache')
self.channellist = ''
self.data = {}
self._load_cache()
self.subs = {}
self.playlists = {}
self.playlist_id = 0
logger.debug(self.user_agent)
def get_document(self, url):
"""
Connects to the server and retrieves the document
"""
METHOD_NAME(_('Contacting SomaFM server...'))
hostinfo = urlparse(url)
try:
c = http.client.HTTPConnection(hostinfo.netloc, timeout=20)
except TypeError:
c = http.client.HTTPConnection(hostinfo.netloc)
try:
c.request('GET', hostinfo.path, headers={'User-Agent': self.user_agent})
response = c.getresponse()
except (socket.timeout, socket.error):
raise radio.RadioException(_('Error connecting to SomaFM server.'))
if response.status != 200:
raise radio.RadioException(_('Error connecting to SomaFM server.'))
document = response.read()
c.close()
METHOD_NAME('')
return document
def _load_cache(self):
"""
Loads somafm data from cache
"""
self.data = {}
if os.path.isfile(self.cache_file):
tree = ETree.parse(self.cache_file)
for channel in tree.findall('channel'):
self.data[channel.get("id")] = channel.get("name")
def _save_cache(self):
"""
Saves cache data
"""
channellist = ETree.Element('channellist')
for channel_id, channel_name in self.data.items():
ETree.SubElement(channellist, 'channel', id=channel_id, name=channel_name)
with open(self.cache_file, 'w') as h:
h.write('<?xml version="1.0" encoding="UTF-8"?>')
h.write(ETree.tostring(channellist, 'unicode'))
def get_lists(self, no_cache=False):
"""
Returns the rlists for somafm
"""
if no_cache or not self.data:
self.channellist = self.get_document(self.channels_xml_url)
data = {}
tree = ETree.fromstring(self.channellist)
for channel in tree.findall('channel'):
name = channel.find('title').text
data[channel.get("id")] = name
self.data = data
self._save_cache()
else:
data = self.data
rlists = []
for id, name in data.items():
rlist = RadioList(name, station=self)
rlist.get_items = lambda no_cache, id=id: self._get_subrlists(
id=id, no_cache=no_cache
)
rlists.append(rlist)
rlists.sort(key=operator.attrgetter('name'))
self.rlists = rlists
return rlists
def _get_subrlists(self, id, no_cache=False):
"""
Gets the subrlists for a rlist
"""
if no_cache or id not in self.subs:
rlists = self._get_stations(id)
rlists.sort(key=operator.attrgetter('name'))
self.subs[id] = rlists
return self.subs[id]
def _get_playlist(self, url, playlist_id):
"""
Gets the playlist for the given url and id
"""
if playlist_id not in self.playlists:
METHOD_NAME(_('Contacting SomaFM server...'))
try:
self.playlists[playlist_id] = playlist.import_playlist(url)
except Exception:
METHOD_NAME(_("Error importing playlist"))
logger.exception("Error importing playlist")
METHOD_NAME('')
return self.playlists[playlist_id]
def _get_stations(self, id):
if not self.channellist:
self.channellist = self.get_document(self.channels_xml_url)
tree = ETree.fromstring(self.channellist)
channel = tree.find('.//channel[@id="%s"]' % id)
plss = channel.findall('.//*[@format]')
rlists = []
for pls in plss:
type = pls.tag.replace('pls', '')
format = pls.attrib['format'].upper()
url = pls.text
display_name = format + " - " + type
rlist = RadioItem(display_name, station=self)
rlist.format = format
rlist.get_playlist = (
lambda url=url, playlist_id=self.playlist_id: self._get_playlist(
url, playlist_id
)
)
self.playlist_id += 1
rlists.append(rlist)
return rlists
def get_menu(self, parent):
return parent.get_menu()
|
2,791 |
get
|
# This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2008 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
Infrastructure to get local variables embedded in comments in a document.
"""
import re
from PyQt5.QtCore import QTimer
import document
import signals
import plugin
__all__ = ['get', 'update', 'manager', 'variables']
_variable_re = re.compile(r'\s*?([a-z]+(?:-[a-z]+)*):[ \t]*(.*?);')
_LINES = 5 # how many lines from top and bottom to scan for variables
def METHOD_NAME(doc, varname, default=None):
"""Get a single value from the document.
If a default is given and the type is bool or int, the value is converted to the same type.
If no value exists, the default is returned.
"""
variables = manager(doc).variables()
try:
return prepare(variables[varname], default)
except KeyError:
return default
def update(doc, dictionary):
"""Updates the given dictionary with values from the document, using present values as default."""
for name, value in manager(doc).variables().items():
if name in dictionary:
dictionary[name] = prepare(value, dictionary[name])
return dictionary
def manager(doc):
"""Returns a VariableManager for this document."""
return VariableManager.instance(doc)
def variables(text):
"""Reads variables from the first and last _LINES lines of text."""
lines = text.splitlines()
start, count = 0, len(lines)
d = {}
if count > 2 * _LINES:
d.update(m.group(1, 2) for n, m in positions(lines[:_LINES]))
start = count - _LINES
d.update(m.group(1, 2) for n, m in positions(lines[start:]))
return d
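# A minimal illustration (not part of the original module): a document line using the
# "-*- name: value;" convention scanned above, and what variables() would yield for a
# text containing it. The comment character and variable names are placeholder examples.
#
#     text = "% -*- coding: utf-8; tab-width: 4;\n...rest of document..."
#     variables(text)  # -> {'coding': 'utf-8', 'tab-width': '4'}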
class VariableManager(plugin.DocumentPlugin):
"""Caches variables in the document and monitors for changes.
The changed() Signal is emitted some time after the list of variables has been changed.
It is recommended to not change the document itself in response to this signal.
"""
changed = signals.Signal() # without argument
def __init__(self, doc):
self._updateTimer = QTimer(singleShot=True, timeout=self.slotTimeout)
self._variables = self.readVariables()
if doc.__class__ == document.EditorDocument:
doc.contentsChange.connect(self.slotContentsChange)
doc.closed.connect(self._updateTimer.stop) # just to be sure
def slotTimeout(self):
variables = self.readVariables()
if variables != self._variables:
self._variables = variables
self.changed()
def slotContentsChange(self, position, removed, added):
"""Called if the document changes."""
if (self.document().findBlock(position).blockNumber() < _LINES or
self.document().findBlock(position + added).blockNumber() > self.document().blockCount() - _LINES):
self._updateTimer.start(500)
def variables(self):
"""Returns the document variables (cached) as a dictionary. This method is recommended."""
if self._updateTimer.isActive():
# an update is pending, force it
self._updateTimer.stop()
self.slotTimeout()
return self._variables
def readVariables(self):
"""Reads the variables from the document and returns a dictionary. Internal."""
count = self.document().blockCount()
blocks = [self.document().firstBlock()]
if count > _LINES * 2:
blocks.append(self.document().findBlockByNumber(count - _LINES))
count = _LINES
def lines(block):
for i in range(count):
yield block.text()
block = block.next()
variables = {}
for block in blocks:
variables.update(m.group(1, 2) for n, m in positions(lines(block)))
return variables
def positions(lines):
"""Lines should be an iterable returning lines of text.
Returns an iterable yielding tuples (lineNum, matchObj) for every variable found.
Every matchObj has group(1) pointing to the variable name and group(2) to the value.
"""
commentstart = ''
interesting = False
for lineNum, text in enumerate(lines):
# first check the line start
start = 0
if interesting:
# already parsing? then skip comment start tokens
m = re.match(fr'\s*{re.escape(commentstart)}', text)
if m:
start = m.end()
else:
# does the line have '-*-' ?
m = re.search(r'(\S*)\s*-\*-', text)
if m:
interesting = True
commentstart = m.group(1)
start = m.end()
# now parse the line
if interesting:
while True:
m = _variable_re.match(text, start)
if m:
yield lineNum, m
start = m.end()
else:
if start < len(text) and not text[start:].isspace():
interesting = False
break
def prepare(value, default):
"""Try to convert the value (which is a string) to the type of the default value.
If (for int and bool) that fails, returns the default, otherwise returns the string unchanged.
"""
if isinstance(default, bool):
if value.lower() in ('true', 'yes', 'on', 't', '1'):
return True
elif value.lower() in ('false', 'no', 'off', 'f', '0'):
return False
return default
elif isinstance(default, int):
try:
return int(value)
except ValueError:
return default
return value
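# A minimal illustration (not part of the original module) of how prepare() coerces a
# string toward the type of the supplied default:
#
#     prepare("yes", False)   # -> True  (recognised truthy token for a bool default)
#     prepare("maybe", True)  # -> True  (unrecognised token falls back to the default)
#     prepare("72", 0)        # -> 72    (int default, conversion succeeds)
#     prepare("7a", 0)        # -> 0     (conversion fails, default returned)
#     prepare("a4", "")       # -> "a4"  (other defaults: the string is returned as-is)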
|
2,792 |
test smart component regular flipped x
|
# coding=UTF-8
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from fontTools.pens.areaPen import AreaPen
from glyphsLib import to_ufos, load
from glyphsLib.classes import (
GSFont,
GSFontMaster,
GSGlyph,
GSLayer,
GSPath,
GSNode,
GSSmartComponentAxis,
GSComponent,
)
# https://glyphsapp.com/tutorials/smart-components
@pytest.fixture
def smart_font():
"""Make a font with a smart component in the shape of a rectangle."""
font = GSFont()
master = GSFontMaster()
font.masters.append(master)
rectangle = GSGlyph()
rectangle.name = "_part.rectangle"
# Could also be rectangle.name = '_smart.rectangle'
font.glyphs.append(rectangle)
# Three axes
width = GSSmartComponentAxis()
width.name = "Width"
# This one is easy 0-1
width.bottomValue = 0.0
width.topValue = 1.0
rectangle.smartComponentAxes.append(width)
height = GSSmartComponentAxis()
height.name = "Height"
# This one is off the origin
height.bottomValue = 100
height.topValue = 500
rectangle.smartComponentAxes.append(height)
shift = GSSmartComponentAxis()
shift.name = "Shift"
# This one has negative values
shift.bottomValue = -100
shift.topValue = 0
rectangle.smartComponentAxes.append(shift)
# Four masters
regular = GSLayer()
regular.layerId = font.masters[0].id
regular.associatedMasterId = font.masters[0].id
regular.width = 300
rectangle.layers.append(regular)
regular.paths.append(rectangle_path(100, 100, 100, 100))
regular.smartComponentPoleMapping["Width"] = 1 # 1 is bottom pole
regular.smartComponentPoleMapping["Height"] = 1
regular.smartComponentPoleMapping["Shift"] = 2 # 2 is the top pole
wide = GSLayer()
wide.name = "Wide"
wide.layerId = "wide"
wide.associatedMasterId = font.masters[0].id
wide.width = 700
rectangle.layers.append(wide)
wide.paths.append(rectangle_path(100, 100, 500, 100))
wide.smartComponentPoleMapping["Width"] = 2
wide.smartComponentPoleMapping["Height"] = 1
wide.smartComponentPoleMapping["Shift"] = 2
tall = GSLayer()
tall.name = "Tall"
tall.layerId = "tall"
tall.associatedMasterId = font.masters[0].id
tall.width = 300
rectangle.layers.append(tall)
tall.paths.append(rectangle_path(100, 100, 100, 500))
tall.smartComponentPoleMapping["Width"] = 1
tall.smartComponentPoleMapping["Height"] = 2
tall.smartComponentPoleMapping["Shift"] = 2
shifted = GSLayer()
shifted.name = "Shifted"
shifted.layerId = "shifted"
shifted.associatedMasterId = font.masters[0].id
shifted.width = 100
rectangle.layers.append(shifted)
shifted.paths.append(rectangle_path(0, 0, 100, 100))
shifted.smartComponentPoleMapping["Width"] = 1
shifted.smartComponentPoleMapping["Height"] = 1
shifted.smartComponentPoleMapping["Shift"] = 1
# Also add a normal glyph in which we can instantiate the component
a = GSGlyph()
a.name = "a"
font.glyphs.append(a)
regular = GSLayer()
regular.layerId = font.masters[0].id
regular.associatedMasterId = font.masters[0].id
regular.width = 1000
a.layers.append(regular)
component = GSComponent(rectangle.name)
component.smartComponentValues = {}
regular.components.append(component)
return font
def rectangle_path(x, y, w, h):
path = GSPath()
# draw a rect counter-clockwise (to the right, top, left and close)
path.nodes.append(GSNode((x, y)))
path.nodes.append(GSNode((x + w, y)))
path.nodes.append(GSNode((x + w, y + h)))
path.nodes.append(GSNode((x, y + h)))
return path
@pytest.mark.parametrize(
"values,expected_rect",
[
# Eight corners
({"Width": 0, "Height": 100, "Shift": 0}, (100, 100, 100, 100)),
({"Width": 1, "Height": 100, "Shift": 0}, (100, 100, 500, 100)),
({"Width": 0, "Height": 500, "Shift": 0}, (100, 100, 100, 500)),
({"Width": 1, "Height": 500, "Shift": 0}, (100, 100, 500, 500)),
({"Width": 0, "Height": 100, "Shift": -100}, (0, 0, 100, 100)),
({"Width": 1, "Height": 100, "Shift": -100}, (0, 0, 500, 100)),
({"Width": 0, "Height": 500, "Shift": -100}, (0, 0, 100, 500)),
({"Width": 1, "Height": 500, "Shift": -100}, (0, 0, 500, 500)),
# Some points in the middle
({"Width": 0.5, "Height": 300, "Shift": -50}, (50, 50, 300, 300)),
# Extrapolation
({"Width": 0, "Height": 800, "Shift": 0}, (100, 100, 100, 800)),
],
)
def test_smart_component_regular(values, expected_rect, smart_font):
component = smart_font.glyphs["a"].layers[0].components[0]
for key, value in values.items():
component.smartComponentValues[key] = value
(ufo,) = to_ufos(smart_font)
rect, clockwise = get_rectangle_data(ufo)
assert rect == expected_rect
assert not clockwise
@pytest.mark.parametrize(
"values,expected_rect",
[
# Eight corners
({"Width": 0, "Height": 100, "Shift": 0}, (-200, 100, 100, 100)),
({"Width": 1, "Height": 100, "Shift": 0}, (-600, 100, 500, 100)),
({"Width": 0, "Height": 500, "Shift": 0}, (-200, 100, 100, 500)),
({"Width": 1, "Height": 500, "Shift": 0}, (-600, 100, 500, 500)),
({"Width": 0, "Height": 100, "Shift": -100}, (-100, 0, 100, 100)),
({"Width": 1, "Height": 100, "Shift": -100}, (-500, 0, 500, 100)),
({"Width": 0, "Height": 500, "Shift": -100}, (-100, 0, 100, 500)),
({"Width": 1, "Height": 500, "Shift": -100}, (-500, 0, 500, 500)),
# Some points in the middle
({"Width": 0.5, "Height": 300, "Shift": -50}, (-350, 50, 300, 300)),
# Extrapolation
({"Width": 0, "Height": 800, "Shift": 0}, (-200, 100, 100, 800)),
],
)
def METHOD_NAME(values, expected_rect, smart_font):
# same as test_smart_component_regular but with transform that flips x
component = smart_font.glyphs["a"].layers[0].components[0]
component.transform[0] = -1.0
for key, value in values.items():
component.smartComponentValues[key] = value
(ufo,) = to_ufos(smart_font)
rect, clockwise = get_rectangle_data(ufo)
assert rect == expected_rect
# after decomposing the flipped component, the original counter-clockwise
# path direction should not change
assert not clockwise
def get_rectangle_data(ufo, glyph_name="a"):
"""Retrieve the results of the smart component interpolation."""
a = ufo[glyph_name]
contour = a[0]
left = min(node.x for node in contour)
right = max(node.x for node in contour)
top = max(node.y for node in contour)
bottom = min(node.y for node in contour)
pen = AreaPen(ufo)
contour.draw(pen)
clockwise = pen.value < 0
return (left, bottom, right - left, top - bottom), clockwise
def test_smarts_with_one_master(datadir, ufo_module):
file = "DumbSmarts.glyphs"
with open(str(datadir.join(file)), encoding="utf-8") as f:
original_glyphs_font = load(f)
ufos = to_ufos(original_glyphs_font, ufo_module=ufo_module)
assert len(ufos[0]["lam-ar.swsh"].components) == 1
assert len(ufos[0]["lam-ar.swsh"]) == 1
assert len(ufos[1]["lam-ar.swsh"].components) == 1
assert len(ufos[1]["lam-ar.swsh"]) == 1
|
2,793 |
expand
|
from random import random
from panda3d.core import Point3
from direct.task.TaskManagerGlobal import taskMgr
from direct.gui import DirectGuiGlobals
from direct.gui.DirectGui import DirectButton, DirectDialog, DirectEntry, DirectFrame, YesNoDialog
def test_DirectGui(base):
# EXAMPLE CODE
# Load a model
smiley = base.loader.loadModel('models/misc/smiley')
# Here we specify the button's command
def dummyCmd(index):
print('Button %d POW!!!!' % index)
# Define some commands to bind to enter, exit and click events
def shrink(db):
db['text2_text'] = 'Hi!'
taskMgr.remove('shrink')
taskMgr.remove('expand')
# Get a handle on the geometry for the rollover state
rolloverSmiley = db.component('geom2')
rolloverSmiley.setScale(db.component('geom0').getScale()[0])
rolloverSmiley.lerpScale(.1, .1, .1, 1.0, blendType = 'easeInOut',
task = 'shrink')
def METHOD_NAME(db):
db['text0_text'] = 'Bye!'
taskMgr.remove('shrink')
taskMgr.remove('expand')
db.component('geom0').setScale(db.component('geom2').getScale()[0])
db.component('geom0').lerpScale(1, 1, 1, 1, blendType = 'easeInOut',
task = 'expand')
db.component('geom2').clearColor()
def ouch(db):
taskMgr.remove('shrink')
taskMgr.remove('expand')
taskMgr.remove('runAway')
db.component('geom0').setScale(db.component('geom2').getScale()[0])
db.component('geom1').setScale(db.component('geom2').getScale()[0])
db['text2_text'] = 'Ouch!'
db['geom2_color'] = (1, 0, 0, 1)
newX = -1.0 + random() * 2.0
newZ = -1.0 + random() * 2.0
db.lerpPos(Point3(newX, 0, newZ), 1.0, task = 'runAway',
blendType = 'easeOut')
dl = DirectFrame(image = 'models/maps/noise.rgb')
dl.setScale(.5)
# Create a button with a background image, smiley as a geometry element,
# and a text overlay, set a different text for the four button states:
# (normal, press, rollover, and disabled), set scale = .15, and relief raised
dbArray = []
for i in range(10):
db = DirectButton(parent = dl,
image = 'models/maps/noise.rgb',
geom = smiley,
text = ('Hi!', 'Ouch!', 'Bye!', 'ZZZZ!'),
scale = .15, relief = 'raised',
# Here we set an option for a component of the button
geom1_color = (1, 0, 0, 1),
# Here is an example of a component group option
text_pos = (.6, -.8),
# Set audio characteristics
clickSound = DirectGuiGlobals.getDefaultClickSound(),
rolloverSound = DirectGuiGlobals.getDefaultRolloverSound()
)
# You can set component or component group options after a gui item
# has been created
db['text_scale'] = 0.5
db['command'] = lambda i = i: dummyCmd(i)
# Bind the commands
db.bind(DirectGuiGlobals.ENTER, lambda x, db = db: shrink(db))
db.bind(DirectGuiGlobals.EXIT, lambda x, db = db: METHOD_NAME(db))
db.bind(DirectGuiGlobals.B1PRESS, lambda x, db = db: ouch(db))
# Pop up placer when button 3 is pressed
db.bind(DirectGuiGlobals.B3PRESS, lambda x, db = db: db.place())
dbArray.append(db)
# To get rid of button and clear out hooks call:
# db.destroy()
# DIRECT ENTRY EXAMPLE
def printEntryText(text):
print('Text: %s' % (text))
# Here we create an entry, and specify everything up front
# CALL de1.get() and de1.set('new text') to get and set entry contents
de1 = DirectEntry(initialText = 'Hello, how are you?',
image = 'models/maps/noise.rgb',
image_pos = (4.55, 0, -2.55),
image_scale = (5.5, 1, 4),
command = printEntryText,
pos = (-1.1875, 0, 0.879167),
scale = 0.0707855,
cursorKeys = 1,
)
# DIRECT DIALOG EXAMPLE
def printDialogValue(value):
print('Value: %s' % (value))
simpleDialog = YesNoDialog(text = 'Simple',
command = printDialogValue)
customValues = YesNoDialog(text = 'Not Quite So Simple',
buttonValueList = ['Yes', 'No'],
command = printDialogValue)
fancyDialog = YesNoDialog(text = 'Testing Direct Dialog',
geom = smiley,
geom_scale = .1,
geom_pos = (-0.3, 0, 0),
command = printDialogValue)
customDialog = DirectDialog(text = 'Pick a number',
buttonTextList = [str(i) for i in range(10)],
buttonValueList = range(10),
command = printDialogValue)
# NOTE: There are some utility functions which help you get size
# of a direct gui widget. These can be used to position and scale an
# image after you've created the entry. scale = (width/2, 1, height/2)
print('BOUNDS: %s' % de1.getBounds())
print('WIDTH: %s' % de1.getWidth())
print('HEIGHT: %s' % de1.getHeight())
print('CENTER: %s' % (de1.getCenter(),))
|
2,794 |
test package resource access
|
# Owner(s): ["oncall: package/deploy"]
from io import BytesIO
from sys import version_info
from textwrap import dedent
from unittest import skipIf
from torch.package import PackageExporter, PackageImporter
from torch.testing._internal.common_utils import run_tests
try:
from .common import PackageTestCase
except ImportError:
# Support the case where we run this file directly.
from common import PackageTestCase
@skipIf(version_info < (3, 7), "ResourceReader API introduced in Python 3.7")
class TestResources(PackageTestCase):
"""Tests for access APIs for packaged resources."""
def test_resource_reader(self):
"""Test compliance with the get_resource_reader importlib API."""
buffer = BytesIO()
with PackageExporter(buffer) as pe:
# Layout looks like:
# package
# ├── one/
# │ ├── a.txt
# │ ├── b.txt
# │ ├── c.txt
# │ └── three/
# │ ├── d.txt
# │ └── e.txt
# └── two/
# ├── f.txt
# └── g.txt
pe.save_text("one", "a.txt", "hello, a!")
pe.save_text("one", "b.txt", "hello, b!")
pe.save_text("one", "c.txt", "hello, c!")
pe.save_text("one.three", "d.txt", "hello, d!")
pe.save_text("one.three", "e.txt", "hello, e!")
pe.save_text("two", "f.txt", "hello, f!")
pe.save_text("two", "g.txt", "hello, g!")
buffer.seek(0)
importer = PackageImporter(buffer)
reader_one = importer.get_resource_reader("one")
with self.assertRaises(FileNotFoundError):
reader_one.resource_path("a.txt")
self.assertTrue(reader_one.is_resource("a.txt"))
self.assertEqual(reader_one.open_resource("a.txt").getbuffer(), b"hello, a!")
self.assertFalse(reader_one.is_resource("three"))
reader_one_contents = list(reader_one.contents())
self.assertSequenceEqual(
reader_one_contents, ["a.txt", "b.txt", "c.txt", "three"]
)
reader_two = importer.get_resource_reader("two")
self.assertTrue(reader_two.is_resource("f.txt"))
self.assertEqual(reader_two.open_resource("f.txt").getbuffer(), b"hello, f!")
reader_two_contents = list(reader_two.contents())
self.assertSequenceEqual(reader_two_contents, ["f.txt", "g.txt"])
reader_one_three = importer.get_resource_reader("one.three")
self.assertTrue(reader_one_three.is_resource("d.txt"))
self.assertEqual(
reader_one_three.open_resource("d.txt").getbuffer(), b"hello, d!"
)
reader_one_three_contents = list(reader_one_three.contents())
self.assertSequenceEqual(reader_one_three_contents, ["d.txt", "e.txt"])
self.assertIsNone(importer.get_resource_reader("nonexistent_package"))
def METHOD_NAME(self):
"""Packaged modules should be able to use the importlib.resources API to access
resources saved in the package.
"""
mod_src = dedent(
"""\
import importlib.resources
import my_cool_resources
def secret_message():
return importlib.resources.read_text(my_cool_resources, 'sekrit.txt')
"""
)
buffer = BytesIO()
with PackageExporter(buffer) as pe:
pe.save_source_string("foo.bar", mod_src)
pe.save_text("my_cool_resources", "sekrit.txt", "my sekrit plays")
buffer.seek(0)
importer = PackageImporter(buffer)
self.assertEqual(
importer.import_module("foo.bar").secret_message(), "my sekrit plays"
)
def test_importer_access(self):
buffer = BytesIO()
with PackageExporter(buffer) as he:
he.save_text("main", "main", "my string")
he.save_binary("main", "main_binary", b"my string")
src = dedent(
"""\
import importlib
import torch_package_importer as resources
t = resources.load_text('main', 'main')
b = resources.load_binary('main', 'main_binary')
"""
)
he.save_source_string("main", src, is_package=True)
buffer.seek(0)
hi = PackageImporter(buffer)
m = hi.import_module("main")
self.assertEqual(m.t, "my string")
self.assertEqual(m.b, b"my string")
def test_resource_access_by_path(self):
"""
Tests that packaged code can use importlib.resources.path.
"""
buffer = BytesIO()
with PackageExporter(buffer) as he:
he.save_binary("string_module", "my_string", b"my string")
src = dedent(
"""\
import importlib.resources
import string_module
with importlib.resources.path(string_module, 'my_string') as path:
with open(path, mode='r', encoding='utf-8') as f:
s = f.read()
"""
)
he.save_source_string("main", src, is_package=True)
buffer.seek(0)
hi = PackageImporter(buffer)
m = hi.import_module("main")
self.assertEqual(m.s, "my string")
if __name__ == "__main__":
run_tests()
|
2,795 |
print usage
|
# Copyright (c) 2012-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
"""
Defi base58 encoding and decoding.
Based on https://bitcointalk.org/index.php?topic=1026.0 (public domain)
"""
import hashlib
import sys
# for compatibility with following code...
class SHA256:
new = hashlib.sha256
if str != bytes:
# Python 3.x
def ord(c):
return c
def chr(n):
return bytes((n,))
__b58chars = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"
__b58base = len(__b58chars)
b58chars = __b58chars
__unusedChars = "0OIl"
def b58encode(v):
"""encode v, which is a string of bytes, to base58."""
long_value = 0
for i, c in enumerate(v[::-1]):
if isinstance(c, str):
c = ord(c)
long_value += (256**i) * c
result = ""
while long_value >= __b58base:
div, mod = divmod(long_value, __b58base)
result = __b58chars[mod] + result
long_value = div
result = __b58chars[long_value] + result
# Defi does a little leading-zero-compression:
# leading 0-bytes in the input become leading-1s
nPad = 0
for c in v:
if c == 0:
nPad += 1
else:
break
return (__b58chars[0] * nPad) + result
def b58decode(v, length=None):
"""decode v into a string of len bytes"""
long_value = 0
for i, c in enumerate(v[::-1]):
pos = __b58chars.find(c)
assert pos != -1
long_value += pos * (__b58base**i)
result = bytes()
while long_value >= 256:
div, mod = divmod(long_value, 256)
result = chr(mod) + result
long_value = div
result = chr(long_value) + result
nPad = 0
for c in v:
if c == __b58chars[0]:
nPad += 1
continue
break
result = bytes(nPad) + result
if length is not None and len(result) != length:
return None
return result
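# A minimal illustration (not part of the original script): a round trip through the
# encoder/decoder above. The payload bytes are arbitrary example data.
#
#     encoded = b58encode(b"\x00\x01\x02")   # the leading zero byte becomes a leading '1'
#     assert b58decode(encoded, length=3) == b"\x00\x01\x02"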
def checksum(v):
"""Return 32-bit checksum based on SHA256"""
return SHA256.new(SHA256.new(v).digest()).digest()[0:4]
def b58encode_chk(v):
"""b58encode a string, with 32-bit checksum"""
return b58encode(v + checksum(v))
def b58decode_chk(v):
"""decode a base58 string, check and remove checksum"""
result = b58decode(v)
if result is None:
print("result is none")
return None
print("decode result: ", bytes(result).hex())
checksumResult = checksum(result[:-4])
if result[-4:] == checksumResult:
return result[:-4]
else:
print(bytes(result[-4:]).hex(), "\n")
print("check sum result: ", bytes(checksumResult).hex())
return None
def get_bcaddress_version(strAddress):
"""Returns None if strAddress is invalid. Otherwise returns integer version of address."""
addr = b58decode_chk(strAddress)
if addr is None or len(addr) != 21:
return None
version = addr[0]
return ord(version)
def METHOD_NAME():
print("Usage of this script(Need python3):")
print("python3 " + sys.argv[0] + " AddressStartString")
print("Mainnet address start with string from 8F ~ 8d")
print("Testnet address start with string from 73 ~ 7R")
print("Regtest address start with string from mf ~ n4")
print("The address start string cannot contain these characters: 0OIl")
print("For example:")
print(" python3 gen_burn_addr.py 8addressForBurn")
print(" python3 gen_burn_addr.py 7AddressForBurn")
def check_start_range(fst2):
if fst2 >= "73" and fst2 <= "7R":
return True
if fst2 >= "8F" and fst2 <= "8d":
return True
if fst2 >= "mf" and fst2 <= "n4":
return True
return False
if __name__ == "__main__":
# Check our input parameters for this script
if len(sys.argv) < 2:
METHOD_NAME()
sys.exit(0)
if len(sys.argv) > 2:
print("Too many input arguments!")
METHOD_NAME()
sys.exit(0)
if sys.argv[1] == "-h" or sys.argv[1] == "--help":
METHOD_NAME()
sys.exit(0)
startString = sys.argv[1]
if len(startString) > 28:
print("Address start string is too long!")
sys.exit(0)
if not startString.isalnum():
print("Address start string containts invalid characters!")
sys.exit(0)
if any((c in startString) for c in __unusedChars):
print("Address start string cannot contain 0OIl")
sys.exit(0)
if len(startString) < 2:
print("The start string is too short")
sys.exit(0)
fst2 = startString[0:2]
if not check_start_range(fst2):
print("Address start is not correct!")
print("Mainnet address start with string from 8F ~ 8d")
print("Testnet address start with string from 73 ~ 7R")
print("Regtest address start with string from mf ~ n4")
sys.exit(0)
anotherString = startString + "X" * (34 - len(startString))
result = b58decode(anotherString)
if result is None:
print("result is none")
exit(-1)
checksumResult = checksum(result[:-4])
mutableResult = bytearray(result)
mutableResult[-4] = checksumResult[0]
mutableResult[-3] = checksumResult[1]
mutableResult[-2] = checksumResult[2]
mutableResult[-1] = checksumResult[3]
finalResult = b58encode(mutableResult)
print("Generated address: ", finalResult)
|
2,796 |
test auto all error
|
import datetime
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import MagicMock, call, patch
from piccolo.apps.migrations.commands.new import (
BaseMigrationManager,
_create_new_migration,
_generate_migration_meta,
new,
)
from piccolo.conf.apps import AppConfig
from piccolo.utils.sync import run_sync
from tests.base import engines_only
from tests.example_apps.music.tables import Manager
class TestNewMigrationCommand(TestCase):
def test_manual(self):
"""
Create a manual migration (i.e. non-auto).
"""
migration_folder = os.path.join(
tempfile.gettempdir(), "piccolo_migrations"
)
if os.path.exists(migration_folder):
shutil.rmtree(migration_folder)
os.mkdir(migration_folder)
app_config = AppConfig(
app_name="music",
migrations_folder_path=migration_folder,
table_classes=[Manager],
)
run_sync(_create_new_migration(app_config, auto=False))
migration_modules = BaseMigrationManager().get_migration_modules(
migration_folder
)
self.assertTrue(len(migration_modules.keys()) == 1)
@engines_only("postgres")
@patch("piccolo.apps.migrations.commands.new.print")
def test_auto(self, print_: MagicMock):
"""
Call the command, when no migration changes are needed.
"""
run_sync(new(app_name="music", auto=True))
self.assertListEqual(
print_.call_args_list,
[
call("🚀 Creating new migration ..."),
call("🏁 No changes detected."),
call("\n✅ Finished\n"),
],
)
@engines_only("postgres")
@patch("piccolo.apps.migrations.commands.new.print")
def test_auto_all(self, print_: MagicMock):
"""
Try auto migrating all apps.
"""
run_sync(new(app_name="all", auto=True))
self.assertListEqual(
print_.call_args_list,
[
call("🚀 Creating new migration ..."),
call("🏁 No changes detected."),
call("🚀 Creating new migration ..."),
call("🏁 No changes detected."),
call("\n✅ Finished\n"),
],
)
@engines_only("postgres")
def METHOD_NAME(self):
"""
Ensure `--app_name=all` raises an error when used without `--auto`.
"""
with self.assertRaises(ValueError) as manager:
run_sync(new(app_name="all", auto=False))
self.assertEqual(
manager.exception.__str__(),
"Only use `--app_name=all` in conjunction with `--auto`.",
)
class TestGenerateMigrationMeta(TestCase):
@patch("piccolo.apps.migrations.commands.new.now")
def test_filename(self, now: MagicMock):
now.return_value = datetime.datetime(
year=2022,
month=1,
day=10,
hour=7,
minute=15,
second=20,
microsecond=3000,
)
# Try with an app name which already contains valid characters for a
# Python module.
migration_meta = _generate_migration_meta(
app_config=AppConfig(
app_name="app_name",
migrations_folder_path="/tmp/",
)
)
self.assertEqual(
migration_meta.migration_filename,
"app_name_2022_01_10t07_15_20_003000",
)
self.assertEqual(
migration_meta.migration_path,
"/tmp/app_name_2022_01_10t07_15_20_003000.py",
)
# Try with an app name with invalid characters for a Python module.
migration_meta = _generate_migration_meta(
app_config=AppConfig(
app_name="App-Name!",
migrations_folder_path="/tmp/",
)
)
self.assertEqual(
migration_meta.migration_filename,
"app_name_2022_01_10t07_15_20_003000",
)
self.assertEqual(
migration_meta.migration_path,
"/tmp/app_name_2022_01_10t07_15_20_003000.py",
)
|
2,797 |
install runtime dependencies
|
# -*- coding: utf-8 -*-
#
# This file is part of PyBuilder
#
# Copyright 2011-2020 PyBuilder Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pybuilder import pip_utils
from pybuilder.core import (task,
description,
use_plugin,
depends,
init)
from pybuilder.install_utils import install_dependencies as install_dependency
from pybuilder.utils import mkdir, as_list
__author__ = "Alexander Metzner, Arcadiy Ivanov"
use_plugin("core")
@init
def initialize_install_dependencies_plugin(project):
project.set_property_if_unset("pip_verbose", 0)
project.set_property_if_unset("install_env", "system")
project.set_property_if_unset("dir_install_logs", "$dir_logs/install_dependencies")
project.set_property_if_unset("install_dependencies_index_url", None)
project.set_property_if_unset("install_dependencies_local_mapping", {})
project.set_property_if_unset("install_dependencies_extra_index_url", None)
project.set_property_if_unset("install_dependencies_trusted_host", None)
project.set_property_if_unset("install_dependencies_constraints", "constraints_file")
# Deprecated - has no effect
project.set_property_if_unset("install_dependencies_upgrade", False)
project.set_property_if_unset("install_dependencies_insecure_installation", [])
@task
@depends("prepare")
@description("Installs all (both runtime and build) dependencies specified in the build descriptor")
def install_dependencies(logger, project, reactor):
logger.info("Installing all dependencies")
install_dependency(logger, project, as_list(project.build_dependencies) + as_list(project.dependencies),
reactor.python_env_registry[project.get_property("install_env")],
project.expand_path("$dir_install_logs", "install_batch"),
project.get_property("install_dependencies_local_mapping"),
project.expand_path("$dir_target", "install_dependencies_constraints"))
@task
@depends("prepare")
@description("Installs all build dependencies specified in the build descriptor")
def install_build_dependencies(logger, project, reactor):
logger.info("Installing build dependencies")
install_dependency(logger, project, project.build_dependencies,
reactor.python_env_registry[project.get_property("install_env")],
project.expand_path("$dir_install_logs", "install_batch"),
project.get_property("install_dependencies_local_mapping"),
project.expand_path("$dir_target", "install_dependencies_constraints"))
@task
@depends("prepare")
@description("Installs all runtime dependencies specified in the build descriptor")
def METHOD_NAME(logger, project, reactor):
logger.info("Installing runtime dependencies")
install_dependency(logger, project, project.dependencies,
reactor.python_env_registry[project.get_property("install_env")],
project.expand_path("$dir_install_logs", "install_batch"),
project.get_property("install_dependencies_local_mapping"),
project.expand_path("$dir_target", "install_dependencies_constraints"))
@task
@description("Displays all dependencies the project requires")
def list_dependencies(project):
print("\n".join(
map(lambda d: "{0}".format(" ".join(pip_utils.as_pip_install_target(d))),
project.build_dependencies + project.dependencies)))
@task("prepare")
def create_install_log_directory(logger, project):
log_dir = project.expand_path("$dir_install_logs")
logger.debug("Creating log directory %r", log_dir)
mkdir(log_dir)
target_dir = project.expand_path("$dir_target")
logger.debug("Creating target directory %r", target_dir)
mkdir(target_dir)
|
2,798 |
test multiple swipes
|
"""
Copyright(C) 2023 Altom Consulting
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
import pytest
from .utils import Scenes
from alttester import By
class TestScene03:
@pytest.fixture(autouse=True)
def setup(self, altdriver):
self.altdriver = altdriver
self.altdriver.reset_input()
self.altdriver.load_scene(Scenes.Scene03)
def wait_for_swipe_to_finish(self):
self.altdriver.wait_for_object_to_not_be_present(By.NAME, "icon")
def get_sprite_name(self, source_image_name, image_source_drop_zone_name):
image_source = self.altdriver.find_object(
By.NAME, source_image_name).get_component_property(
"UnityEngine.UI.Image", "sprite.name", "UnityEngine.UI")
image_source_drop_zone = self.altdriver.find_object(
By.NAME, image_source_drop_zone_name).get_component_property(
"UnityEngine.UI.Image", "sprite.name", "UnityEngine.UI")
return image_source, image_source_drop_zone
def drop_image_with_multipoint_swipe(self, object_names, duration, wait):
positions = []
for name in object_names:
alt_object = self.altdriver.find_object(By.NAME, name)
positions.append(alt_object.get_screen_position())
self.altdriver.multipoint_swipe(positions, duration, wait)
def drop_image(self, drag_location_name, drop_location_name, duration, wait):
drag_location = self.altdriver.find_object(By.NAME, drag_location_name)
drop_location = self.altdriver.find_object(By.NAME, drop_location_name)
self.altdriver.swipe(drag_location.get_screen_position(
), drop_location.get_screen_position(), duration, wait)
def test_pointer_enter_and_exit(self):
alt_object = self.altdriver.find_object(By.NAME, "Drop Image")
color1 = alt_object.get_component_property(
"AltExampleScriptDropMe",
"highlightColor",
"Assembly-CSharp"
)
alt_object.pointer_enter()
color2 = alt_object.get_component_property(
"AltExampleScriptDropMe",
"highlightColor",
"Assembly-CSharp"
)
assert color1["r"] != color2["r"] or \
color1["g"] != color2["g"] or \
color1["b"] != color2["b"] or \
color1["a"] != color2["a"]
alt_object.pointer_exit()
color3 = alt_object.get_component_property(
"AltExampleScriptDropMe",
"highlightColor",
"Assembly-CSharp"
)
assert color3["r"] != color2["r"] or \
color3["g"] != color2["g"] or \
color3["b"] != color2["b"] or \
color3["a"] != color2["a"]
assert color3["r"] == color1["r"] and \
color3["g"] == color1["g"] and \
color3["b"] == color1["b"] and \
color3["a"] == color1["a"]
def METHOD_NAME(self):
self.drop_image("Drag Image2", "Drop Box2", 1, False)
self.drop_image("Drag Image2", "Drop Box1", 1, False)
self.drop_image("Drag Image1", "Drop Box1", 2, False)
self.wait_for_swipe_to_finish()
image_source, image_source_drop_zone = self.get_sprite_name(
"Drag Image1", "Drop Image")
assert image_source == image_source_drop_zone
image_source, image_source_drop_zone = self.get_sprite_name(
"Drag Image2", "Drop")
assert image_source == image_source_drop_zone
def test_multiple_swipe_and_waits(self):
self.drop_image("Drag Image2", "Drop Box2", 1, True)
self.drop_image("Drag Image2", "Drop Box1", 1, True)
self.drop_image("Drag Image1", "Drop Box1", 1, True)
image_source, image_source_drop_zone = self.get_sprite_name(
"Drag Image1", "Drop Image")
assert image_source == image_source_drop_zone
image_source, image_source_drop_zone = self.get_sprite_name(
"Drag Image2", "Drop")
assert image_source == image_source_drop_zone
def test_multiple_swipe_with_multipoint_swipe(self):
self.drop_image_with_multipoint_swipe(
["Drag Image1", "Drop Box1"], 1, False)
self.drop_image_with_multipoint_swipe(
["Drag Image2", "Drop Box1", "Drop Box2"], 1, False)
image_source, image_source_drop_zone = self.get_sprite_name(
"Drag Image1", "Drop Image")
assert image_source == image_source_drop_zone
image_source, image_source_drop_zone = self.get_sprite_name(
"Drag Image2", "Drop")
assert image_source == image_source_drop_zone
def test_multiple_swipe_and_waits_with_multipoint_swipe(self):
self.drop_image_with_multipoint_swipe(
["Drag Image1", "Drop Box1"], 1, True)
self.drop_image_with_multipoint_swipe(
["Drag Image2", "Drop Box1", "Drop Box2"], 1, True)
image_source, image_source_drop_zone = self.get_sprite_name(
"Drag Image1", "Drop Image")
assert image_source == image_source_drop_zone
image_source, image_source_drop_zone = self.get_sprite_name(
"Drag Image2", "Drop")
assert image_source == image_source_drop_zone
def test_begin_move_end_touch(self):
alt_object1 = self.altdriver.find_object(By.NAME, "Drag Image1")
alt_object2 = self.altdriver.find_object(By.NAME, "Drop Box1")
id = self.altdriver.begin_touch(alt_object1.get_screen_position())
self.altdriver.move_touch(id, alt_object2.get_screen_position())
self.altdriver.end_touch(id)
imageSource = alt_object1.get_component_property(
"UnityEngine.UI.Image", "sprite.name", "UnityEngine.UI")
imageSourceDropZone = self.altdriver.find_object(By.NAME, "Drop Image").get_component_property(
"UnityEngine.UI.Image", "sprite.name", "UnityEngine.UI")
assert imageSource == imageSourceDropZone
|
2,799 |
test srmr
|
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from typing import Any, Dict
import pytest
import torch
from srmrpy import srmr as srmrpy_srmr
from torch import Tensor
from torchmetrics.audio.srmr import SpeechReverberationModulationEnergyRatio
from torchmetrics.functional.audio.srmr import speech_reverberation_modulation_energy_ratio
from torchmetrics.utilities.imports import _TORCHAUDIO_GREATER_EQUAL_0_10
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester
seed_all(42)
preds = torch.rand(2, 2, 8000)
def _ref_metric_batch(preds: Tensor, target: Tensor, fs: int, fast: bool, norm: bool, **kwargs: Dict[str, Any]):
# shape: preds [BATCH_SIZE, Time]
shape = preds.shape
preds = preds.reshape(1, -1) if len(shape) == 1 else preds.reshape(-1, shape[-1])
n_batch, time = preds.shape
preds = preds.detach().cpu().numpy()
score = []
for b in range(preds.shape[0]):
val, _ = srmrpy_srmr(preds[b, ...], fs=fs, fast=fast, norm=norm, max_cf=128 if not norm else 30)
score.append(val)
score = torch.tensor(score)
return score.reshape(*shape[:-1])
def _average_metric(preds, target, metric_func, **kwargs: Dict[str, Any]):
# shape: preds [BATCH_SIZE, 1, Time] , target [BATCH_SIZE, 1, Time]
# or shape: preds [NUM_BATCHES*BATCH_SIZE, 1, Time] , target [NUM_BATCHES*BATCH_SIZE, 1, Time]
return metric_func(preds, target, **kwargs).mean()
def _speech_reverberation_modulation_energy_ratio_cheat(preds, target, **kwargs: Dict[str, Any]):
# cheat the MetricTester as the speech_reverberation_modulation_energy_ratio doesn't need target
return speech_reverberation_modulation_energy_ratio(preds, **kwargs)
class _SpeechReverberationModulationEnergyRatioCheat(SpeechReverberationModulationEnergyRatio):
# cheat the MetricTester as SpeechReverberationModulationEnergyRatioCheat doesn't need target
def update(self, preds: Tensor, target: Tensor) -> None:
super().update(preds=preds)
@pytest.mark.skipif(not _TORCHAUDIO_GREATER_EQUAL_0_10, reason="torchaudio>=0.10.0 is required")
@pytest.mark.parametrize(
"preds, fs, fast, norm",
[
(preds, 8000, False, False),
(preds, 8000, False, True),
(preds, 8000, True, False),
(preds, 8000, True, True),
(preds, 16000, False, False),
(preds, 16000, False, True),
(preds, 16000, True, False),
(preds, 16000, True, True),
],
)
class TestSRMR(MetricTester):
"""Test class for `SpeechReverberationModulationEnergyRatio` metric."""
atol = 5e-2
@pytest.mark.parametrize("ddp", [True, False])
def METHOD_NAME(self, preds, fs, fast, norm, ddp):
"""Test class implementation of metric."""
self.run_class_metric_test(
ddp,
preds=preds,
target=preds,
metric_class=_SpeechReverberationModulationEnergyRatioCheat,
reference_metric=partial(_average_metric, metric_func=_ref_metric_batch, fs=fs, fast=fast, norm=norm),
metric_args={"fs": fs, "fast": fast, "norm": norm},
)
def test_srmr_functional(self, preds, fs, fast, norm):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds=preds,
target=preds,
metric_functional=_speech_reverberation_modulation_energy_ratio_cheat,
reference_metric=partial(_ref_metric_batch, fs=fs, fast=fast, norm=norm),
metric_args={"fs": fs, "fast": fast, "norm": norm},
)
def test_srmr_differentiability(self, preds, fs, fast, norm):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
if fast is True:
pytest.xfail("SRMR metric is not differentiable when `fast=True`")
else:
pytest.xfail("differentiable test for SRMR metric is skipped as it is too slow")
def test_srmr_half_cpu(self, preds, fs, fast, norm):
"""Test dtype support of the metric on CPU."""
pytest.xfail("SRMR metric does not support cpu + half precision")
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
def test_srmr_half_gpu(self, preds, fs, fast, norm):
"""Test dtype support of the metric on GPU."""
self.run_precision_test_gpu(
preds=preds,
target=preds,
metric_module=_SpeechReverberationModulationEnergyRatioCheat,
metric_functional=_speech_reverberation_modulation_energy_ratio_cheat,
metric_args={"fs": fs, "fast": fast, "norm": norm},
)
|