id (int64, 0–6k) | code (string, 4k–8k) | code_compressed (null) |
---|---|---|
5,100 |
"""Test case to start and stop games."""
from unittest.mock import MagicMock
from mpf.tests.MpfTestCase import MpfTestCase
class MpfGameTestCase(MpfTestCase):
"""Test case for starting and running games.
This is based on ``MpfTestCase`` but adds methods and assertions related
to running games (rather than just testing MPF components or devices).
"""
def __init__(self, methodName):
"""Patch minimal config needed to start a game into the machine config.
This method adds a switch called ``s_start`` with a tag called
``start``.
"""
super().__init__(methodName)
self.machine_config_patches['switches'] = dict()
self.machine_config_patches['switches']['s_start'] = {"number": "", "tags": "start"}
def start_two_player_game(self, start_switch=None):
"""Start two player game."""
self.start_game(start_switch=start_switch)
self.add_player()
def fill_troughs(self):
"""Fill all ball devices tagged with ``trough`` with balls."""
for trough in self.machine.ball_devices.items_tagged("trough"):
for switch in trough.ball_count_handler.counter.config['ball_switches']:
self.hit_switch_and_run(switch.name, 0)
self.advance_time_and_run()
def start_game(self, num_balls_known=None, start_switch=None):
"""Start a game.
This method checks to make sure a game is not running,
then hits and releases the ``s_start`` switch, and
finally checks to make sure a game actually started
properly.
For example:
.. code::
self.start_game()
"""
if num_balls_known is not None:
self.assertNumBallsKnown(num_balls_known)
if start_switch is None:
start_switch = "s_start"
# game start should work
self.assertGameIsNotRunning()
self.hit_and_release_switch(start_switch)
self.advance_time_and_run()
self.assertGameIsRunning()
self.assertEqual(1, self.machine.game.num_players)
self.assertPlayerNumber(1)
def add_player(self):
"""Add a player to the current game.
This method hits and releases a switch called ``s_start``
and then verifies that the player count actually increased
by 1.
You can call this method multiple times to add multiple
players. For example, to start a game and then add 2 additional
players (for 3 players total), you would use:
.. code::
self.start_game()
self.add_player()
self.add_player()
"""
prev_players = self.machine.game.num_players
self.hit_and_release_switch("s_start")
self.advance_time_and_run(1)
self.assertEqual(prev_players + 1, self.machine.game.num_players)
def assertBallNumber(self, number):
"""Asserts that the current ball is a certain ball numebr.
Args:
number: The number to check.
Raises:
Assertion error if there is no game in progress or if
the current ball is not the ball number passed.
The following code will check to make sure the game is on
Ball 1:
.. code::
self.assertBallNumber(1)
"""
self.assertGameIsRunning()
self.assertEqual(number, self.machine.game.player.ball)
def METHOD_NAME(self, balls):
"""Asserts that a certain number of balls are in play.
Note that the number of balls in play is not necessarily the same as
the number of balls on the playfield. (For example, a ball could
be held in a ball device, or the machine could be in the process
of adding a ball to the playfield.)
Args:
balls: The number of balls you want to assert are in
play.
To assert that there are 3 balls in play (perhaps during a multiball),
you would use:
.. code::
self.assertBallsInPlay(3)
"""
self.assertEqual(balls, self.machine.game.balls_in_play)
def drain_all_balls(self):
"""Drain all balls in play."""
drain = self.machine.ball_devices.items_tagged("drain")[0]
for _ in range(self.machine.game.balls_in_play):
self.machine.default_platform.add_ball_to_device(drain)
def drain_one_ball(self):
"""Drain a single ball.
If more than 1 ball is in play, this method will need to
be called once for each ball in order to end the current
ball.
If you want to drain all balls use `drain_all_balls`.
"""
drain = self.machine.ball_devices.items_tagged("drain")[0]
self.machine.default_platform.add_ball_to_device(drain)
def assertPlayerNumber(self, number):
"""Asserts that the current player is a certain player number.
Args:
number: The player number you want to assert is the current
player.
For example, to assert that the current player is Player 2, you
would use:
.. code::
self.assertPlayerNumber(2)
"""
self.assertEqual(number, self.machine.game.player.index + 1)
def assertPlayerCount(self, count):
"""Asserts that count players exist.
Args:
count: The expected number of players.
For example, to assert that two players are in the game:
.. code::
self.assertPlayerCount(2)
"""
self.assertEqual(count, len(self.machine.game.player_list))
def stop_game(self, stop_time=1):
"""Stop the current game.
This method asserts that a game is running, then calls
the game mode's ``end_game()`` method, then asserts that
the game has successfully stopped.
Example:
.. code::
self.stop_game()
"""
self.assertGameIsRunning()
self.machine.game.end_game()
self.advance_time_and_run(stop_time)
self.assertGameIsNotRunning()
def assertGameIsRunning(self):
"""Assert a game is running.
Example:
.. code::
self.assertGameIsRunning()
"""
self.assertIsNotNone(self.machine.game, "Expected a running game but no game is active.")
def assertGameIsNotRunning(self):
"""Assert a game is not running.
Example:
.. code::
self.assertGameIsNotRunning()
"""
self.assertIsNone(self.machine.game, "Expected game to have ended but game is active.")
| null |
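As a brief illustration of how the helpers in this test case compose (a minimal sketch, not taken from the row itself; the config-file plumbing every MpfTestCase needs is omitted, and a machine config with trough- and drain-tagged devices is assumed):
class GameFlowTest(MpfGameTestCase):
    # get_config_file()/get_machine_path() overrides are omitted here; they
    # depend on the machine config under test and are assumptions of this sketch.
    def test_two_player_flow(self):
        self.fill_troughs()                # register balls so a game can start
        self.start_two_player_game()       # start_game() followed by add_player()
        self.assertPlayerCount(2)
        self.assertPlayerNumber(1)
        self.assertBallNumber(1)
        self.drain_all_balls()             # drain every ball in play ...
        self.advance_time_and_run()
        self.assertPlayerNumber(2)         # ... so play passes to player 2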
5,101 |
import oauth2 as oauth
from django.conf import settings
from oauth_provider.compat import now
from oauth_provider.store import InvalidConsumerError, InvalidTokenError, Store
from oauth_provider.models import Nonce, Token, Consumer, VERIFIER_SIZE
NONCE_VALID_PERIOD = getattr(settings, "OAUTH_NONCE_VALID_PERIOD", None)
SCOPES = [x[1] for x in settings.OAUTH_SCOPES]
class ModelStore(Store):
"""
Store implementation using the Django models defined in ``oauth_provider.models``.
"""
def get_consumer(self, request, oauth_request, consumer_key):
try:
# LRS CHANGE - ADDED STATUS OF CONSUMER TO BE ACCEPTED
return Consumer.objects.get(key=consumer_key, status=2)
except Consumer.DoesNotExist:
raise InvalidConsumerError()
def get_consumer_for_request_token(self, request, oauth_request, request_token):
return request_token.consumer
def get_consumer_for_access_token(self, request, oauth_request, access_token):
return access_token.consumer
def create_request_token(self, request, oauth_request, consumer, callback):
# LRS CHANGED - SCOPE NO LONGER A MODEL - JUST USE INITIAL SCOPE DEFAULT FROM WHEN CONSUMER
# WAS CREATED
# LRS CHANGED - IF SPEC GIVEN, UTILS SHOULD PLACE SCOPE IN OAUTH_REQUEST
# CHECK IF THERE AND EQUALS THE CONSUMER SCOPES - IF NOT THROW ERROR
try:
scope = oauth_request.get_parameter('scope')
except oauth.Error:
scope = "statements/write statements/read/mine"
scope_list = scope.split(' ')
if not set(scope_list).issubset(set(SCOPES)):
raise oauth.Error('Scope does not exist.')
token = Token.objects.create_token(
token_type=Token.REQUEST,
consumer=Consumer.objects.get(
key=oauth_request['oauth_consumer_key']),
timestamp=oauth_request['oauth_timestamp'],
scope=scope,
)
token.set_callback(callback)
token.save()
return token
def get_request_token(self, request, oauth_request, request_token_key):
try:
return Token.objects.get(key=request_token_key, token_type=Token.REQUEST)
except Token.DoesNotExist:
raise InvalidTokenError()
def authorize_request_token(self, request, oauth_request, request_token):
request_token.is_approved = True
request_token.user = request.user
request_token.verifier = oauth.generate_verifier(VERIFIER_SIZE)
request_token.save()
return request_token
def create_access_token(self, request, oauth_request, consumer, request_token):
scope = request_token.scope
access_token = Token.objects.create_token(
token_type=Token.ACCESS,
timestamp=oauth_request['oauth_timestamp'],
consumer=Consumer.objects.get(key=consumer.key),
user=request_token.user,
scope=scope,
# LRS CHANGE - KEEP ACCESS TOKEN APPROVAL THE SAME AS REQUEST TOKEN SINCE THIS IS CALLED
# AFTER TOKEN VALIDATION IN THE ACCESS_TOKEN VIEW
is_approved=request_token.is_approved
)
request_token.delete()
return access_token
def get_access_token(self, request, oauth_request, consumer, access_token_key):
try:
# LRS CHANGE - ADDED IS_APPROVED PARAM TO BE SURE
return Token.objects.get(key=access_token_key, token_type=Token.ACCESS, is_approved=True)
except Token.DoesNotExist:
raise InvalidTokenError()
def get_user_for_access_token(self, request, oauth_request, access_token):
return access_token.user
def METHOD_NAME(self, request, oauth_request, consumer):
return consumer.user
def check_nonce(self, request, oauth_request, nonce, timestamp=0):
timestamp = int(timestamp)
if NONCE_VALID_PERIOD and int(now().strftime("%s")) - timestamp > NONCE_VALID_PERIOD:
return False
nonce, created = Nonce.objects.get_or_create(
consumer_key=oauth_request['oauth_consumer_key'],
token_key=oauth_request.get('oauth_token', ''),
key=nonce, timestamp=timestamp,
)
return created
| null |
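A rough sketch of the replay protection that check_nonce implements via get_or_create (the consumer key, token and timestamp below are invented; a configured Django environment with the oauth_provider models migrated, and OAUTH_NONCE_VALID_PERIOD left unset, is assumed):
# Hypothetical values; the request argument is unused by check_nonce, so None is passed.
store = ModelStore()
oauth_request = {'oauth_consumer_key': 'consumer-abc', 'oauth_token': 'token-xyz'}
first = store.check_nonce(None, oauth_request, nonce='n1', timestamp=1700000000)
replay = store.check_nonce(None, oauth_request, nonce='n1', timestamp=1700000000)
assert first is True    # first use creates the Nonce row
assert replay is False  # identical (consumer, token, nonce, timestamp) already exists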
5,102 |
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Test lite server inference python API.
"""
import mindspore_lite as mslite
import numpy as np
import pytest
# ============================ Context.parallel ============================
def test_context_parallel_workers_num_type_error():
with pytest.raises(TypeError) as raise_info:
context = mslite.Context()
context.parallel.workers_num = "4"
assert "workers_num must be int" in str(raise_info.value)
def test_context_parallel_workers_num_negative_error():
with pytest.raises(ValueError) as raise_info:
context = mslite.Context()
context.parallel.workers_num = -4
assert "workers_num must be a non-negative int" in str(raise_info.value)
def test_context_parallel_config_info_type_error():
with pytest.raises(TypeError) as raise_info:
context = mslite.Context()
context.parallel.config_info = 1
assert "config_info must be dict" in str(raise_info.value)
def test_context_parallel_config_info_key_type_error():
with pytest.raises(TypeError) as raise_info:
context = mslite.Context()
context.parallel.config_info = {1: {"test": "test"}}
assert "config_info_key must be str" in str(raise_info.value)
def test_context_parallel_config_info_value_type_error():
with pytest.raises(TypeError) as raise_info:
context = mslite.Context()
context.parallel.config_info = {"test": "test"}
assert "config_info_value must be dict" in str(raise_info.value)
def test_context_parallel_config_info_value_key_type_error():
with pytest.raises(TypeError) as raise_info:
context = mslite.Context()
context.parallel.config_info = {"test": {1: "test"}}
assert "config_info_value_key must be str" in str(raise_info.value)
def test_context_parallel_config_info_value_value_type_error():
with pytest.raises(TypeError) as raise_info:
context = mslite.Context()
context.parallel.config_info = {"test": {"test": 1}}
assert "config_info_value_value must be str" in str(raise_info.value)
def test_context_parallel_config_path_type_error():
with pytest.raises(TypeError) as raise_info:
context = mslite.Context()
context.parallel.config_path = 1
assert "config_path must be str" in str(raise_info.value)
def test_context_parallel_config_path_not_exist_error():
with pytest.raises(ValueError) as raise_info:
context = mslite.Context()
context.parallel.config_path = "test.cfg"
assert "config_path does not exist" in str(raise_info.value)
def test_context_parallel():
config_info = {"weight": {"weight_path": "path of model weight"}}
context = mslite.Context()
context.target = ["cpu"]
context.parallel.workers_num = 4
assert "workers num:" in str(context.parallel)
context.parallel.config_info = config_info
assert "config info:" in str(context.parallel)
# ============================ ModelParallelRunner ============================
def test_model_parallel_runner():
model_parallel_runner = mslite.ModelParallelRunner()
assert "model_path:" in str(model_parallel_runner)
def test_model_parallel_runner_build_from_file_model_path_type_error():
with pytest.raises(TypeError) as raise_info:
model_parallel_runner = mslite.ModelParallelRunner()
model_parallel_runner.build_from_file(model_path=["test.ms"])
assert "model_path must be str" in str(raise_info.value)
def test_model_parallel_runner_build_from_file_model_path_not_exist_error():
with pytest.raises(RuntimeError) as raise_info:
model_parallel_runner = mslite.ModelParallelRunner()
model_parallel_runner.build_from_file(model_path="test.ms")
assert "model_path does not exist" in str(raise_info.value)
def test_model_parallel_runner_build_from_file_01():
model_parallel_runner = mslite.model.ModelParallelRunner()
model_parallel_runner.build_from_file(model_path="mobilenetv2.ms")
assert "model_path:" in str(model_parallel_runner)
def test_model_parallel_runner_build_from_file_02():
context = mslite.Context()
context.target = ["cpu"]
context.parallel.workers_num = 4
model_parallel_runner = mslite.model.ModelParallelRunner()
model_parallel_runner.build_from_file(model_path="mobilenetv2.ms", context=context)
assert "model_path:" in str(model_parallel_runner)
def get_model_parallel_runner():
context = mslite.Context()
context.target = ["cpu"]
context.parallel.workers_num = 4
model_parallel_runner = mslite.ModelParallelRunner()
model_parallel_runner.build_from_file(model_path="mobilenetv2.ms", context=context)
return model_parallel_runner
def test_model_parallel_runner_predict_inputs_type_error():
with pytest.raises(TypeError) as raise_info:
model_parallel_runner = get_model_parallel_runner()
inputs = model_parallel_runner.get_inputs()
outputs = model_parallel_runner.predict(inputs[0])
assert "inputs must be list" in str(raise_info.value)
def test_model_parallel_runner_predict_inputs_elements_type_error():
with pytest.raises(TypeError) as raise_info:
model_parallel_runner = get_model_parallel_runner()
inputs = model_parallel_runner.get_inputs()
outputs = model_parallel_runner.predict(["input"])
assert "inputs element must be Tensor" in str(raise_info.value)
def METHOD_NAME():
with pytest.raises(RuntimeError) as raise_info:
model_parallel_runner = get_model_parallel_runner()
tensor1 = mslite.Tensor()
tensor2 = mslite.Tensor()
inputs = [tensor1, tensor2]
outputs = model_parallel_runner.predict(inputs)
assert "predict failed" in str(raise_info.value)
def test_model_parallel_runner_predict_01():
model_parallel_runner = get_model_parallel_runner()
inputs = model_parallel_runner.get_inputs()
in_data = np.arange(1 * 224 * 224 * 3, dtype=np.float32).reshape((1, 224, 224, 3))
inputs[0].set_data_from_numpy(in_data)
outputs = model_parallel_runner.predict(inputs)
def test_model_parallel_runner_predict_02():
model_parallel_runner = get_model_parallel_runner()
inputs = model_parallel_runner.get_inputs()
input_tensor = mslite.Tensor()
input_tensor.dtype = inputs[0].dtype
input_tensor.shape = inputs[0].shape
input_tensor.format = inputs[0].format
input_tensor.name = inputs[0].name
in_data = np.arange(1 * 224 * 224 * 3, dtype=np.float32).reshape((1, 224, 224, 3))
input_tensor.set_data_from_numpy(in_data)
outputs = model_parallel_runner.predict([input_tensor])
| null |
5,103 |
from functools import singledispatch
from sympy.core.numbers import pi
from sympy.functions.elementary.trigonometric import tan
from sympy.simplify import trigsimp
from sympy.core import Basic, Tuple
from sympy.core.symbol import _symbol
from sympy.solvers import solve
from sympy.geometry import Point, Segment, Curve, Ellipse, Polygon
from sympy.vector import ImplicitRegion
class ParametricRegion(Basic):
"""
Represents a parametric region in space.
Examples
========
>>> from sympy import cos, sin, pi
>>> from sympy.abc import r, theta, t, a, b, x, y
>>> from sympy.vector import ParametricRegion
>>> ParametricRegion((t, t**2), (t, -1, 2))
ParametricRegion((t, t**2), (t, -1, 2))
>>> ParametricRegion((x, y), (x, 3, 4), (y, 5, 6))
ParametricRegion((x, y), (x, 3, 4), (y, 5, 6))
>>> ParametricRegion((r*cos(theta), r*sin(theta)), (r, -2, 2), (theta, 0, pi))
ParametricRegion((r*cos(theta), r*sin(theta)), (r, -2, 2), (theta, 0, pi))
>>> ParametricRegion((a*cos(t), b*sin(t)), t)
ParametricRegion((a*cos(t), b*sin(t)), t)
>>> circle = ParametricRegion((r*cos(theta), r*sin(theta)), r, (theta, 0, pi))
>>> circle.parameters
(r, theta)
>>> circle.definition
(r*cos(theta), r*sin(theta))
>>> circle.limits
{theta: (0, pi)}
Dimension of a parametric region determines whether a region is a curve, surface
or volume region. It does not represent its dimensions in space.
>>> circle.dimensions
1
Parameters
==========
definition : tuple to define base scalars in terms of parameters.
bounds : Parameter or a tuple of length 3 to define parameter and corresponding lower and upper bound.
"""
def __new__(cls, definition, *bounds):
METHOD_NAME = ()
limits = {}
if not isinstance(bounds, Tuple):
bounds = Tuple(*bounds)
for bound in bounds:
if isinstance(bound, (tuple, Tuple)):
if len(bound) != 3:
raise ValueError("Tuple should be in the form (parameter, lowerbound, upperbound)")
METHOD_NAME += (bound[0],)
limits[bound[0]] = (bound[1], bound[2])
else:
METHOD_NAME += (bound,)
if not isinstance(definition, (tuple, Tuple)):
definition = (definition,)
obj = super().__new__(cls, Tuple(*definition), *bounds)
obj._parameters = METHOD_NAME
obj._limits = limits
return obj
@property
def definition(self):
return self.args[0]
@property
def limits(self):
return self._limits
@property
def METHOD_NAME(self):
return self._parameters
@property
def dimensions(self):
return len(self.limits)
@singledispatch
def parametric_region_list(reg):
"""
Returns a list of ParametricRegion objects representing the geometric region.
Examples
========
>>> from sympy.abc import t
>>> from sympy.vector import parametric_region_list
>>> from sympy.geometry import Point, Curve, Ellipse, Segment, Polygon
>>> p = Point(2, 5)
>>> parametric_region_list(p)
[ParametricRegion((2, 5))]
>>> c = Curve((t**3, 4*t), (t, -3, 4))
>>> parametric_region_list(c)
[ParametricRegion((t**3, 4*t), (t, -3, 4))]
>>> e = Ellipse(Point(1, 3), 2, 3)
>>> parametric_region_list(e)
[ParametricRegion((2*cos(t) + 1, 3*sin(t) + 3), (t, 0, 2*pi))]
>>> s = Segment(Point(1, 3), Point(2, 6))
>>> parametric_region_list(s)
[ParametricRegion((t + 1, 3*t + 3), (t, 0, 1))]
>>> p1, p2, p3, p4 = [(0, 1), (2, -3), (5, 3), (-2, 3)]
>>> poly = Polygon(p1, p2, p3, p4)
>>> parametric_region_list(poly)
[ParametricRegion((2*t, 1 - 4*t), (t, 0, 1)), ParametricRegion((3*t + 2, 6*t - 3), (t, 0, 1)),\
ParametricRegion((5 - 7*t, 3), (t, 0, 1)), ParametricRegion((2*t - 2, 3 - 2*t), (t, 0, 1))]
"""
raise ValueError("SymPy cannot determine parametric representation of the region.")
@parametric_region_list.register(Point)
def _(obj):
return [ParametricRegion(obj.args)]
@parametric_region_list.register(Curve) # type: ignore
def _(obj):
definition = obj.arbitrary_point(obj.parameter).args
bounds = obj.limits
return [ParametricRegion(definition, bounds)]
@parametric_region_list.register(Ellipse) # type: ignore
def _(obj, parameter='t'):
definition = obj.arbitrary_point(parameter).args
t = _symbol(parameter, real=True)
bounds = (t, 0, 2*pi)
return [ParametricRegion(definition, bounds)]
@parametric_region_list.register(Segment) # type: ignore
def _(obj, parameter='t'):
t = _symbol(parameter, real=True)
definition = obj.arbitrary_point(t).args
for i in range(0, 3):
lower_bound = solve(definition[i] - obj.points[0].args[i], t)
upper_bound = solve(definition[i] - obj.points[1].args[i], t)
if len(lower_bound) == 1 and len(upper_bound) == 1:
bounds = t, lower_bound[0], upper_bound[0]
break
definition_tuple = obj.arbitrary_point(parameter).args
return [ParametricRegion(definition_tuple, bounds)]
@parametric_region_list.register(Polygon) # type: ignore
def _(obj, parameter='t'):
l = [parametric_region_list(side, parameter)[0] for side in obj.sides]
return l
@parametric_region_list.register(ImplicitRegion) # type: ignore
def _(obj, METHOD_NAME=('t', 's')):
definition = obj.rational_parametrization(METHOD_NAME)
bounds = []
for i in range(len(obj.variables) - 1):
# Each parameter is replaced by the tangent of its half-angle to simplify integration
parameter = _symbol(METHOD_NAME[i], real=True)
definition = [trigsimp(elem.subs(parameter, tan(parameter/2))) for elem in definition]
bounds.append((parameter, 0, 2*pi),)
definition = Tuple(*definition)
return [ParametricRegion(definition, *bounds)]
| null |
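For the ImplicitRegion branch above, a small indicative example (the circle equation is invented; the exact printed form depends on rational_parametrization and trigsimp):
from sympy.abc import x, y
from sympy.vector import ImplicitRegion, parametric_region_list

unit_circle = ImplicitRegion((x, y), x**2 + y**2 - 1)
regions = parametric_region_list(unit_circle)
# One ParametricRegion whose definition uses the tan(t/2) substitution and
# whose limits are {t: (0, 2*pi)}, per the registered handler above.
print(regions[0].limits)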
5,104 |
import functools
import os
import os.path
import random
import re
import socket
import tarfile
import tempfile
import time
import docker
import paramiko
import pytest
def make_tree(dirs, files):
base = tempfile.mkdtemp()
for path in dirs:
os.makedirs(os.path.join(base, path))
for path in files:
with open(os.path.join(base, path), 'w') as f:
f.write("content")
return base
def simple_tar(path):
f = tempfile.NamedTemporaryFile()
t = tarfile.open(mode='w', fileobj=f)
abs_path = os.path.abspath(path)
t.add(abs_path, arcname=os.path.basename(path), recursive=False)
t.close()
f.seek(0)
return f
def untar_file(tardata, filename):
with tarfile.open(mode='r', fileobj=tardata) as t:
f = t.extractfile(filename)
result = f.read()
f.close()
return result
def skip_if_desktop():
def fn(f):
@functools.wraps(f)
def wrapped(self, *args, **kwargs):
info = self.client.info()
if info['Name'] == 'docker-desktop':
pytest.skip('Test does not support Docker Desktop')
return f(self, *args, **kwargs)
return wrapped
return fn
def requires_api_version(version):
test_version = os.environ.get(
'DOCKER_TEST_API_VERSION', docker.constants.DEFAULT_DOCKER_API_VERSION
)
return pytest.mark.skipif(
docker.utils.version_lt(test_version, version),
reason=f"API version is too low (< {version})"
)
def requires_experimental(until=None):
test_version = os.environ.get(
'DOCKER_TEST_API_VERSION', docker.constants.DEFAULT_DOCKER_API_VERSION
)
def req_exp(f):
@functools.wraps(f)
def wrapped(self, *args, **kwargs):
if not self.client.info()['ExperimentalBuild']:
pytest.skip('Feature requires Docker Engine experimental mode')
return f(self, *args, **kwargs)
if until and docker.utils.version_gte(test_version, until):
return f
return wrapped
return req_exp
def wait_on_condition(condition, delay=0.1, timeout=40):
start_time = time.time()
while not condition():
if time.time() - start_time > timeout:
raise AssertionError(f"Timeout: {condition}")
time.sleep(delay)
def random_name():
return f'dockerpytest_{random.getrandbits(64):x}'
def force_leave_swarm(client):
"""Actually force leave a Swarm. There seems to be a bug in Swarm that
occasionally throws "context deadline exceeded" errors when leaving."""
while True:
try:
if isinstance(client, docker.DockerClient):
return client.swarm.leave(force=True)
return client.leave_swarm(force=True) # elif APIClient
except docker.errors.APIError as e:
if e.explanation == "context deadline exceeded":
continue
else:
return
def METHOD_NAME():
return f'0.0.0.0:{random.randrange(10000, 25000)}'
def assert_cat_socket_detached_with_keys(sock, inputs):
if hasattr(sock, '_sock'):
sock = sock._sock
for i in inputs:
sock.sendall(i)
time.sleep(0.5)
# If we're using a Unix socket, the sock.send call will fail with a
# BrokenPipeError ; INET sockets will just stop receiving / sending data
# but will not raise an error
if isinstance(sock, paramiko.Channel):
with pytest.raises(OSError):
sock.sendall(b'make sure the socket is closed\n')
else:
if getattr(sock, 'family', -9) == getattr(socket, 'AF_UNIX', -1):
# We do not want to use pytest.raises here because future versions
# of the daemon no longer cause this to raise an error.
try:
sock.sendall(b'make sure the socket is closed\n')
except OSError:
return
sock.sendall(b"make sure the socket is closed\n")
data = sock.recv(128)
# New in 18.06: error message is broadcast over the socket when reading
# after detach
assert data == b'' or data.startswith(
b'exec attach failed: error on attach stdin: read escape sequence'
)
def ctrl_with(char):
if re.match('[a-z]', char):
return chr(ord(char) - ord('a') + 1).encode('ascii')
else:
raise Exception('char must be [a-z]')
| null |
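To show how these helpers are typically used together (the API version string, test names and file layout below are made up for illustration, and the helpers are assumed importable from the module above):
import os

@requires_api_version('1.41')
def test_something_new():
    # skipped automatically when DOCKER_TEST_API_VERSION is older than 1.41
    assert random_name().startswith('dockerpytest_')

def test_tar_roundtrip():
    base = make_tree(dirs=['d1'], files=['d1/hello.txt'])
    tar = simple_tar(os.path.join(base, 'd1', 'hello.txt'))
    assert untar_file(tar, 'hello.txt') == b'content'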
5,105 |
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2023, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
import uuid
from pgadmin.browser.server_groups.servers.databases.extensions.tests import \
utils as extension_utils
from pgadmin.browser.server_groups.servers.databases.foreign_data_wrappers. \
foreign_servers.tests import utils as fsrv_utils
from pgadmin.browser.server_groups.servers.databases.foreign_data_wrappers.\
tests import utils as fdw_utils
from pgadmin.browser.server_groups.servers.databases.tests import \
utils as database_utils
from pgadmin.utils.route import BaseTestGenerator
from regression import parent_node_dict
from regression.python_test_utils import test_utils as utils
from . import utils as um_utils
from unittest.mock import patch
class UserMappingGetTestCase(BaseTestGenerator):
"""This class will add user mapping under foreign server node."""
scenarios = utils.generate_scenarios('user_mapping_get',
um_utils.test_cases)
def setUp(self):
""" This function will create extension and foreign data wrapper."""
super().setUp()
self.schema_data = parent_node_dict['schema'][-1]
self.server_id = self.schema_data['server_id']
self.db_id = self.schema_data['db_id']
self.db_name = parent_node_dict["database"][-1]["db_name"]
self.schema_name = self.schema_data['schema_name']
self.extension_name = "cube"
self.fdw_name = "fdw_%s" % (str(uuid.uuid4())[1:8])
self.fsrv_name = "fsrv_%s" % (str(uuid.uuid4())[1:8])
self.extension_id = extension_utils.create_extension(
self.server, self.db_name, self.extension_name, self.schema_name)
self.fdw_id = fdw_utils.create_fdw(self.server, self.db_name,
self.fdw_name)
self.fsrv_id = fsrv_utils.create_fsrv(self.server, self.db_name,
self.fsrv_name, self.fdw_name)
self.um_id = um_utils.create_user_mapping(self.server, self.db_name,
self.fsrv_name)
def get_user_mapping(self):
"""
This function returns the user mapping get response
:return: user mapping get response
"""
return self.tester.get(self.url + str(utils.SERVER_GROUP) + '/' +
str(self.server_id) + '/' +
str(self.db_id) + '/' +
str(self.fdw_id) + '/' +
str(self.fsrv_id) + '/' +
str(self.um_id),
content_type='html/json')
def METHOD_NAME(self):
"""
This functions returns the user mapping list
:return: user mapping list
"""
return self.tester.get(self.url + str(utils.SERVER_GROUP) + '/' +
str(self.server_id) + '/' +
str(self.db_id) + '/' +
str(self.fdw_id) + "/" +
str(self.fsrv_id) + '/',
content_type='html/json')
def runTest(self):
"""This function will update foreign server present under test
database."""
db_con = database_utils.connect_database(self,
utils.SERVER_GROUP,
self.server_id,
self.db_id)
if not db_con["info"] == "Database connected.":
raise Exception("Could not connect to database.")
fdw_response = fdw_utils.verify_fdw(self.server, self.db_name,
self.fdw_name)
if not fdw_response:
raise Exception("Could not find FDW.")
fsrv_response = fsrv_utils.verify_fsrv(self.server, self.db_name,
self.fsrv_name)
if not fsrv_response:
raise Exception("Could not find FSRV.")
if self.is_positive_test:
if hasattr(self, "um_list"):
response = self.METHOD_NAME()
else:
response = self.get_user_mapping()
else:
if hasattr(self, "error_fetching_um"):
with patch(self.mock_data["function_name"],
return_value=eval(self.mock_data["return_value"])):
if hasattr(self, "um_list"):
response = self.METHOD_NAME()
else:
response = self.get_user_mapping()
if hasattr(self, "wrong_um_id"):
self.um_id = 99999
response = self.get_user_mapping()
actual_response_code = response.status_code
expected_response_code = self.expected_data['status_code']
self.assertEqual(actual_response_code, expected_response_code)
def tearDown(self):
"""This function disconnect the test database and drop added extension
and dependant objects."""
extension_utils.drop_extension(self.server, self.db_name,
self.extension_name)
database_utils.disconnect_database(self, self.server_id, self.db_id)
| null |
5,106 |
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from mindspore.ops import Primitive
from mindspore.ops import operations as P
from mindspore.ops import _constants as Constants
mul = P.Mul()
add = P.Add()
square = P.Square()
sqrt = P.Sqrt()
real_div = P.RealDiv()
sub = P.Sub()
make_tuple = Primitive('MakeTuple')
tuple_getitem = Primitive(Constants.kTupleGetItem)
adam_apply_one_with_decay = Primitive('AdamApplyOneWithDecay')
class FnDict:
def __init__(self):
self.fn_dict = {}
def __call__(self, fn):
self.fn_dict[fn.__name__] = fn
def __getitem__(self, name):
return self.fn_dict.get(name)
def test_adam_apply_one_with_decay_rule_dyn(tag):
"""
Feature: test AdamApplyOneWithDecay dynamic shape
Description: The input shape is dynamic
Expectation: The rewritten graph matches the expected fused pattern
"""
fns = FnDict()
@fns
def before_cond1(input0, input1, input2, input3, input4, mul0_x, mul1_x, mul2_x, mul3_x, mul4_x, add2_y):
mul0 = mul(mul0_x, input2)
mul1 = mul(mul1_x, input0)
square0 = square(input0)
add0 = add(mul0, mul1)
mul2 = mul(mul2_x, input1)
mul3 = mul(mul3_x, square0)
add1 = add(mul2, mul3)
sqrt0 = sqrt(add1)
add2 = add(add2_y, sqrt0)
mul4 = mul(mul4_x, input3)
real_div0 = real_div(add0, add2)
add3 = add(mul4, real_div0)
mul5 = mul(input4, add3)
sub0 = sub(input3, mul5)
return make_tuple(add1, add0, sub0)
@fns
def METHOD_NAME(input0, input1, input2, input3, input4, mul0_x, mul1_x, mul2_x, mul3_x, mul4_x, add2_y):
mul0 = mul(input2, mul0_x)
mul1 = mul(input0, mul1_x)
square0 = square(input0)
add0 = add(mul0, mul1)
mul2 = mul(input1, mul2_x)
mul3 = mul(mul3_x, square0)
add1 = add(mul2, mul3)
sqrt0 = sqrt(add1)
add2 = add(sqrt0, add2_y)
mul4 = mul(input3, mul4_x)
real_div0 = real_div(add0, add2)
add3 = add(mul4, real_div0)
mul5 = mul(add3, input4)
sub0 = sub(input3, mul5)
return make_tuple(add1, add0, sub0)
@fns
def before_cond3(input0, input1, input2, input3, input4, mul0_x, mul1_x, mul2_x, mul3_x, mul4_x, add2_y):
mul0 = mul(mul0_x, input2)
mul1 = mul(mul1_x, input0)
square0 = square(input0)
add0 = add(mul0, mul1)
mul2 = mul(mul2_x, input1)
mul3 = mul(square0, mul3_x)
add1 = add(mul2, mul3)
sqrt0 = sqrt(add1)
add2 = add(sqrt0, add2_y)
mul4 = mul(mul4_x, input3)
real_div0 = real_div(add0, add2)
add3 = add(mul4, real_div0)
mul5 = mul(add3, input4)
sub0 = sub(input3, mul5)
return make_tuple(add1, add0, sub0)
@fns
def before_cond4(input0, input1, input2, input3, input4, mul0_x, mul1_x, mul2_x, mul3_x, mul4_x, add2_y):
mul0 = mul(mul0_x, input2)
mul1 = mul(mul1_x, input0)
square0 = square(input0)
add0 = add(mul0, mul1)
mul2 = mul(mul2_x, input1)
mul3 = mul(mul3_x, square0)
add1 = add(mul2, mul3)
sqrt0 = sqrt(add1)
add2 = add(add2_y, sqrt0)
mul4 = mul(mul4_x, input3)
real_div0 = real_div(add0, add2)
add3 = add(mul4, real_div0)
mul5 = mul(add3, input4)
sub0 = sub(input3, mul5)
return make_tuple(add1, add0, sub0)
@fns
def before_cond5(input0, input1, input2, input3, input4, mul0_x, mul1_x, mul2_x, mul3_x, mul4_x, add2_y):
mul0 = mul(mul0_x, input2)
mul1 = mul(mul1_x, input0)
square0 = square(input0)
add0 = add(mul0, mul1)
mul2 = mul(mul2_x, input1)
mul3 = mul(mul3_x, square0)
add1 = add(mul2, mul3)
sqrt0 = sqrt(add1)
add2 = add(sqrt0, add2_y)
mul4 = mul(mul4_x, input3)
real_div0 = real_div(add0, add2)
add3 = add(mul4, real_div0)
mul5 = mul(add3, input4)
sub0 = sub(input3, mul5)
return make_tuple(add1, add0, sub0)
@fns
def after(input0, input1, input2, input3, input4, mul0_x, mul1_x, mul2_x, mul3_x, mul4_x, add2_y):
res = adam_apply_one_with_decay(input0, input1, input2, input3, input4, mul0_x, mul1_x, mul2_x, mul3_x, mul4_x,
add2_y)
item0 = tuple_getitem(res, 0)
item1 = tuple_getitem(res, 1)
item2 = tuple_getitem(res, 2)
return make_tuple(make_tuple(item0, item1, item2))
return fns[tag]
| null |
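Reading the mul/add/sqrt chain in the before_* graphs with the conventional interpretation (input0 = gradient g, input1 = v, input2 = m, input3 = weight w, input4 = learning rate, the mul*_x constants as Adam coefficients and add2_y as epsilon — an interpretation, not stated in the row), the pattern the fused AdamApplyOneWithDecay kernel replaces is the Adam-with-weight-decay update:

m' = c_0\, m + c_1\, g,\qquad
v' = c_2\, v + c_3\, g^2,\qquad
w' = w - \eta\left(\frac{m'}{\sqrt{v'} + \epsilon} + c_4\, w\right)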
5,107 |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
test msssim
"""
import numpy as np
import pytest
import mindspore.common.dtype as mstype
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common.api import _cell_graph_executor
_MSSSIM_WEIGHTS = (0.0448, 0.2856, 0.3001, 0.2363, 0.1333)
class MSSSIMNet(nn.Cell):
def __init__(self, max_val=1.0, power_factors=_MSSSIM_WEIGHTS, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03):
super(MSSSIMNet, self).__init__()
self.net = nn.MSSSIM(max_val, power_factors, filter_size, filter_sigma, k1, k2)
def construct(self, img1, img2):
return self.net(img1, img2)
def METHOD_NAME():
factors = (0.033, 0.033, 0.033)
net = MSSSIMNet(power_factors=factors)
img1 = Tensor(np.random.random((8, 3, 128, 128)))
img2 = Tensor(np.random.random((8, 3, 128, 128)))
_cell_graph_executor.compile(net, img1, img2)
def test_compile_grayscale():
max_val = 255
factors = (0.033, 0.033, 0.033)
net = MSSSIMNet(max_val=max_val, power_factors=factors)
img1 = Tensor(np.random.randint(0, 256, (8, 3, 128, 128), np.uint8))
img2 = Tensor(np.random.randint(0, 256, (8, 3, 128, 128), np.uint8))
_cell_graph_executor.compile(net, img1, img2)
def test_msssim_max_val_negative():
max_val = -1
with pytest.raises(ValueError):
_ = MSSSIMNet(max_val)
def test_msssim_max_val_bool():
max_val = True
with pytest.raises(TypeError):
_ = MSSSIMNet(max_val)
def test_msssim_max_val_zero():
max_val = 0
with pytest.raises(ValueError):
_ = MSSSIMNet(max_val)
def test_msssim_power_factors_set():
with pytest.raises(TypeError):
_ = MSSSIMNet(power_factors={0.033, 0.033, 0.033})
def test_msssim_filter_size_float():
with pytest.raises(TypeError):
_ = MSSSIMNet(filter_size=1.1)
def test_msssim_filter_size_zero():
with pytest.raises(ValueError):
_ = MSSSIMNet(filter_size=0)
def test_msssim_filter_sigma_zero():
with pytest.raises(ValueError):
_ = MSSSIMNet(filter_sigma=0.0)
def test_msssim_filter_sigma_negative():
with pytest.raises(ValueError):
_ = MSSSIMNet(filter_sigma=-0.1)
def test_msssim_different_shape():
shape_1 = (8, 3, 128, 128)
shape_2 = (8, 3, 256, 256)
factors = (0.033, 0.033, 0.033)
img1 = Tensor(np.random.random(shape_1))
img2 = Tensor(np.random.random(shape_2))
net = MSSSIMNet(power_factors=factors)
with pytest.raises(ValueError):
_cell_graph_executor.compile(net, img1, img2)
def test_msssim_different_dtype():
dtype_1 = mstype.float32
dtype_2 = mstype.float16
factors = (0.033, 0.033, 0.033)
img1 = Tensor(np.random.random((8, 3, 128, 128)), dtype=dtype_1)
img2 = Tensor(np.random.random((8, 3, 128, 128)), dtype=dtype_2)
net = MSSSIMNet(power_factors=factors)
with pytest.raises(TypeError):
_cell_graph_executor.compile(net, img1, img2)
def test_msssim_invalid_5d_input():
shape_1 = (8, 3, 128, 128)
shape_2 = (8, 3, 256, 256)
invalid_shape = (8, 3, 128, 128, 1)
factors = (0.033, 0.033, 0.033)
img1 = Tensor(np.random.random(shape_1))
invalid_img1 = Tensor(np.random.random(invalid_shape))
img2 = Tensor(np.random.random(shape_2))
invalid_img2 = Tensor(np.random.random(invalid_shape))
net = MSSSIMNet(power_factors=factors)
with pytest.raises(ValueError):
_cell_graph_executor.compile(net, invalid_img1, img2)
with pytest.raises(ValueError):
_cell_graph_executor.compile(net, img1, invalid_img2)
with pytest.raises(ValueError):
_cell_graph_executor.compile(net, invalid_img1, invalid_img2)
| null |
5,108 |
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for retrieving paths for various types of artifacts."""
import os
import absl
from tfx.dsl.io import fileio
from tfx.types import artifact
from tfx.types import artifact_utils
from tfx.types import standard_artifacts
from tfx.utils import io_utils
from tfx.utils import path_constants
_OLD_EVAL_MODEL_DIR = 'eval_model_dir'
_OLD_SERVING_MODEL_DIR = 'serving_model_dir'
"""Directory structure of exported model for estimator based trainer:
|-- <ModelExportPath>
|-- EVAL_MODEL_DIR <- eval_model_dir, eval_model_path
|-- saved_model.pb
|-- ...
|-- SERVING_MODEL_DIR <- serving_model_dir, serving_model_path
|-- saved_model.pb
|-- ...
For generic trainer with Keras, there won't be eval model:
|-- <ModelExportPath>
|-- SERVING_MODEL_DIR <- serving_model_dir, serving_model_path
|-- saved_model.pb
|-- ...
TODO(b/160795287): Deprecate estimator based executor.
Support for estimator-based executor and model export will be
deprecated soon. The following estimator working directory
structure is still supported for backwards compatibility:
Directory structure of exported model for estimator based trainer:
|-- <ModelExportPath>
|-- EVAL_MODEL_DIR <- eval_model_dir
|-- <timestamped model> <- eval_model_path
|-- saved_model.pb
|-- ...
|-- SERVING_MODEL_DIR <- serving_model_dir
|-- export
|-- <exporter name>
|-- <timestamped model> <- serving_model_path
|-- saved_model.pb
|-- ...
|-- ...
"""
def is_old_model_artifact(model_artifact: artifact.Artifact) -> bool:
"""Check whether the model artifact is generated by old TFX version."""
if model_artifact.type != standard_artifacts.Model:
absl.logging.warning(f'Artifact type is not Model: {model_artifact.type}.')
return artifact_utils.is_artifact_version_older_than(
model_artifact, artifact_utils._ARTIFACT_VERSION_FOR_MODEL_UPDATE) # pylint: disable=protected-access
def eval_model_dir(output_uri: str, is_old_artifact: bool = False) -> str:
"""Returns directory for exported model for evaluation purpose."""
if is_old_artifact:
return os.path.join(output_uri, _OLD_EVAL_MODEL_DIR)
return os.path.join(output_uri, path_constants.EVAL_MODEL_DIR)
def eval_model_path(output_uri: str, is_old_artifact: bool = False) -> str:
"""Returns final path to exported model for evaluation purpose."""
model_dir = eval_model_dir(output_uri, is_old_artifact)
model_file = os.path.join(model_dir, 'saved_model.pb')
if fileio.exists(model_file):
return model_dir
elif fileio.exists(model_dir):
# TODO(b/160795287): Deprecate estimator based executor.
absl.logging.warning('Support for estimator-based executor and model'
' export will be deprecated soon. Please use'
' export structure '
'<ModelExportPath>/eval_model_dir/saved_model.pb"')
return io_utils.get_only_uri_in_dir(model_dir)
else:
# If eval model doesn't exist, use serving model for eval.
return serving_model_path(output_uri, is_old_artifact)
def serving_model_dir(output_uri: str, is_old_artifact: bool = False) -> str:
"""Returns directory for exported model for serving purpose."""
if is_old_artifact:
return os.path.join(output_uri, _OLD_SERVING_MODEL_DIR)
return os.path.join(output_uri, path_constants.SERVING_MODEL_DIR)
def METHOD_NAME(output_uri: str,
model_type: str,
is_old_artifact: bool = False) -> str:
"""Returns directly for exported model depending on model_type."""
if model_type == path_constants.TFMA_EVAL:
return eval_model_dir(output_uri, is_old_artifact)
else:
return serving_model_dir(output_uri, is_old_artifact)
def serving_model_path(output_uri: str, is_old_artifact: bool = False) -> str:
"""Returns path for exported serving model."""
model_dir = serving_model_dir(output_uri, is_old_artifact)
export_dir = os.path.join(model_dir, 'export')
if fileio.exists(export_dir):
# TODO(b/160795287): Deprecate estimator based executor.
absl.logging.warning(
'Support for estimator-based executor and model export'
' will be deprecated soon. Please use export structure '
'<ModelExportPath>/serving_model_dir/saved_model.pb"')
model_dir = io_utils.get_only_uri_in_dir(export_dir)
return io_utils.get_only_uri_in_dir(model_dir)
else:
# If dir doesn't match estimator structure, use serving model root directly.
return model_dir
def stamped_model_path(output_uri: str) -> str:
"""Returns path for the stamped model."""
return os.path.join(output_uri, path_constants.STAMPED_MODEL_DIR)
def warmup_file_path(saved_model_path: str) -> str:
"""Returns SavedModel Warmup file path.
See https://www.tensorflow.org/tfx/serving/saved_model_warmup.
This is a lexical operation, and does not guarantee the path is valid.
Args:
saved_model_path: A POSIX path to the TensorFlow SavedModel.
Returns:
A POSIX path to the SavedModel Warmup file.
"""
return os.path.join(
saved_model_path,
'assets.extra',
'tf_serving_warmup_requests')
| null |
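A hedged sketch of how a caller might combine the helpers above (the function name resolve_serving_path and the warmup path argument are invented; Model artifacts expose a .uri property):
def resolve_serving_path(model_artifact):
    # Honour the old/new export layouts described in the module docstring above.
    is_old = is_old_model_artifact(model_artifact)
    return serving_model_path(model_artifact.uri, is_old_artifact=is_old)

print(warmup_file_path('/models/my_model/1'))
# -> /models/my_model/1/assets.extra/tf_serving_warmup_requests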
5,109 |
import logging
import os
import plistlib
import traceback
from zipfile import ZipFile
import click
import IPython
from pygments import formatters, highlight, lexers
from remotezip import RemoteZip
from pymobiledevice3 import usbmux
from pymobiledevice3.METHOD_NAME.cli_common import print_json, set_verbosity
from pymobiledevice3.exceptions import ConnectionFailedError, IncorrectModeError
from pymobiledevice3.irecv import IRecv
from pymobiledevice3.lockdown import LockdownClient, create_using_usbmux
from pymobiledevice3.restore.device import Device
from pymobiledevice3.restore.recovery import Behavior, Recovery
from pymobiledevice3.restore.restore import Restore
from pymobiledevice3.services.diagnostics import DiagnosticsService
SHELL_USAGE = """
# use `irecv` variable to access Restore mode API
# for example:
print(irecv.getenv('build-version'))
"""
logger = logging.getLogger(__name__)
class Command(click.Command):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.params[:0] = [
click.Option(('device', '--ecid'), type=click.INT, callback=self.device),
click.Option(('verbosity', '-v', '--verbose'), count=True, callback=set_verbosity, expose_value=False),
]
@staticmethod
def device(ctx, param, value):
if '_PYMOBILEDEVICE3_COMPLETE' in os.environ:
# prevent lockdown connection establishment when in autocomplete mode
return
ecid = value
logger.debug('searching among connected devices via lockdownd')
for device in usbmux.list_devices():
try:
lockdown = create_using_usbmux(serial=device.serial, connection_type='USB')
except (ConnectionFailedError, IncorrectModeError):
continue
if (ecid is None) or (lockdown.ecid == value):
logger.debug('found device')
return lockdown
else:
continue
logger.debug('waiting for device to be available in Recovery mode')
return IRecv(ecid=ecid)
@click.group()
def METHOD_NAME():
""" cli """
pass
@METHOD_NAME.group()
def restore():
""" restore options """
pass
@restore.command('shell', cls=Command)
def restore_shell(device):
""" create an IPython shell for interacting with iBoot """
IPython.embed(
header=highlight(SHELL_USAGE, lexers.PythonLexer(), formatters.TerminalTrueColorFormatter(style='native')),
user_ns={
'irecv': device,
})
@restore.command('enter', cls=Command)
def restore_enter(device):
""" enter Recovery mode """
if isinstance(device, LockdownClient):
device.enter_recovery()
@restore.command('exit')
def restore_exit():
""" exit Recovery mode """
irecv = IRecv()
irecv.set_autoboot(True)
irecv.reboot()
@restore.command('restart', cls=Command)
def restore_restart(device):
""" restarts device """
if isinstance(device, LockdownClient):
with DiagnosticsService(device) as diagnostics:
diagnostics.restart()
else:
device.reboot()
@restore.command('tss', cls=Command)
@click.argument('ipsw')
@click.argument('out', type=click.File('wb'), required=False)
@click.option('--color/--no-color', default=True)
def restore_tss(device, ipsw, out, color):
""" query SHSH blobs """
lockdown = None
irecv = None
if isinstance(device, LockdownClient):
lockdown = device
elif isinstance(device, IRecv):
irecv = device
if ipsw.startswith('http://') or ipsw.startswith('https://'):
ipsw = RemoteZip(ipsw)
else:
ipsw = ZipFile(ipsw)
device = Device(lockdown=lockdown, irecv=irecv)
tss = Recovery(ipsw, device).fetch_tss_record()
if out:
plistlib.dump(tss, out)
print_json(tss, colored=color)
@restore.command('ramdisk', cls=Command)
@click.argument('ipsw')
@click.option('--tss', type=click.File('rb'))
def restore_ramdisk(device, ipsw, tss):
"""
don't perform an actual restore. just enter the update ramdisk
ipsw can be either a filename or an url
"""
if tss:
tss = plistlib.load(tss)
if ipsw.startswith('http://') or ipsw.startswith('https://'):
ipsw = RemoteZip(ipsw)
else:
ipsw = ZipFile(ipsw)
lockdown = None
irecv = None
if isinstance(device, LockdownClient):
lockdown = device
elif isinstance(device, IRecv):
irecv = device
device = Device(lockdown=lockdown, irecv=irecv)
Recovery(ipsw, device, tss=tss).boot_ramdisk()
@restore.command('update', cls=Command)
@click.argument('ipsw')
@click.option('--tss', type=click.File('rb'))
@click.option('--erase', is_flag=True, help='use the Erase BuildIdentity (full factory-reset)')
@click.option('--ignore-fdr', is_flag=True, help='only establish an FDR service connection, but don\'t proxy any '
'traffic')
def restore_update(device, ipsw: str, tss, erase, ignore_fdr):
"""
perform an update
ipsw can be either a filename or an url
"""
if tss:
tss = plistlib.load(tss)
if ipsw.startswith('http://') or ipsw.startswith('https://'):
ipsw = RemoteZip(ipsw)
else:
ipsw = ZipFile(ipsw)
lockdown = None
irecv = None
if isinstance(device, LockdownClient):
lockdown = device
elif isinstance(device, IRecv):
irecv = device
device = Device(lockdown=lockdown, irecv=irecv)
behavior = Behavior.Update
if erase:
behavior = Behavior.Erase
try:
Restore(ipsw, device, tss=tss, behavior=behavior, ignore_fdr=ignore_fdr).update()
except Exception:
# click may "swallow" several exception types so we try to catch them all here
traceback.print_exc()
raise
| null |
5,110 |
# Copyright 2020-2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Testing fill op
"""
import numpy as np
import pytest
import mindspore.dataset as ds
import mindspore.dataset.transforms as data_trans
def test_fillop_basic():
"""
Feature: Fill op
Description: Test Fill op basic usage (positive int onto an array of uint8)
Expectation: Output is equal to the expected output
"""
def gen():
yield (np.array([4, 5, 6, 7], dtype=np.uint8),)
data = ds.GeneratorDataset(gen, column_names=["col"])
fill_op = data_trans.Fill(3)
data = data.map(operations=fill_op, input_columns=["col"])
expected = np.array([3, 3, 3, 3], dtype=np.uint8)
for data_row in data:
np.testing.assert_array_equal(data_row[0].asnumpy(), expected)
def METHOD_NAME():
"""
Feature: Fill op
Description: Test Fill op with a negative number onto an array of unsigned int8
Expectation: Output is equal to the expected output
"""
def gen():
yield (np.array([4, 5, 6, 7], dtype=np.uint8),)
data = ds.GeneratorDataset(gen, column_names=["col"])
fill_op = data_trans.Fill(-3)
data = data.map(operations=fill_op, input_columns=["col"])
expected = np.array([253, 253, 253, 253], dtype=np.uint8)
for data_row in data:
np.testing.assert_array_equal(data_row[0].asnumpy(), expected)
def test_fillop_up_type_cast():
"""
Feature: Fill op
Description: Test Fill op with a int onto an array of floats
Expectation: Output is equal to the expected output
"""
def gen():
yield (np.array([4, 5, 6, 7], dtype=np.float64),)
data = ds.GeneratorDataset(gen, column_names=["col"])
fill_op = data_trans.Fill(3)
data = data.map(operations=fill_op, input_columns=["col"])
expected = np.array([3., 3., 3., 3.], dtype=np.float64)
for data_row in data:
np.testing.assert_array_equal(data_row[0].asnumpy(), expected)
def test_fillop_string():
"""
Feature: Fill op
Description: Test Fill op with a string onto an array of strings
Expectation: Output is equal to the expected output
"""
def gen():
yield (np.array(["45555", "45555"], dtype=np.str_),)
data = ds.GeneratorDataset(gen, column_names=["col"])
fill_op = data_trans.Fill("error")
data = data.map(operations=fill_op, input_columns=["col"])
expected = np.array(['error', 'error'], dtype=np.str_)
for data_row in data.create_tuple_iterator(num_epochs=1, output_numpy=True):
np.testing.assert_array_equal(data_row[0], expected)
def test_fillop_bytes():
"""
Feature: Fill op
Description: Test Fill op with bytes onto an array of strings
Expectation: Output is equal to the expected output
"""
def gen():
yield (np.array(["A", "B", "C"], dtype=np.bytes_),)
data = ds.GeneratorDataset(gen, column_names=["col"])
fill_op = data_trans.Fill(b'abc')
data = data.map(operations=fill_op, input_columns=["col"])
expected = np.array([b'abc', b'abc', b'abc'], dtype=np.bytes_)
for data_row in data.create_tuple_iterator(num_epochs=1, output_numpy=True):
np.testing.assert_array_equal(data_row[0], expected)
def test_fillop_error_handling():
"""
Feature: Fill op
Description: Test Fill op with a mismatch data type (string onto an array of ints)
Expectation: Error is raised as expected
"""
def gen():
yield (np.array([4, 4, 4, 4]),)
data = ds.GeneratorDataset(gen, column_names=["col"])
fill_op = data_trans.Fill("words")
data = data.map(operations=fill_op, input_columns=["col"])
with pytest.raises(RuntimeError) as error_info:
for _ in data:
pass
assert "fill_value and the input tensor must be of the same data type" in str(error_info.value)
if __name__ == "__main__":
test_fillop_basic()
test_fillop_up_type_cast()
METHOD_NAME()
test_fillop_string()
test_fillop_bytes()
test_fillop_error_handling()
| null |
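The negative-fill case above (253 expected from Fill(-3) on a uint8 column) is the usual unsigned cast; a one-line illustration with NumPy, independent of MindSpore:
import numpy as np

# -3 wraps modulo 256 when cast to uint8, which is why the expected output
# in the negative-fill test above is [253, 253, 253, 253].
print(np.array(-3).astype(np.uint8))  # 253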
5,111 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import unittest
from mantid.api import MatrixWorkspace, Run
from mantid.simpleapi import SANSILLReduction, config, mtd
from mantid.geometry import Instrument
import numpy as np
class SANSILLReductionTest(unittest.TestCase):
_facility = None
_instrument = None
@classmethod
def setUpClass(cls):
cls._data_search_dirs = config["datasearch.directories"]
cls._facility = config["default.facility"]
cls._instrument = config["default.instrument"]
config.appendDataSearchSubDir("ILL/D11/")
config.appendDataSearchSubDir("ILL/D16/")
config.appendDataSearchSubDir("ILL/D33/")
config["default.facility"] = "ILL"
config["default.instrument"] = "D11"
def METHOD_NAME(self):
mtd.clear()
@classmethod
def tearDownClass(cls):
config["default.facility"] = cls._facility
config["default.instrument"] = cls._instrument
config["datasearch.directories"] = cls._data_search_dirs
def test_absorber(self):
SANSILLReduction(Run="010462", ProcessAs="Absorber", OutputWorkspace="Cd", Version=1)
self._check_output(mtd["Cd"], True, 1, 128 * 128 + 2)
self._check_process_flag(mtd["Cd"], "Absorber")
def test_beam(self):
SANSILLReduction(Run="010414", ProcessAs="Beam", OutputWorkspace="Db", FluxOutputWorkspace="Fl", Version=1)
self._check_output(mtd["Db"], True, 1, 128 * 128 + 2)
self._check_process_flag(mtd["Db"], "Beam")
run = mtd["Db"].getRun()
self.assertAlmostEqual(run.getLogData("BeamCenterX").value, 0.0048, delta=1e-4)
self.assertAlmostEqual(run.getLogData("BeamCenterY").value, -0.0027, delta=1e-4)
self._check_output(mtd["Fl"], False, 1, 128 * 128 + 2)
self._check_process_flag(mtd["Fl"], "Beam")
self.assertAlmostEqual(mtd["Fl"].readY(0)[0], 6628249, delta=1)
self.assertAlmostEqual(mtd["Fl"].readE(0)[0], 8566, delta=1)
def test_transmission(self):
SANSILLReduction(Run="010414", ProcessAs="Beam", OutputWorkspace="Db", Version=1)
SANSILLReduction(Run="010585", ProcessAs="Transmission", BeamInputWorkspace="Db", OutputWorkspace="Tr", Version=1)
self.assertAlmostEqual(mtd["Tr"].readY(0)[0], 0.642, delta=1e-3)
self.assertAlmostEqual(mtd["Tr"].readE(0)[0], 0.0019, delta=1e-4)
self._check_process_flag(mtd["Tr"], "Transmission")
def test_container(self):
SANSILLReduction(Run="010460", ProcessAs="Container", OutputWorkspace="can", Version=1)
self._check_output(mtd["can"], True, 1, 128 * 128 + 2)
self._check_process_flag(mtd["can"], "Container")
def test_reference(self):
SANSILLReduction(Run="010453", ProcessAs="Sample", SensitivityOutputWorkspace="sens", OutputWorkspace="water", Version=1)
self._check_output(mtd["water"], True, 1, 128 * 128 + 2)
self._check_output(mtd["sens"], False, 1, 128 * 128 + 2)
self._check_process_flag(mtd["water"], "Sample")
self._check_process_flag(mtd["sens"], "Sensitivity")
def test_sample(self):
SANSILLReduction(Run="010569", ProcessAs="Sample", OutputWorkspace="sample", Version=1)
self._check_output(mtd["sample"], True, 1, 128 * 128 + 2)
self._check_process_flag(mtd["sample"], "Sample")
def test_absorber_tof(self):
# D33 VTOF
# actually this is a container run, not an absorber, but is fine for this test
SANSILLReduction(Run="093409", ProcessAs="Absorber", OutputWorkspace="absorber", Version=1)
self._check_output(mtd["absorber"], True, 30, 256 * 256 + 2)
self._check_process_flag(mtd["absorber"], "Absorber")
def test_beam_tof(self):
# D33 VTOF
SANSILLReduction(Run="093406", ProcessAs="Beam", OutputWorkspace="beam", FluxOutputWorkspace="flux", Version=1)
self._check_output(mtd["beam"], True, 30, 256 * 256 + 2)
self._check_process_flag(mtd["beam"], "Beam")
run = mtd["beam"].getRun()
self.assertAlmostEqual(run.getLogData("BeamCenterX").value, 0.0025, delta=1e-4)
self.assertAlmostEqual(run.getLogData("BeamCenterY").value, 0.0009, delta=1e-4)
self._check_output(mtd["flux"], False, 30, 256 * 256 + 2)
self._check_process_flag(mtd["flux"], "Beam")
def test_transmission_tof(self):
# D33 VTOF
SANSILLReduction(Run="093406", ProcessAs="Beam", OutputWorkspace="beam", Version=1)
SANSILLReduction(Run="093407", ProcessAs="Transmission", BeamInputWorkspace="beam", OutputWorkspace="ctr", Version=1)
self._check_output(mtd["ctr"], False, 97, 1)
def test_reference_tof(self):
# D33 VTOF
# this is actually a sample run, not water, but is fine for this test
SANSILLReduction(Run="093410", ProcessAs="Sample", OutputWorkspace="ref", Version=1)
self._check_output(mtd["ref"], True, 30, 256 * 256 + 2)
self._check_process_flag(mtd["ref"], "Sample")
def test_sample_tof(self):
# D33 VTOF, Pluronic F127
SANSILLReduction(Run="093410", ProcessAs="Sample", OutputWorkspace="sample", Version=1)
self._check_output(mtd["sample"], True, 30, 256 * 256 + 2)
self._check_process_flag(mtd["sample"], "Sample")
def test_sample_thickness(self):
SANSILLReduction(Run="010569", ProcessAs="Sample", SampleThickness=-1, OutputWorkspace="sample", Version=1)
a = mtd["sample"].getHistory().lastAlgorithm()
thickness = a.getProperty("SampleThickness").value
self.assertEqual(thickness, 0.1)
def test_finite_sensitivity(self):
SANSILLReduction(Runs="022846", ProcessAs="Water", OutputSensitivityWorkspace="sens", OutputWorkspace="_", MaxThreshold=5)
self._check_process_flag(mtd["sens"], "Water")
for spec_no in range(mtd["sens"].getNumberHistograms()):
            self.assertFalse(np.isnan(mtd["sens"].readY(spec_no)).any())
def _check_process_flag(self, ws, value):
self.assertTrue(ws.getRun().getLogData("ProcessedAs").value, value)
def _check_output(self, ws, logs, blocksize, spectra):
self.assertTrue(ws)
self.assertTrue(isinstance(ws, MatrixWorkspace))
self.assertTrue(ws.isHistogramData())
self.assertTrue(not ws.isDistribution())
self.assertEqual(ws.getAxis(0).getUnit().unitID(), "Wavelength")
self.assertEqual(ws.blocksize(), blocksize)
self.assertEqual(ws.getNumberHistograms(), spectra)
self.assertTrue(isinstance(ws.getInstrument(), Instrument))
self.assertTrue(isinstance(ws.getRun(), Run))
self.assertTrue(ws.getHistory())
if logs:
self.assertTrue(ws.getRun().hasProperty("qmin"))
self.assertTrue(ws.getRun().hasProperty("qmax"))
self.assertTrue(ws.getRun().hasProperty("l2"))
self.assertTrue(ws.getRun().hasProperty("collimation.actual_position"))
if __name__ == "__main__":
unittest.main()
| null |
5,112 |
"""Variable Config Player (used for scoring and more)."""
from collections import namedtuple
from typing import Dict, List, Any
from mpf.core.config_player import ConfigPlayer
from mpf.core.machine import MachineController
VarBlock = namedtuple("VarBlock", ["priority", "context"])
class VariablePlayer(ConfigPlayer):
"""Posts events based on config."""
config_file_section = 'variable_player'
show_section = 'variables'
__slots__ = ["blocks"]
def __init__(self, machine: MachineController) -> None:
"""Initialise variable player."""
super().__init__(machine)
self.blocks = {} # type: Dict[str, List[VarBlock]]
@staticmethod
def is_entry_valid_outside_mode(settings: dict) -> bool:
"""Return true if this entry may run without a game and player."""
for event, setting in settings.items():
del event
if setting['action'] not in ("set_machine", "add_machine"):
return False
# true if only set_machine or add_machine are used
return True
# pylint: disable-msg=too-many-arguments
def handle_subscription_change(self, value, settings, priority, context, key):
"""Handle subscriptions."""
for var, s in settings.items():
if var == "block":
self.raise_config_error('Do not use "block" as variable name in variable_player.', 1, context=context)
if s['action'] not in ("set", "set_machine"):
self.raise_config_error('Cannot use add on subscriptions. '
'Use action set or set_machine.', 8, context=context)
args = {"value": value}
if s['condition'] and not s['condition'].evaluate(args):
continue
block_item = var + ":" + str(key)
if self._is_blocked(block_item, context, priority):
continue
if s['block']:
if block_item not in self.blocks:
self.blocks[block_item] = []
if VarBlock(priority, context) not in self.blocks[block_item]:
self.blocks[block_item].append(VarBlock(priority, context))
self._set_variable(var, s, args, context)
def play(self, settings: dict, context: str, calling_context: str,
priority: int = 0, **kwargs) -> None:
"""Variable name."""
for var, s in settings.items():
if var == "block":
self.raise_config_error('Do not use "block" as variable name in variable_player.', 1, context=context)
if s['action'] in ("add", "add_machine") and s['string']:
self.raise_config_error('Cannot add two strings. Use action set or set_machine.', 3, context=context)
if s['condition'] and not s['condition'].evaluate(kwargs):
continue
block_item = var + ":" + str(calling_context)
if self._is_blocked(block_item, context, priority):
continue
if s['block']:
if block_item not in self.blocks:
self.blocks[block_item] = []
if VarBlock(priority, context) not in self.blocks[block_item]:
self.blocks[block_item].append(VarBlock(priority, context))
self._set_variable(var, s, kwargs, context)
def _is_blocked(self, block_item: str, context: str,
priority: int) -> bool:
if block_item not in self.blocks or not self.blocks[block_item]:
return False
priority_sorted = sorted(self.blocks[block_item], reverse=True)
first_element = priority_sorted[0]
return first_element.priority > priority and first_element.context != context
def _set_variable(self, var: str, entry: dict, placeholder_parameters: dict, context) -> None:
# evaluate placeholder
if entry['float']:
value = entry['float'].evaluate(placeholder_parameters)
elif entry['int']:
value = entry['int'].evaluate(placeholder_parameters)
elif entry['string']:
value = entry['string'].evaluate(placeholder_parameters)
else:
value = None # prevent type confusion
self.raise_config_error("You need to either set float, int or string", 2, context=context)
if entry['action'] == "add":
assert self.machine.game is not None
assert self.machine.game.player is not None
# default to current player
player = self.machine.game.player
if entry['player']:
# specific player
try:
player = self.machine.game.player_list[entry['player'] - 1]
except IndexError:
self.warning_log("Failed to set player var %s for player %s. There are only %s players.",
var, entry['player'] - 1, self.machine.game.num_players)
player.add_with_kwargs(var, value, source=context)
elif entry['action'] == "set":
assert self.machine.game is not None
assert self.machine.game.player is not None
# default to current player
player = self.machine.game.player
if entry['player']:
# specific player
try:
player = self.machine.game.player_list[entry['player'] - 1]
except IndexError:
self.warning_log("Failed to set player var %s for player %s. There are only %s players.",
var, entry['player'] - 1, self.machine.game.num_players)
player.set_with_kwargs(var, value, source=context)
elif entry['action'] == "add_machine":
old_value = self.machine.variables.get_machine_var(var)
if old_value is None:
old_value = 0
self.machine.variables.set_machine_var(var, old_value + value)
elif entry['action'] == "set_machine":
self.machine.variables.set_machine_var(var, value)
else:
self.raise_config_error("Invalid value: {}".format(entry), 8, context=context)
    def clear_context(self, context: str) -> None:
        """Clear all blocks that were registered by the given context."""
        for var in self.blocks:
            # Rebuild the list instead of deleting entries while iterating,
            # which would skip the element following each removed block.
            self.blocks[var] = [block for block in self.blocks[var] if block.context != context]
def validate_config_entry(self, settings: dict, name: str) -> dict:
"""Validate one entry of this player."""
config = {}
if not isinstance(settings, dict):
self.raise_config_error("Settings of variable_player should "
"be a dict. But are: {}".format(settings), 5, context=name)
for var, s in settings.items():
var_conditional_event = self._parse_and_validate_conditional(var, name)
value_dict = self._parse_config(s, name)
value_dict["condition"] = var_conditional_event.condition
config[var_conditional_event.name] = value_dict
return config
def get_express_config(self, value: Any) -> dict:
"""Parse express config."""
if not isinstance(value, str):
block = False
else:
try:
value, block_str = value.split('|')
except ValueError:
block = False
else:
if block_str != "block":
self.raise_config_error("Invalid action in variable_player entry: {}".format(value), 6)
block = True
return {"int": value, "block": block}
def METHOD_NAME(self, value: Any):
"""Parse list."""
self.raise_config_error("Variable player does not support lists.", 7)
| null |
5,113 |
# https://stackoverflow.com/a/56497521/104668
# taken from https://github.com/andy-gh/prettyjson
def METHOD_NAME(obj, indent=2, maxlinelength=80):
"""Renders JSON content with indentation and line splits/concatenations to fit maxlinelength.
Only dicts, lists and basic types are supported"""
items, _ = getsubitems(
obj,
itemkey="",
islast=True,
maxlinelength=maxlinelength - indent,
indent=indent,
)
return indentitems(items, indent, level=0)
def getsubitems(obj, itemkey, islast, maxlinelength, indent):
items = []
is_inline = (
True # at first, assume we can concatenate the inner tokens into one line
)
isdict = isinstance(obj, dict)
islist = isinstance(obj, list)
istuple = isinstance(obj, tuple)
isbasictype = not (isdict or islist or istuple)
maxlinelength = max(0, maxlinelength)
# build json content as a list of strings or child lists
if isbasictype:
# render basic type
keyseparator = "" if itemkey == "" else ": "
itemseparator = "" if islast else ","
items.append(itemkey + keyseparator + basictype2str(obj) + itemseparator)
else:
# render lists/dicts/tuples
if isdict:
opening, closing, keys = ("{", "}", iter(obj.keys()))
elif islist:
opening, closing, keys = ("[", "]", range(0, len(obj)))
elif istuple:
opening, closing, keys = (
"[",
"]",
range(0, len(obj)),
) # tuples are converted into json arrays
if itemkey != "":
opening = itemkey + ": " + opening
if not islast:
closing += ","
count = 0
itemkey = ""
subitems = []
# get the list of inner tokens
for (i, k) in enumerate(keys):
islast_ = i == len(obj) - 1
itemkey_ = ""
if isdict:
itemkey_ = basictype2str(k)
inner, is_inner_inline = getsubitems(
obj[k], itemkey_, islast_, maxlinelength - indent, indent
)
subitems.extend(inner) # inner can be a string or a list
is_inline = (
is_inline and is_inner_inline
) # if a child couldn't be rendered inline, then we are not able either
# fit inner tokens into one or multiple lines, each no longer than maxlinelength
if is_inline:
multiline = True
# in Multi-line mode items of a list/dict/tuple can be rendered in multiple lines if they don't fit on one.
# suitable for large lists holding data that's not manually editable.
# in Single-line mode items are rendered inline if all fit in one line, otherwise each is rendered in a separate line.
# suitable for smaller lists or dicts where manual editing of individual items is preferred.
# this logic may need to be customized based on visualization requirements:
if isdict:
multiline = False
if islist:
multiline = True
if multiline:
lines = []
current_line = ""
current_index = 0
for (i, item) in enumerate(subitems):
item_text = item
if i < len(inner) - 1:
item_text = item + ","
if len(current_line) > 0:
try_inline = current_line + " " + item_text
else:
try_inline = item_text
if len(try_inline) > maxlinelength:
# push the current line to the list if maxlinelength is reached
if len(current_line) > 0:
lines.append(current_line)
current_line = item_text
else:
# keep fitting all to one line if still below maxlinelength
current_line = try_inline
# Push the remainder of the content if end of list is reached
if i == len(subitems) - 1:
lines.append(current_line)
subitems = lines
if len(subitems) > 1:
is_inline = False
else: # single-line mode
totallength = len(subitems) - 1 # spaces between items
for item in subitems:
totallength += len(item)
if totallength <= maxlinelength:
                    joined = ""  # avoid shadowing the built-in str
                    for item in subitems:
                        joined += item + " "  # insert space between items, comma is already there
                    subitems = [joined.strip()]  # wrap concatenated content in a new list
else:
is_inline = False
# attempt to render the outer brackets + inner tokens in one line
if is_inline:
item_text = ""
if len(subitems) > 0:
item_text = subitems[0]
if len(opening) + len(item_text) + len(closing) <= maxlinelength:
items.append(opening + item_text + closing)
else:
is_inline = False
# if inner tokens are rendered in multiple lines already, then the outer brackets remain in separate lines
if not is_inline:
items.append(opening) # opening brackets
items.append(subitems) # Append children to parent list as a nested list
items.append(closing) # closing brackets
return items, is_inline
def basictype2str(obj):
if isinstance(obj, str):
strobj = '"' + str(obj) + '"'
elif isinstance(obj, bool):
strobj = {True: "true", False: "false"}[obj]
else:
strobj = str(obj)
return strobj
def indentitems(items, indent, level):
"""Recursively traverses the list of json lines, adds indentation based on the current depth"""
res = ""
indentstr = " " * (indent * level)
for (i, item) in enumerate(items):
if isinstance(item, list):
res += indentitems(item, indent, level + 1)
else:
islast = i == len(items) - 1
# no new line character after the last rendered line
if level == 0 and islast:
res += indentstr + item
else:
res += indentstr + item + "\n"
return res
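# --- Hedged usage sketch (not part of the original module) ---
# METHOD_NAME above is the masked pretty-printer entry point; the call below
# assumes it keeps the (obj, indent, maxlinelength) signature from its
# docstring. With a small maxlinelength the dict keys fall onto separate lines
# while the long list is wrapped to fit, matching the multiline/single-line
# switch inside getsubitems.
if __name__ == "__main__":
    sample = {
        "name": "example",
        "enabled": True,
        "values": list(range(20)),  # long list: packed into wrapped lines
        "nested": {"a": 1, "b": 2},  # small dict: may still fit on one line
    }
    print(METHOD_NAME(sample, indent=2, maxlinelength=40))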
| null |
5,114 |
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.components.example_gen.write_split."""
import os
import apache_beam as beam
from apache_beam.metrics.metric import MetricsFilter
from apache_beam.runners.direct import direct_runner
import pyarrow as pa
import tensorflow as tf
from tfx.components.example_gen import write_split
from tfx.dsl.io import fileio
from tfx.proto import example_gen_pb2
from tfx.types import standard_component_specs
class WriteSplitTest(tf.test.TestCase):
def setUp(self):
super().setUp()
self._output_data_dir = os.path.join(
os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
self._testMethodName)
def testWriteSplitCounter_WithFormatUnspecified(self):
count = 10
exec_properties = {}
def Pipeline(root):
data = [tf.train.Example()] * count
_ = (
root
| beam.Create(data)
| write_split.WriteSplit(self._output_data_dir,
example_gen_pb2.FILE_FORMAT_UNSPECIFIED,
exec_properties))
run_result = direct_runner.DirectRunner().run(Pipeline)
run_result.wait_until_finish()
num_instances = run_result.metrics().query(
MetricsFilter().with_name('num_instances'))
self.assertTrue(
fileio.exists(
os.path.join(self._output_data_dir,
'data_tfrecord-00000-of-00001.gz')))
self.assertTrue(num_instances['counters'])
self.assertEqual(len(num_instances['counters']), 1)
self.assertEqual(num_instances['counters'][0].result, count)
def testWriteSplitCounter_WithTFRECORDS_GZIP(self):
count = 10
exec_properties = {
standard_component_specs.OUTPUT_DATA_FORMAT_KEY:
example_gen_pb2.PayloadFormat.FORMAT_TF_EXAMPLE
}
def Pipeline(root):
data = [tf.train.Example()] * count
_ = (
root
| beam.Create(data)
| write_split.WriteSplit(self._output_data_dir,
example_gen_pb2.FORMAT_TFRECORDS_GZIP,
exec_properties))
run_result = direct_runner.DirectRunner().run(Pipeline)
run_result.wait_until_finish()
num_instances = run_result.metrics().query(
MetricsFilter().with_name('num_instances'))
self.assertTrue(
fileio.exists(
os.path.join(self._output_data_dir,
'data_tfrecord-00000-of-00001.gz')))
self.assertTrue(num_instances['counters'])
self.assertEqual(len(num_instances['counters']), 1)
self.assertEqual(num_instances['counters'][0].result, count)
def METHOD_NAME(self):
count = 10
exec_properties = {
standard_component_specs.OUTPUT_DATA_FORMAT_KEY:
example_gen_pb2.PayloadFormat.FORMAT_PARQUET,
'pyarrow_schema':
pa.schema([pa.field('feature', pa.string())])
}
def Pipeline(root):
data = [{'feature': 'value'}] * count
_ = (
root
| beam.Create(data)
| write_split.WriteSplit(self._output_data_dir,
example_gen_pb2.FILE_FORMAT_PARQUET,
exec_properties))
run_result = direct_runner.DirectRunner().run(Pipeline)
run_result.wait_until_finish()
self.assertTrue(
fileio.exists(
os.path.join(self._output_data_dir,
'data_parquet-00000-of-00001.parquet')))
if __name__ == '__main__':
tf.test.main()
| null |
5,115 |
"""Test the entry point of TopoStats and its ability to correctly direct to programs."""
from pathlib import Path
from typing import Callable
import pytest
from topostats.entry_point import (
entry_point,
legacy_run_topostats_entry_point,
legacy_toposum_entry_point,
)
from topostats.run_topostats import run_topostats
from topostats.plotting import run_toposum
# Test "help" arguments
@pytest.mark.parametrize("option", ("-h", "--help"))
def METHOD_NAME(capsys, option) -> None:
"""Test the help argument of the general entry point."""
try:
entry_point(manually_provided_args=[option])
except SystemExit:
pass
output = capsys.readouterr().out
assert "usage:" in output and "program" in output
@pytest.mark.parametrize("option", ("-h", "--help"))
def test_entry_point_process_help(capsys, option):
"""Test the help argument of the process program."""
try:
entry_point(manually_provided_args=["process", option])
except SystemExit:
pass
output = capsys.readouterr().out
assert "usage:" in output and "process" in output
@pytest.mark.parametrize("option", ("-h", "--help"))
def test_entry_point_summary_help(capsys, option):
"""Test the help argument of the summary program."""
try:
entry_point(manually_provided_args=["summary", option])
except SystemExit:
pass
output = capsys.readouterr().out
assert "usage:" in output and "summary" in output
# Test that the right functions are returned with the right arguments
@pytest.mark.parametrize(
"options, expected_function, expected_arg_name, expected_arg_value",
[
(
[
"process",
"-c dummy/config/dir/config.yaml",
],
run_topostats,
"config_file",
" dummy/config/dir/config.yaml",
),
(
[
"process",
"--config",
"dummy/config/dir/config.yaml",
],
run_topostats,
"config_file",
"dummy/config/dir/config.yaml",
),
(
[
"summary",
"-l dummy/config/dir/var_to_label.yaml",
],
run_toposum,
"var_to_label",
" dummy/config/dir/var_to_label.yaml",
),
],
)
def test_entry_point(
options: list, expected_function: Callable, expected_arg_name: str, expected_arg_value: str
) -> None:
"""Test the entry point, ensuring the correct function is called for each program, and arguments
are carried through."""
returned_args = entry_point(options, testing=True)
# convert argparse's Namespace object to dictionary
returned_args_dict = vars(returned_args)
# check that the correct function is collected
assert returned_args.func == expected_function
# check that the argument has successfully been passed through into the dictionary
assert returned_args_dict[expected_arg_name] == expected_arg_value
def test_entry_point_create_config_file(tmp_path: Path) -> None:
"""Test that the entry point is able to produce a default config file when asked to."""
with pytest.raises(SystemExit):
entry_point(manually_provided_args=["process", "--create_config_file", f"{tmp_path}/test_create_config.yaml"])
assert Path(f"{tmp_path}/test_create_config.yaml").is_file()
# Test that the right functions are returned with the right arguments
@pytest.mark.parametrize(
"options, expected_arg_name, expected_arg_value",
[
(
[
"-c dummy/config/dir/config.yaml",
],
"config_file",
" dummy/config/dir/config.yaml",
),
(
[
"--config",
"dummy/config/dir/config.yaml",
],
"config_file",
"dummy/config/dir/config.yaml",
),
],
)
def test_legacy_run_topostats_entry_point(options: list, expected_arg_name: str, expected_arg_value: str) -> None:
"""Test the run_topostats legacy entry point, ensuring the arguments
are parsed and carried through correctly."""
returned_args = legacy_run_topostats_entry_point(options, testing=True)
# Convert argparse's Namespace object to dictionary
returned_args_dict = vars(returned_args)
assert returned_args_dict[expected_arg_name] == expected_arg_value
def test_legacy_run_topostats_entry_point_create_config_file(tmp_path: Path) -> None:
"""Test that the run_topostats legacy entry point is able to produce a default config file
when asked to."""
with pytest.raises(SystemExit):
legacy_run_topostats_entry_point(
args=["--create-config-file", f"{tmp_path}/test_legacy_run_topostats_create_config.yaml"]
)
assert Path(f"{tmp_path}/test_legacy_run_topostats_create_config.yaml").is_file()
def test_legacy_toposum_entry_point_create_config_file(tmp_path: Path) -> None:
"""Test that the toposum legacy entry point is able to produce a default config file
when asked to."""
with pytest.raises(SystemExit):
legacy_toposum_entry_point(
args=["--create-config-file", f"{tmp_path}/test_legacy_toposum_create_config_file.yaml"]
)
assert Path(f"{tmp_path}/test_legacy_toposum_create_config_file.yaml").is_file()
def test_legacy_toposum_entry_point_create_label_file(tmp_path: Path) -> None:
"""Test that the toposum legacy entry point is able to produce a default label file
when asked to."""
with pytest.raises(SystemExit):
legacy_toposum_entry_point(
args=["--create-label-file", f"{tmp_path}/test_legacy_toposum_create_label_file.yaml"]
)
assert Path(f"{tmp_path}/test_legacy_toposum_create_label_file.yaml").is_file()
| null |
5,116 |
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" test jit_class """
import pytest
import numpy as np
import mindspore.nn as nn
import mindspore.common.dtype as mstype
from mindspore import Tensor, context, jit_class
context.set_context(mode=context.GRAPH_MODE)
def test_ms_class_attr():
"""
Feature: JIT Fallback
Description: Access the attributes of user-defined classes decorated with jit_class.
Expectation: No exception.
"""
@jit_class
class InnerNet:
def __init__(self):
self.number = Tensor(1, dtype=mstype.int32)
class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
self.inner_net = InnerNet()
def construct(self):
out = self.inner_net.number
return out
net = Net()
out = net()
assert out.asnumpy() == 1
def test_ms_class_input_attr():
"""
Feature: JIT Fallback
Description: Access the attributes of user-defined classes decorated with jit_class.
Expectation: No exception.
"""
@jit_class
class InnerNet:
def __init__(self):
self.number = Tensor(np.array([1, 2, 3]))
class Net(nn.Cell):
def __init__(self, net):
super(Net, self).__init__()
self.inner_net = net()
def construct(self):
out = self.inner_net.number
return out
net = Net(InnerNet)
out = net()
expect_res = np.array([1, 2, 3])
assert np.all(out.asnumpy() == expect_res)
def test_ms_class_input_method():
"""
Feature: JIT Fallback
Description: Access the methods of user-defined classes decorated with jit_class.
Expectation: No exception.
"""
@jit_class
class InnerNet:
def __init__(self):
self.val = Tensor(2, dtype=mstype.int32)
def act(self, x, y):
return self.val * (x + y)
class Net(nn.Cell):
def __init__(self, net):
super(Net, self).__init__()
self.inner_net = net()
def construct(self):
out = self.inner_net.act(1, 2)
return out
net = Net(InnerNet)
out = net()
assert out.asnumpy() == 6
def test_ms_class_nested():
"""
Feature: JIT Fallback
Description: Test nested jit_class in graph.
Expectation: No exception.
"""
@jit_class
class Inner:
def __init__(self):
self.number = Tensor(1, dtype=mstype.int32)
@jit_class
class InnerNet:
def __init__(self):
self.inner = Inner()
class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
self.inner_net = InnerNet()
def construct(self):
out = self.inner_net.inner.number
return out
net = Net()
out = net()
assert out.asnumpy() == 1
def test_ms_class_cell_nested():
"""
Feature: JIT Fallback
Description: Test nested jit_class and cell in graph.
Expectation: No exception.
"""
class Net(nn.Cell):
def __init__(self, val):
super().__init__()
self.val = val
def construct(self, x):
return x + self.val
@jit_class
class TrainNet():
class Loss(nn.Cell):
def __init__(self, net):
super().__init__()
self.net = net
def construct(self, x):
out = self.net(x)
return out * 2
def __init__(self, net):
self.net = net
loss_net = self.Loss(self.net)
self.number = loss_net(10)
global_net = Net(1)
class LearnNet(nn.Cell):
def __init__(self):
super().__init__()
self.value = TrainNet(global_net).number
def construct(self, x):
return x + self.value
    learn_net = LearnNet()
    out = learn_net(3)
print(out)
assert out == 25
def test_ms_class_type_attr():
"""
Feature: JIT Fallback
Description: Access the attributes of class type.
Expectation: No exception.
"""
@jit_class
class InnerNet:
val = Tensor(2, dtype=mstype.int32)
def act(self, x, y):
return self.val * (x + y)
class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
self.inner_net = InnerNet
# Support accessing attributes of class type, but do not support
# accessing methods, e.g. self.inner_net.act(1, 2)
def construct(self):
out = self.inner_net.val
return out
net = Net()
out = net()
assert out == 2
def test_ms_class_create_instance_attr():
"""
Feature: JIT Fallback
Description: Access the attributes of the created class instance.
Expectation: No exception.
"""
@jit_class
class InnerNet:
def __init__(self, val):
self.number = val + 3
class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
self.inner_net = InnerNet
def construct(self, x):
net = self.inner_net(x)
return net.number
net = Net()
out = net(2)
assert out == 5
def METHOD_NAME():
"""
Feature: JIT Fallback
Description: Decorator jit_class cannot be used for non-class types.
Expectation: No exception.
"""
with pytest.raises(TypeError):
@jit_class
def func(x, y):
return x + y
func(1, 2)
def test_raise_error_decorate_cell():
"""
Feature: JIT Fallback
Description: Decorator jit_class cannot be used for nn.Cell
Expectation: No exception.
"""
with pytest.raises(TypeError):
@jit_class
class Net(nn.Cell):
def construct(self, x):
return x
x = Tensor(1)
net = Net()
net(x)
def test_with_as_exception():
"""
Feature: Support with as statement.
Description: Support with as statement.
Expectation: No exception.
"""
@jit_class
class Sample():
def __init__(self):
super(Sample, self).__init__()
self.num = Tensor([1])
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
print("type:", exc_type)
print("value:", exc_value)
print("trace:", traceback)
return self.do_something(1)
def do_something(self, x):
bar = 2 / 0 + x + self.num
return bar + 10
class TestNet(nn.Cell):
def construct(self, x):
a = 1
with Sample() as sample:
a = sample.do_something(a + x)
return x * a
with pytest.raises(ValueError) as as_exception:
x = Tensor([1])
test_net = TestNet()
res = test_net(x)
print("res:", res)
assert res == 10
assert "The divisor could not be zero" in str(as_exception.value)
| null |
5,117 |
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common.api import jit
from mindspore.ops import operations as P
from mindspore.ops.functional import vmap
context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU')
class Conv2dInput(nn.Cell):
def __init__(self):
super(Conv2dInput, self).__init__()
out_channel = 1
kernel_size = 3
self.conv_input = P.Conv2DBackpropInput(out_channel,
kernel_size,
pad_mode="valid",
pad=0,
mode=1,
stride=1,
dilation=1,
group=1)
self.get_shape = P.Shape()
@jit
def construct(self, out, w, x):
return self.conv_input(out, w, self.get_shape(x))
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('algo', ["normal", "performance", "algo_0", "algo_1", "fft", "fft_tiling", "winograd",
"winograd_nonfused"])
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
@pytest.mark.parametrize('conv_allow_tf32', [True, False])
def METHOD_NAME(algo, mode, conv_allow_tf32):
"""
Feature: Test conv2d backprop input op
Description: Test conv2d backprop input op
Expectation: The value is processed as expected
"""
gpu_config = {"conv_dgrad_algo": algo, "conv_allow_tf32": conv_allow_tf32}
context.set_context(mode=mode, device_target="GPU", gpu_config=gpu_config)
w = Tensor(np.array([[[[1, 0, -1], [1, 0, -1], [1, 0, -1]]]]).astype(np.float32))
x = Tensor(np.array([[[
[3, 0, 1, 2, 7, 4],
[1, 5, 8, 9, 3, 1],
[2, 7, 2, 5, 1, 3],
[0, 1, 3, 1, 7, 8],
[4, 2, 1, 6, 2, 8],
[2, 4, 5, 2, 3, 9]]]]).astype(np.float32))
out = Tensor(np.array([[[
[-5, -4, 0, 8],
[-10, -2, 2, 3],
[0, -2, -4, -7],
[-3, -2, -3, -16]]]]).astype(np.float32))
conv2d_input = Conv2dInput()
output = conv2d_input(out, w, x)
expect = np.array([[[[-5, -4, 5, 12, 0, -8],
[-15, -6, 17, 17, -2, -11],
[-15, -8, 13, 12, 2, -4],
[-13, -6, 8, -14, 5, 20],
[-3, -4, -4, -19, 7, 23],
[-3, -2, 0, -14, 3, 16]]]]).astype(np.float32)
assert (abs(output.asnumpy() - expect) < np.ones(shape=[1, 1, 6, 6]) * 1.0e-4).all()
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_conv2d_backprop_input_vmap():
"""
Feature: Conv2DBackpropInput op
Description: Test vmap rule for Conv2DBackpropInput op
Expectation: The dataset is processed as expected
"""
conv2d_input = Conv2dInput()
batch_dout = Tensor(np.arange(1 * 2 * 1 * 4 * 4).reshape(1, 2, 1, 4, 4).astype(np.float32))
x = Tensor(np.arange(1 * 1 * 3 * 3).reshape(1, 1, 3, 3).astype(np.float32))
w = Tensor(np.ones([1, 1, 6, 6]).astype(np.float32))
expected1 = np.array([[[[[0., 0., 1., 4., 7., 6.], [0., 7., 23., 38., 41., 29.],
[12., 45., 102., 138., 126., 81.], [48., 129., 246., 282., 234., 141.],
[84., 197., 341., 374., 287., 163.], [72., 162., 271., 292., 217., 120.]]]],
[[[[0., 16., 49., 52., 55., 38.], [48., 135., 263., 278., 233., 141.],
[156., 381., 678., 714., 558., 321.], [192., 465., 822., 858., 666., 381.],
[228., 517., 869., 902., 671., 371.], [168., 370., 607., 628., 457., 248.]]]]]
).astype(np.float32)
output1 = vmap(conv2d_input, (1, None, None))(batch_dout, x, w)
assert np.allclose(output1.asnumpy(), expected1, 0.0001, 0.0001)
dout = Tensor(np.arange(1 * 1 * 4 * 4).reshape(1, 1, 4, 4).astype(np.float32))
batch_x = Tensor(np.arange(2 * 1 * 1 * 3 * 3).reshape(2, 1, 1, 3, 3).astype(np.float32))
expected2 = np.array([[[[[0., 0., 1., 4., 7., 6.], [0., 7., 23., 38., 41., 29.],
[12., 45., 102., 138., 126., 81.], [48., 129., 246., 282., 234., 141.],
[84., 197., 341., 374., 287., 163.], [72., 162., 271., 292., 217., 120.]]]],
[[[[0., 9., 28., 58., 52., 33.], [36., 97., 185., 254., 203., 119.],
[120., 288., 507., 624., 477., 270.], [264., 588., 975., 1092., 801., 438.],
[264., 575., 935., 1022., 737., 397.], [180., 387., 622., 670., 478., 255.]]]]]
).astype(np.float32)
output2 = vmap(conv2d_input, (None, 0, None))(dout, batch_x, w)
assert np.allclose(output2.asnumpy(), expected2, 0.0001, 0.0001)
expected3 = np.array([[[[[0., 0., 1., 4., 7., 6.], [0., 7., 23., 38., 41., 29.],
[12., 45., 102., 138., 126., 81.], [48., 129., 246., 282., 234., 141.],
[84., 197., 341., 374., 287., 163.], [72., 162., 271., 292., 217., 120.]]]],
[[[[144., 313., 508., 538., 388., 209.], [372., 801., 1289., 1358., 971., 519.],
[696., 1488., 2379., 2496., 1773., 942.], [840., 1788., 2847., 2964., 2097., 1110.],
[696., 1471., 2327., 2414., 1697., 893.], [420., 883., 1390., 1438., 1006., 527.]]]]]
).astype(np.float32)
output3 = vmap(conv2d_input, (1, 0, None))(batch_dout, batch_x, w)
assert np.allclose(output3.asnumpy(), expected3, 0.0001, 0.0001)
| null |
5,118 |
# The MIT License (MIT)
#
# Copyright (c) 2019 Looker Data Sciences, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Load settings from .ini file and create an ApiSettings object
with the settings as attributes
"""
import configparser as cp
import os
import sys
from typing import Dict, Optional, Set, cast
import warnings
from looker_sdk.rtl import transport
if sys.version_info >= (3, 8):
from typing import Protocol, TypedDict
else:
from typing_extensions import Protocol, TypedDict
from typing_extensions import Required
class SettingsConfig(TypedDict, total=False):
client_id: Required[str]
client_secret: Required[str]
base_url: str
verify_ssl: str
timeout: str
redirect_uri: str
looker_url: str
class PApiSettings(transport.PTransportSettings, Protocol):
def read_config(self) -> SettingsConfig:
...
_DEFAULT_INIS = ["looker.ini", "../looker.ini"]
class ApiSettings(PApiSettings):
deprecated_settings: Set[str] = {"api_version", "embed_secret", "user_id"}
def __init__(
self,
*,
filename: str = _DEFAULT_INIS[0],
section: Optional[str] = None,
sdk_version: Optional[str] = "",
env_prefix: Optional[str] = None,
):
"""Configure using a config file and/or environment variables.
Environment variables will override config file settings. Neither
is necessary but some combination must supply the minimum to
instantiate ApiSettings.
ENV variables map like this:
<package-prefix>_BASE_URL -> base_url
<package-prefix>_VERIFY_SSL -> verify_ssl
Args:
filename (str): config file. If specified, the file must exist.
If not specified and the default value of "looker.ini" does not
exist then no error is raised.
section (str): section in config file. If not supplied default to
reading first section.
"""
if not os.path.isfile(filename):
if filename and filename not in _DEFAULT_INIS:
raise FileNotFoundError(f"No config file found: '{filename}'")
self.filename = filename
self.section = section
self.env_prefix = env_prefix
data = self.read_config()
verify_ssl = data.get("verify_ssl")
if verify_ssl is None:
self.verify_ssl = True
else:
self.verify_ssl = self._bool(verify_ssl)
self.base_url = data.get("base_url", "")
self.timeout = int(data.get("timeout", 120))
self.headers = {"Content-Type": "application/json"}
self.agent_tag = f"{transport.AGENT_PREFIX}"
if sdk_version:
self.agent_tag += f" {sdk_version}"
def read_config(self) -> SettingsConfig:
cfg_parser = cp.ConfigParser()
data: SettingsConfig = {
"client_id": "",
"client_secret": "",
}
try:
config_file = open(self.filename)
except FileNotFoundError:
pass
else:
cfg_parser.read_file(config_file)
config_file.close()
# If section is not specified, use first section in file
section = self.section or cfg_parser.sections()[0]
if not cfg_parser.has_section(section):
raise cp.NoSectionError(section)
self._override_settings(data, dict(cfg_parser[section]))
if self.env_prefix:
self._override_settings(data, self.METHOD_NAME())
return self._clean_input(data)
@staticmethod
def _bool(val: str) -> bool:
if val.lower() in ("yes", "y", "true", "t", "1"):
converted = True
elif val.lower() in ("", "no", "n", "false", "f", "0"):
converted = False
else:
raise TypeError
return converted
def _override_settings(
self, data: SettingsConfig, overrides: Dict[str, str]
) -> SettingsConfig:
# https://github.com/python/mypy/issues/6262
for setting in SettingsConfig.__annotations__.keys(): # type: ignore
if setting in overrides:
data[setting] = overrides[setting] # type: ignore
return data
def METHOD_NAME(self) -> Dict[str, str]:
overrides = {}
base_url = os.getenv(f"{self.env_prefix}_BASE_URL")
if base_url:
overrides["base_url"] = base_url
verify_ssl = os.getenv(f"{self.env_prefix}_VERIFY_SSL")
if verify_ssl:
overrides["verify_ssl"] = verify_ssl
timeout = os.getenv(f"{self.env_prefix}_TIMEOUT")
if timeout:
overrides["timeout"] = timeout
client_id = os.getenv(f"{self.env_prefix}_CLIENT_ID")
if client_id:
overrides["client_id"] = client_id
client_secret = os.getenv(f"{self.env_prefix}_CLIENT_SECRET")
if client_secret:
overrides["client_secret"] = client_secret
return overrides
def _clean_input(self, data: SettingsConfig) -> SettingsConfig:
"""Remove surrounding quotes and discard empty strings.
"""
cleaned = {}
for setting, value in data.items():
if setting in self.deprecated_settings:
warnings.warn(
message=DeprecationWarning(
f"'{setting}' config setting is deprecated"
)
)
if not isinstance(value, str):
continue
# Remove empty setting values
if value in ['""', "''", ""]:
continue
# Strip quotes from setting values
elif value.startswith(('"', "'")) or value.endswith(('"', "'")):
cleaned[setting] = value.strip("\"'")
else:
cleaned[setting] = value
return cast(SettingsConfig, cleaned)
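# --- Hedged usage sketch (not part of the SDK source) ---
# Assumes no looker.ini is present in the working directory, so every value is
# taken from environment variables carrying the example prefix "LOOKERSDK",
# following the <prefix>_BASE_URL / <prefix>_VERIFY_SSL mapping documented in
# ApiSettings.__init__. The credential values below are placeholders.
if __name__ == "__main__":
    os.environ.setdefault("LOOKERSDK_BASE_URL", "https://example.looker.com:19999")
    os.environ.setdefault("LOOKERSDK_VERIFY_SSL", "true")
    os.environ.setdefault("LOOKERSDK_CLIENT_ID", "placeholder_client_id")
    os.environ.setdefault("LOOKERSDK_CLIENT_SECRET", "placeholder_client_secret")
    settings = ApiSettings(env_prefix="LOOKERSDK")
    print(settings.base_url, settings.verify_ssl, settings.timeout)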
| null |
5,119 |
"""
Finding the shortest path in 0-1-graph in O(E + V) which is faster than dijkstra.
0-1-graph is the weighted graph with the weights equal to 0 or 1.
Link: https://codeforces.com/blog/entry/22276
"""
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
"""Weighted directed graph edge."""
destination_vertex: int
weight: int
class AdjacencyList:
"""Graph adjacency list."""
def __init__(self, size: int):
self._graph: list[list[Edge]] = [[] for _ in range(size)]
self._size = size
def __getitem__(self, vertex: int) -> Iterator[Edge]:
"""Get all the vertices adjacent to the given one."""
return iter(self._graph[vertex])
@property
def size(self):
return self._size
def add_edge(self, from_vertex: int, to_vertex: int, weight: int):
"""
>>> g = AdjacencyList(2)
>>> g.add_edge(0, 1, 0)
>>> g.add_edge(1, 0, 1)
>>> list(g[0])
[Edge(destination_vertex=1, weight=0)]
>>> list(g[1])
[Edge(destination_vertex=0, weight=1)]
>>> g.add_edge(0, 1, 2)
Traceback (most recent call last):
...
ValueError: Edge weight must be either 0 or 1.
>>> g.add_edge(0, 2, 1)
Traceback (most recent call last):
...
ValueError: Vertex indexes must be in [0; size).
"""
if weight not in (0, 1):
raise ValueError("Edge weight must be either 0 or 1.")
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError("Vertex indexes must be in [0; size).")
self._graph[from_vertex].append(Edge(to_vertex, weight))
def METHOD_NAME(self, start_vertex: int, finish_vertex: int) -> int | None:
"""
Return the shortest distance from start_vertex to finish_vertex in 0-1-graph.
1 1 1
0--------->3 6--------7>------->8
| ^ ^ ^ |1
| | | |0 v
0| |0 1| 9-------->10
| | | ^ 1
v | | |0
1--------->2<-------4------->5
0 1 1
>>> g = AdjacencyList(11)
>>> g.add_edge(0, 1, 0)
>>> g.add_edge(0, 3, 1)
>>> g.add_edge(1, 2, 0)
>>> g.add_edge(2, 3, 0)
>>> g.add_edge(4, 2, 1)
>>> g.add_edge(4, 5, 1)
>>> g.add_edge(4, 6, 1)
>>> g.add_edge(5, 9, 0)
>>> g.add_edge(6, 7, 1)
>>> g.add_edge(7, 8, 1)
>>> g.add_edge(8, 10, 1)
>>> g.add_edge(9, 7, 0)
>>> g.add_edge(9, 10, 1)
>>> g.add_edge(1, 2, 2)
Traceback (most recent call last):
...
ValueError: Edge weight must be either 0 or 1.
>>> g.get_shortest_path(0, 3)
0
>>> g.get_shortest_path(0, 4)
Traceback (most recent call last):
...
ValueError: No path from start_vertex to finish_vertex.
>>> g.get_shortest_path(4, 10)
2
>>> g.get_shortest_path(4, 8)
2
>>> g.get_shortest_path(0, 1)
0
>>> g.get_shortest_path(1, 0)
Traceback (most recent call last):
...
ValueError: No path from start_vertex to finish_vertex.
"""
queue = deque([start_vertex])
distances: list[int | None] = [None] * self.size
distances[start_vertex] = 0
while queue:
current_vertex = queue.popleft()
current_distance = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
new_distance = current_distance + edge.weight
dest_vertex_distance = distances[edge.destination_vertex]
if (
isinstance(dest_vertex_distance, int)
and new_distance >= dest_vertex_distance
):
continue
distances[edge.destination_vertex] = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex)
else:
queue.append(edge.destination_vertex)
if distances[finish_vertex] is None:
raise ValueError("No path from start_vertex to finish_vertex.")
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
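    # Hedged extra demo alongside the doctests: METHOD_NAME is the masked
    # shortest-path method. 0-weight edges go to the front of the deque, so
    # they are relaxed before 1-weight edges.
    demo = AdjacencyList(3)
    demo.add_edge(0, 1, 0)  # free edge
    demo.add_edge(1, 2, 1)  # unit-weight edge
    print(demo.METHOD_NAME(0, 2))  # expected: 1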
| null |
5,120 |
from testtools import TestCase
from mock import patch
import os
import shutil
import stat
from tempfile import mkdtemp
from charmhelpers.payload import execd
class ExecDTestCase(TestCase):
def setUp(self):
super(ExecDTestCase, self).setUp()
charm_dir = mkdtemp()
self.addCleanup(shutil.rmtree, charm_dir)
self.test_charm_dir = charm_dir
env_patcher = patch.dict('os.environ',
{'CHARM_DIR': self.test_charm_dir})
env_patcher.start()
self.addCleanup(env_patcher.stop)
def test_default_execd_dir(self):
expected = os.path.join(self.test_charm_dir, 'exec.d')
default_dir = execd.default_execd_dir()
self.assertEqual(expected, default_dir)
def make_preinstall_executable(self, module_dir, execd_dir='exec.d',
error_on_preinstall=False):
"""Add a charm-pre-install to module dir.
When executed, the charm-pre-install will create a second
file in the same directory, charm-pre-install-success.
"""
module_path = os.path.join(self.test_charm_dir, execd_dir, module_dir)
os.makedirs(module_path)
charm_pre_install_path = os.path.join(module_path,
'charm-pre-install')
pre_install_success_path = os.path.join(module_path,
'charm-pre-install-success')
with open(charm_pre_install_path, 'w+') as f:
if not error_on_preinstall:
f.write("#!/bin/bash\n"
"/usr/bin/touch {}".format(pre_install_success_path))
else:
f.write("#!/bin/bash\n"
"echo stdout_from_pre_install\n"
"echo stderr_from_pre_install >&2\n"
"exit 1")
# ensure it is executable.
perms = stat.S_IRUSR + stat.S_IXUSR
os.chmod(charm_pre_install_path, perms)
def assert_preinstall_called_for_mod(self, module_dir,
execd_dir='exec.d'):
"""Asserts that the charm-pre-install-success file exists."""
expected_file = os.path.join(self.test_charm_dir, execd_dir,
module_dir, 'charm-pre-install-success')
files = os.listdir(os.path.dirname(expected_file))
self.assertTrue(os.path.exists(expected_file), "files were: %s. charmdir is: %s" % (files, self.test_charm_dir))
def test_execd_preinstall(self):
"""All charm-pre-install hooks are executed."""
self.make_preinstall_executable(module_dir='basenode')
self.make_preinstall_executable(module_dir='mod2')
execd.execd_preinstall()
self.assert_preinstall_called_for_mod('basenode')
self.assert_preinstall_called_for_mod('mod2')
def test_execd_module_list_from_env(self):
modules = ['basenode', 'mod2', 'c']
for module in modules:
self.make_preinstall_executable(module_dir=module)
actual_mod_paths = list(execd.execd_module_paths())
expected_mod_paths = [
os.path.join(self.test_charm_dir, 'exec.d', module)
for module in modules]
self.assertSetEqual(set(actual_mod_paths), set(expected_mod_paths))
def METHOD_NAME(self):
modules = ['basenode', 'mod2', 'c']
for module in modules:
self.make_preinstall_executable(module_dir=module,
execd_dir='foo')
actual_mod_paths = list(execd.execd_module_paths(
execd_dir=os.path.join(self.test_charm_dir, 'foo')))
expected_mod_paths = [
os.path.join(self.test_charm_dir, 'foo', module)
for module in modules]
self.assertSetEqual(set(actual_mod_paths), set(expected_mod_paths))
def test_execd_module_paths_no_execd_dir(self):
"""Empty list is returned when the exec.d doesn't exist."""
actual_mod_paths = list(execd.execd_module_paths())
self.assertEqual(actual_mod_paths, [])
def test_execd_submodule_list(self):
modules = ['basenode', 'mod2', 'c']
for module in modules:
self.make_preinstall_executable(module_dir=module)
submodules = list(execd.execd_submodule_paths('charm-pre-install'))
expected = [os.path.join(self.test_charm_dir, 'exec.d', mod,
'charm-pre-install') for mod in modules]
self.assertEqual(sorted(submodules), sorted(expected))
def test_execd_run(self):
modules = ['basenode', 'mod2', 'c']
for module in modules:
self.make_preinstall_executable(module_dir=module)
execd.execd_run('charm-pre-install')
self.assert_preinstall_called_for_mod('basenode')
self.assert_preinstall_called_for_mod('mod2')
self.assert_preinstall_called_for_mod('c')
@patch('charmhelpers.core.hookenv.log')
def test_execd_run_logs_exception(self, log_):
self.make_preinstall_executable(module_dir='basenode',
error_on_preinstall=True)
execd.execd_run('charm-pre-install', die_on_error=False)
expected_log = ('Error (1) running {}/exec.d/basenode/'
'charm-pre-install. Output: '
'stdout_from_pre_install\n'
'stderr_from_pre_install\n'.format(self.test_charm_dir))
log_.assert_called_with(expected_log)
@patch('charmhelpers.core.hookenv.log')
@patch('sys.exit')
def test_execd_run_dies_with_return_code(self, exit_, log):
self.make_preinstall_executable(module_dir='basenode',
error_on_preinstall=True)
with open(os.devnull, 'wb') as devnull:
execd.execd_run('charm-pre-install', stderr=devnull)
exit_.assert_called_with(1)
| null |
5,121 |
from tests.test_utils import create_env_build_request
from _orchest.internals.test_utils import CeleryMock, gen_uuid, raise_exception_function
from app.apis import namespace_environment_builds
def test_environmentbuildlist_get_empty(client):
data = client.get("/api/environment-builds/").get_json()
assert data == {"environment_builds": []}
def METHOD_NAME(client, celery, project, monkeypatch):
req = create_env_build_request(project.uuid, 1)
data = client.post("/api/environment-builds/", json=req).get_json()
assert data["failed_requests"] is None
env = req["environment_build_requests"][0]
env["status"] = "PENDING"
env["started_time"] = None
env["finished_time"] = None
env_build = data["environment_builds"][0]
for k, v in env.items():
assert v == env_build[k]
def test_environmentbuildlist_post_same(client, celery, project):
env = {
"project_uuid": project.uuid,
"project_path": "project_path",
"environment_uuid": gen_uuid(),
}
req = {"environment_build_requests": [env, env]}
data = client.post("/api/environment-builds/", json=req).get_json()
assert len(data["environment_builds"]) == 1
assert data["failed_requests"] is None
def test_environmentbuildlist_post_with_error1(client, project, monkeypatch):
monkeypatch.setattr(
namespace_environment_builds, "make_celery", raise_exception_function()
)
data = client.post(
"/api/environment-builds/", json=create_env_build_request(project.uuid, 1)
).get_json()
assert len(data["failed_requests"]) == 1
def test_environmentbuildlist_post_with_error2(client, project, monkeypatch):
celery = CeleryMock()
# Make it so that only the first request will go through.
monkeypatch.setattr(
namespace_environment_builds,
"make_celery",
raise_exception_function(
should_trigger=lambda: bool(celery.tasks), return_value=celery
),
)
data = client.post(
"/api/environment-builds/", json=create_env_build_request(project.uuid, 3)
).get_json()
assert len(data["environment_builds"]) == 3
assert len(data["failed_requests"]) == 2
def test_environmentbuildlist_get(client, celery, project):
client.post(
"/api/environment-builds/", json=create_env_build_request(project.uuid, 1)
)
data = client.get("/api/environment-builds/").get_json()
assert len(data["environment_builds"]) == 1
def test_environmentbuildlist_post_revert(client, project, monkeypatch):
monkeypatch.setattr(
namespace_environment_builds, "make_celery", raise_exception_function()
)
client.post(
"/api/environment-builds/", json=create_env_build_request(project.uuid, 1)
)
data = client.get("/api/environment-builds/").get_json()
data = data["environment_builds"][0]
assert data["status"] == "FAILURE"
def test_environmentbuild_get_empty(client):
resp = client.get("/api/environment-builds/build_uuid")
assert resp.status_code == 404
def test_environmentbuild_delete(client, celery, project, abortable_async_res):
data = client.post(
"/api/environment-builds/", json=create_env_build_request(project.uuid, 1)
).get_json()
data = data["environment_builds"][0]
assert data["status"] == "PENDING"
env_build_uuid = data["uuid"]
resp = client.delete(f"/api/environment-builds/{env_build_uuid}")
assert resp.status_code == 200
assert abortable_async_res.is_aborted()
assert celery.revoked_tasks
def test_projectenvironmentmostrecentbuild_get_empty(client):
data = client.get(
"/api/environment-builds/most-recent/proj_uuid/env_uuid"
).get_json()
assert data == {"environment_builds": []}
def test_projectenvironmentmostrecentbuild_get(
client, celery, project, abortable_async_res
):
req = create_env_build_request(project.uuid, 1)
for _ in range(5):
last_uuid = client.post("/api/environment-builds/", json=req).get_json()[
"environment_builds"
][0]["uuid"]
env_uuid = req["environment_build_requests"][0]["environment_uuid"]
data = client.get(
f"/api/environment-builds/most-recent/{project.uuid}/{env_uuid}"
).get_json()
assert data["environment_builds"][0]["uuid"] == last_uuid
def test_projectmostrecentbuildlist_get_empty(client):
data = client.get("/api/environment-builds/most-recent/proj_uuid").get_json()
assert data == {"environment_builds": []}
def test_projectmostrecentbuildlist_get(client, celery, project, abortable_async_res):
req = create_env_build_request(project.uuid, 2)
for _ in range(5):
data = client.post("/api/environment-builds/", json=req).get_json()[
"environment_builds"
]
possible_uuids = [data[0]["uuid"], data[1]["uuid"]]
data = client.get(f"/api/environment-builds/most-recent/{project.uuid}").get_json()[
"environment_builds"
]
assert len(data) == 2
assert data[0]["uuid"] in possible_uuids
assert data[1]["uuid"] in possible_uuids
| null |
5,122 |
# Copyright 2020-2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
from mindspore.common.tensor import Tensor
from mindspore.nn import BatchNorm2d
from mindspore.nn import Cell
from mindspore.ops import composite as C
from mindspore.ops import functional as F
class BatchNormNet(Cell):
def __init__(self, c, weight, bias, moving_mean, moving_var_init):
super(BatchNormNet, self).__init__()
self.bn = BatchNorm2d(c, eps=0.00001, momentum=0.1, beta_init=bias, gamma_init=weight,
moving_mean_init=moving_mean, moving_var_init=moving_var_init)
def construct(self, input_data):
x = self.bn(input_data)
return x
class Grad(Cell):
def __init__(self, network):
super(Grad, self).__init__()
self.grad = C.GradOperation(get_all=True, sens_param=True)
self.network = network
def construct(self, input_data, sens):
gout = self.grad(self.network)(input_data, sens)
return gout
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_train_forward():
x = np.array([[
[[1, 3, 3, 5], [2, 4, 6, 8], [3, 6, 7, 7], [4, 3, 8, 2]],
[[5, 7, 6, 3], [3, 5, 6, 7], [9, 4, 2, 5], [7, 5, 8, 1]]]]).astype(np.float32)
expect_output = np.array([[[[-0.6059, 0.3118, 0.3118, 1.2294],
[-0.1471, 0.7706, 1.6882, 2.6059],
[0.3118, 1.6882, 2.1471, 2.1471],
[0.7706, 0.3118, 2.6059, -0.1471]],
[[0.9119, 1.8518, 1.3819, -0.0281],
[-0.0281, 0.9119, 1.3819, 1.8518],
[2.7918, 0.4419, -0.4981, 0.9119],
[1.8518, 0.9119, 2.3218, -0.9680]]]]).astype(np.float32)
weight = np.ones(2).astype(np.float32)
bias = np.ones(2).astype(np.float32)
moving_mean = np.ones(2).astype(np.float32)
moving_var_init = np.ones(2).astype(np.float32)
error = np.ones(shape=[1, 2, 4, 4]) * 1.0e-4
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
bn_net = BatchNormNet(2, Tensor(weight), Tensor(bias), Tensor(moving_mean), Tensor(moving_var_init))
bn_net.set_train()
output = bn_net(Tensor(x))
diff = output.asnumpy() - expect_output
assert np.all(diff < error)
assert np.all(-diff < error)
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
bn_net = BatchNormNet(2, Tensor(weight), Tensor(bias), Tensor(moving_mean), Tensor(moving_var_init))
bn_net.set_train(False)
output = bn_net(Tensor(x))
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def METHOD_NAME():
x = np.array([[
[[1, 3, 3, 5], [2, 4, 6, 8], [3, 6, 7, 7], [4, 3, 8, 2]],
[[5, 7, 6, 3], [3, 5, 6, 7], [9, 4, 2, 5], [7, 5, 8, 1]]]]).astype(np.float32)
grad = np.array([[
[[1, 2, 7, 1], [4, 2, 1, 3], [1, 6, 5, 2], [2, 4, 3, 2]],
[[9, 4, 3, 5], [1, 3, 7, 6], [5, 7, 9, 9], [1, 4, 6, 8]]]]).astype(np.float32)
expect_output = np.array([[[[-0.69126546, -0.32903028, 1.9651246, -0.88445705],
[0.6369296, -0.37732816, -0.93275493, -0.11168876],
[-0.7878612, 1.3614, 0.8542711, -0.52222186],
[-0.37732816, 0.5886317, -0.11168876, -0.28073236]],
[[1.6447213, -0.38968924, -1.0174079, -0.55067265],
[-2.4305856, -1.1751484, 0.86250514, 0.5502673],
[0.39576983, 0.5470243, 1.1715001, 1.6447213],
[-1.7996241, -0.7051701, 0.7080077, 0.5437813]]]]).astype(np.float32)
weight = Tensor(np.ones(2).astype(np.float32))
bias = Tensor(np.ones(2).astype(np.float32))
moving_mean = Tensor(np.ones(2).astype(np.float32))
moving_var_init = Tensor(np.ones(2).astype(np.float32))
error = np.ones(shape=[1, 2, 4, 4]) * 1.0e-6
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
bn_net = BatchNormNet(2, weight, bias, moving_mean, moving_var_init)
bn_net.set_train()
bn_grad = Grad(bn_net)
output = bn_grad(Tensor(x), Tensor(grad))
diff = output[0].asnumpy() - expect_output
assert np.all(diff < error)
assert np.all(-diff < error)
def test_batch_norm_forward_functional(nptype):
"""
Feature: test batch_norm forward for given input dtype.
Description: test inputs for given input dtype.
Expectation: the result match with expected result.
"""
input_x = Tensor(np.ones([2, 2]).astype(nptype))
running_mean = Tensor(np.ones([2]).astype(nptype))
running_var = Tensor(np.ones([2]).astype(nptype))
weight = Tensor(np.ones([2]).astype(nptype))
bias = Tensor(np.ones([2]).astype(nptype))
output = F.batch_norm(input_x, running_mean, running_var, weight, bias)
expected = np.array([[1., 1.], [1., 1.]]).astype(nptype)
np.testing.assert_array_almost_equal(output.asnumpy(), expected)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_batch_norm_forward_float32_functional():
"""
Feature: test batch_norm forward.
Description: test float32 inputs.
Expectation: the result match with expected result.
"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
test_batch_norm_forward_functional(np.float32)
context.set_context(mode=context.PYNATIVE_MODE, device_target="CPU")
test_batch_norm_forward_functional(np.float32)
if __name__ == '__main__':
test_batch_norm_forward_float32_functional()
| null |
5,123 |
# This file is part of cloud-init. See LICENSE file for license information.
import os
import time
from cloudinit import helpers, util
from cloudinit.net.dhcp import IscDhclient
from cloudinit.sources.DataSourceCloudStack import DataSourceCloudStack
from tests.unittests.helpers import CiTestCase, ExitStack, mock
MOD_PATH = "cloudinit.sources.DataSourceCloudStack"
DS_PATH = MOD_PATH + ".DataSourceCloudStack"
class TestCloudStackPasswordFetching(CiTestCase):
def setUp(self):
super(TestCloudStackPasswordFetching, self).setUp()
self.patches = ExitStack()
self.addCleanup(self.patches.close)
mod_name = MOD_PATH
self.patches.enter_context(mock.patch("{0}.ec2".format(mod_name)))
self.patches.enter_context(mock.patch("{0}.uhelp".format(mod_name)))
default_gw = "192.201.20.0"
get_latest_lease = mock.MagicMock(return_value=None)
self.patches.enter_context(
mock.patch(
mod_name + ".dhcp.IscDhclient.get_latest_lease",
get_latest_lease,
)
)
get_default_gw = mock.MagicMock(return_value=default_gw)
self.patches.enter_context(
mock.patch(mod_name + ".get_default_gateway", get_default_gw)
)
get_networkd_server_address = mock.MagicMock(return_value=None)
self.patches.enter_context(
mock.patch(
mod_name + ".dhcp.networkd_get_option_from_leases",
get_networkd_server_address,
)
)
get_data_server = mock.MagicMock(return_value=None)
self.patches.enter_context(
mock.patch(mod_name + ".get_data_server", get_data_server)
)
self.tmp = self.tmp_dir()
def _set_password_server_response(self, response_string):
subp = mock.MagicMock(return_value=(response_string, ""))
self.patches.enter_context(
mock.patch(
"cloudinit.sources.DataSourceCloudStack.subp.subp", subp
)
)
return subp
def test_empty_password_doesnt_create_config(self):
self._set_password_server_response("")
ds = DataSourceCloudStack(
{}, None, helpers.Paths({"run_dir": self.tmp})
)
ds.get_data()
self.assertEqual({}, ds.get_config_obj())
def test_saved_password_doesnt_create_config(self):
self._set_password_server_response("saved_password")
ds = DataSourceCloudStack(
{}, None, helpers.Paths({"run_dir": self.tmp})
)
ds.get_data()
self.assertEqual({}, ds.get_config_obj())
@mock.patch(DS_PATH + ".wait_for_metadata_service")
def test_password_sets_password(self, m_wait):
m_wait.return_value = True
password = "SekritSquirrel"
self._set_password_server_response(password)
ds = DataSourceCloudStack(
{}, None, helpers.Paths({"run_dir": self.tmp})
)
ds.get_data()
self.assertEqual(password, ds.get_config_obj()["password"])
@mock.patch(DS_PATH + ".wait_for_metadata_service")
def test_bad_request_doesnt_stop_ds_from_working(self, m_wait):
m_wait.return_value = True
self._set_password_server_response("bad_request")
ds = DataSourceCloudStack(
{}, None, helpers.Paths({"run_dir": self.tmp})
)
self.assertTrue(ds.get_data())
def assertRequestTypesSent(self, subp, expected_request_types):
request_types = []
for call in subp.call_args_list:
args = call[0][0]
for arg in args:
if arg.startswith("DomU_Request"):
request_types.append(arg.split()[1])
self.assertEqual(expected_request_types, request_types)
@mock.patch(DS_PATH + ".wait_for_metadata_service")
def test_valid_response_means_password_marked_as_saved(self, m_wait):
m_wait.return_value = True
password = "SekritSquirrel"
subp = self._set_password_server_response(password)
ds = DataSourceCloudStack(
{}, None, helpers.Paths({"run_dir": self.tmp})
)
ds.get_data()
self.assertRequestTypesSent(
subp, ["send_my_password", "saved_password"]
)
def _check_password_not_saved_for(self, response_string):
subp = self._set_password_server_response(response_string)
ds = DataSourceCloudStack(
{}, None, helpers.Paths({"run_dir": self.tmp})
)
with mock.patch(DS_PATH + ".wait_for_metadata_service") as m_wait:
m_wait.return_value = True
ds.get_data()
self.assertRequestTypesSent(subp, ["send_my_password"])
def test_password_not_saved_if_empty(self):
self._check_password_not_saved_for("")
def test_password_not_saved_if_already_saved(self):
self._check_password_not_saved_for("saved_password")
def test_password_not_saved_if_bad_request(self):
self._check_password_not_saved_for("bad_request")
class TestGetLatestLease(CiTestCase):
def METHOD_NAME(self, bdir, files):
"""populate_dir_list([(name, data), (name, data)])
writes files to bdir, and updates timestamps to ensure
that their mtime increases with each file."""
start = int(time.time())
for num, fname in enumerate(reversed(files)):
fpath = os.path.sep.join((bdir, fname))
util.write_file(fpath, fname.encode())
os.utime(fpath, (start - num, start - num))
def _pop_and_test(self, files, expected):
lease_d = self.tmp_dir()
self.METHOD_NAME(lease_d, files)
self.assertEqual(
self.tmp_path(expected, lease_d),
IscDhclient.get_latest_lease(lease_d),
)
def test_skips_dhcpv6_files(self):
"""files started with dhclient6 should be skipped."""
expected = "dhclient.lease"
self._pop_and_test([expected, "dhclient6.lease"], expected)
def test_selects_dhclient_dot_files(self):
"""files named dhclient.lease or dhclient.leases should be used.
Ubuntu names files dhclient.eth0.leases dhclient6.leases and
sometimes dhclient.leases."""
self._pop_and_test(["dhclient.lease"], "dhclient.lease")
self._pop_and_test(["dhclient.leases"], "dhclient.leases")
def test_selects_dhclient_dash_files(self):
"""files named dhclient-lease or dhclient-leases should be used.
Redhat/Centos names files with dhclient--eth0.lease (centos 7) or
dhclient-eth0.leases (centos 6).
"""
self._pop_and_test(["dhclient-eth0.lease"], "dhclient-eth0.lease")
self._pop_and_test(["dhclient--eth0.lease"], "dhclient--eth0.lease")
def test_ignores_by_extension(self):
"""only .lease or .leases file should be considered."""
self._pop_and_test(
[
"dhclient.lease",
"dhclient.lease.bk",
"dhclient.lease-old",
"dhclient.leaselease",
],
"dhclient.lease",
)
def test_selects_newest_matching(self):
"""If multiple files match, the newest written should be used."""
lease_d = self.tmp_dir()
valid_1 = "dhclient.leases"
valid_2 = "dhclient.lease"
valid_1_path = self.tmp_path(valid_1, lease_d)
valid_2_path = self.tmp_path(valid_2, lease_d)
self.METHOD_NAME(lease_d, [valid_1, valid_2])
self.assertEqual(valid_2_path, IscDhclient.get_latest_lease(lease_d))
# now update mtime on valid_2 to be older than valid_1 and re-check.
mtime = int(os.path.getmtime(valid_1_path)) - 1
os.utime(valid_2_path, (mtime, mtime))
self.assertEqual(valid_1_path, IscDhclient.get_latest_lease(lease_d))
| null |
5,124 |
from pathlib import Path
import tarfile
from os import PathLike
from shutil import rmtree
import random
import string
import gzip
import shutil
import tempfile
def pack_tar(filenames, write_filename=None, write_directory=None, remove=False):
"""
Creates TAR file from list of filenames provided. Currently only works with
all files existing in the same directory.
...
Parameters
----------
filenames : str or list
Filenames to be placed in TAR file
write_filename : str, pathlib.Path, None
TAR output filename. If not provided will use file name 'created_tarfile.tar'
write_directory : str, pathlib.Path, None
Path to directory to write TAR file. If the directory does not exist will
be created.
remove : boolean
Delete provided filenames after making TAR file
Returns
-------
    str
        Full path name of the created TAR file.
"""
if write_filename is None:
write_filename = 'created_tarfile.tar'
if isinstance(filenames, (str, PathLike)):
filenames = [filenames]
if write_directory is not None:
write_directory = Path(write_directory)
write_directory.mkdir(parents=True, exist_ok=True)
write_filename = Path(write_filename).name
elif Path(write_filename).parent != Path('.'):
write_directory = Path(write_filename).parent
else:
write_directory = Path('.')
if not str(write_filename).endswith('.tar'):
write_filename = str(write_filename) + '.tar'
write_filename = Path(write_directory, write_filename)
tar_file_handle = tarfile.open(write_filename, "w")
for filename in filenames:
tar_file_handle.add(filename, arcname=Path(filename).name)
tar_file_handle.close()
if remove:
for filename in filenames:
Path(filename).unlink()
return str(write_filename)
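# Illustrative use of pack_tar (a sketch only; the file names below are hypothetical):
#
#     tar_path = pack_tar(['data_a.nc', 'data_b.nc'],
#                         write_filename='daily_bundle',
#                         write_directory='archive')
#     # -> 'archive/daily_bundle.tar' with both files stored at the top level of the TAR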
def METHOD_NAME(tar_files, write_directory=None, temp_dir=False, randomize=True,
return_files=True, remove=False):
"""
Unpacks TAR file contents into provided base directory
...
Parameters
----------
tar_files : str or list
path to TAR file to be unpacked
write_directory : str or pathlib.Path
base path to extract contents of TAR files or create a new randomized directory
to extract contents of TAR file.
temp_dir : boolean
Should a temporary directory be created and TAR files extracted to the new directory.
write_directory and randomize are ignored if this option is used.
randomize : boolean
Create a new randomized directory to extract TAR files into.
return_files : boolean
When set will return a list of full path filenames to the extracted files.
When set to False will return full path to directory containing extracted files.
remove : boolean
Delete provided TAR files after extracting files.
Returns
-------
files : list or str
        List of full path files extracted from the TAR file or full path to directory
containing extracted files.
"""
files = []
if isinstance(tar_files, (str, PathLike)):
tar_files = [tar_files]
out_dir = Path.cwd()
if temp_dir is True:
out_dir = Path(tempfile.TemporaryDirectory().name)
else:
if write_directory is not None:
out_dir = Path(write_directory)
else:
out_dir = Path(Path(tar_files[0]).parent)
if out_dir.is_dir() is False:
out_dir.mkdir(parents=True, exist_ok=True)
if randomize:
out_dir = Path(tempfile.mkdtemp(dir=out_dir))
for tar_file in tar_files:
try:
tar = tarfile.open(tar_file)
tar.extractall(path=out_dir)
result = [str(Path(out_dir, ii.name)) for ii in tar.getmembers()]
files.extend(result)
tar.close()
except tarfile.ReadError:
print(f"\nCould not extract files from {tar_file}")
if return_files is False:
files = str(out_dir)
else:
files.sort()
if remove:
for tar_file in tar_files:
Path(tar_file).unlink()
return files
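# Illustrative round trip (a sketch; assumes the 'archive/daily_bundle.tar' file from the
# pack_tar example above):
#
#     extracted = unpack_tar('archive/daily_bundle.tar', temp_dir=True)
#     # 'extracted' is a sorted list of full paths inside a fresh temporary directory;
#     # pass return_files=False to get the directory path itself instead.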
def cleanup_files(dirname=None, files=None):
"""
Cleans up files and directory possibly created from unpacking TAR files with unpack_tar()
...
Parameters
----------
dirname : str, pathlib.Path, None
Path to directory of extracted files which will be removed.
    files : str, pathlib.Path, list, None
Full path file name(s) from extracted TAR file.
Assumes the directory this file exists in should be removed.
"""
if isinstance(files, (str, PathLike)):
files = [str(files)]
try:
if dirname is not None:
rmtree(dirname)
if files is not None and len(files) > 0 and Path(files[0]).is_file():
out_dir = Path(files[0]).parent
rmtree(out_dir)
except Exception as error:
print("\nError removing files:", error)
def is_gunzip_file(filepath):
"""
Function to test if file is a gunzip file.
Parameters
----------
    filepath : str or pathlib.Path
        Path to the file to test
Returns
-------
test : boolean
Result from testing if file is a gunzip file
"""
try:
with open(str(filepath), 'rb') as test_f:
return test_f.read(2) == b'\x1f\x8b'
except Exception:
return False
def pack_gzip(filename, write_directory=None, remove=False):
"""
Creates a gunzip file from a filename path
...
Parameters
----------
filename : str, pathlib.Path
Filename to use in creation of gunzip version.
    write_directory : str, pathlib.Path, list, None
Path to directory to place newly created gunzip file.
remove : boolean
Remove provided filename after creating gunzip file
Returns
-------
write_filename : str
Full path name of created gunzip file
"""
write_filename = Path(filename).name + '.gz'
if write_directory is not None:
write_filename = Path(write_directory, write_filename)
Path(write_directory).mkdir(parents=True, exist_ok=True)
else:
write_filename = Path(Path(filename).parent, write_filename)
with open(filename, 'rb') as f_in:
with gzip.open(write_filename, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
if remove:
Path(filename).unlink()
return str(write_filename)
def unpack_gzip(filename, write_directory=None, remove=False):
"""
Extracts file from a gunzip file.
...
Parameters
----------
filename : str, pathlib.Path
Filename to use in extraction of gunzip file.
    write_directory : str, pathlib.Path, None
        Path to directory to place the extracted file.
    remove : boolean
        Remove provided gunzip filename after extracting the file
Returns
-------
write_filename : str
        Full path name of the extracted file
"""
if write_directory is None:
write_directory = Path(filename).parent
write_filename = Path(filename).name
if write_filename.endswith('.gz'):
write_filename = write_filename.replace(".gz", "")
write_filename = Path(write_directory, write_filename)
with gzip.open(filename, "rb") as f_in:
with open(write_filename, "wb") as f_out:
shutil.copyfileobj(f_in, f_out)
if remove:
Path(filename).unlink()
return str(write_filename)
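# Minimal gzip round-trip sketch (hypothetical file name):
#
#     gz_path = pack_gzip('daily_bundle.tar', remove=True)   # -> 'daily_bundle.tar.gz'
#     assert is_gunzip_file(gz_path)
#     tar_path = unpack_gzip(gz_path)                         # -> 'daily_bundle.tar' again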
| null |
5,125 |
from __future__ import annotations
import ipaddress
from typing import Any, Optional, TYPE_CHECKING, List, Dict
from ..menu import MenuSelectionType, TextInput
from ..models.network_configuration import NetworkConfiguration, NicType, Nic
from ..networking import list_interfaces
from ..output import FormattedOutput, warn
from ..menu import ListManager, Menu
if TYPE_CHECKING:
_: Any
class ManualNetworkConfig(ListManager):
"""
subclass of ListManager for the managing of network configurations
"""
def __init__(self, prompt: str, preset: List[Nic]):
self._actions = [
str(_('Add interface')),
str(_('Edit interface')),
str(_('Delete interface'))
]
super().__init__(prompt, preset, [self._actions[0]], self._actions[1:])
def reformat(self, data: List[Nic]) -> Dict[str, Optional[Nic]]:
table = FormattedOutput.as_table(data)
rows = table.split('\n')
		# these are the header rows of the table and do not map to any interface entry
# we're adding 2 spaces as prefix because the menu selector '> ' will be put before
# the selectable rows so the header has to be aligned
display_data: Dict[str, Optional[Nic]] = {f' {rows[0]}': None, f' {rows[1]}': None}
for row, iface in zip(rows[2:], data):
row = row.replace('|', '\\|')
display_data[row] = iface
return display_data
def selected_action_display(self, nic: Nic) -> str:
return nic.iface if nic.iface else ''
def handle_action(self, action: str, entry: Optional[Nic], data: List[Nic]):
if action == self._actions[0]: # add
iface = self._select_iface(data)
if iface:
nic = Nic(iface=iface)
nic = self._edit_iface(nic)
data += [nic]
elif entry:
if action == self._actions[1]: # edit interface
data = [d for d in data if d.iface != entry.iface]
data.append(self._edit_iface(entry))
elif action == self._actions[2]: # delete
data = [d for d in data if d != entry]
return data
def _select_iface(self, data: List[Nic]) -> Optional[str]:
all_ifaces = list_interfaces().values()
existing_ifaces = [d.iface for d in data]
available = set(all_ifaces) - set(existing_ifaces)
choice = Menu(str(_('Select interface to add')), list(available), skip=True).run()
if choice.type_ == MenuSelectionType.Skip:
return None
return choice.single_value
def _edit_iface(self, edit_nic: Nic) -> Nic:
iface_name = edit_nic.iface
modes = ['DHCP (auto detect)', 'IP (static)']
default_mode = 'DHCP (auto detect)'
prompt = _('Select which mode to configure for "{}" or skip to use default mode "{}"').format(iface_name, default_mode)
mode = Menu(prompt, modes, default_option=default_mode, skip=False).run()
if mode.value == 'IP (static)':
while 1:
prompt = _('Enter the IP and subnet for {} (example: 192.168.0.5/24): ').format(iface_name)
ip = TextInput(prompt, edit_nic.ip).run().strip()
				# Validate the IP/subnet input (e.g. 192.168.0.5/24)
try:
ipaddress.ip_interface(ip)
break
except ValueError:
warn("You need to enter a valid IP in IP-config mode")
			# Validate the gateway (router) IP address
gateway = None
while 1:
gateway = TextInput(
_('Enter your gateway (router) IP address or leave blank for none: '),
edit_nic.gateway
).run().strip()
try:
if len(gateway) > 0:
ipaddress.ip_address(gateway)
break
except ValueError:
warn("You need to enter a valid gateway (router) IP address")
if edit_nic.dns:
display_dns = ' '.join(edit_nic.dns)
else:
display_dns = None
dns_input = TextInput(_('Enter your DNS servers (space separated, blank for none): '), display_dns).run().strip()
dns = []
if len(dns_input):
dns = dns_input.split(' ')
return Nic(iface=iface_name, ip=ip, gateway=gateway, dns=dns, dhcp=False)
else:
			# DHCP mode: only the interface name is needed
return Nic(iface=iface_name)
def METHOD_NAME(preset: Optional[NetworkConfiguration]) -> Optional[NetworkConfiguration]:
"""
Configure the network on the newly installed system
"""
options = {n.display_msg(): n for n in NicType}
preset_val = preset.type.display_msg() if preset else None
warning = str(_('Are you sure you want to reset this setting?'))
choice = Menu(
_('Select one network interface to configure'),
list(options.keys()),
preset_values=preset_val,
sort=False,
allow_reset=True,
allow_reset_warning_msg=warning
).run()
match choice.type_:
case MenuSelectionType.Skip: return preset
case MenuSelectionType.Reset: return None
case MenuSelectionType.Selection:
nic_type = options[choice.single_value]
match nic_type:
case NicType.ISO:
return NetworkConfiguration(NicType.ISO)
case NicType.NM:
return NetworkConfiguration(NicType.NM)
case NicType.MANUAL:
preset_nics = preset.nics if preset else []
nics = ManualNetworkConfig('Configure interfaces', preset_nics).run()
if nics:
return NetworkConfiguration(NicType.MANUAL, nics)
return preset
| null |
5,126 |
"""Functionality related with node paths in a PyTables file.
Variables
=========
`__docformat__`
The format of documentation strings in this module.
"""
import re
import warnings
import keyword
from .exceptions import NaturalNameWarning
__docformat__ = 'reStructuredText'
"""The format of documentation strings in this module."""
_python_id_re = re.compile('^[a-zA-Z_][a-zA-Z0-9_]*$')
"""Python identifier regular expression."""
_reserved_id_re = re.compile('^_[cfgv]_')
"""PyTables reserved identifier regular expression.
- c: class variables
- f: class public methods
- g: class private methods
- v: instance variables
"""
_hidden_name_re = re.compile('^_[pi]_')
"""Nodes with a name *matching* this expression are considered hidden.
For instance, ``name`` would be visible while ``_i_name`` would not.
"""
_hidden_path_re = re.compile('/_[pi]_')
"""Nodes with a path *containing* this expression are considered hidden.
For instance, a node with a pathname like ``/a/b/c`` would be visible
while nodes with pathnames like ``/a/c/_i_x`` or ``/a/_p_x/y`` would
not.
"""
_warnInfo = (
"you will not be able to use natural naming to access this object; "
"using ``getattr()`` will still work, though")
"""Warning printed when a name will not be reachable through natural naming"""
def check_attribute_name(name):
"""Check the validity of the `name` of an attribute in AttributeSet.
If the name is not valid, a ``ValueError`` is raised. If it is
valid but it can not be used with natural naming, a
`NaturalNameWarning` is issued.
>>> warnings.simplefilter("ignore")
>>> check_attribute_name('a')
>>> check_attribute_name('a_b')
>>> check_attribute_name('a:b') # NaturalNameWarning
>>> check_attribute_name('/a/b') # NaturalNameWarning
>>> check_attribute_name('/') # NaturalNameWarning
>>> check_attribute_name('.') # NaturalNameWarning
>>> check_attribute_name('__members__')
Traceback (most recent call last):
...
ValueError: ``__members__`` is not allowed as an object name
>>> check_attribute_name(1)
Traceback (most recent call last):
...
TypeError: object name is not a string: 1
>>> check_attribute_name('')
Traceback (most recent call last):
...
ValueError: the empty string is not allowed as an object name
"""
if not isinstance(name, str): # Python >= 2.3
raise TypeError(f"object name is not a string: {name!r}")
if name == '':
raise ValueError("the empty string is not allowed as an object name")
# Check whether `name` is a valid Python identifier.
if not _python_id_re.match(name):
warnings.warn("object name is not a valid Python identifier: %r; "
"it does not match the pattern ``%s``; %s"
% (name, _python_id_re.pattern, _warnInfo),
NaturalNameWarning, stacklevel=2)
return
# However, Python identifiers and keywords have the same form.
if keyword.iskeyword(name):
warnings.warn("object name is a Python keyword: %r; %s"
% (name, _warnInfo), NaturalNameWarning, stacklevel=2)
return
# Still, names starting with reserved prefixes are not allowed.
if _reserved_id_re.match(name):
raise ValueError("object name starts with a reserved prefix: %r; "
"it matches the pattern ``%s``"
% (name, _reserved_id_re.pattern))
# ``__members__`` is the only exception to that rule.
if name == '__members__':
raise ValueError("``__members__`` is not allowed as an object name")
def METHOD_NAME(name):
"""Check the validity of the `name` of a Node object, which more limited
than attribute names.
If the name is not valid, a ``ValueError`` is raised. If it is
valid but it can not be used with natural naming, a
`NaturalNameWarning` is issued.
>>> warnings.simplefilter("ignore")
>>> check_name_validity('a')
>>> check_name_validity('a_b')
>>> check_name_validity('a:b') # NaturalNameWarning
>>> check_name_validity('/a/b')
Traceback (most recent call last):
...
ValueError: the ``/`` character is not allowed in object names: '/a/b'
>>> check_name_validity('.')
Traceback (most recent call last):
...
ValueError: ``.`` is not allowed as an object name
>>> check_name_validity('')
Traceback (most recent call last):
...
ValueError: the empty string is not allowed as an object name
"""
check_attribute_name(name)
# Check whether `name` is a valid HDF5 name.
# http://hdfgroup.org/HDF5/doc/UG/03_Model.html#Structure
if name == '.':
raise ValueError("``.`` is not allowed as an object name")
elif '/' in name:
raise ValueError("the ``/`` character is not allowed "
"in object names: %r" % name)
def join_path(parentpath, name):
"""Join a *canonical* `parentpath` with a *non-empty* `name`.
.. versionchanged:: 3.0
The *parentPath* parameter has been renamed into *parentpath*.
>>> join_path('/', 'foo')
'/foo'
>>> join_path('/foo', 'bar')
'/foo/bar'
>>> join_path('/foo', '/foo2/bar')
'/foo/foo2/bar'
>>> join_path('/foo', '/')
'/foo'
"""
if name.startswith('./'): # Support relative paths (mainly for links)
name = name[2:]
if parentpath == '/' and name.startswith('/'):
pstr = '%s' % name
elif parentpath == '/' or name.startswith('/'):
pstr = f'{parentpath}{name}'
else:
pstr = f'{parentpath}/{name}'
if pstr.endswith('/'):
pstr = pstr[:-1]
return pstr
def split_path(path):
"""Split a *canonical* `path` into a parent path and a node name.
The result is returned as a tuple. The parent path does not
include a trailing slash.
>>> split_path('/')
('/', '')
>>> split_path('/foo/bar')
('/foo', 'bar')
"""
lastslash = path.rfind('/')
ppath = path[:lastslash]
name = path[lastslash + 1:]
if ppath == '':
ppath = '/'
return (ppath, name)
def isvisiblename(name):
"""Does this `name` make the named node a visible one?"""
return _hidden_name_re.match(name) is None
def isvisiblepath(path):
"""Does this `path` make the named node a visible one?"""
return _hidden_path_re.search(path) is None
def _test():
"""Run ``doctest`` on this module."""
import doctest
doctest.testmod()
if __name__ == '__main__':
_test()
| null |
5,127 |
from sympy.testing.pytest import raises
from sympy.polys.polymatrix import PolyMatrix
from sympy.polys import Poly
from sympy.core.singleton import S
from sympy.matrices.dense import Matrix
from sympy.polys.domains.integerring import ZZ
from sympy.polys.domains.rationalfield import QQ
from sympy.abc import x, y
def _test_polymatrix():
pm1 = PolyMatrix([[Poly(x**2, x), Poly(-x, x)], [Poly(x**3, x), Poly(-1 + x, x)]])
v1 = PolyMatrix([[1, 0], [-1, 0]], ring='ZZ[x]')
m1 = PolyMatrix([[1, 0], [-1, 0]], ring='ZZ[x]')
A = PolyMatrix([[Poly(x**2 + x, x), Poly(0, x)], \
[Poly(x**3 - x + 1, x), Poly(0, x)]])
B = PolyMatrix([[Poly(x**2, x), Poly(-x, x)], [Poly(-x**2, x), Poly(x, x)]])
assert A.ring == ZZ[x]
assert isinstance(pm1*v1, PolyMatrix)
assert pm1*v1 == A
assert pm1*m1 == A
assert v1*pm1 == B
pm2 = PolyMatrix([[Poly(x**2, x, domain='QQ'), Poly(0, x, domain='QQ'), Poly(-x**2, x, domain='QQ'), \
Poly(x**3, x, domain='QQ'), Poly(0, x, domain='QQ'), Poly(-x**3, x, domain='QQ')]])
assert pm2.ring == QQ[x]
v2 = PolyMatrix([1, 0, 0, 0, 0, 0], ring='ZZ[x]')
m2 = PolyMatrix([1, 0, 0, 0, 0, 0], ring='ZZ[x]')
C = PolyMatrix([[Poly(x**2, x, domain='QQ')]])
assert pm2*v2 == C
assert pm2*m2 == C
pm3 = PolyMatrix([[Poly(x**2, x), S.One]], ring='ZZ[x]')
v3 = S.Half*pm3
assert v3 == PolyMatrix([[Poly(S.Half*x**2, x, domain='QQ'), S.Half]], ring='QQ[x]')
assert pm3*S.Half == v3
assert v3.ring == QQ[x]
pm4 = PolyMatrix([[Poly(x**2, x, domain='ZZ'), Poly(-x**2, x, domain='ZZ')]])
v4 = PolyMatrix([1, -1], ring='ZZ[x]')
assert pm4*v4 == PolyMatrix([[Poly(2*x**2, x, domain='ZZ')]])
assert len(PolyMatrix(ring=ZZ[x])) == 0
assert PolyMatrix([1, 0, 0, 1], x)/(-1) == PolyMatrix([-1, 0, 0, -1], x)
def test_polymatrix_constructor():
M1 = PolyMatrix([[x, y]], ring=QQ[x,y])
assert M1.ring == QQ[x,y]
assert M1.domain == QQ
assert M1.gens == (x, y)
assert M1.shape == (1, 2)
assert M1.rows == 1
assert M1.cols == 2
assert len(M1) == 2
assert list(M1) == [Poly(x, (x, y), domain=QQ), Poly(y, (x, y), domain=QQ)]
M2 = PolyMatrix([[x, y]], ring=QQ[x][y])
assert M2.ring == QQ[x][y]
assert M2.domain == QQ[x]
assert M2.gens == (y,)
assert M2.shape == (1, 2)
assert M2.rows == 1
assert M2.cols == 2
assert len(M2) == 2
assert list(M2) == [Poly(x, (y,), domain=QQ[x]), Poly(y, (y,), domain=QQ[x])]
assert PolyMatrix([[x, y]], y) == PolyMatrix([[x, y]], ring=ZZ.frac_field(x)[y])
assert PolyMatrix([[x, y]], ring='ZZ[x,y]') == PolyMatrix([[x, y]], ring=ZZ[x,y])
assert PolyMatrix([[x, y]], (x, y)) == PolyMatrix([[x, y]], ring=QQ[x,y])
assert PolyMatrix([[x, y]], x, y) == PolyMatrix([[x, y]], ring=QQ[x,y])
assert PolyMatrix([x, y]) == PolyMatrix([[x], [y]], ring=QQ[x,y])
assert PolyMatrix(1, 2, [x, y]) == PolyMatrix([[x, y]], ring=QQ[x,y])
assert PolyMatrix(1, 2, lambda i,j: [x,y][j]) == PolyMatrix([[x, y]], ring=QQ[x,y])
assert PolyMatrix(0, 2, [], x, y).shape == (0, 2)
assert PolyMatrix(2, 0, [], x, y).shape == (2, 0)
assert PolyMatrix([[], []], x, y).shape == (2, 0)
assert PolyMatrix(ring=QQ[x,y]) == PolyMatrix(0, 0, [], ring=QQ[x,y]) == PolyMatrix([], ring=QQ[x,y])
raises(TypeError, lambda: PolyMatrix())
raises(TypeError, lambda: PolyMatrix(1))
assert PolyMatrix([Poly(x), Poly(y)]) == PolyMatrix([[x], [y]], ring=ZZ[x,y])
# XXX: Maybe a bug in parallel_poly_from_expr (x lost from gens and domain):
assert PolyMatrix([Poly(y, x), 1]) == PolyMatrix([[y], [1]], ring=QQ[y])
def METHOD_NAME():
assert (PolyMatrix([x]) == PolyMatrix([x])) is True
assert (PolyMatrix([y]) == PolyMatrix([x])) is False
assert (PolyMatrix([x]) != PolyMatrix([x])) is False
assert (PolyMatrix([y]) != PolyMatrix([x])) is True
assert PolyMatrix([[x, y]]) != PolyMatrix([x, y]) == PolyMatrix([[x], [y]])
assert PolyMatrix([x], ring=QQ[x]) != PolyMatrix([x], ring=ZZ[x])
assert PolyMatrix([x]) != Matrix([x])
assert PolyMatrix([x]).to_Matrix() == Matrix([x])
assert PolyMatrix([1], x) == PolyMatrix([1], x)
assert PolyMatrix([1], x) != PolyMatrix([1], y)
def test_polymatrix_from_Matrix():
assert PolyMatrix.from_Matrix(Matrix([1, 2]), x) == PolyMatrix([1, 2], x, ring=QQ[x])
assert PolyMatrix.from_Matrix(Matrix([1]), ring=QQ[x]) == PolyMatrix([1], x)
pmx = PolyMatrix([1, 2], x)
pmy = PolyMatrix([1, 2], y)
assert pmx != pmy
assert pmx.set_gens(y) == pmy
def test_polymatrix_repr():
assert repr(PolyMatrix([[1, 2]], x)) == 'PolyMatrix([[1, 2]], ring=QQ[x])'
assert repr(PolyMatrix(0, 2, [], x)) == 'PolyMatrix(0, 2, [], ring=QQ[x])'
def test_polymatrix_getitem():
M = PolyMatrix([[1, 2], [3, 4]], x)
assert M[:, :] == M
assert M[0, :] == PolyMatrix([[1, 2]], x)
assert M[:, 0] == PolyMatrix([1, 3], x)
assert M[0, 0] == Poly(1, x, domain=QQ)
assert M[0] == Poly(1, x, domain=QQ)
assert M[:2] == [Poly(1, x, domain=QQ), Poly(2, x, domain=QQ)]
def test_polymatrix_arithmetic():
M = PolyMatrix([[1, 2], [3, 4]], x)
assert M + M == PolyMatrix([[2, 4], [6, 8]], x)
assert M - M == PolyMatrix([[0, 0], [0, 0]], x)
assert -M == PolyMatrix([[-1, -2], [-3, -4]], x)
raises(TypeError, lambda: M + 1)
raises(TypeError, lambda: M - 1)
raises(TypeError, lambda: 1 + M)
raises(TypeError, lambda: 1 - M)
assert M * M == PolyMatrix([[7, 10], [15, 22]], x)
assert 2 * M == PolyMatrix([[2, 4], [6, 8]], x)
assert M * 2 == PolyMatrix([[2, 4], [6, 8]], x)
assert S(2) * M == PolyMatrix([[2, 4], [6, 8]], x)
assert M * S(2) == PolyMatrix([[2, 4], [6, 8]], x)
raises(TypeError, lambda: [] * M)
raises(TypeError, lambda: M * [])
M2 = PolyMatrix([[1, 2]], ring=ZZ[x])
assert S.Half * M2 == PolyMatrix([[S.Half, 1]], ring=QQ[x])
assert M2 * S.Half == PolyMatrix([[S.Half, 1]], ring=QQ[x])
assert M / 2 == PolyMatrix([[S(1)/2, 1], [S(3)/2, 2]], x)
assert M / Poly(2, x) == PolyMatrix([[S(1)/2, 1], [S(3)/2, 2]], x)
raises(TypeError, lambda: M / [])
def test_polymatrix_manipulations():
M1 = PolyMatrix([[1, 2], [3, 4]], x)
assert M1.transpose() == PolyMatrix([[1, 3], [2, 4]], x)
M2 = PolyMatrix([[5, 6], [7, 8]], x)
assert M1.row_join(M2) == PolyMatrix([[1, 2, 5, 6], [3, 4, 7, 8]], x)
assert M1.col_join(M2) == PolyMatrix([[1, 2], [3, 4], [5, 6], [7, 8]], x)
assert M1.applyfunc(lambda e: 2*e) == PolyMatrix([[2, 4], [6, 8]], x)
def test_polymatrix_ones_zeros():
assert PolyMatrix.zeros(1, 2, x) == PolyMatrix([[0, 0]], x)
assert PolyMatrix.eye(2, x) == PolyMatrix([[1, 0], [0, 1]], x)
def test_polymatrix_rref():
M = PolyMatrix([[1, 2], [3, 4]], x)
assert M.rref() == (PolyMatrix.eye(2, x), (0, 1))
raises(ValueError, lambda: PolyMatrix([1, 2], ring=ZZ[x]).rref())
raises(ValueError, lambda: PolyMatrix([1, x], ring=QQ[x]).rref())
def test_polymatrix_nullspace():
M = PolyMatrix([[1, 2], [3, 6]], x)
assert M.nullspace() == [PolyMatrix([-2, 1], x)]
raises(ValueError, lambda: PolyMatrix([1, 2], ring=ZZ[x]).nullspace())
raises(ValueError, lambda: PolyMatrix([1, x], ring=QQ[x]).nullspace())
assert M.rank() == 1
| null |
5,128 |
# stdlib
from functools import wraps
import re
import sys
from lxml import etree
from ncclient.operations.rpc import RPCError
from ncclient.xml_ import NCElement
from jnpr.junos import jxml as JXML
def timeoutDecorator(function):
@wraps(function)
def wrapper(*args, **kwargs):
if "dev_timeout" in kwargs:
try:
dev = args[0].dev
except:
dev = args[0]
restore_timeout = dev.timeout
dev.timeout = kwargs.pop("dev_timeout", None)
try:
result = function(*args, **kwargs)
dev.timeout = restore_timeout
return result
except Exception:
dev.timeout = restore_timeout
raise
else:
try:
return function(*args, **kwargs)
except Exception:
raise
return wrapper
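# Illustrative effect of timeoutDecorator (a sketch; the RPC below is only an example):
#
#     dev.rpc.get_config(dev_timeout=120)
#
# temporarily sets dev.timeout to 120 seconds for this single call and restores the
# previous value afterwards, even if the call raises.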
def METHOD_NAME(function):
@wraps(function)
def wrapper(*args, **kwargs):
if "normalize" in kwargs:
normalize = kwargs.pop("normalize", None)
try:
dev = args[0].dev
except:
dev = args[0]
if dev._normalize != normalize:
restore_transform = dev.transform
if normalize is False:
try:
dev.transform = dev._nc_transform
result = function(*args, **kwargs)
dev.transform = restore_transform
return result
except Exception:
dev.transform = restore_transform
raise
else:
try:
dev.transform = dev._norm_transform
result = function(*args, **kwargs)
dev.transform = restore_transform
return result
except Exception:
dev.transform = restore_transform
raise
else:
try:
return function(*args, **kwargs)
except Exception:
raise
else:
try:
return function(*args, **kwargs)
except Exception:
raise
return wrapper
def ignoreWarnDecorator(function):
"""
Ignore warnings if all <rpc-error> elements are at severity 'warning' and
match one of the values of the ignore_warning argument.
For example::
dev.rpc.get(ignore_warning=True)
dev.rpc.get(ignore_warning='vrrp subsystem not running')
dev.rpc.get(ignore_warning=['vrrp subsystem not running',
'statement not found'])
cu.load(cnf, ignore_warning='statement not found')
    :ignore_warning: A boolean, string or list of strings.
        If the value is True, it will ignore all warnings regardless of the
warning message. If the value is a string, it will ignore warning(s) if
the message of each warning matches the string. If the value is a list
of strings, ignore warning(s) if the message of each warning matches at
least one of the strings in the list.
.. note::
When the value of ignore_warning is a string, or list of strings,
the string is actually used as a case-insensitive regular
expression pattern. If the string contains only alpha-numeric
characters, as shown in the above examples, this results in a
case-insensitive substring match. However, any regular expression
pattern supported by the re library may be used for more
complicated match conditions.
"""
@wraps(function)
def wrapper(self, *args, **kwargs):
ignore_warning = kwargs.pop("ignore_warning", False)
rsp = None
try:
rsp = function(self, *args, **kwargs)
except RPCError as ex:
if hasattr(ex, "xml") and ignore_warning:
if hasattr(ex, "errors"):
errors = ex.errors
else:
errors = [ex]
for err in errors:
if err.severity == "warning":
if (
sys.version < "3"
and isinstance(ignore_warning, (str, unicode))
) or (sys.version >= "3" and isinstance(ignore_warning, str)):
if not re.search(ignore_warning, err.message, re.I):
# Message did not match.
raise ex
elif isinstance(ignore_warning, list):
for warn_msg in ignore_warning:
if re.search(warn_msg, err.message, re.I):
# Warning matches.
# Break skips else.
break
else:
# Message didn't match any of the
# ignore_warn pattern values.
raise ex
else:
# Not a warning (probably an error).
raise ex
# Every err was a warning that matched ignore_warning.
# Prepare the response which will get returned.
# ex.xml contains the raw xml response which was
# received, but might be rooted at an <rpc-error> element.
# Set rsp to the root <rpc-reply> element.
rsp = ex.xml.getroottree().getroot()
# 1) A normal response has been run through the XSLT
# transformation, but ex.xml has not. Do that now.
encode = None if sys.version < "3" else "unicode"
rsp = NCElement(
etree.tostring(rsp, encoding=encode), self.transform()
)._NCElement__doc
# 2) Now remove all of the <rpc-error> elements from
# the response. We've already confirmed they are
# all warnings
rsp = etree.fromstring(str(JXML.strip_rpc_error_transform(rsp)))
else:
# ignore_warning was false, or an RPCError which doesn't have
# an XML attribute. Raise it up for the caller to deal with.
raise ex
return rsp
return wrapper
def checkSAXParserDecorator(function):
@wraps(function)
def wrapper(*args, **kwargs):
# args[0] is self
use_filter = kwargs.pop("use_filter", args[0]._use_filter)
restore_value = args[0]._use_filter
args[0]._use_filter = use_filter
try:
if args[0].D != None:
func = args[0].D.transform
result = function(*args, **kwargs)
args[0]._use_filter = restore_value
if args[0].D != None:
args[0].D.transform = func
return result
except Exception:
args[0]._use_filter = restore_value
raise
return wrapper
| null |
5,129 |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops.operations import _grad_ops as G
from mindspore.ops import operations as P
class Net(nn.Cell):
def __init__(self, reduction):
super(Net, self).__init__()
self.loss = P.NLLLoss(reduction=reduction)
def construct(self, predict, target, weight):
return self.loss(predict, target, weight)
class NLLLossGradNet(nn.Cell):
def __init__(self, reduction):
super(NLLLossGradNet, self).__init__()
self.grad = G.NLLLossGrad(reduction=reduction)
def construct(self, x, dout_x, target, weight, total_weight):
gout = self.grad(x, dout_x, target, weight, total_weight)
return gout
def nll_loss_template(nptype_input, nptype_weight, reduction):
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
nll_loss_net = Net(reduction)
predict = Tensor(
np.array([[0.53, 0.74, -2.12], [1.29, -0.34, -1.13]]).astype(nptype_input))
target = Tensor(np.array([0, 1]).astype(np.int32))
weight = Tensor(np.array([0.45, -0.32, 1.21]).astype(nptype_weight))
loss, total_weight = nll_loss_net(predict, target, weight)
loss_np = loss.asnumpy()
total_weight_np = total_weight.asnumpy()
expected_tot_weight = np.array(0.129999995)
if reduction == 'none':
expected_loss = np.array([-0.238499984, -0.108800001])
elif reduction == 'mean':
expected_loss = np.array(-2.67153859)
elif reduction == 'sum':
expected_loss = np.array(-0.347299993)
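    # Hand check of the 'none' case: loss_i = -weight[target_i] * predict[i, target_i], so
    # loss_0 = -0.45 * 0.53 = -0.2385 and loss_1 = -(-0.32) * (-0.34) = -0.1088, while
    # total_weight = 0.45 + (-0.32) = 0.13; 'sum' adds the two and 'mean' divides by 0.13.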
if nptype_input == np.float32 and nptype_weight == np.float32:
ertol_loss = 1e-06
elif nptype_input == np.float16 or nptype_weight == np.float16:
ertol_loss = 1e-03
if nptype_weight == np.float32:
ertol_weight = 1e-06
elif nptype_weight == np.float16:
ertol_weight = 1e-03
np.testing.assert_allclose(loss_np, expected_loss, ertol_loss)
np.testing.assert_allclose(
total_weight_np, expected_tot_weight, ertol_weight)
def nll_loss_grad_template(nptype_input, nptype_weight, reduction):
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
nll_loss_grad_net = NLLLossGradNet(reduction)
x = Tensor(
np.array([[0.53, 0.74, -2.12], [1.29, -0.34, -1.13]]).astype(nptype_input))
if reduction == "none":
dloss = Tensor(
np.array([3.24, -2.13]).astype(nptype_input))
else:
dloss = Tensor(np.array(1.23).astype(nptype_input))
target = Tensor(np.array([0, 1]).astype(np.int32))
weight = Tensor(np.array([0.45, -0.32, 1.21]).astype(nptype_weight))
total_weight = Tensor(np.array(0.13).astype(nptype_weight))
dx = nll_loss_grad_net(x, dloss, target, weight, total_weight)
dx_np = dx.asnumpy()
print(dx)
if reduction == "none":
dx_expected = np.array([[-1.45799994, 0, 0], [0, -0.681600034, 0]])
elif reduction == "mean":
dx_expected = np.array([[-4.25769234, 0, 0], [0, 3.02769232, 0]])
else:
dx_expected = np.array([[-0.553499997, 0, 0], [0, 0.393599987, 0]])
if nptype_input == np.float32 and nptype_weight == np.float32:
ertol_loss = 1e-06
else:
ertol_loss = 1e-02
np.testing.assert_allclose(dx_np, dx_expected, ertol_loss)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_nll_loss_no_reduction():
# Four combinations of fp32 and fp16 inputs and weights
nll_loss_template(np.float32, np.float32, "none")
nll_loss_template(np.float32, np.float16, "none")
nll_loss_template(np.float16, np.float32, "none")
nll_loss_template(np.float16, np.float16, "none")
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_nll_loss_mean_reduction():
# Four combinations of fp32 and fp16 inputs and weights
nll_loss_template(np.float32, np.float32, "mean")
nll_loss_template(np.float32, np.float16, "mean")
nll_loss_template(np.float16, np.float32, "mean")
nll_loss_template(np.float16, np.float16, "mean")
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_nll_loss_sum_reduction():
# Four combinations of fp32 and fp16 inputs and weights
nll_loss_template(np.float32, np.float32, "sum")
nll_loss_template(np.float32, np.float16, "sum")
nll_loss_template(np.float16, np.float32, "sum")
nll_loss_template(np.float16, np.float16, "sum")
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def METHOD_NAME():
# Four combinations of fp32 and fp16 inputs and weights
nll_loss_grad_template(np.float32, np.float32, "mean")
nll_loss_grad_template(np.float32, np.float16, "mean")
nll_loss_grad_template(np.float16, np.float32, "mean")
nll_loss_grad_template(np.float16, np.float16, "mean")
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_nll_loss_grad_sum_reduction():
# Four combinations of fp32 and fp16 inputs and weights
nll_loss_grad_template(np.float32, np.float32, "sum")
nll_loss_grad_template(np.float32, np.float16, "sum")
nll_loss_grad_template(np.float16, np.float32, "sum")
nll_loss_grad_template(np.float16, np.float16, "sum")
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_nll_loss_grad_no_reduction():
# Four combinations of fp32 and fp16 inputs and weights
nll_loss_grad_template(np.float32, np.float32, "none")
nll_loss_grad_template(np.float32, np.float16, "none")
nll_loss_grad_template(np.float16, np.float32, "none")
nll_loss_grad_template(np.float16, np.float16, "none")
| null |
5,130 |
from __future__ import annotations
import math
import torch
import torch.nn.functional as F
from kornia.color import rgb_to_grayscale
from kornia.core import Module, Tensor
from kornia.core.check import KORNIA_CHECK, KORNIA_CHECK_IS_TENSOR, KORNIA_CHECK_SHAPE
from .gaussian import gaussian_blur2d
from .kernels import get_canny_nms_kernel, get_hysteresis_kernel
from .sobel import spatial_gradient
def canny(
input: Tensor,
low_threshold: float = 0.1,
high_threshold: float = 0.2,
kernel_size: tuple[int, int] | int = (5, 5),
sigma: tuple[float, float] | Tensor = (1, 1),
hysteresis: bool = True,
eps: float = 1e-6,
) -> tuple[Tensor, Tensor]:
r"""Find edges of the input image and filters them using the Canny algorithm.
.. image:: _static/img/canny.png
Args:
input: input image tensor with shape :math:`(B,C,H,W)`.
low_threshold: lower threshold for the hysteresis procedure.
high_threshold: upper threshold for the hysteresis procedure.
kernel_size: the size of the kernel for the gaussian blur.
sigma: the standard deviation of the kernel for the gaussian blur.
hysteresis: if True, applies the hysteresis edge tracking.
Otherwise, the edges are divided between weak (0.5) and strong (1) edges.
eps: regularization number to avoid NaN during backprop.
Returns:
- the canny edge magnitudes map, shape of :math:`(B,1,H,W)`.
- the canny edge detection filtered by thresholds and hysteresis, shape of :math:`(B,1,H,W)`.
.. note::
See a working example `here <https://kornia.github.io/tutorials/nbs/canny.html>`__.
Example:
>>> input = torch.rand(5, 3, 4, 4)
>>> magnitude, edges = canny(input) # 5x3x4x4
>>> magnitude.shape
torch.Size([5, 1, 4, 4])
>>> edges.shape
torch.Size([5, 1, 4, 4])
"""
KORNIA_CHECK_IS_TENSOR(input)
KORNIA_CHECK_SHAPE(input, ['B', 'C', 'H', 'W'])
KORNIA_CHECK(
low_threshold <= high_threshold,
"Invalid input thresholds. low_threshold should be smaller than the high_threshold. Got: "
f"{low_threshold}>{high_threshold}",
)
KORNIA_CHECK(0 < low_threshold < 1, f'Invalid low threshold. Should be in range (0, 1). Got: {low_threshold}')
KORNIA_CHECK(0 < high_threshold < 1, f'Invalid high threshold. Should be in range (0, 1). Got: {high_threshold}')
device = input.device
dtype = input.dtype
# To Grayscale
if input.shape[1] == 3:
input = rgb_to_grayscale(input)
# Gaussian filter
blurred: Tensor = gaussian_blur2d(input, kernel_size, sigma)
# Compute the gradients
gradients: Tensor = spatial_gradient(blurred, normalized=False)
# Unpack the edges
gx: Tensor = gradients[:, :, 0]
gy: Tensor = gradients[:, :, 1]
# Compute gradient magnitude and angle
magnitude: Tensor = torch.sqrt(gx * gx + gy * gy + eps)
angle: Tensor = torch.atan2(gy, gx)
# Radians to Degrees
angle = 180.0 * angle / math.pi
# Round angle to the nearest 45 degree
angle = torch.round(angle / 45) * 45
# Non-maximal suppression
nms_kernels: Tensor = get_canny_nms_kernel(device, dtype)
nms_magnitude: Tensor = F.conv2d(magnitude, nms_kernels, padding=nms_kernels.shape[-1] // 2)
# Get the indices for both directions
positive_idx: Tensor = (angle / 45) % 8
positive_idx = positive_idx.long()
negative_idx: Tensor = ((angle / 45) + 4) % 8
negative_idx = negative_idx.long()
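    # Since angles were snapped to multiples of 45 degrees, (angle / 45) % 8 maps every pixel
    # to one of 8 discrete directions, and the opposite direction is simply offset by 4
    # (i.e. +180 degrees); the two indices pick the matching NMS kernel responses per pixel.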
# Apply the non-maximum suppression to the different directions
channel_select_filtered_positive: Tensor = torch.gather(nms_magnitude, 1, positive_idx)
channel_select_filtered_negative: Tensor = torch.gather(nms_magnitude, 1, negative_idx)
channel_select_filtered: Tensor = torch.stack(
[channel_select_filtered_positive, channel_select_filtered_negative], 1
)
is_max: Tensor = channel_select_filtered.min(dim=1)[0] > 0.0
magnitude = magnitude * is_max
# Threshold
edges: Tensor = F.threshold(magnitude, low_threshold, 0.0)
low: Tensor = magnitude > low_threshold
high: Tensor = magnitude > high_threshold
edges = low * 0.5 + high * 0.5
edges = edges.to(dtype)
# Hysteresis
if hysteresis:
edges_old: Tensor = -torch.ones(edges.shape, device=edges.device, dtype=dtype)
hysteresis_kernels: Tensor = get_hysteresis_kernel(device, dtype)
while ((edges_old - edges).abs() != 0).any():
weak: Tensor = (edges == 0.5).float()
strong: Tensor = (edges == 1).float()
hysteresis_magnitude: Tensor = F.conv2d(
edges, hysteresis_kernels, padding=hysteresis_kernels.shape[-1] // 2
)
hysteresis_magnitude = (hysteresis_magnitude == 1).any(1, keepdim=True).to(dtype)
hysteresis_magnitude = hysteresis_magnitude * weak + strong
edges_old = edges.clone()
edges = hysteresis_magnitude + (hysteresis_magnitude == 0) * weak * 0.5
edges = hysteresis_magnitude
return magnitude, edges
class Canny(Module):
r"""Module that finds edges of the input image and filters them using the Canny algorithm.
Args:
input: input image tensor with shape :math:`(B,C,H,W)`.
low_threshold: lower threshold for the hysteresis procedure.
high_threshold: upper threshold for the hysteresis procedure.
kernel_size: the size of the kernel for the gaussian blur.
sigma: the standard deviation of the kernel for the gaussian blur.
hysteresis: if True, applies the hysteresis edge tracking.
Otherwise, the edges are divided between weak (0.5) and strong (1) edges.
eps: regularization number to avoid NaN during backprop.
Returns:
- the canny edge magnitudes map, shape of :math:`(B,1,H,W)`.
- the canny edge detection filtered by thresholds and hysteresis, shape of :math:`(B,1,H,W)`.
Example:
>>> input = torch.rand(5, 3, 4, 4)
>>> magnitude, edges = Canny()(input) # 5x3x4x4
>>> magnitude.shape
torch.Size([5, 1, 4, 4])
>>> edges.shape
torch.Size([5, 1, 4, 4])
"""
def __init__(
self,
low_threshold: float = 0.1,
high_threshold: float = 0.2,
kernel_size: tuple[int, int] | int = (5, 5),
sigma: tuple[float, float] | Tensor = (1, 1),
hysteresis: bool = True,
eps: float = 1e-6,
) -> None:
super().__init__()
KORNIA_CHECK(
low_threshold <= high_threshold,
"Invalid input thresholds. low_threshold should be smaller than the high_threshold. Got: "
f"{low_threshold}>{high_threshold}",
)
KORNIA_CHECK(0 < low_threshold < 1, f'Invalid low threshold. Should be in range (0, 1). Got: {low_threshold}')
KORNIA_CHECK(
0 < high_threshold < 1, f'Invalid high threshold. Should be in range (0, 1). Got: {high_threshold}'
)
# Gaussian blur parameters
self.kernel_size = kernel_size
self.sigma = sigma
# Double threshold
self.low_threshold = low_threshold
self.high_threshold = high_threshold
# Hysteresis
self.hysteresis = hysteresis
self.eps: float = eps
def __repr__(self) -> str:
return ''.join(
(
f'{type(self).__name__}(',
', '.join(
f'{name}={getattr(self, name)}' for name in sorted(self.__dict__) if not name.startswith('_')
),
')',
)
)
def METHOD_NAME(self, input: Tensor) -> tuple[Tensor, Tensor]:
return canny(
input, self.low_threshold, self.high_threshold, self.kernel_size, self.sigma, self.hysteresis, self.eps
)
| null |
5,131 |
from nose.tools import eq_, raises
import pyexcel as pe
from ._compact import OrderedDict
class Attributable:
def __init__(self, adict):
self.mydict = adict
def __getattr__(self, field):
return self.mydict[field]
class Objects:
def __init__(self):
self.objs = None
def METHOD_NAME(self, objs, batch_size):
self.objs = objs
self.batch_size = batch_size
def all(self):
return [Attributable(o) for o in self.objs]
class Field:
def __init__(self, name):
self.attname = name
class Meta:
instance = 1
def __init__(self):
self.model_name = "Sheet%d" % Meta.instance
self.concrete_fields = []
Meta.instance = Meta.instance + 1
def update(self, data):
for f in data:
self.concrete_fields.append(Field(f))
class FakeDjangoModel:
def __init__(self, model_name=None):
self.objects = Objects()
self._meta = Meta()
if model_name:
self._meta.model_name = model_name
def __call__(self, **keywords):
return keywords
def save(self):
pass
class TestVerticalSheet:
def setUp(self):
self.data = [["X", 1, 4], ["Y", 2, 5], ["Z", 3, 6]]
self.result = [{"Y": 2, "X": 1, "Z": 3}, {"Y": 5, "X": 4, "Z": 6}]
def test_model_save_to_django_model(self):
model = FakeDjangoModel()
pe.save_as(
array=self.data,
name_columns_by_row=0,
dest_model=model,
transpose_before=True,
)
assert model.objects.objs == self.result
def test_mapping_array(self):
data2 = [["A", 1, 4], ["B", 2, 5], ["C", 3, 6]]
mapdict = ["X", "Y", "Z"]
model = FakeDjangoModel()
pe.save_as(
array=data2,
name_columns_by_row=0,
dest_model=model,
dest_mapdict=mapdict,
transpose_before=True,
)
assert model.objects.objs == self.result
def test_mapping_dict(self):
"""
for vertical sheet, first transpose it and then
name columns by row 0
"""
data2 = [["A", 1, 4], ["B", 2, 5], ["C", 3, 6]]
mapdict = {"C": "Z", "A": "X", "B": "Y"}
model = FakeDjangoModel()
pe.save_as(
array=data2,
dest_model=model,
dest_mapdict=mapdict,
name_columns_by_row=0,
transpose_before=True,
)
eq_(model.objects.objs, self.result)
class TestSheet:
def setUp(self):
self.data = [["X", "Y", "Z"], [1, 2, 3], [4, 5, 6]]
self.result = [{"Y": 2, "X": 1, "Z": 3}, {"Y": 5, "X": 4, "Z": 6}]
def test_sheet_save_to_django_model(self):
model = FakeDjangoModel()
sheet = pe.Sheet(self.data, name_columns_by_row=0)
sheet.save_to_django_model(model)
assert model.objects.objs == self.result
def test_sheet_save_to_django_model_3(self):
model = FakeDjangoModel()
sheet = pe.Sheet(self.data)
sheet.name_columns_by_row(0)
def wrapper(row):
row[0] = row[0] + 1
return row
sheet.save_to_django_model(model, initializer=wrapper)
assert model.objects.objs == [
{"Y": 2, "X": 2, "Z": 3},
{"Y": 5, "X": 5, "Z": 6},
]
def test_model_save_to_django_model(self):
model = FakeDjangoModel()
pe.save_as(array=self.data, name_columns_by_row=0, dest_model=model)
assert model.objects.objs == self.result
def test_model_save_to_django_model_2(self):
model = FakeDjangoModel()
pe.save_as(array=self.data, dest_model=model, name_columns_by_row=0)
assert model.objects.objs == self.result
def test_load_sheet_from_django_model(self):
model = FakeDjangoModel()
sheet = pe.Sheet(self.data, name_columns_by_row=0)
sheet.save_to_django_model(model)
assert model.objects.objs == self.result
model._meta.update(["X", "Y", "Z"])
sheet2 = pe.get_sheet(model=model, sheet_name="test")
sheet2.name_columns_by_row(0)
assert sheet2.name == "test"
eq_(list(sheet2.to_records()), list(sheet.to_records()))
def test_mapping_array(self):
data2 = [["A", "B", "C"], [1, 2, 3], [4, 5, 6]]
mapdict = ["X", "Y", "Z"]
model = FakeDjangoModel()
pe.save_as(
array=data2,
name_columns_by_row=0,
dest_model=model,
dest_mapdict=mapdict,
)
assert model.objects.objs == self.result
@raises(Exception)
def test_mapping_array_exceptional_case(self):
data2 = [["A", "B", "C"], [1, 2, 3], [4, 5, 6]]
mapdict = ["X", "Y", "Z"]
model = FakeDjangoModel()
pe.save_as(array=data2, dest_model=model, dest_mapdict=mapdict)
assert model.objects.objs == self.result
def test_mapping_dict(self):
data2 = [["A", "B", "C"], [1, 2, 3], [4, 5, 6]]
mapdict = {"C": "Z", "A": "X", "B": "Y"}
model = FakeDjangoModel()
pe.save_as(
array=data2,
name_columns_by_row=0,
dest_model=model,
dest_mapdict=mapdict,
)
eq_(model.objects.objs, self.result)
class TestBook:
def setUp(self):
self.content = OrderedDict()
self.content.update(
{"Sheet1": [["X", "Y", "Z"], [1, 4, 7], [2, 5, 8], [3, 6, 9]]}
)
self.content.update(
{"Sheet2": [["A", "B", "C"], [1, 4, 7], [2, 5, 8], [3, 6, 9]]}
)
self.result1 = [
{"Y": 4, "X": 1, "Z": 7},
{"Y": 5, "X": 2, "Z": 8},
{"Y": 6, "X": 3, "Z": 9},
]
self.result2 = [
{"B": 4, "A": 1, "C": 7},
{"B": 5, "A": 2, "C": 8},
{"B": 6, "A": 3, "C": 9},
]
def test_book_save_to_models(self):
model1 = FakeDjangoModel("Sheet1")
model2 = FakeDjangoModel("Sheet2")
book = pe.Book(self.content)
book.save_to_django_models([model1, model2])
assert model1.objects.objs == self.result1
assert model2.objects.objs == self.result2
@raises(AttributeError)
def test_book_save_to_models_with_bulk_save_false(self):
"""
        Same as the previous test but with different parameters
"""
model1 = FakeDjangoModel("Sheet1")
model2 = FakeDjangoModel("Sheet2")
book = pe.Book(self.content)
book.save_to_django_models([model1, model2], bulk_save=False)
def test_model_save_to_models(self):
model = FakeDjangoModel("Sheet1")
data = {"Sheet1": [["X", "Y", "Z"], [1, 4, 7], [2, 5, 8], [3, 6, 9]]}
pe.save_book_as(dest_models=[model, None, None], bookdict=data)
assert model.objects.objs == self.result1
def test_load_book_from_django_model(self):
        # If a book has more than one sheet and it is saved to only one
        # model, it will fail with an exception.
model = FakeDjangoModel("Sheet1")
book = pe.Book(
{"Sheet1": [["X", "Y", "Z"], [1, 4, 7], [2, 5, 8], [3, 6, 9]]}
)
book.save_to_django_models([model])
assert model.objects.objs == self.result1
model._meta.update(["X", "Y", "Z"])
book2 = pe.get_book(models=[model])
assert book2[0].to_array() == book[0].to_array()
@raises(Exception)
def test_more_sheets_than_models(self):
self.content.update({"IgnoreMe": [[1, 2, 3]]})
model = FakeDjangoModel("Sheet1")
pe.save_book_as(dest_models=[model], bookdict=self.content)
| null |
5,132 |
import contextlib
import functools
import importlib
import re
import sys
import warnings
def import_deprecated(name):
"""Import *name* while suppressing DeprecationWarning."""
with warnings.catch_warnings():
warnings.simplefilter('ignore', category=DeprecationWarning)
return importlib.import_module(name)
def check_syntax_warning(testcase, statement, errtext='',
*, lineno=1, offset=None):
# Test also that a warning is emitted only once.
from test.support import check_syntax_error
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always', SyntaxWarning)
compile(statement, '<testcase>', 'exec')
testcase.assertEqual(len(warns), 1, warns)
warn, = warns
testcase.assertTrue(issubclass(warn.category, SyntaxWarning),
warn.category)
if errtext:
testcase.assertRegex(str(warn.message), errtext)
testcase.assertEqual(warn.filename, '<testcase>')
testcase.assertIsNotNone(warn.lineno)
if lineno is not None:
testcase.assertEqual(warn.lineno, lineno)
# SyntaxWarning should be converted to SyntaxError when raised,
    # since the latter contains more information and provides a better
    # error report.
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('error', SyntaxWarning)
check_syntax_error(testcase, statement, errtext,
lineno=lineno, offset=offset)
# No warnings are leaked when a SyntaxError is raised.
testcase.assertEqual(warns, [])
def METHOD_NAME(*, category):
"""Decorator to suppress deprecation warnings.
Use of context managers to hide warnings make diffs
more noisy and tools like 'git blame' less useful.
"""
def decorator(test):
@functools.wraps(test)
def wrapper(self, *args, **kwargs):
with warnings.catch_warnings():
warnings.simplefilter('ignore', category=category)
return test(self, *args, **kwargs)
return wrapper
return decorator
class WarningsRecorder(object):
"""Convenience wrapper for the warnings list returned on
entry to the warnings.catch_warnings() context manager.
"""
def __init__(self, warnings_list):
self._warnings = warnings_list
self._last = 0
def __getattr__(self, attr):
if len(self._warnings) > self._last:
return getattr(self._warnings[-1], attr)
elif attr in warnings.WarningMessage._WARNING_DETAILS:
return None
raise AttributeError("%r has no attribute %r" % (self, attr))
@property
def warnings(self):
return self._warnings[self._last:]
def reset(self):
self._last = len(self._warnings)
def check_warnings(*filters, **kwargs):
"""Context manager to silence warnings.
Accept 2-tuples as positional arguments:
("message regexp", WarningCategory)
Optional argument:
- if 'quiet' is True, it does not fail if a filter catches nothing
(default True without argument,
default False if some filters are defined)
Without argument, it defaults to:
check_warnings(("", Warning), quiet=True)
"""
quiet = kwargs.get('quiet')
if not filters:
filters = (("", Warning),)
# Preserve backward compatibility
if quiet is None:
quiet = True
return _filterwarnings(filters, quiet)
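# Illustrative use of check_warnings (a sketch, not part of the test suite):
#
#     with check_warnings(("assertEquals", DeprecationWarning)) as w:
#         warnings.warn("assertEquals is deprecated", DeprecationWarning)
#     # w.message/w.category expose the last recorded warning; unexpected warnings are
#     # re-raised, and with quiet=False a filter that catches nothing raises AssertionError.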
@contextlib.contextmanager
def check_no_warnings(testcase, message='', category=Warning, force_gc=False):
"""Context manager to check that no warnings are emitted.
This context manager enables a given warning within its scope
and checks that no warnings are emitted even with that warning
enabled.
If force_gc is True, a garbage collection is attempted before checking
for warnings. This may help to catch warnings emitted when objects
are deleted, such as ResourceWarning.
Other keyword arguments are passed to warnings.filterwarnings().
"""
from test.support import gc_collect
with warnings.catch_warnings(record=True) as warns:
warnings.filterwarnings('always',
message=message,
category=category)
yield
if force_gc:
gc_collect()
testcase.assertEqual(warns, [])
@contextlib.contextmanager
def check_no_resource_warning(testcase):
"""Context manager to check that no ResourceWarning is emitted.
Usage:
with check_no_resource_warning(self):
f = open(...)
...
del f
You must remove the object which may emit ResourceWarning before
the end of the context manager.
"""
with check_no_warnings(testcase, category=ResourceWarning, force_gc=True):
yield
@contextlib.contextmanager
def _filterwarnings(filters, quiet=False):
"""Catch the warnings, then check if all the expected
warnings have been raised and re-raise unexpected warnings.
If 'quiet' is True, only re-raise the unexpected warnings.
"""
# Clear the warning registry of the calling module
# in order to re-raise the warnings.
frame = sys._getframe(2)
registry = frame.f_globals.get('__warningregistry__')
if registry:
registry.clear()
with warnings.catch_warnings(record=True) as w:
# Set filter "always" to record all warnings. Because
# test_warnings swap the module, we need to look up in
# the sys.modules dictionary.
sys.modules['warnings'].simplefilter("always")
yield WarningsRecorder(w)
# Filter the recorded warnings
reraise = list(w)
missing = []
for msg, cat in filters:
seen = False
for w in reraise[:]:
warning = w.message
# Filter out the matching messages
if (re.match(msg, str(warning), re.I) and
issubclass(warning.__class__, cat)):
seen = True
reraise.remove(w)
if not seen and not quiet:
# This filter caught nothing
missing.append((msg, cat.__name__))
if reraise:
raise AssertionError("unhandled warning %s" % reraise[0])
if missing:
raise AssertionError("filter (%r, %s) did not catch any warning" %
missing[0])
@contextlib.contextmanager
def save_restore_warnings_filters():
old_filters = warnings.filters[:]
try:
yield
finally:
warnings.filters[:] = old_filters
def _warn_about_deprecation():
warnings.warn(
"This is used in test_support test to ensure"
" support.ignore_deprecations_from() works as expected."
" You should not be seeing this.",
DeprecationWarning,
stacklevel=0,
)
| null |
5,133 |
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2023, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
import uuid
import json
from pgadmin.browser.server_groups.servers.databases.tests import \
utils as database_utils
from pgadmin.utils.route import BaseTestGenerator
from regression import parent_node_dict
from regression.python_test_utils import test_utils as utils
from . import utils as fdw_utils
from unittest.mock import patch
class FDWDNodesTestCase(BaseTestGenerator):
"""This class will delete foreign data wrappers under test database."""
scenarios = utils.generate_scenarios('fdw_get_nodes_and_node',
fdw_utils.test_cases)
def METHOD_NAME(self):
""" This function will create extension and foreign data wrapper."""
super().METHOD_NAME()
self.schema_data = parent_node_dict['schema'][-1]
self.server_id = self.schema_data['server_id']
self.db_id = self.schema_data['db_id']
self.db_name = parent_node_dict["database"][-1]["db_name"]
self.schema_name = self.schema_data['schema_name']
self.fdw_name = "fdw_{0}".format(str(uuid.uuid4())[1:8])
self.fdw_id = fdw_utils.create_fdw(self.server, self.db_name,
self.fdw_name)
def get_fdw_nodes(self):
"""
This function returns fdw nodes
:return: fdw nodes
"""
return self.tester.get(
self.url + str(utils.SERVER_GROUP) + '/' +
str(self.server_id) + '/' +
str(self.db_id) + '/',
content_type='html/json')
def get_fdw_node(self):
"""
This functions returns the fdw node
:return: fdw node
"""
return self.tester.get(
self.url + str(utils.SERVER_GROUP) + '/' +
str(self.server_id) + '/' +
str(self.db_id) + '/' +
str(self.fdw_id), content_type='html/json')
def runTest(self):
"""This function will fetch foreign data wrapper present under test
database."""
db_con = database_utils.connect_database(self,
utils.SERVER_GROUP,
self.server_id,
self.db_id)
if not db_con["info"] == "Database connected.":
raise Exception("Could not connect to database.")
fdw_response = fdw_utils.verify_fdw(self.server, self.db_name,
self.fdw_name)
if not fdw_response:
raise Exception("Could not find FDW.")
if self.is_positive_test:
if hasattr(self, "node"):
response = self.get_fdw_node()
else:
response = self.get_fdw_nodes()
else:
if hasattr(self, "error_fetching_fdw"):
with patch(self.mock_data["function_name"],
return_value=eval(self.mock_data["return_value"])):
if hasattr(self, "node"):
response = self.get_fdw_node()
else:
response = self.get_fdw_nodes()
if hasattr(self, "wrong_id"):
self.fdw_id = 99999
response = self.get_fdw_node()
actual_response_code = response.status_code
expected_response_code = self.expected_data['status_code']
self.assertEqual(actual_response_code, expected_response_code)
def tearDown(self):
"""This function disconnect the test database and drop added extension
and dependant objects."""
database_utils.disconnect_database(self, self.server_id,
self.db_id)
| null |
5,134 |
"""
author: Christian Bender
date: 21.12.2017
class: XORCipher
This class implements the XOR-cipher algorithm and provides
some useful methods for encrypting and decrypting strings and
files.
Overview of the methods
- encrypt : list of char
- decrypt : list of char
- encrypt_string : str
- decrypt_string : str
- encrypt_file : boolean
- decrypt_file : boolean
"""
from __future__ import annotations
class XORCipher:
def __init__(self, key: int = 0):
"""
simple constructor that receives a key or uses
default key = 0
"""
# private field
self.__key = key
def encrypt(self, content: str, key: int) -> list[str]:
"""
input: 'content' of type string and 'key' of type int
output: encrypted string 'content' as a list of chars
        if no key (or a key of 0) is passed, the key set in the constructor
        is used; if that is also 0, the key falls back to 1
"""
# precondition
assert isinstance(key, int) and isinstance(content, str)
key = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(ch) ^ key) for ch in content]
    def METHOD_NAME(self, content: list[str], key: int) -> list[str]:
"""
input: 'content' of type list and 'key' of type int
output: decrypted string 'content' as a list of chars
        if no key (or a key of 0) is passed, the key set in the constructor
        is used; if that is also 0, the key falls back to 1
"""
# precondition
assert isinstance(key, int) and isinstance(content, list)
key = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(ch) ^ key) for ch in content]
def encrypt_string(self, content: str, key: int = 0) -> str:
"""
input: 'content' of type string and 'key' of type int
output: encrypted string 'content'
        if no key (or a key of 0) is passed, the key set in the constructor
        is used; if that is also 0, the key falls back to 1
"""
# precondition
assert isinstance(key, int) and isinstance(content, str)
key = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
ans = ""
for ch in content:
ans += chr(ord(ch) ^ key)
return ans
def decrypt_string(self, content: str, key: int = 0) -> str:
"""
input: 'content' of type string and 'key' of type int
output: decrypted string 'content'
        if no key (or a key of 0) is passed, the key set in the constructor
        is used; if that is also 0, the key falls back to 1
"""
# precondition
assert isinstance(key, int) and isinstance(content, str)
key = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
ans = ""
for ch in content:
ans += chr(ord(ch) ^ key)
return ans
def encrypt_file(self, file: str, key: int = 0) -> bool:
"""
input: filename (str) and a key (int)
output: returns true if encrypt process was
successful otherwise false
        if no key (or a key of 0) is passed, the key set in the constructor
        is used; if that is also 0, the key falls back to 1
"""
# precondition
assert isinstance(file, str) and isinstance(key, int)
try:
with open(file) as fin, open("encrypt.out", "w+") as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(line, key))
except OSError:
return False
return True
def decrypt_file(self, file: str, key: int) -> bool:
"""
input: filename (str) and a key (int)
output: returns true if decrypt process was
successful otherwise false
        if no key (or a key of 0) is passed, the key set in the constructor
        is used; if that is also 0, the key falls back to 1
"""
# precondition
assert isinstance(file, str) and isinstance(key, int)
try:
with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
for line in fin:
fout.write(self.decrypt_string(line, key))
except OSError:
return False
return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| null |
5,135 |
# SPDX-FileCopyrightText: Copyright DB Netz AG and the capellambse contributors
# SPDX-License-Identifier: Apache-2.0
"""Common classes used by all MelodyModel functions.
.. diagram:: [CDB] Common Types ORM
"""
from __future__ import annotations
import collections
import collections.abc as cabc
import typing as t
import capellambse
S = t.TypeVar("S", bound=t.Optional[str])
T = t.TypeVar("T", bound="ModelObject")
U = t.TypeVar("U")
XTYPE_ANCHORS = {
"capellambse.model": "org.polarsys.capella.core.data.capellamodeller",
"capellambse.model.crosslayer": "org.polarsys.capella.core.data",
"capellambse.model.layers": "org.polarsys.capella.core.data",
}
"""A mapping from anchor modules to Capella packages.
This dictionary maps Python modules and packages to the Capella packages
they represent. ``build_xtype`` and related functions/classes can then
use this information to automatically derive an ``xsi:type`` from any
class that is defined in such an anchor module (or a submodule of one).
"""
XTYPE_HANDLERS: dict[
str | None, dict[str, type[t.Any]]
] = collections.defaultdict(dict)
r"""Defines a mapping between ``xsi:type``\ s and wrapper classes.
The first layer's keys can be either ``None`` or the ``xsi:type`` of the
architectural layer that the wrapper should be applied to. In the case
of ``None``, the wrapper will be applied to all layers. Note that
layer-specific wrappers have precedence over layer-agnostic ones.
These keys map to a further dictionary. This second layer maps from the
``xsi:type``\ (s) that each wrapper handles to the wrapper class.
"""
def build_xtype(class_: type[ModelObject]) -> str:
anchor = package = ""
for a, p in XTYPE_ANCHORS.items():
if len(a) > len(anchor) and class_.__module__.startswith(a):
anchor = a
package = p
if not anchor:
raise TypeError(f"Module is not an xtype anchor: {class_.__module__}")
module = class_.__module__[len(anchor) :]
clsname = class_.__name__
return f"{package}{module}:{clsname}"
def enumliteral(
generic_element: GenericElement, attr: str, default: str = "NOT_SET"
) -> AttributeProperty | str:
uuid = generic_element._element.attrib.get(attr)
if uuid is None:
return default
return generic_element.from_model(
generic_element._model, generic_element._model._loader[uuid]
).name
def set_accessor(
cls: type[ModelObject], attr: str, accessor: Accessor
) -> None:
setattr(cls, attr, accessor)
accessor.__set_name__(cls, attr)
def set_self_references(*args: tuple[type[ModelObject], str]) -> None:
for cls, attr in args:
set_accessor(cls, attr, DirectProxyAccessor(cls, aslist=ElementList))
def xtype_handler( # pylint: disable=keyword-arg-before-vararg # PEP-570
arch: str | None = None, /, *xtypes: str
) -> cabc.Callable[[type[T]], type[T]]:
"""Register a class as handler for a specific ``xsi:type``.
``arch`` is the ``xsi:type`` of the desired architecture. It must
always be a simple string or None. In the latter case the definition
applies to all elements regardless of their architectural layer.
Architecture-specific definitions will always win over
architecture-independent ones.
Each string given in ``xtypes`` notes an ``xsi:type`` of elements
that this class handles. It is possible to specify multiple values,
in which case the class will be registered for each ``xsi:type``
under the architectural layer given in ``arch``.
Handler classes' ``__init__`` methods must accept two positional
arguments. The first argument is the :class:`MelodyModel` instance
which loaded the corresponding model, and the second one is the LXML
element that needs to be handled.
Example::
>>> @xtype_handler('arch:xtype', 'xtype:1', 'xtype:2')
... class Test:
... _xmltag = "ownedTests"
... def from_model(self, model, element, /):
... ... # Instantiate from model XML element
"""
if arch is not None and not isinstance(arch, str): # pragma: no cover
raise TypeError(
f"'arch' must be a str or None, not {type(arch).__name__}"
)
# Compile a list of all xtype strings
xtype_strs = []
for xtype in xtypes:
if isinstance(xtype, str):
xtype_strs.append(xtype)
else: # pragma: no cover
raise ValueError(
f"All `xtype`s must be str, not {type(xtype).__name__!r}"
)
def METHOD_NAME(cls: type[T]) -> type[T]:
# Avoid double registration when executing an extension as module
if cls.__module__ == "__main__":
return cls
if not xtype_strs:
xtype_strs.append(build_xtype(cls))
for xtype in xtype_strs:
if xtype in XTYPE_HANDLERS[arch]: # pragma: no cover
raise LookupError(f"Duplicate xsi:type {xtype} in {arch}")
XTYPE_HANDLERS[arch][xtype] = cls
return cls
return METHOD_NAME
from .accessors import *
from .element import *
from .properties import *
set_accessor(GenericElement, "parent", ParentAccessor(GenericElement))
| null |
5,136 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import unittest
import math
import numpy as np
from mantid.kernel import V3D
class V3DTest(unittest.TestCase):
def test_default_construction_is_at_origin(self):
p = V3D()
self.assertEqual(p.X(), 0.0)
self.assertEqual(p.Y(), 0.0)
self.assertEqual(p.Z(), 0.0)
def test_construction_with_xyz(self):
p = V3D(1.5, 2.4, 6.5)
self.assertEqual(p.X(), 1.5)
self.assertEqual(p.Y(), 2.4)
self.assertEqual(p.Z(), 6.5)
def test_distance(self):
a = V3D(0.0, 0.0, 0.0)
b = V3D(2.0, 2.0, 2.0)
d = a.distance(b)
self.assertAlmostEqual(d, 2.0 * math.sqrt(3.0))
def test_angle(self):
a = V3D(2.0, 0.0, 0.0)
b = V3D(0.0, 1.0, 0.0)
c = V3D(1.0, 1.0, 0.0)
d = V3D(-1.0, 0.0, 0.0)
self.assertAlmostEqual(a.angle(a), 0.0)
self.assertAlmostEqual(a.angle(b), math.pi / 2.0)
self.assertAlmostEqual(a.angle(c), math.pi / 4.0)
self.assertAlmostEqual(a.angle(d), math.pi)
def test_cos_angle(self):
a = V3D(2.0, 0.0, 0.0)
b = V3D(0.0, 1.0, 0.0)
c = V3D(1.0, 1.0, 0.0)
d = V3D(-1.0, 0.0, 0.0)
self.assertAlmostEqual(a.cosAngle(a), 1.0)
self.assertAlmostEqual(a.cosAngle(b), 0.0)
self.assertAlmostEqual(a.cosAngle(c), 1.0 / np.sqrt(2.0))
self.assertAlmostEqual(a.cosAngle(d), -1.0)
def METHOD_NAME(self):
b = V3D(0.0, 0.0, 0.0)
a = V3D(9.9, 7.6, 0.0)
self.assertEqual(a.zenith(a), 0.0)
self.assertAlmostEqual(a.zenith(b), math.pi / 2.0)
a = V3D(-1.1, 0.0, 0.0)
self.assertAlmostEqual(a.zenith(b), math.pi / 2.0)
a = V3D(0.0, 0.0, 1.0)
self.assertEqual(a.zenith(b), 0.0)
a = V3D(1.0, 0.0, 1.0)
self.assertAlmostEqual(a.zenith(b), math.pi / 4.0)
a = V3D(1.0, 0.0, -1.0)
self.assertAlmostEqual(a.zenith(b), 3.0 * math.pi / 4.0)
def test_scalarprod(self):
a = V3D(1.0, 2.0, 1.0)
b = V3D(1.0, -2.0, -1.0)
sp = a.scalar_prod(b)
self.assertAlmostEqual(sp, -4.0)
def test_crossprod(self):
a = V3D(1.0, 0.0, 0.0)
b = V3D(0.0, 1.0, 0.0)
c = a.cross_prod(b)
self.assertAlmostEqual(c.X(), 0.0)
self.assertAlmostEqual(c.Y(), 0.0)
self.assertAlmostEqual(c.Z(), 1.0)
def test_norm(self):
p = V3D(1.0, -5.0, 8.0)
self.assertAlmostEqual(p.norm(), math.sqrt(90.0))
def test_norm2(self):
p = V3D(1.0, -5.0, 8.0)
self.assertAlmostEqual(p.norm2(), 90.0)
def test_equality_operators_use_value_comparison(self):
p1 = V3D(1.0, -5.0, 8.0)
p2 = V3D(1.0, -5.0, 8.0)
self.assertEqual(p1, p2)
def test_inequality_operators_use_value_comparison(self):
p1 = V3D(1.0, -5.0, 8.0)
p2 = V3D(1.0, -5.0, 8.0) # different objects, same value
self.assertFalse(p1 != p2)
p3 = V3D(1.0, -5.0, 10.0)
self.assertNotEqual(p1, p3)
def test_directionAngles_rads(self):
v = V3D(1, 1, 1)
inDegrees = False
angles = v.directionAngles(inDegrees)
self.assertAlmostEqual(math.acos(1.0 / math.sqrt(3.0)), angles.X())
self.assertAlmostEqual(math.acos(1.0 / math.sqrt(3.0)), angles.Y())
self.assertAlmostEqual(math.acos(1.0 / math.sqrt(3.0)), angles.Z())
def test_directionAngles(self):
v = V3D(1, 1, 1)
angles = v.directionAngles()
self.assertAlmostEqual(math.acos(1.0 / math.sqrt(3.0)) * 180 / math.pi, angles.X())
self.assertAlmostEqual(math.acos(1.0 / math.sqrt(3.0)) * 180 / math.pi, angles.Y())
self.assertAlmostEqual(math.acos(1.0 / math.sqrt(3.0)) * 180 / math.pi, angles.Z())
def test_hash(self):
v1 = V3D(1, 1, 1)
v2 = V3D(1, 1, 1)
v3 = V3D(1, 0, 0)
a = set([v1, v2, v3])
self.assertEqual(len(a), 2)
def test_get_item(self):
v = V3D(2, 1, 3)
self.assertRaises(IndexError, v.__getitem__, 3)
self.assertRaises(IndexError, v.__getitem__, -4)
self.assertEqual(v[0], 2.0)
self.assertEqual(v[1], 1.0)
self.assertEqual(v[2], 3.0)
self.assertEqual(v[-3], 2.0)
self.assertEqual(v[-2], 1.0)
self.assertEqual(v[-1], 3.0)
def test_set_item(self):
v = V3D(2, 1, 3)
self.assertRaises(IndexError, v.__setitem__, 3, 0.0)
self.assertRaises(IndexError, v.__setitem__, -4, 0.0)
v[0] = 1.0
v[1] = 2.0
v[2] = 4.0
self.assertEqual(v[0], 1.0)
self.assertEqual(v[1], 2.0)
self.assertEqual(v[2], 4.0)
v[-3] = 3.0
v[-2] = 5.0
v[-1] = 6.0
self.assertEqual(v[0], 3.0)
self.assertEqual(v[1], 5.0)
self.assertEqual(v[2], 6.0)
def test_iterator(self):
times_two = [2 * x for x in V3D(3, 4, 5)]
self.assertEqual(times_two[0], 6.0)
self.assertEqual(times_two[1], 8.0)
self.assertEqual(times_two[2], 10.0)
def test_len(self):
self.assertEqual(len(V3D(2, 2, 2)), 3)
def test_numpy_conversion(self):
v = V3D(1, 2, 3)
v_as_numpy = np.array(v)
self.assertTrue(np.all(v_as_numpy == np.array([1, 2, 3])))
if __name__ == "__main__":
unittest.main()
| null |
5,137 |
from django.core import cache
from ..base import BaseTestCase
class CachePanelTestCase(BaseTestCase):
panel_id = "CachePanel"
def test_recording(self):
self.assertEqual(len(self.panel.calls), 0)
cache.cache.set("foo", "bar")
cache.cache.get("foo")
cache.cache.delete("foo")
self.assertFalse(cache.cache.touch("foo"))
cache.cache.set("foo", "bar")
self.assertTrue(cache.cache.touch("foo"))
# Verify that the cache has a valid clear method.
cache.cache.clear()
self.assertEqual(len(self.panel.calls), 7)
def METHOD_NAME(self):
self.assertEqual(len(self.panel.calls), 0)
default_cache = cache.caches[cache.DEFAULT_CACHE_ALIAS]
second_cache = cache.caches["second"]
default_cache.set("foo", "bar")
second_cache.get("foo")
self.assertEqual(len(self.panel.calls), 2)
def test_hits_and_misses(self):
cache.cache.clear()
cache.cache.get("foo")
self.assertEqual(self.panel.hits, 0)
self.assertEqual(self.panel.misses, 1)
cache.cache.set("foo", 1)
cache.cache.get("foo")
self.assertEqual(self.panel.hits, 1)
self.assertEqual(self.panel.misses, 1)
cache.cache.get_many(["foo", "bar"])
self.assertEqual(self.panel.hits, 2)
self.assertEqual(self.panel.misses, 2)
cache.cache.set("bar", 2)
cache.cache.get_many(keys=["foo", "bar"])
self.assertEqual(self.panel.hits, 4)
self.assertEqual(self.panel.misses, 2)
def test_get_or_set_value(self):
cache.cache.get_or_set("baz", "val")
self.assertEqual(cache.cache.get("baz"), "val")
calls = [
(call["name"], call["args"], call["kwargs"]) for call in self.panel.calls
]
self.assertEqual(
calls,
[
("get_or_set", ("baz", "val"), {}),
("get", ("baz",), {}),
],
)
self.assertEqual(
self.panel.counts,
{
"add": 0,
"get": 1,
"set": 0,
"get_or_set": 1,
"touch": 0,
"delete": 0,
"clear": 0,
"get_many": 0,
"set_many": 0,
"delete_many": 0,
"has_key": 0,
"incr": 0,
"decr": 0,
"incr_version": 0,
"decr_version": 0,
},
)
def test_get_or_set_does_not_override_existing_value(self):
cache.cache.set("foo", "bar")
cached_value = cache.cache.get_or_set("foo", "other")
self.assertEqual(cached_value, "bar")
calls = [
(call["name"], call["args"], call["kwargs"]) for call in self.panel.calls
]
self.assertEqual(
calls,
[
("set", ("foo", "bar"), {}),
("get_or_set", ("foo", "other"), {}),
],
)
self.assertEqual(
self.panel.counts,
{
"add": 0,
"get": 0,
"set": 1,
"get_or_set": 1,
"touch": 0,
"delete": 0,
"clear": 0,
"get_many": 0,
"set_many": 0,
"delete_many": 0,
"has_key": 0,
"incr": 0,
"decr": 0,
"incr_version": 0,
"decr_version": 0,
},
)
def test_insert_content(self):
"""
Test that the panel only inserts content after generate_stats and
        not during process_request.
"""
cache.cache.get("café")
response = self.panel.process_request(self.request)
# ensure the panel does not have content yet.
self.assertNotIn("café", self.panel.content)
self.panel.generate_stats(self.request, response)
# ensure the panel renders correctly.
content = self.panel.content
self.assertIn("café", content)
self.assertValidHTML(content)
def test_generate_server_timing(self):
self.assertEqual(len(self.panel.calls), 0)
cache.cache.set("foo", "bar")
cache.cache.get("foo")
cache.cache.delete("foo")
self.assertEqual(len(self.panel.calls), 3)
response = self.panel.process_request(self.request)
self.panel.generate_stats(self.request, response)
self.panel.generate_server_timing(self.request, response)
stats = self.panel.get_stats()
expected_data = {
"total_time": {
"title": "Cache {} Calls".format(stats["total_calls"]),
"value": stats["total_time"],
}
}
self.assertEqual(self.panel.get_server_timing_stats(), expected_data)
| null |
5,138 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import unittest
from mantidqt.utils.qt.testing import start_qapplication
from mantid.api import AnalysisDataService, FileFinder
from unittest import mock
from mantid import ConfigService
from mantid.simpleapi import CreateWorkspace
from collections import Counter
from mantidqtinterfaces.Muon.GUI.Common.utilities.load_utils import load_workspace_from_filename
from mantidqtinterfaces.Muon.GUI.Common.ADSHandler.muon_workspace_wrapper import MuonWorkspaceWrapper
from mantidqtinterfaces.Muon.GUI.Common.muon_group import MuonGroup
from mantidqtinterfaces.Muon.GUI.Common.muon_pair import MuonPair
from mantidqtinterfaces.Muon.GUI.Common.test_helpers.context_setup import setup_context
# Only want to test frequency specific aspects
# the rest is covered in muon_context_test.py
@start_qapplication
class MuonContextWithFrequencyTest(unittest.TestCase):
def setUp(self):
AnalysisDataService.clear()
ConfigService["MantidOptions.InvisibleWorkspaces"] = "True"
self.filepath = FileFinder.findRuns("EMU00019489.nxs")[0]
self.load_result, self.run_number, self.filename, psi_data = load_workspace_from_filename(self.filepath)
self.assert_(not psi_data)
self.context = setup_context(True)
self.context.gui_context.update({"RebinType": "None"})
self.loaded_data = self.context.data_context._loaded_data
self.data_context = self.context.data_context
self.gui_context = self.context.gui_context
self.group_pair_context = self.context.group_pair_context
self.data_context.instrument = "EMU"
self.loaded_data.add_data(workspace=self.load_result, run=[self.run_number], filename=self.filename, instrument="EMU")
self.data_context.current_runs = [[self.run_number]]
self.data_context.update_current_data()
self.group_pair_context.reset_group_and_pairs_to_default(self.load_result["OutputWorkspace"][0].workspace, "EMU", "", 1)
self.run_list = [19489]
self.groups = [MuonGroup("bwd"), MuonGroup("fwd")]
self.rebins = [False, False]
self.pairs = [MuonPair("long", "bwd", "fwd")]
def METHOD_NAME(self):
ConfigService["MantidOptions.InvisibleWorkspaces"] = "False"
def _calculate_all_data(self):
self.context.calculate_all_counts()
for group, rebin in zip(self.groups, self.rebins):
self.context.calculate_asymmetry_for(self.run_list, group, rebin)
self.context.show_group(self.run_list, group, rebin)
for pair in self.pairs:
self.context.calculate_pair_for(self.run_list, pair)
self.context.show_pair(self.run_list, pair)
def populate_ADS(self):
self._calculate_all_data()
CreateWorkspace([0], [0], OutputWorkspace="EMU19489; PhaseQuad; PhaseTable EMU19489")
self.context.phase_context.add_phase_quad(MuonWorkspaceWrapper("EMU19489; PhaseQuad; PhaseTable EMU19489"), "19489")
def test_window(self):
self.assertEqual("Frequency Domain Analysis", self.context.window_title)
def test_get_workspace_names_returns_no_time_domain_workspaces(self):
self.populate_ADS()
workspace_list = self.context.get_workspace_names_for("19489", "fwd, bwd, long")
self.assertEqual(Counter(workspace_list), Counter())
def test_get_workspace_names_returns_nothing_if_no_parameters_passed(self):
self.populate_ADS()
self.context._frequency_context.plot_type = "All"
workspace_list = self.context.get_workspace_names_for()
self.assertEqual(workspace_list, [])
def test_get_workspaces_names_copes_with_no_freq_runs(self):
self.populate_ADS()
self.context._frequency_context.plot_type = "All"
workspace_list = self.context.get_workspace_names_for(runs="19489", group_and_pair="fwd, bwd, long, random, wrong")
self.assertEqual(Counter(workspace_list), Counter([]))
def test_call_freq_workspace_names(self):
self.context.get_names_of_frequency_domain_workspaces_to_fit = mock.Mock()
self.context._frequency_context.plot_type = "All"
self.context.get_workspace_names_for(runs="19489", group_and_pair="fwd, bwd")
self.context.get_names_of_frequency_domain_workspaces_to_fit.assert_called_once_with(
runs="19489", group_and_pair="fwd, bwd", frequency_type="All"
)
if __name__ == "__main__":
unittest.main(buffer=False, verbosity=2)
| null |
5,139 |
##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2018 Stephan Thiele <[email protected]>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
import sigrokdecode as srd
class LinFsm:
class State:
WaitForBreak = 'WAIT_FOR_BREAK'
Sync = 'SYNC'
Pid = 'PID'
Data = 'DATA'
Checksum = 'CHECKSUM'
Error = 'ERROR'
def transit(self, target_state):
if not self._transition_allowed(target_state):
return False
self.state = target_state
return True
def _transition_allowed(self, target_state):
if target_state == LinFsm.State.Error:
return True
return target_state in self.allowed_state[self.state]
def reset(self):
self.state = LinFsm.State.WaitForBreak
def __init__(self):
a = dict()
a[LinFsm.State.WaitForBreak] = (LinFsm.State.Sync,)
a[LinFsm.State.Sync] = (LinFsm.State.Pid,)
a[LinFsm.State.Pid] = (LinFsm.State.Data,)
a[LinFsm.State.Data] = (LinFsm.State.Data, LinFsm.State.Checksum)
a[LinFsm.State.Checksum] = (LinFsm.State.WaitForBreak,)
a[LinFsm.State.Error] = (LinFsm.State.Sync,)
self.allowed_state = a
self.state = None
self.reset()
class Decoder(srd.Decoder):
api_version = 3
id = 'lin'
name = 'LIN'
longname = 'Local Interconnect Network'
desc = 'Local Interconnect Network (LIN) protocol.'
license = 'gplv2+'
inputs = ['uart']
outputs = []
tags = ['Automotive']
options = (
{'id': 'version', 'desc': 'Protocol version', 'default': 2, 'values': (1, 2), 'idn':'dec_lin_opt_version'},
)
annotations = (
('data', 'LIN data'),
('control', 'Protocol info'),
('error', 'Error descriptions'),
('inline_error', 'Protocol violations and errors'),
)
annotation_rows = (
('data', 'Data', (0, 1, 3)),
('error', 'Error', (2,)),
)
def __init__(self):
self.reset()
def reset(self):
self.fsm = LinFsm()
self.lin_header = []
self.lin_rsp = []
self.lin_version = None
self.out_ann = None
self.ss_block = None
self.es_block = None
self.done_break = False
def start(self):
self.out_ann = self.register(srd.OUTPUT_ANN)
self.lin_version = self.options['version']
def putx(self, data):
self.put(self.ss_block, self.es_block, self.out_ann, data)
def wipe_break_null_byte(self, value):
# Upon a break condition a null byte is received which must be ignored.
if self.fsm.state not in (LinFsm.State.WaitForBreak, LinFsm.State.Error):
if len(self.lin_rsp):
value = self.lin_rsp.pop()[2]
else:
self.lin_header.pop()
if value != 0:
self.fsm.transit(LinFsm.State.Error)
self.handle_error(None)
return False
return True
def handle_wait_for_break(self, value):
self.wipe_break_null_byte(value)
def handle_break(self, value):
if self.fsm.state not in (LinFsm.State.WaitForBreak, LinFsm.State.Error):
if self.wipe_break_null_byte(value):
self.fsm.transit(LinFsm.State.Checksum)
self.handle_checksum()
self.fsm.reset()
self.fsm.transit(LinFsm.State.Sync)
self.done_break = True
self.putx([1, ['Break condition', 'Break', 'Brk', 'B']])
def handle_sync(self, value):
self.fsm.transit(LinFsm.State.Pid)
self.lin_header.append((self.ss_block, self.es_block, value))
def handle_pid(self, value):
self.fsm.transit(LinFsm.State.Data)
self.lin_header.append((self.ss_block, self.es_block, value))
def handle_data(self, value):
self.lin_rsp.append((self.ss_block, self.es_block, value))
def handle_checksum(self):
sync = self.lin_header.pop(0) if len(self.lin_header) else None
self.put(sync[0], sync[1], self.out_ann, [0, ['Sync', 'S']])
if sync[2] != 0x55:
self.put(sync[0], sync[1], self.out_ann,
[2, ['Sync is not 0x55', 'Not 0x55', '!= 0x55']])
pid = self.lin_header.pop(0) if len(self.lin_header) else None
checksum = self.lin_rsp.pop() if len(self.lin_rsp) else None
if pid:
id_ = pid[2] & 0x3F
parity = pid[2] >> 6
expected_parity = self.calc_parity(pid[2])
parity_valid = parity == expected_parity
if not parity_valid:
self.put(pid[0], pid[1], self.out_ann, [2, ['P != %d' % expected_parity]])
ann_class = 0 if parity_valid else 3
self.put(pid[0], pid[1], self.out_ann, [ann_class, [
'ID: %02X Parity: %d (%s)' % (id_, parity, 'ok' if parity_valid else 'bad'),
'ID: 0x%02X' % id_, 'I: %d' % id_
]])
if len(self.lin_rsp):
checksum_valid = self.checksum_is_valid(pid[2], self.lin_rsp, checksum[2])
for b in self.lin_rsp:
self.put(b[0], b[1], self.out_ann, [0, ['Data: 0x%02X' % b[2], 'D: 0x%02X' % b[2]]])
ann_class = 0 if checksum_valid else 3
self.put(checksum[0], checksum[1], self.out_ann,
[ann_class, ['Checksum: 0x%02X' % checksum[2], 'Checksum', 'Chk', 'C']])
if not checksum_valid:
self.put(checksum[0], checksum[1], self.out_ann, [2, ['Checksum invalid']])
else:
pass # No response.
self.lin_header.clear()
self.lin_rsp.clear()
def handle_error(self, dummy):
self.putx([3, ['Error', 'Err', 'E']])
def checksum_is_valid(self, pid, data, checksum):
if self.lin_version == 2:
id_ = pid & 0x3F
if id_ != 60 and id_ != 61:
checksum += pid
for d in data:
checksum += d[2]
carry_bits = int(checksum / 256)
checksum += carry_bits
return checksum & 0xFF == 0xFF
@staticmethod
def calc_parity(pid):
id_ = [((pid & 0x3F) >> i) & 1 for i in range(8)]
p0 = id_[0] ^ id_[1] ^ id_[2] ^ id_[4]
p1 = not (id_[1] ^ id_[3] ^ id_[4] ^ id_[5])
return (p0 << 0) | (p1 << 1)
def METHOD_NAME(self):
if self.done_break and len(self.lin_rsp):
            self.handle_checksum()
def decode(self, ss, es, data):
ptype, rxtx, pdata = data
self.ss_block, self.es_block = ss, es
# Ignore all UART packets except the actual data packets or BREAK.
if ptype == 'BREAK':
self.handle_break(pdata)
if ptype != 'DATA':
return
# We're only interested in the byte value (not individual bits).
pdata = pdata[0]
# Short LIN overview:
# - Message begins with a BREAK (0x00) for at least 13 bittimes.
# - Break is always followed by a SYNC byte (0x55).
# - Sync byte is followed by a PID byte (Protected Identifier).
# - PID byte is followed by 1 - 8 data bytes and a final checksum byte.
handler = getattr(self, 'handle_%s' % self.fsm.state.lower())
handler(pdata)
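# Illustrative sketch (not part of the original decoder): the PID parity check
# performed in handle_checksum(), worked through for frame ID 0x10.
#
#     pid = 0x50                   # ID 0x10 with parity bits P0=1, P1=0
#     Decoder.calc_parity(pid)     # -> 0b01
#     pid >> 6                     # -> 0b01, matches, so the PID is valid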
| null |
5,140 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# pylint: disable=no-init
from mantid.api import *
from mantid.kernel import *
from vesuvio.base import VesuvioBase
class VesuvioResolution(VesuvioBase):
_workspace_index = None
_mass = None
def category(self):
return "Inelastic\\Indirect\\Vesuvio"
def summary(self):
return "Calculates the resolution function for VESUVIO"
def PyInit(self):
self.declareProperty(
MatrixWorkspaceProperty(name="Workspace", defaultValue="", direction=Direction.Input), doc="Sample matrix workspace"
)
self.declareProperty(name="WorkspaceIndex", defaultValue=0, doc="Workspace index to use for resolution")
self.declareProperty(name="Mass", defaultValue=100.0, doc="The mass defining the recoil peak in AMU")
self.declareProperty(
WorkspaceProperty(name="OutputWorkspaceTOF", defaultValue="", direction=Direction.Output, optional=PropertyMode.Optional),
doc="Output resolution workspace in TOF",
)
self.declareProperty(
WorkspaceProperty(name="OutputWorkspaceYSpace", defaultValue="", direction=Direction.Output, optional=PropertyMode.Optional),
doc="Output resolution workspace in ySpace",
)
def METHOD_NAME(self):
"""
Does basic validation for inputs.
"""
issues = dict()
sample_ws = self.getProperty("Workspace").value
workspace_index = self.getProperty("WorkspaceIndex").value
if not isinstance(sample_ws, MatrixWorkspace):
issues["Workspace"] = "The Workspace must be a MatrixWorkspace"
elif workspace_index > sample_ws.getNumberHistograms() - 1:
issues["WorkspaceIndex"] = "Workspace index is out of range"
out_ws_tof = self.getPropertyValue("OutputWorkspaceTOF")
out_ws_ysp = self.getPropertyValue("OutputWorkspaceYSpace")
output_tof = out_ws_tof != ""
output_ysp = out_ws_ysp != ""
if not (output_tof or output_ysp):
warning_message = "Must output in either time of flight or ySpace"
issues["OutputWorkspaceTOF"] = warning_message
issues["OutputWorkspaceYSpace"] = warning_message
return issues
def PyExec(self):
sample_ws = self.getProperty("Workspace").value
out_ws_tof = self.getPropertyValue("OutputWorkspaceTOF")
out_ws_ysp = self.getPropertyValue("OutputWorkspaceYSpace")
self._workspace_index = self.getProperty("WorkspaceIndex").value
self._mass = self.getProperty("Mass").value
output_tof = out_ws_tof != ""
output_ysp = out_ws_ysp != ""
if output_tof:
res_tof = self._calculate_resolution(sample_ws)
self.setProperty("OutputWorkspaceTOF", res_tof)
if output_ysp:
y_space_conv = self._execute_child_alg(
"ConvertToYSpace", return_values="OutputWorkspace", InputWorkspace=sample_ws, Mass=self._mass
)
res_ysp = self._calculate_resolution(y_space_conv)
self.setProperty("OutputWorkspaceYSpace", res_ysp)
def _calculate_resolution(self, workspace):
"""
Calculates the resolution function using the VesuvioResolution fit function.
@param workspace The sample workspace
"""
function = "name=VesuvioResolution, Mass=%f" % self._mass
fit_naming_stem = "__vesuvio_res_fit"
# Execute the resolution function using fit.
# Functions can't currently be executed as stand alone objects,
# so for now we will run fit with zero iterations to achieve the same result.
fit_ws = self._execute_child_alg(
"Fit",
return_values="OutputWorkspace",
Function=function,
InputWorkspace=workspace,
MaxIterations=0,
CreateOutput=True,
Output=fit_naming_stem,
WorkspaceIndex=self._workspace_index,
OutputCompositeMembers=False,
)
# Extract just the function values from the fit spectrum
res_ws = self._execute_child_alg("ExtractSingleSpectrum", InputWorkspace=fit_ws, WorkspaceIndex=1)
return res_ws
AlgorithmFactory.subscribe(VesuvioResolution)
| null |
5,141 |
# This file is part of cloud-init. See LICENSE file for license information.
import copy
from unittest import mock
from cloudinit import distros, helpers, importer, sources
from cloudinit.sources import DataSourceCloudSigma
from cloudinit.sources.helpers.cloudsigma import Cepko
from tests.unittests import helpers as test_helpers
SERVER_CONTEXT = {
"cpu": 1000,
"cpus_instead_of_cores": False,
"mem": 1073741824,
"meta": {
"ssh_public_key": "ssh-rsa AAAAB3NzaC1yc2E.../hQ5D5 john@doe",
"cloudinit-user-data": "#cloud-config\n\n...",
},
"name": "test_server",
"requirements": [],
"smp": 1,
"tags": ["much server", "very performance"],
"uuid": "65b2fb23-8c03-4187-a3ba-8b7c919e8890",
"vnc_password": "9e84d6cb49e46379",
"vendor_data": {
"location": "zrh",
"cloudinit": "#cloud-config\n\n...",
},
}
DS_PATH = "cloudinit.sources.DataSourceCloudSigma.DataSourceCloudSigma"
class CepkoMock(Cepko):
def __init__(self, mocked_context):
self.result = mocked_context
def all(self):
return self
class DataSourceCloudSigmaTest(test_helpers.CiTestCase):
def setUp(self):
super(DataSourceCloudSigmaTest, self).setUp()
self.paths = helpers.Paths({"run_dir": self.tmp_dir()})
self.add_patch(
DS_PATH + ".override_ds_detect",
"m_is_container",
return_value=True,
)
distro_cls = distros.fetch("ubuntu")
distro = distro_cls("ubuntu", cfg={}, paths=self.paths)
self.datasource = DataSourceCloudSigma.DataSourceCloudSigma(
sys_cfg={}, distro=distro, paths=self.paths
)
self.datasource.cepko = CepkoMock(SERVER_CONTEXT)
def test_get_hostname(self):
self.datasource.get_data()
self.assertEqual(
"test_server", self.datasource.get_hostname().hostname
)
self.datasource.metadata["name"] = ""
self.assertEqual("65b2fb23", self.datasource.get_hostname().hostname)
utf8_hostname = b"\xd1\x82\xd0\xb5\xd1\x81\xd1\x82".decode("utf-8")
self.datasource.metadata["name"] = utf8_hostname
self.assertEqual("65b2fb23", self.datasource.get_hostname().hostname)
def test_get_public_ssh_keys(self):
self.datasource.get_data()
self.assertEqual(
[SERVER_CONTEXT["meta"]["ssh_public_key"]],
self.datasource.get_public_ssh_keys(),
)
def test_get_instance_id(self):
self.datasource.get_data()
self.assertEqual(
SERVER_CONTEXT["uuid"], self.datasource.get_instance_id()
)
def test_platform(self):
"""All platform-related attributes are set."""
self.datasource.get_data()
self.assertEqual(self.datasource.cloud_name, "cloudsigma")
self.assertEqual(self.datasource.platform_type, "cloudsigma")
self.assertEqual(self.datasource.subplatform, "cepko (/dev/ttyS1)")
def test_metadata(self):
self.datasource.get_data()
self.assertEqual(self.datasource.metadata, SERVER_CONTEXT)
def test_user_data(self):
self.datasource.get_data()
self.assertEqual(
self.datasource.userdata_raw,
SERVER_CONTEXT["meta"]["cloudinit-user-data"],
)
def test_encoded_user_data(self):
encoded_context = copy.deepcopy(SERVER_CONTEXT)
encoded_context["meta"]["base64_fields"] = "cloudinit-user-data"
encoded_context["meta"]["cloudinit-user-data"] = "aGkgd29ybGQK"
self.datasource.cepko = CepkoMock(encoded_context)
self.datasource.get_data()
self.assertEqual(self.datasource.userdata_raw, b"hi world\n")
def test_vendor_data(self):
self.datasource.get_data()
self.assertEqual(
self.datasource.vendordata_raw,
SERVER_CONTEXT["vendor_data"]["cloudinit"],
)
def test_lack_of_vendor_data(self):
stripped_context = copy.deepcopy(SERVER_CONTEXT)
del stripped_context["vendor_data"]
self.datasource.cepko = CepkoMock(stripped_context)
self.datasource.get_data()
self.assertIsNone(self.datasource.vendordata_raw)
def METHOD_NAME(self):
stripped_context = copy.deepcopy(SERVER_CONTEXT)
del stripped_context["vendor_data"]["cloudinit"]
self.datasource.cepko = CepkoMock(stripped_context)
self.datasource.get_data()
self.assertIsNone(self.datasource.vendordata_raw)
class DsLoads(test_helpers.TestCase):
def test_get_datasource_list_returns_in_local(self):
deps = (sources.DEP_FILESYSTEM,)
ds_list = DataSourceCloudSigma.get_datasource_list(deps)
self.assertEqual(ds_list, [DataSourceCloudSigma.DataSourceCloudSigma])
@mock.patch.object(
importer,
"match_case_insensitive_module_name",
lambda name: f"DataSource{name}",
)
def test_list_sources_finds_ds(self):
found = sources.list_sources(
["CloudSigma"],
(sources.DEP_FILESYSTEM,),
["cloudinit.sources"],
)
self.assertEqual([DataSourceCloudSigma.DataSourceCloudSigma], found)
| null |
5,142 |
from __future__ import annotations
import asyncio
import collections
from collections import defaultdict, deque
from contextlib import suppress
from operator import attrgetter
from typing import NamedTuple
import discord
from bs4 import BeautifulSoup
from pydis_core.utils import scheduling
import bot
from bot.constants import Channels
from bot.log import get_logger
from . import _cog, doc_cache
from ._parsing import get_symbol_markdown
from ._redis_cache import StaleItemCounter
log = get_logger(__name__)
class StaleInventoryNotifier:
"""Handle sending notifications about stale inventories through `DocItem`s to dev log."""
symbol_counter = StaleItemCounter()
def __init__(self):
self._init_task = scheduling.create_task(
self._init_channel(),
name="StaleInventoryNotifier channel init"
)
self._warned_urls = set()
async def _init_channel(self) -> None:
"""Wait for guild and get channel."""
await bot.instance.wait_until_guild_available()
self._dev_log = bot.instance.get_channel(Channels.dev_log)
async def METHOD_NAME(self, doc_item: _cog.DocItem) -> None:
"""Send a warning to dev log if one wasn't already sent for `item`'s url."""
if doc_item.url not in self._warned_urls:
# Only warn if the item got less than 3 warnings
# or if it has been more than 3 weeks since the last warning
if await self.symbol_counter.increment_for(doc_item) < 3:
self._warned_urls.add(doc_item.url)
await self._init_task
embed = discord.Embed(
description=f"Doc item `{doc_item.symbol_id=}` present in loaded documentation inventories "
f"not found on [site]({doc_item.url}), inventories may need to be refreshed."
)
await self._dev_log.send(embed=embed)
class QueueItem(NamedTuple):
"""Contains a `DocItem` and the `BeautifulSoup` object needed to parse it."""
doc_item: _cog.DocItem
soup: BeautifulSoup
def __eq__(self, other: QueueItem | _cog.DocItem):
if isinstance(other, _cog.DocItem):
return self.doc_item == other
return NamedTuple.__eq__(self, other)
class ParseResultFuture(asyncio.Future):
"""
Future with metadata for the parser class.
    `user_requested` is set by the parser when a Future is requested by a user and moved to the front,
allowing the futures to only be waited for when clearing if they were user requested.
"""
def __init__(self):
super().__init__()
self.user_requested = False
class BatchParser:
"""
Get the Markdown of all symbols on a page and send them to redis when a symbol is requested.
DocItems are added through the `add_item` method which adds them to the `_page_doc_items` dict.
`get_markdown` is used to fetch the Markdown; when this is used for the first time on a page,
all of the symbols are queued to be parsed to avoid multiple web requests to the same page.
"""
def __init__(self):
self._queue: deque[QueueItem] = collections.deque()
self._page_doc_items: dict[str, list[_cog.DocItem]] = defaultdict(list)
self._item_futures: dict[_cog.DocItem, ParseResultFuture] = defaultdict(ParseResultFuture)
self._parse_task = None
self.stale_inventory_notifier = StaleInventoryNotifier()
async def get_markdown(self, doc_item: _cog.DocItem) -> str | None:
"""
Get the result Markdown of `doc_item`.
If no symbols were fetched from `doc_item`s page before,
the HTML has to be fetched and then all items from the page are put into the parse queue.
Not safe to run while `self.clear` is running.
"""
if doc_item not in self._item_futures and doc_item not in self._queue:
self._item_futures[doc_item].user_requested = True
async with bot.instance.http_session.get(doc_item.url, raise_for_status=True) as response:
soup = await bot.instance.loop.run_in_executor(
None,
BeautifulSoup,
await response.text(encoding="utf8"),
"lxml",
)
self._queue.extendleft(QueueItem(item, soup) for item in self._page_doc_items[doc_item.url])
log.debug(f"Added items from {doc_item.url} to the parse queue.")
if self._parse_task is None:
self._parse_task = scheduling.create_task(self._parse_queue(), name="Queue parse")
else:
self._item_futures[doc_item].user_requested = True
with suppress(ValueError):
# If the item is not in the queue then the item is already parsed or is being parsed
self._move_to_front(doc_item)
return await self._item_futures[doc_item]
async def _parse_queue(self) -> None:
"""
Parse all items from the queue, setting their result Markdown on the futures and sending them to redis.
The coroutine will run as long as the queue is not empty, resetting `self._parse_task` to None when finished.
"""
log.trace("Starting queue parsing.")
try:
while self._queue:
item, soup = self._queue.pop()
markdown = None
if (future := self._item_futures[item]).done():
# Some items are present in the inventories multiple times under different symbol names,
# if we already parsed an equal item, we can just skip it.
continue
try:
markdown = await bot.instance.loop.run_in_executor(None, get_symbol_markdown, soup, item)
if markdown is not None:
await doc_cache.set(item, markdown)
else:
# Don't wait for this coro as the parsing doesn't depend on anything it does.
scheduling.create_task(
self.stale_inventory_notifier.METHOD_NAME(item), name="Stale inventory warning"
)
except Exception:
log.exception(f"Unexpected error when handling {item}")
future.set_result(markdown)
del self._item_futures[item]
await asyncio.sleep(0.1)
finally:
self._parse_task = None
log.trace("Finished parsing queue.")
def _move_to_front(self, item: QueueItem | _cog.DocItem) -> None:
"""Move `item` to the front of the parse queue."""
# The parse queue stores soups along with the doc symbols in QueueItem objects,
# in case we're moving a DocItem we have to get the associated QueueItem first and then move it.
item_index = self._queue.index(item)
queue_item = self._queue[item_index]
del self._queue[item_index]
self._queue.append(queue_item)
log.trace(f"Moved {item} to the front of the queue.")
def add_item(self, doc_item: _cog.DocItem) -> None:
"""Map a DocItem to its page so that the symbol will be parsed once the page is requested."""
self._page_doc_items[doc_item.url].append(doc_item)
async def clear(self) -> None:
"""
Clear all internal symbol data.
Wait for all user-requested symbols to be parsed before clearing the parser.
"""
for future in filter(attrgetter("user_requested"), self._item_futures.values()):
await future
if self._parse_task is not None:
self._parse_task.cancel()
self._queue.clear()
self._page_doc_items.clear()
self._item_futures.clear()
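# Illustrative sketch (assumption, not part of the original cog): how the doc
# cog is expected to drive this parser, given a running bot instance and a
# hypothetical ``doc_item``.
#
#     parser = BatchParser()
#     parser.add_item(doc_item)                        # register symbol -> page
#     markdown = await parser.get_markdown(doc_item)   # fetch page, parse, cache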
| null |
5,143 |
# Copyright (c) 2021, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import coremltools as ct
def _get_visible_items(d):
return [x for x in dir(d) if not x.startswith("_")]
def _check_visible_modules(actual, expected):
assert set(actual) == set(expected), "API mis-matched. Got %s, expected %s" % (
actual,
expected,
)
EXPECTED_MODULES = [
"ClassifierConfig",
"ComputeUnit",
"EnumeratedShapes",
"ImageType",
"RangeDim",
"SPECIFICATION_VERSION",
"Shape",
"TensorType",
"colorlayout",
"compression_utils",
"convert",
"converters",
"libcoremlpython",
"models",
"PassPipeline",
"proto",
"precision",
"target",
"utils",
"version",
"test",
"transform",
"libmodelpackage",
"libmilstoragepython",
"optimize",
]
class TestApiVisibilities:
"""Test public coremltools API visibilities."""
def METHOD_NAME(self):
if not ct.utils._is_macos():
EXPECTED_MODULES.remove("libcoremlpython")
_check_visible_modules(_get_visible_items(ct), EXPECTED_MODULES)
def test_utils(self):
expected = [
"compile_model",
"convert_double_to_float_multiarray_type",
"evaluate_classifier",
"evaluate_classifier_with_probabilities",
"evaluate_regressor",
"evaluate_transformer",
"make_pipeline",
"load_spec",
"rename_feature",
"save_spec",
]
_check_visible_modules(_get_visible_items(ct.utils), expected)
def test_models(self):
expected = [
"CompiledMLModel",
"MLModel",
"datatypes",
"feature_vectorizer",
"ml_program",
"model",
"nearest_neighbors",
"neural_network",
"pipeline",
"tree_ensemble",
"utils",
]
_check_visible_modules(_get_visible_items(ct.models), expected)
def test_models_mlmodel(self):
expected = [
"author",
"get_compiled_model_path",
"get_spec",
"input_description",
"license",
"output_description",
"predict",
"save",
"short_description",
"user_defined_metadata",
"version",
"weights_dir",
]
_check_visible_modules(_get_visible_items(ct.models.MLModel), expected)
def test_models_neural_network(self):
expected = [
"AdamParams",
"NeuralNetworkBuilder",
"SgdParams",
"builder",
"flexible_shape_utils",
"optimization_utils",
"printer",
"quantization_utils",
"spec_inspection_utils",
"update_optimizer_utils",
"utils",
]
_check_visible_modules(_get_visible_items(ct.models.neural_network), expected)
def test_models_neural_network_utils(self):
expected = ["NeuralNetworkBuilder", "make_image_input", "make_nn_classifier"]
_check_visible_modules(
_get_visible_items(ct.models.neural_network.utils), expected
)
def test_models_tree_ensemble(self):
expected = [
"TreeEnsembleBase",
"TreeEnsembleClassifier",
"TreeEnsembleRegressor",
"set_classifier_interface_params",
"set_regressor_interface_params",
]
_check_visible_modules(_get_visible_items(ct.models.tree_ensemble), expected)
def test_models_pipeline(self):
expected = [
"Pipeline",
"PipelineClassifier",
"PipelineRegressor",
"set_classifier_interface_params",
"set_regressor_interface_params",
"set_training_features",
"set_transform_interface_params",
]
_check_visible_modules(_get_visible_items(ct.models.pipeline), expected)
def test_converters(self):
expected = [
"ClassifierConfig",
"ColorLayout",
"EnumeratedShapes",
"ImageType",
"RangeDim",
"Shape",
"TensorType",
"convert",
"libsvm",
"mil",
"sklearn",
"xgboost",
]
_check_visible_modules(_get_visible_items(ct.converters), expected)
def test_optimize(self):
expected = [
"coreml",
"torch",
]
_check_visible_modules(_get_visible_items(ct.optimize), expected)
def test_optimize_coreml(self):
expected = [
"OpLinearQuantizerConfig",
"OpMagnitudePrunerConfig",
"OpPalettizerConfig",
"OptimizationConfig",
"OpThresholdPrunerConfig",
"linear_quantize_weights",
"palettize_weights",
"prune_weights",
"decompress_weights",
"get_weights_metadata",
"CoreMLWeightMetaData",
"CoreMLOpMetaData",
]
_check_visible_modules(_get_visible_items(ct.optimize.coreml), expected)
def test_converters_libsvm(self):
_check_visible_modules(_get_visible_items(ct.converters.libsvm), ["convert"])
def test_converters_sklearn(self):
_check_visible_modules(_get_visible_items(ct.converters.sklearn), ["convert"])
def test_converters_xgboost(self):
_check_visible_modules(_get_visible_items(ct.converters.xgboost), ["convert"])
def test_models_neural_network_quantization_utils(self):
expected = [
"AdvancedQuantizedLayerSelector",
"MatrixMultiplyLayerSelector",
"ModelMetrics",
"NoiseMetrics",
"OutputMetric",
"QuantizedLayerSelector",
"TopKMetrics",
"activate_int8_int8_matrix_multiplications",
"compare_models",
"quantize_weights",
]
_check_visible_modules(
_get_visible_items(ct.models.neural_network.quantization_utils), expected
)
def test_compression_utils(self):
expected = [
"affine_quantize_weights",
"palettize_weights",
"sparsify_weights",
"decompress_weights",
]
_check_visible_modules(
_get_visible_items(ct.compression_utils), expected
)
def test_models_neural_network_flexible_shape_utils(self):
expected = [
"NeuralNetworkImageSize",
"NeuralNetworkImageSizeRange",
"NeuralNetworkMultiArrayShape",
"NeuralNetworkMultiArrayShapeRange",
"Shape",
"ShapeRange",
"Size",
"add_enumerated_image_sizes",
"add_enumerated_multiarray_shapes",
"add_multiarray_ndshape_enumeration",
"set_multiarray_ndshape_range",
"update_image_size_range",
"update_multiarray_shape_range",
]
_check_visible_modules(
_get_visible_items(ct.models.neural_network.flexible_shape_utils), expected
)
def test_models_neural_network_update_optimizer_utils(self):
expected = ["AdamParams", "Batch", "RangeParam", "SgdParams"]
_check_visible_modules(
_get_visible_items(ct.models.neural_network.update_optimizer_utils),
expected,
)
def test_models_neural_network_optimization_utils(self):
_check_visible_modules(
_get_visible_items(ct.models.neural_network.optimization_utils), [],
)
| null |
5,144 |
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2023, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
import uuid
from unittest.mock import patch
from pgadmin.browser.server_groups.servers.databases.schemas \
.fts_templates.tests import utils as fts_template_utils
from pgadmin.browser.server_groups.servers.databases.schemas.tests import \
utils as schema_utils
from pgadmin.browser.server_groups.servers.databases.tests import \
utils as database_utils
from pgadmin.utils import server_utils
from pgadmin.utils.route import BaseTestGenerator
from regression import parent_node_dict
from regression import trigger_funcs_utils as fts_template_funcs_utils
from regression.python_test_utils import test_utils as utils
from . import utils as fts_templates_utils
class FTSTemplatesDependencyDependentTestCase(BaseTestGenerator):
""" This class will get the nodes/node FTS templates
under test schema. """
scenarios = utils.generate_scenarios(
'get_fts_template_nodes_and_node',
fts_templates_utils.test_cases
)
def setUp(self):
super().setUp()
self.schema_data = parent_node_dict['schema'][-1]
self.server_id = self.schema_data['server_id']
self.db_id = self.schema_data['db_id']
self.schema_name = self.schema_data['schema_name']
self.schema_id = self.schema_data['schema_id']
self.extension_name = "postgres_fdw"
self.db_name = parent_node_dict["database"][-1]["db_name"]
self.db_user = self.server["username"]
self.func_name = "fts_template_func_%s" % str(uuid.uuid4())[1:8]
self.fts_templates_name = "fts_template_delete_%s" % (
str(uuid.uuid4())[1:8])
server_con = server_utils.connect_server(self, self.server_id)
if not server_con["info"] == "Server connected.":
raise Exception("Could not connect to server to add resource "
"groups.")
server_version = 0
if "type" in server_con["data"]:
if server_con["data"]["version"] < 90500:
message = "Event triggers are not supported by PG9.4 " \
"and PPAS9.4 and below."
self.skipTest(message)
self.function_info = fts_template_funcs_utils.create_trigger_function(
self.server, self.db_name, self.schema_name, self.func_name,
server_version)
self.fts_templates_id = fts_templates_utils. \
create_fts_template(
self.server, self.db_name, self.schema_name,
self.fts_templates_name)
def runTest(self):
""" This function will add new FTS templates under test schema. """
db_con = database_utils.connect_database(self,
utils.SERVER_GROUP,
self.server_id,
self.db_id)
if not db_con["info"] == "Database connected.":
raise Exception("Could not connect to database.")
schema_response = schema_utils.verify_schemas(self.server,
self.db_name,
self.schema_name)
if not schema_response:
raise Exception("Could not find the schema.")
fts_dict_response = fts_templates_utils.verify_fts_template(
self.server, self.db_name, self.fts_templates_name
)
if not fts_dict_response:
raise Exception("Could not find the FTS Templates.")
if self.is_positive_test:
if hasattr(self, "node"):
response = self.get_fts_templates_node()
else:
response = self.METHOD_NAME()
else:
if hasattr(self, "error_fetching_fts_template"):
with patch(self.mock_data["function_name"],
return_value=eval(self.mock_data["return_value"])):
if hasattr(self, "node"):
response = self.get_fts_templates_node()
else:
response = self.METHOD_NAME()
actual_response_code = response.status_code
expected_response_code = self.expected_data['status_code']
self.assertEqual(actual_response_code, expected_response_code)
def METHOD_NAME(self):
"""
This functions returns the fts templates nodes
:return: fts templates nodes
"""
return self.tester.get(
self.url + str(utils.SERVER_GROUP) + '/' +
str(self.server_id) + '/' +
str(self.db_id) + '/' + str(self.schema_id) + '/',
content_type='html/json')
def get_fts_templates_node(self):
"""
This functions returns the fts templates node
:return: fts templates node
"""
if hasattr(self, "set_wrong_fts_templates_value"):
self.fts_templates_id = 0
return self.tester.get(
self.url + str(utils.SERVER_GROUP) + '/' +
str(self.server_id) + '/' +
str(self.db_id) + '/' + str(self.schema_id) + '/' +
str(self.fts_templates_id), content_type='html/json')
def tearDown(self):
"""This function delete the fts_templates and disconnect the test
database."""
fts_template_utils.delete_fts_template(self.server, self.db_name,
self.schema_name,
self.fts_templates_name)
database_utils.disconnect_database(self, self.server_id,
self.db_id)
| null |
5,145 |
import unittest
import cantera as ct
from . import utilities
class TestMixture(utilities.CanteraTest):
@classmethod
def setUpClass(self):
utilities.CanteraTest.setUpClass()
self.phase1 = ct.Solution('h2o2.yaml', transport_model=None)
self.phase2 = ct.Solution('air.yaml')
def setUp(self):
self.mix = ct.Mixture([(self.phase1, 1.0), (self.phase2, 2.0)])
def test_sizes(self):
self.assertEqual(self.mix.n_phases, 2)
self.assertEqual(self.mix.n_species,
self.phase1.n_species + self.phase2.n_species)
E = set(self.phase1.element_names) | set(self.phase2.element_names)
self.assertEqual(len(E), self.mix.n_elements)
def test_element_index(self):
m_H = self.mix.element_index('H')
self.assertEqual(m_H, self.mix.element_index(m_H))
with self.assertRaisesRegex(ValueError, 'No such element'):
self.mix.element_index('W')
with self.assertRaisesRegex(ValueError, 'No such element'):
self.mix.element_index(41)
with self.assertRaisesRegex(TypeError, 'must be a string or a number'):
self.mix.element_index(None)
def test_speciesIndex(self):
names = self.mix.species_names
kOH = names.index('OH')
kN2 = names.index('N2O')
self.assertEqual(self.mix.species_name(kOH), 'OH')
self.assertEqual(self.mix.species_name(kN2), 'N2O')
self.assertEqual(self.mix.species_index(0, 'OH'), kOH)
self.assertEqual(self.mix.species_index(self.phase1, 'OH'), kOH)
self.assertEqual(self.mix.species_index(self.phase1.name, 'OH'), kOH)
self.assertEqual(self.mix.species_index(0, self.phase1.species_index('OH')), kOH)
self.assertEqual(self.mix.species_index(1, self.phase2.species_index('N2O')), kN2)
self.assertEqual(self.mix.species_index(1, 'N2O'), kN2)
with self.assertRaisesRegex(IndexError, 'out of range'):
self.mix.species_index(3, 'OH')
with self.assertRaisesRegex(ValueError, 'No such species'):
self.mix.species_index(1, 'OH')
with self.assertRaisesRegex(ValueError, 'out of range'):
self.mix.species_index(0, -2)
with self.assertRaisesRegex(ValueError, 'No such species'):
self.mix.species_index(1, 'CO2')
def test_n_atoms(self):
names = self.mix.species_names
kOH = names.index('OH')
kN2 = names.index('N2')
mH = self.mix.element_index('H')
mN = self.mix.element_index('N')
self.assertEqual(self.mix.n_atoms(kOH, 'H'), 1)
self.assertEqual(self.mix.n_atoms(kOH, 'O'), 1)
self.assertEqual(self.mix.n_atoms(kOH, mH), 1)
self.assertEqual(self.mix.n_atoms(kOH, mN), 0)
self.assertEqual(self.mix.n_atoms(kN2, mN), 2)
self.assertEqual(self.mix.n_atoms(kN2, mH), 0)
def METHOD_NAME(self):
self.assertEqual(self.phase1, self.mix.phase(0))
self.assertEqual(self.phase2, self.mix.phase(1))
phaseNames = self.mix.phase_names
self.assertEqual(len(phaseNames), self.mix.n_phases)
self.assertEqual(phaseNames[0], self.phase1.name)
self.assertEqual(phaseNames[1], self.phase2.name)
def test_phase_index(self):
self.assertEqual(self.mix.phase_index(self.phase1), 0)
self.assertEqual(self.mix.phase_index(self.phase2), 1)
self.assertEqual(self.mix.phase_index(self.phase2.name), 1)
self.assertEqual(self.mix.phase_index(1), 1)
with self.assertRaises(KeyError):
self.mix.phase_index('foobar')
with self.assertRaises(IndexError):
self.mix.phase_index(2)
def test_properties(self):
self.mix.T = 350
self.assertEqual(self.mix.T, 350)
self.mix.P = 2e5
self.assertEqual(self.mix.P, 2e5)
self.assertEqual(self.mix.T, 350)
self.assertGreater(self.mix.max_temp, self.mix.min_temp)
def test_charge(self):
C = sum(self.mix.phase_charge(i) for i in range(self.mix.n_phases))
self.assertEqual(self.mix.charge, C)
def test_phase_moles(self):
M = self.mix.phase_moles()
self.assertEqual(M[0], self.mix.phase_moles(0))
self.assertEqual(M[1], self.mix.phase_moles('air'))
self.mix.set_phase_moles('air', 4)
self.assertEqual(self.mix.phase_moles(1), 4)
def test_species_moles(self):
self.mix.species_moles = 'H2:1.0, NO2:4.0'
P = self.mix.phase_moles()
S = self.mix.species_moles
self.assertEqual(P[0], 1)
self.assertEqual(P[1], 4)
self.assertEqual(S[self.mix.species_index(0, 'H2')], 1)
self.assertEqual(S[self.mix.species_index(1, 'NO2')], 4)
S[2] = 7
self.mix.species_moles = S
self.assertNear(self.mix.species_moles[2], S[2])
self.assertNear(self.mix.phase_moles(0), sum(S[:self.phase1.n_species]))
with self.assertRaises(ValueError):
self.mix.species_moles = (1,2,3)
with self.assertRaises(TypeError):
self.mix.species_moles = 9
def test_element_moles(self):
self.mix.species_moles = 'H2:1.0, OH:4.0'
self.assertNear(self.mix.element_moles('H'), 6)
self.assertNear(self.mix.element_moles('O'), 4)
self.assertNear(self.mix.element_moles('N'), 0)
def test_chemical_potentials(self):
C = self.mix.chemical_potentials
C1 = self.phase1.chemical_potentials
C2 = self.phase2.chemical_potentials
self.assertArrayNear(C[:self.phase1.n_species], C1)
self.assertArrayNear(C[self.phase1.n_species:], C2)
def test_equilibrate1(self):
self.mix.species_moles = 'H2:1.0, O2:0.5, N2:1.0'
self.mix.T = 400
self.mix.P = 2 * ct.one_atm
E1 = [self.mix.element_moles(m) for m in range(self.mix.n_elements)]
self.mix.equilibrate('TP', solver='vcs', estimate_equil=-1)
E2 = [self.mix.element_moles(m) for m in range(self.mix.n_elements)]
self.assertArrayNear(E1, E2)
self.assertNear(self.mix.T, 400)
self.assertNear(self.mix.P, 2 * ct.one_atm)
@unittest.expectedFailure # See https://github.com/Cantera/cantera/issues/1023
def test_equilibrate2(self):
self.mix.species_moles = 'H2:1.0, O2:0.5, N2:1.0'
self.mix.T = 400
self.mix.P = 2 * ct.one_atm
E1 = [self.mix.element_moles(m) for m in range(self.mix.n_elements)]
self.mix.equilibrate('TP', solver='gibbs')
E2 = [self.mix.element_moles(m) for m in range(self.mix.n_elements)]
self.assertArrayNear(E1, E2)
self.assertNear(self.mix.T, 400)
self.assertNear(self.mix.P, 2 * ct.one_atm)
def test_invalid_property(self):
x = self.mix
with self.assertRaises(AttributeError):
x.foobar = 300
with self.assertRaises(AttributeError):
x.foobar
def test_invalid_phase_type(self):
water = ct.Water()
with self.assertRaisesRegex(ct.CanteraError, 'not compatible'):
self.mix = ct.Mixture([(self.phase1, 1.0), (water, 2.0)])
| null |
5,146 |
from unittest import mock
import pytest
from zigpy.config import CONF_OTA_SONOFF
import zigpy.ota
import zigpy.ota.image
import zigpy.ota.provider as ota_p
from tests.async_mock import AsyncMock, patch
MANUFACTURER_ID = 4742
IMAGE_TYPE = 1
@pytest.fixture
def sonoff_prov():
p = ota_p.Sonoff()
p.enable()
return p
@pytest.fixture
def sonoff_image_with_version():
def METHOD_NAME(version=4353, image_type=IMAGE_TYPE):
METHOD_NAME = zigpy.ota.provider.SONOFFImage(
manufacturer_id=MANUFACTURER_ID,
image_type=image_type,
version=version,
image_size=131086,
url=mock.sentinel.url,
)
return METHOD_NAME
return METHOD_NAME
@pytest.fixture
def sonoff_image(sonoff_image_with_version):
return sonoff_image_with_version()
@pytest.fixture
def sonoff_key():
return zigpy.ota.image.ImageKey(MANUFACTURER_ID, IMAGE_TYPE)
async def test_sonoff_init(sonoff_prov):
sonoff_prov.enable = mock.MagicMock()
sonoff_prov.refresh_firmware_list = AsyncMock()
r = await sonoff_prov.initialize_provider({CONF_OTA_SONOFF: True})
assert r is None
assert sonoff_prov.enable.call_count == 1
assert sonoff_prov.refresh_firmware_list.call_count == 1
async def test_sonoff_get_image_no_cache(sonoff_prov, sonoff_image):
sonoff_image.fetch_image = AsyncMock(return_value=mock.sentinel.image)
sonoff_prov._cache = mock.MagicMock()
sonoff_prov._cache.__getitem__.side_effect = KeyError()
sonoff_prov.refresh_firmware_list = AsyncMock()
# SONOFF manufacturer_id, but not in cache
assert sonoff_image.key not in sonoff_prov._cache
r = await sonoff_prov.get_image(sonoff_image.key)
assert r is None
assert sonoff_prov.refresh_firmware_list.call_count == 1
assert sonoff_prov._cache.__getitem__.call_count == 1
assert sonoff_image.fetch_image.call_count == 0
async def test_sonoff_get_image(sonoff_prov, sonoff_key, sonoff_image):
sonoff_image.fetch_image = AsyncMock(return_value=mock.sentinel.image)
sonoff_prov._cache = mock.MagicMock()
sonoff_prov._cache.__getitem__.return_value = sonoff_image
sonoff_prov.refresh_firmware_list = AsyncMock()
r = await sonoff_prov.get_image(sonoff_key)
assert r is mock.sentinel.image
assert sonoff_prov._cache.__getitem__.call_count == 1
assert sonoff_prov._cache.__getitem__.call_args[0][0] == sonoff_image.key
assert sonoff_image.fetch_image.call_count == 1
@patch("aiohttp.ClientSession.get")
async def test_sonoff_refresh_list(mock_get, sonoff_prov, sonoff_image_with_version):
METHOD_NAME = sonoff_image_with_version(version=4353, image_type=1)
mock_get.return_value.__aenter__.return_value.json = AsyncMock(
return_value=[
{
"fw_binary_url": "https://zigbee-ota.sonoff.tech/releases/86-0001-00001101.zigbee",
"fw_file_version": 4353,
"fw_filesize": 131086,
"fw_image_type": 1,
"fw_manufacturer_id": 4742,
"model_id": "ZBMINI-L",
}
]
)
mock_get.return_value.__aenter__.return_value.status = 200
mock_get.return_value.__aenter__.return_value.reason = "OK"
await sonoff_prov.refresh_firmware_list()
assert mock_get.call_count == 1
assert len(sonoff_prov._cache) == 1
assert METHOD_NAME.key in sonoff_prov._cache
cached = sonoff_prov._cache[METHOD_NAME.key]
assert cached.image_type == METHOD_NAME.image_type
assert (
cached.url == "https://zigbee-ota.sonoff.tech/releases/86-0001-00001101.zigbee"
)
assert not sonoff_prov.expired
@patch("aiohttp.ClientSession.get")
async def test_sonoff_refresh_list_locked(
mock_get, sonoff_prov, sonoff_image_with_version
):
await sonoff_prov._locks[ota_p.LOCK_REFRESH].acquire()
mock_get.return_value.__aenter__.return_value.json = AsyncMock(side_effect=[[]])
await sonoff_prov.refresh_firmware_list()
assert mock_get.call_count == 0
@patch("aiohttp.ClientSession.get")
async def test_sonoff_refresh_list_failed(mock_get, sonoff_prov):
mock_get.return_value.__aenter__.return_value.json = AsyncMock(side_effect=[[]])
mock_get.return_value.__aenter__.return_value.status = 434
mock_get.return_value.__aenter__.return_value.reason = "UNK"
with patch.object(sonoff_prov, "update_expiration") as update_exp:
await sonoff_prov.refresh_firmware_list()
assert mock_get.call_count == 1
assert update_exp.call_count == 0
@patch("aiohttp.ClientSession.get")
async def test_sonoff_fetch_image(mock_get, sonoff_image_with_version):
image = zigpy.ota.image.OTAImage(
header=zigpy.ota.image.OTAImageHeader(
upgrade_file_id=200208670,
header_version=256,
header_length=56,
field_control=zigpy.ota.image.FieldControl(0),
manufacturer_id=4742,
image_type=1,
file_version=4353,
stack_version=2,
header_string="",
image_size=66,
),
subelements=[
zigpy.ota.image.SubElement(
tag_id=zigpy.ota.image.ElementTagId.UPGRADE_IMAGE, data=b"abcd"
)
],
)
METHOD_NAME = sonoff_image_with_version(version=4353, image_type=1)
METHOD_NAME.url = mock.sentinel.url
mock_get.return_value.__aenter__.return_value.read = AsyncMock(
return_value=image.serialize()
)
r = await METHOD_NAME.fetch_image()
assert isinstance(r, zigpy.ota.image.OTAImage)
assert mock_get.call_count == 1
assert mock_get.call_args[0][0] == mock.sentinel.url
assert r == image
| null |
5,147 |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
test msssim
"""
import numpy as np
import pytest
import mindspore.common.dtype as mstype
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common.api import _cell_graph_executor
_MSSSIM_WEIGHTS = (0.0448, 0.2856, 0.3001, 0.2363, 0.1333)
class MSSSIMNet(nn.Cell):
def __init__(self, max_val=1.0, power_factors=_MSSSIM_WEIGHTS, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03):
super(MSSSIMNet, self).__init__()
self.net = nn.MSSSIM(max_val, power_factors, filter_size, filter_sigma, k1, k2)
def construct(self, img1, img2):
return self.net(img1, img2)
def test_compile():
factors = (0.033, 0.033, 0.033)
net = MSSSIMNet(power_factors=factors)
img1 = Tensor(np.random.random((8, 3, 128, 128)))
img2 = Tensor(np.random.random((8, 3, 128, 128)))
_cell_graph_executor.compile(net, img1, img2)
def test_compile_grayscale():
max_val = 255
factors = (0.033, 0.033, 0.033)
net = MSSSIMNet(max_val=max_val, power_factors=factors)
img1 = Tensor(np.random.randint(0, 256, (8, 3, 128, 128), np.uint8))
img2 = Tensor(np.random.randint(0, 256, (8, 3, 128, 128), np.uint8))
_cell_graph_executor.compile(net, img1, img2)
def test_msssim_max_val_negative():
max_val = -1
with pytest.raises(ValueError):
_ = MSSSIMNet(max_val)
def test_msssim_max_val_bool():
max_val = True
with pytest.raises(TypeError):
_ = MSSSIMNet(max_val)
def test_msssim_max_val_zero():
max_val = 0
with pytest.raises(ValueError):
_ = MSSSIMNet(max_val)
def test_msssim_power_factors_set():
with pytest.raises(TypeError):
_ = MSSSIMNet(power_factors={0.033, 0.033, 0.033})
def METHOD_NAME():
with pytest.raises(TypeError):
_ = MSSSIMNet(filter_size=1.1)
def test_msssim_filter_size_zero():
with pytest.raises(ValueError):
_ = MSSSIMNet(filter_size=0)
def test_msssim_filter_sigma_zero():
with pytest.raises(ValueError):
_ = MSSSIMNet(filter_sigma=0.0)
def test_msssim_filter_sigma_negative():
with pytest.raises(ValueError):
_ = MSSSIMNet(filter_sigma=-0.1)
def test_msssim_different_shape():
shape_1 = (8, 3, 128, 128)
shape_2 = (8, 3, 256, 256)
factors = (0.033, 0.033, 0.033)
img1 = Tensor(np.random.random(shape_1))
img2 = Tensor(np.random.random(shape_2))
net = MSSSIMNet(power_factors=factors)
with pytest.raises(ValueError):
_cell_graph_executor.compile(net, img1, img2)
def test_msssim_different_dtype():
dtype_1 = mstype.float32
dtype_2 = mstype.float16
factors = (0.033, 0.033, 0.033)
img1 = Tensor(np.random.random((8, 3, 128, 128)), dtype=dtype_1)
img2 = Tensor(np.random.random((8, 3, 128, 128)), dtype=dtype_2)
net = MSSSIMNet(power_factors=factors)
with pytest.raises(TypeError):
_cell_graph_executor.compile(net, img1, img2)
def test_msssim_invalid_5d_input():
shape_1 = (8, 3, 128, 128)
shape_2 = (8, 3, 256, 256)
invalid_shape = (8, 3, 128, 128, 1)
factors = (0.033, 0.033, 0.033)
img1 = Tensor(np.random.random(shape_1))
invalid_img1 = Tensor(np.random.random(invalid_shape))
img2 = Tensor(np.random.random(shape_2))
invalid_img2 = Tensor(np.random.random(invalid_shape))
net = MSSSIMNet(power_factors=factors)
with pytest.raises(ValueError):
_cell_graph_executor.compile(net, invalid_img1, img2)
with pytest.raises(ValueError):
_cell_graph_executor.compile(net, img1, invalid_img2)
with pytest.raises(ValueError):
_cell_graph_executor.compile(net, invalid_img1, invalid_img2)
| null |
5,148 |
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2023, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
import os
import sys
import time
import tempfile
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import StaleElementReferenceException, \
TimeoutException
from regression.feature_utils.base_feature_test import BaseFeatureTest
from regression.feature_utils.locators import QueryToolLocators
class CheckFileManagerFeatureTest(BaseFeatureTest):
"""Tests to check file manager for XSS."""
scenarios = [
("File manager feature test",
dict())
]
def before(self):
if os.name == 'nt':
self.skipTest("This test is skipped for Windows. As Windows "
"does not allow the '<' and '>' character while "
"specifying the file name.")
self.page.add_server(self.server)
self.wait = WebDriverWait(self.page.driver, 10)
filename = self.server_information['type'] + \
str(self.server_information['server_version'])
self.XSS_FILE = '<img src=x ' + filename + '=alert("1")>.sql'
self.tmpDir = os.path.join(tempfile.gettempdir(), 'pga4_test')
# Create temp directory
if not os.path.exists(self.tmpDir):
os.makedirs(self.tmpDir)
if self.parallel_ui_tests:
xss_file_path = self.XSS_FILE
else:
xss_file_path = os.path.join(self.tmpDir, self.XSS_FILE)
# Remove any previous file
if os.path.isfile(xss_file_path):
os.remove(xss_file_path)
def after(self):
self.page.close_query_tool(False)
self.page.remove_server(self.server)
def METHOD_NAME(self):
print("Tests to check if File manager is vulnerable to XSS... ",
file=sys.stderr, end="")
self._navigate_to_query_tool()
self.page.fill_codemirror_area_with("SELECT 1;")
self._create_new_file()
self._open_file_manager_and_check_xss_file()
print("OK.", file=sys.stderr)
print("File manager sorting of data", file=sys.stderr)
self._check_file_sorting()
print("OK.", file=sys.stderr)
def _navigate_to_query_tool(self):
self.page.expand_database_node("Server", self.server['name'],
self.server['db_password'],
self.test_db)
self.page.open_query_tool()
def _create_new_file(self):
self.page.find_by_css_selector(QueryToolLocators.btn_save_file) \
.click()
# Set the XSS value in input
WebDriverWait(self.driver, 15).until(EC.presence_of_element_located(
(By.XPATH, QueryToolLocators.change_file_types_dd_xpath)))
# Save the file
if not self.parallel_ui_tests:
self.page.fill_input_by_css_selector(
QueryToolLocators.folder_path_css, '',
key_after_input=Keys.ENTER)
self.page.fill_input_by_css_selector(
QueryToolLocators.folder_path_css,
self.tmpDir, input_keys=True, key_after_input=Keys.ENTER)
self.page.find_by_css_selector(
QueryToolLocators.folder_path_css).send_keys(Keys.ENTER)
input_file_path_ele = \
self.page.find_by_xpath(QueryToolLocators.save_file_path_xpath)
input_file_path_ele.send_keys(self.XSS_FILE)
self.page.click_modal('Save')
self.page.wait_for_query_tool_loading_indicator_to_disappear()
def _open_file_manager_and_check_xss_file(self):
load_file = self.page.find_by_css_selector(
QueryToolLocators.btn_load_file_css)
load_file.click()
WebDriverWait(self.driver, 15).until(EC.presence_of_element_located(
(By.XPATH, QueryToolLocators.change_file_types_dd_xpath)))
# Open the file
if not self.parallel_ui_tests:
self.page.fill_input_by_css_selector(
QueryToolLocators.folder_path_css, '',
key_after_input=Keys.ENTER)
self.page.fill_input_by_css_selector(
QueryToolLocators.folder_path_css,
self.tmpDir, key_after_input=Keys.ENTER)
self.page.find_by_css_selector(
QueryToolLocators.folder_path_css).send_keys(Keys.ENTER)
time.sleep(2)
self.page.fill_input_by_css_selector(
QueryToolLocators.search_file_edit_box_css, self.XSS_FILE,
input_keys=True)
self.wait.until(EC.visibility_of_element_located(
(By.CSS_SELECTOR, QueryToolLocators.select_file_content_css)))
table = self.page.driver.find_element(
By.CSS_SELECTOR, QueryToolLocators.select_file_content_css)
retry_count = 0
while retry_count < 5:
try:
contents = table.get_attribute('innerHTML')
break
except (StaleElementReferenceException, TimeoutException):
retry_count += 1
self.page.click_modal('Cancel')
self.page.wait_for_query_tool_loading_indicator_to_disappear()
filename = self.server_information['type'] + \
str(self.server_information['server_version'])
self._check_escaped_characters(
contents,
'<img src=x ' + filename +
'=alert("1")>.sql', 'File manager'
)
def _check_escaped_characters(self, source_code, string_to_find, source):
# For XSS we need to search against element's html code
assert source_code.find(
string_to_find
) != -1, "{0} might be vulnerable to XSS, source code is: {1}".format(
source, source_code)
def _check_file_sorting(self):
load_file = self.page.find_by_css_selector(
QueryToolLocators.btn_load_file_css)
load_file.click()
WebDriverWait(self.driver, 15).until(EC.presence_of_element_located(
(By.XPATH, QueryToolLocators.change_file_types_dd_xpath)))
# Intermittently the first click is not successful,
# so retry a couple of times.
success = self.page.retry_click(
(By.CSS_SELECTOR,
"div [role='grid'] div[role='columnheader'][aria-colindex='1']"),
(By.CSS_SELECTOR,
"div [role='grid'] div[role='columnheader']"
"[aria-colindex='1'][aria-sort='ascending']"))
if not success:
raise RuntimeError("Unable to sort in ascending order while "
"clicked on 'Name' column")
# Add a short sleep so that the element is ready to be clicked.
time.sleep(0.05)
# Click and check for descending sort.
# Intermittently the first click is not successful,
# so retry a couple of times.
success = self.page.retry_click(
(By.CSS_SELECTOR,
"div [role='grid'] div[role='columnheader'][aria-colindex='1']"),
(By.CSS_SELECTOR,
"div [role='grid'] div[role='columnheader']"
"[aria-colindex='1'][aria-sort='descending']"))
if not success:
raise RuntimeError("Unable to sort in descending order while "
"clicked on 'Name' column")
self.page.click_modal('Cancel')
| null |
5,149 |
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore.ops.functional import vmap
from mindspore.common import dtype as ms_type
class LrnNet(nn.Cell):
def __init__(self, depth_radius=5, bias=1.0, alpha=1.0, beta=0.5, norm_region="ACROSS_CHANNELS"):
super(LrnNet, self).__init__()
self.depth_radius = depth_radius
self.bias = bias
self.alpha = alpha
self.beta = beta
self.norm_region = norm_region
self.lrn = P.LRN(depth_radius, bias, alpha, beta, norm_region)
def construct(self, input_x):
output = self.lrn(input_x)
return output
class LrnVMapNet(nn.Cell):
def __init__(self, forward_net, in_axes, out_axes):
super(LrnVMapNet, self).__init__()
self.net = forward_net
self.in_axes = in_axes
self.out_axes = out_axes
def construct(self, input_x):
return vmap(self.net, self.in_axes, self.out_axes)(input_x)
def METHOD_NAME(data_type):
"""
Feature: generate an LRN numpy benchmark.
Description: The input shape needs to match the output shape.
Expectation: match the MindSpore LRN output.
"""
y_exp = np.array([[[[1.6239204, -0.61149347],
[-0.5279556, -1.0724881]],
[[0.86518127, -2.3005495],
[1.7440975, -0.760866]],
[[0.31895563, -0.2492632],
[1.4615093, -2.059218]]]]).astype(data_type)
return y_exp
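# For context (my reading of the op, stated as an assumption rather than taken
# from this file): the hard-coded expectation above follows the usual local
# response normalisation,
#     y[n, c, h, w] = x[n, c, h, w] / (bias + alpha * sum_j x[n, j, h, w] ** 2) ** beta
# where j runs over channels within depth_radius of c.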
@pytest.mark.level0
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.parametrize("data_type", [np.float32, np.float16])
def test_lrn(data_type):
"""
Feature: Test LRN.
Description: The input shape needs to match the output shape.
Expectation: match to np benchmark.
"""
context.set_context(mode=context.GRAPH_MODE)
input_data = np.array([[[[1.6243454, -0.6117564],
[-0.5281718, -1.0729686]],
[[0.86540765, -2.3015387],
[1.7448118, -0.7612069]],
[[0.3190391, -0.24937038],
[1.4621079, -2.0601406]]]]).astype(data_type)
loss = 1e-6
if data_type == np.float16:
loss = 1e-3
benchmark_output = METHOD_NAME(data_type)
lrn = LrnNet(depth_radius=2, bias=1.0, alpha=0.0001, beta=0.75)
output = lrn(Tensor(input_data))
np.testing.assert_allclose(output.asnumpy(), benchmark_output, rtol=loss, atol=loss)
context.set_context(mode=context.PYNATIVE_MODE)
output = lrn(Tensor(input_data))
np.testing.assert_allclose(output.asnumpy(), benchmark_output, rtol=loss, atol=loss)
@pytest.mark.level0
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
def test_lrn_vmap():
"""
Feature: Test LRN Vmap on CPU.
Description: The output shape matches the input shape.
Expectation: match to np benchmark.
"""
context.set_context(mode=context.GRAPH_MODE)
data_type = np.float32
loss = 1e-6
input_x = np.array([[[[[1.6243454, -0.6117564],
[-0.5281718, -1.0729686]],
[[0.86540765, -2.3015387],
[1.7448118, -0.7612069]],
[[0.3190391, -0.24937038],
[1.4621079, -2.0601406]]]],
[[[[1.6243454, -0.6117564],
[-0.5281718, -1.0729686]],
[[0.86540765, -2.3015387],
[1.7448118, -0.7612069]],
[[0.3190391, -0.24937038],
[1.4621079, -2.0601406]]]]]).astype(data_type)
benchmark_output = np.array([[[[[1.6239204, -0.61149347],
[-0.5279556, -1.0724881]],
[[0.86518127, -2.3005495],
[1.7440975, -0.760866]],
[[0.31895563, -0.2492632],
[1.4615093, -2.059218]]]],
[[[[1.6239204, -0.61149347],
[-0.5279556, -1.0724881]],
[[0.86518127, -2.3005495],
[1.7440975, -0.760866]],
[[0.31895563, -0.2492632],
[1.4615093, -2.059218]]]]]).astype(data_type)
lrn = LrnNet(depth_radius=2, bias=1.0, alpha=0.0001, beta=0.75)
in_axes = 0
out_axes = 0
output = LrnVMapNet(lrn, in_axes, out_axes)(Tensor(input_x))
np.testing.assert_allclose(output.asnumpy(), benchmark_output, rtol=loss, atol=loss)
@pytest.mark.level0
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
def test_lrn_dy_shape():
"""
Feature: Test LRN Dynamic Shape.
Description: The output shape matches the input shape.
Expectation: match to np benchmark.
"""
context.set_context(mode=context.GRAPH_MODE)
ms_data_type = ms_type.float32
data_type = np.float32
# The shape of x is (1, 3, 2, 2)
x = np.array([[[[1.6243454, -0.6117564],
[-0.5281718, -1.0729686]],
[[0.86540765, -2.3015387],
[1.7448118, -0.7612069]],
[[0.3190391, -0.24937038],
[1.4621079, -2.0601406]]]]).astype(data_type)
loss = 1e-6
benchmark_output = METHOD_NAME(data_type)
lrn = LrnNet(depth_radius=2, bias=1.0, alpha=0.0001, beta=0.75)
input_dyn = Tensor(shape=[1, 3, 2, None], dtype=ms_data_type)
lrn.set_inputs(input_dyn)
output = lrn(Tensor(x))
np.testing.assert_allclose(output.asnumpy(), benchmark_output, rtol=loss, atol=loss)
context.set_context(mode=context.PYNATIVE_MODE)
input_dyn = Tensor(shape=[1, 3, 2, None], dtype=ms_data_type)
lrn.set_inputs(input_dyn)
output = lrn(Tensor(x))
np.testing.assert_allclose(output.asnumpy(), benchmark_output, rtol=loss, atol=loss)
| null |
5,150 |
import datetime
import pytest
import zigpy.application
import zigpy.ota
import zigpy.ota.image
import zigpy.ota.provider
import zigpy.ota.validators
from .async_mock import AsyncMock, MagicMock, patch, sentinel
MANUFACTURER_ID = sentinel.manufacturer_id
IMAGE_TYPE = sentinel.image_type
@pytest.fixture
def image_with_version():
def METHOD_NAME(version=100):
METHOD_NAME = zigpy.ota.image.OTAImage()
METHOD_NAME.header = zigpy.ota.image.OTAImageHeader()
METHOD_NAME.header.manufacturer_id = MANUFACTURER_ID
METHOD_NAME.header.image_type = IMAGE_TYPE
METHOD_NAME.header.file_version = version
METHOD_NAME.subelements = [
zigpy.ota.image.SubElement.deserialize(b"\x00\x00\x04\x00\x00\x00abcdef")[0]
]
return METHOD_NAME
return METHOD_NAME
@pytest.fixture
def image(image_with_version):
return image_with_version()
@pytest.fixture
def key():
return zigpy.ota.image.ImageKey(MANUFACTURER_ID, IMAGE_TYPE)
@pytest.fixture
def ota():
app = MagicMock(spec_set=zigpy.application.ControllerApplication)
tradfri = MagicMock(spec_set=zigpy.ota.provider.Trådfri)
check_invalid = MagicMock(
spec_set=zigpy.ota.validators.check_invalid,
return_value=False,
)
with patch("zigpy.ota.provider.Trådfri", tradfri):
with patch("zigpy.ota.check_invalid", check_invalid):
yield zigpy.ota.OTA(app)
async def test_ota_initialize(ota):
ota.async_event = AsyncMock()
await ota.initialize()
assert ota.async_event.call_count == 1
assert ota.async_event.call_args[0][0] == "initialize_provider"
assert ota.not_initialized is False
async def test_get_image_empty(ota, image, key):
ota.async_event = AsyncMock(return_value=[None, None])
assert len(ota._image_cache) == 0
res = await ota.get_ota_image(MANUFACTURER_ID, IMAGE_TYPE)
assert len(ota._image_cache) == 0
assert res is None
assert ota.async_event.call_count == 1
assert ota.async_event.call_args[0][0] == "get_image"
assert ota.async_event.call_args[0][1] == key
async def test_get_image_new(ota, image, key, image_with_version, monkeypatch):
newer = image_with_version(image.header.file_version + 1)
ota.async_event = AsyncMock(return_value=[None, image, newer])
assert len(ota._image_cache) == 0
res = await ota.get_ota_image(MANUFACTURER_ID, IMAGE_TYPE)
# got new image in the cache
assert len(ota._image_cache) == 1
assert res.image.header == newer.header
assert res.image.subelements == newer.subelements
assert ota.async_event.call_count == 1
assert ota.async_event.call_args[0][0] == "get_image"
assert ota.async_event.call_args[0][1] == key
ota.async_event.reset_mock()
assert len(ota._image_cache) == 1
res = await ota.get_ota_image(MANUFACTURER_ID, IMAGE_TYPE)
# should get just the cached image
assert len(ota._image_cache) == 1
assert res.image.header == newer.header
assert res.image.subelements == newer.subelements
assert ota.async_event.call_count == 0
# on cache expiration, ping listeners
ota.async_event.reset_mock()
assert len(ota._image_cache) == 1
monkeypatch.setattr(
zigpy.ota,
"TIMEDELTA_0",
zigpy.ota.CachedImage.DEFAULT_EXPIRATION + datetime.timedelta(seconds=1),
)
res = await ota.get_ota_image(MANUFACTURER_ID, IMAGE_TYPE)
assert len(ota._image_cache) == 1
assert res.image.header == newer.header
assert res.image.subelements == newer.subelements
assert ota.async_event.call_count == 1
async def test_get_image_invalid(ota, image, image_with_version):
corrupted = image_with_version(image.header.file_version)
zigpy.ota.check_invalid.side_effect = [True]
ota.async_event = AsyncMock(return_value=[None, corrupted])
assert len(ota._image_cache) == 0
res = await ota.get_ota_image(MANUFACTURER_ID, IMAGE_TYPE)
assert len(ota._image_cache) == 0
assert res is None
@pytest.mark.parametrize("v1", [0, 1])
@pytest.mark.parametrize("v2", [0, 1])
async def test_get_image_invalid_then_valid_versions(v1, v2, ota, image_with_version):
image = image_with_version(100 + v1)
image.header.header_string = b"\x12" * 32
corrupted = image_with_version(100 + v2)
corrupted.header.header_string = b"\x11" * 32
ota.async_event = AsyncMock(return_value=[corrupted, image])
zigpy.ota.check_invalid.side_effect = [True, False]
res = await ota.get_ota_image(MANUFACTURER_ID, IMAGE_TYPE)
# The valid image is always picked, even if the versions match
assert res.image.header.header_string == image.header.header_string
def test_cached_image_expiration(image, monkeypatch):
cached = zigpy.ota.CachedImage.new(image)
assert cached.expired is False
monkeypatch.setattr(
zigpy.ota,
"TIMEDELTA_0",
zigpy.ota.CachedImage.DEFAULT_EXPIRATION + datetime.timedelta(seconds=1),
)
assert cached.expired is True
def test_cached_image_no_expiration(image, monkeypatch):
cached = zigpy.ota.CachedImage()
monkeypatch.setattr(
zigpy.ota,
"TIMEDELTA_0",
zigpy.ota.CachedImage.DEFAULT_EXPIRATION + datetime.timedelta(seconds=1),
)
assert cached.expired is False
def test_cached_image_expiration_delay():
d = b"\x1e\xf1\xee\x0b\x00\x018\x00"
d += b"\x00\x00"
d += (
b"|\x11\x01!rE!\x12\x02\x00EBL tradfri_light_basic\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x38\x00\x00\x00"
)
METHOD_NAME = zigpy.ota.image.OTAImage.deserialize(d)[0]
cached = zigpy.ota.CachedImage.new(METHOD_NAME)
orig_expiration = cached.expires_on
cached.get_image_block(0, 40)
assert cached.expires_on == orig_expiration
new_expiration = (
cached.expires_on
- zigpy.ota.CachedImage.DEFAULT_EXPIRATION
+ zigpy.ota.DELAY_EXPIRATION
- datetime.timedelta(seconds=10)
)
cached.expires_on = new_expiration
cached.get_image_block(0, 40)
assert cached.expires_on > new_expiration
def test_cached_image_serialization_cache(image):
image = MagicMock(image)
image.serialize.side_effect = [b"data"]
cached = zigpy.ota.CachedImage.new(image)
assert cached.cached_data is None
assert image.serialize.call_count == 0
assert cached.get_image_block(0, 1) == b"d"
assert cached.get_image_block(1, 1) == b"a"
assert cached.get_image_block(2, 1) == b"t"
assert image.serialize.call_count == 1
async def test_get_image_salus(ota, image, image_with_version):
SALUS_ID = 4216
newer = image_with_version(image.header.file_version + 1)
ota.async_event = AsyncMock(return_value=[None, image, newer])
assert len(ota._image_cache) == 0
await ota.get_ota_image(SALUS_ID, "model123")
# got new image in the cache
assert len(ota._image_cache) == 1
| null |
5,151 |
"""Rebuild history feature tests."""
""""
The tests here use three replicas for a nexus, named 'source', 'target' and 'newchild'.
Full rebuild test - removes 'target' and adds 'newchild'.
Partial rebuild test(not implemented yet) - removes 'target', add 'target' back.
"""
import pytest
from pytest_bdd import (
given,
scenario,
then,
when,
parsers,
)
import os
import subprocess
import time
from v1.mayastor import container_mod, mayastor_mod
from v1.volume import Volume
import grpc
import nexus_pb2 as pb
def megabytes(n):
return n * 1024 * 1024
@pytest.fixture
def nexus_state(mayastor_nexus, find_nexus, nexus_uuid):
yield find_nexus(nexus_uuid)
@pytest.fixture(scope="module")
def local_files():
files = [
f"/tmp/disk-rebuild-{base}.img" for base in ["source", "target", "newchild"]
]
for path in files:
subprocess.run(
["sudo", "sh", "-c", f"rm -f '{path}' && truncate -s 64M '{path}'"],
check=True,
)
yield
for path in files:
subprocess.run(["sudo", "rm", "-f", path], check=True)
@pytest.fixture(scope="module")
def source_uri(local_files):
yield "aio:///tmp/disk-rebuild-source.img?blk_size=4096"
@pytest.fixture(scope="module")
def target_uri(local_files):
yield "aio:///tmp/disk-rebuild-target.img?blk_size=4096"
@pytest.fixture(scope="module")
def new_child_uri(local_files):
yield "aio:///tmp/disk-rebuild-newchild.img?blk_size=4096"
@pytest.fixture(scope="module")
def nexus_uuid():
yield "2c58c9f0-da89-4cb9-8097-dc67fa132493"
@pytest.fixture(scope="module")
def nexus_name():
yield "nexus-1"
@pytest.fixture(scope="module")
def mayastor_instance(mayastor_mod):
yield mayastor_mod["ms0"]
@pytest.fixture(scope="module")
def find_nexus(mayastor_instance):
def METHOD_NAME(uuid):
for nexus in mayastor_instance.nexus_rpc.ListNexus(
pb.ListNexusOptions()
).nexus_list:
if nexus.uuid == uuid:
return nexus
return None
yield METHOD_NAME
@pytest.fixture
def mayastor_nexus(mayastor_instance, nexus_name, nexus_uuid, source_uri, target_uri):
nexus = mayastor_instance.nexus_rpc.CreateNexus(
pb.CreateNexusRequest(
name=nexus_name,
uuid=nexus_uuid,
size=megabytes(64),
children=[source_uri, target_uri],
minCntlId=50,
maxCntlId=59,
resvKey=0xABCDEF0012345678,
preemptKey=0,
)
)
yield nexus
mayastor_instance.nexus_rpc.DestroyNexus(pb.DestroyNexusRequest(uuid=nexus_uuid))
@scenario(
"features/rebuild_history.feature", "Record Full rebuild of a faulted replica"
)
def test_record_full_rebuild_of_a_faulted_replica():
"""Record Full rebuild of a faulted replica."""
@scenario(
"features/rebuild_history.feature", "Record Partial rebuild of a faulted replica"
)
def test_record_partial_rebuild_of_a_faulted_replica():
"""Record Partial rebuild of a faulted replica."""
@given("a volume with multiple replicas, undergoing IO")
def get_nexus(mayastor_nexus):
pass
@given("the io-engine is running and a volume is created with more than one replicas")
def nexus_ready(mayastor_nexus):
pass
@when("as a result, a new replica is hosted on a different pool")
def add_replica(mayastor_instance, nexus_uuid, new_child_uri):
"""add new_child_uri that has a different uri than originally removed replica"""
mayastor_instance.nexus_rpc.AddChildNexus(
pb.AddChildNexusRequest(uuid=nexus_uuid, uri=new_child_uri, norebuild=True)
)
@when("one of the replica becomes faulted")
def remove_replica(mayastor_instance, nexus_uuid, target_uri):
""" "remove target_uri from the nexus"""
mayastor_instance.nexus_rpc.RemoveChildNexus(
pb.RemoveChildNexusRequest(uuid=nexus_uuid, uri=target_uri)
)
@when("the replica comes back online on the same pool")
def _():
"""the replica comes back online on the same pool."""
raise NotImplementedError
@then("a full rebuild of the new replica starts")
def start_rebuild(mayastor_instance, nexus_uuid, new_child_uri):
"""a full rebuild of the new replica starts."""
mayastor_instance.nexus_rpc.StartRebuild(
pb.StartRebuildRequest(nexus_uuid=nexus_uuid, uri=new_child_uri)
)
time.sleep(2)
@then("a partial rebuild of the replica starts")
def _():
"""a partial rebuild of the replica starts."""
raise NotImplementedError
@then("the replica rebuild event is recorded upon rebuild completion")
def rebuild_recorded(mayastor_instance, nexus_uuid, new_child_uri):
response = retrieve_rebuild_history(mayastor_instance, nexus_uuid)
history = response.records
assert len(history) == 1
@then("this event history is retrievable")
def retrieve_rebuild_history(mayastor_instance, nexus_uuid):
"""this full replica rebuild event is recorded and is retrievable."""
response = mayastor_instance.nexus_rpc.GetRebuildHistory(
pb.RebuildHistoryRequest(uuid=nexus_uuid)
)
return response
| null |
5,152 |
from skrf.mathFunctions import LOG_OF_NEG
import skrf as rf
import unittest
import numpy as npy
from numpy import log, pi, isnan, inf
from numpy.testing import assert_equal, assert_almost_equal
import pytest
from skrf.constants import EIG_MIN
class TestUnitConversions(unittest.TestCase):
"""
Test unit-conversion functions
"""
def setUp(self):
pass
def METHOD_NAME(self):
"""
Test complex to magnitude conversion with:
5 = 3 + 4j
"""
assert_almost_equal(rf.complex_2_magnitude(3+4j), 5.0)
def test_complex_2_db10(self):
"""
Test complex to db10 conversion with:
10 [dB] = 10 * log10(6+8j)
"""
assert_almost_equal(rf.complex_2_db10(6+8j), 10.0)
def test_complex_2_degree(self):
"""
Test complex to degree conversion with:
90 = angle(0 + 1j)
"""
assert_almost_equal(rf.complex_2_degree(0+1j), 90.0)
def test_complex_2_quadrature(self):
"""
Test complex to quadrature conversion with:
2, pi = abs(2j), angle(2j) * abs(2j)
"""
assert_almost_equal(rf.complex_2_quadrature(0+2j), (2, pi))
def test_complex_components(self):
"""
Test complex components:
"""
assert_almost_equal(rf.complex_components(0+2j), (0, 2, 90, 2, pi))
def test_complex_2_reim(self):
"""
Test complex to (real, imag) conversion:
"""
assert_almost_equal(rf.complex_2_reim(1+2j), (1,2))
def test_magnitude_2_db(self):
"""
Test magnitude to db conversion
"""
assert_almost_equal(rf.magnitude_2_db(10, True), 20)
assert_almost_equal(rf.magnitude_2_db(10, False), 20)
with pytest.warns(RuntimeWarning, match="divide by zero"):
assert_almost_equal(rf.magnitude_2_db(0), -inf)
with pytest.warns(RuntimeWarning, match="invalid value encountered in log10"):
assert_almost_equal(rf.magnitude_2_db(-1, True), LOG_OF_NEG)
assert_almost_equal(rf.magnitude_2_db([10, -1], True), [20, LOG_OF_NEG])
self.assertTrue(isnan(rf.magnitude_2_db(-1, False)))
assert_equal(rf.mag_2_db, rf.magnitude_2_db) # Just an alias
def test_mag_2_db10(self):
"""
Test magnitude to db10 conversion
"""
assert_almost_equal(rf.mag_2_db10(10, True), 10)
assert_almost_equal(rf.mag_2_db10(10, False), 10)
with pytest.warns(RuntimeWarning, match="divide by zero"):
assert_almost_equal(rf.magnitude_2_db(0), -inf)
with pytest.warns(RuntimeWarning, match="invalid value encountered in log10"):
assert_almost_equal(rf.mag_2_db10(-1, True), LOG_OF_NEG)
assert_almost_equal(rf.mag_2_db10([10, -1], True), [10, LOG_OF_NEG])
self.assertTrue(isnan(rf.mag_2_db10(-1, False)))
def test_db10_2_mag(self):
"""
Test db10 to mag conversion
"""
assert_almost_equal(rf.db10_2_mag(3+4j), 10**((3+4j)/10))
def test_magdeg_2_reim(self):
"""
Test (mag,deg) to (re+j*im)
"""
assert_almost_equal(rf.magdeg_2_reim(1, 90), (0+1j))
def test_dbdeg_2_reim(self):
"""
Test (db, deg) to (re+j*im)
"""
assert_almost_equal(rf.dbdeg_2_reim(20,90), (0+10j))
def test_np_2_db(self):
"""
Test Np to dB conversion with:
1 [Np] = 20/ln(10) [dB]
"""
assert_almost_equal(rf.np_2_db(1), 20/log(10))
def test_db_2_np(self):
"""
Test dB to Np conversion with:
1 [dB] = ln(10)/20 [Np]
"""
assert_almost_equal(rf.db_2_np(1), log(10)/20)
def test_radian_2_degree(self):
assert_almost_equal(rf.radian_2_degree(pi), 180)
def test_feet_2_meter(self):
"""
Test feet to meter length conversion
"""
assert_almost_equal(rf.feet_2_meter(0.01), 0.003048)
assert_almost_equal(rf.feet_2_meter(1), 0.3048)
def test_meter_2_feet(self):
"""
Test meter to feet length conversion
"""
assert_almost_equal(rf.meter_2_feet(0.01), 0.0328084)
assert_almost_equal(rf.meter_2_feet(1), 3.28084)
def test_db_per_100feet_2_db_per_100meter(self):
"""
Test attenuation unit conversion dB/100feet to dB/100m
"""
assert_almost_equal(rf.db_per_100feet_2_db_per_100meter(), rf.meter_2_feet(), decimal=2)
assert_almost_equal(rf.db_per_100feet_2_db_per_100meter(2.5), 8.2, decimal=2)
assert_almost_equal(rf.db_per_100feet_2_db_per_100meter(0.28), 0.92, decimal=2)
def test_inf_to_num(self):
"""
Test inf_to_num function
"""
# scalar
assert_equal(rf.inf_to_num(npy.inf), rf.INF)
assert_equal(rf.inf_to_num(-npy.inf), -rf.INF)
# array
x = npy.array([0, npy.inf, 0, -npy.inf])
y = npy.array([0, rf.INF, 0, -rf.INF])
assert_equal(rf.inf_to_num(x), y)
def test_rsolve(self):
A = npy.random.random((3, 2, 2)) + 1j*npy.random.random((3, 2, 2))
B = npy.random.random((3, 2, 2)) + 1j*npy.random.random((3, 2, 2))
# Make sure they are not singular
A = rf.nudge_eig(A)
B = rf.nudge_eig(B)
x = rf.rsolve(A, B)
npy.testing.assert_allclose(x @ A, B)
def test_nudge_eig(self):
A = npy.zeros((3, 2, 2))
cond_A = npy.linalg.cond(A)
A2 = rf.nudge_eig(A)
self.assertFalse(A is A2)
self.assertTrue(npy.all(npy.linalg.cond(A2) < cond_A))
npy.testing.assert_allclose(A2, A, atol=1e-9)
def test_nudge_eig2(self):
A = npy.diag([1, 1, 1, 1]).reshape(1, 4, 4)
A2 = rf.nudge_eig(A)
self.assertTrue(A is A2)
def test_nudge_default_params(self):
"Test default params and passing different optional params"
# check that Minimum eigenvalue is correctly passed
A = npy.zeros((3, 2, 2))
A2 = rf.nudge_eig(A)
npy.testing.assert_allclose(A2[:,0,0], EIG_MIN)
A3 = rf.nudge_eig(A, min_eig=1e-10)
npy.testing.assert_allclose(A3[:,0,0], 1e-10)
| null |
5,153 |
#!/usr/bin/env python3
"""This script is an implementation of policy-rc.d
For further information on policy-rc.d see *1
*1 https://people.debian.org/~hmh/invokerc.d-policyrc.d-specification.txt
"""
import collections
import glob
import os
import logging
import sys
import time
import uuid
import yaml
SystemPolicy = collections.namedtuple(
'SystemPolicy',
[
'policy_requestor_name',
'policy_requestor_type',
'service',
'blocked_actions'])
DEFAULT_POLICY_CONFIG_DIR = '/etc/policy-rc.d'
DEFAULT_POLICY_LOG_DIR = '/var/lib/policy-rc.d'
def read_policy_file(policy_file):
"""Return system policies from given file.
:param policy_file: Name of file to read.
:type policy_file: str
:returns: Policy
:rtype: List[SystemPolicy]
"""
policies = []
if os.path.exists(policy_file):
with open(policy_file, 'r') as f:
policy = yaml.safe_load(f)
for service, actions in policy['blocked_actions'].items():
service = service.replace('.service', '')
policies.append(SystemPolicy(
policy_requestor_name=policy['policy_requestor_name'],
policy_requestor_type=policy['policy_requestor_type'],
service=service,
blocked_actions=actions))
return policies
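# A minimal sketch of the .policy file layout this parser assumes, inferred
# only from the keys read above; the requestor and service names below are
# hypothetical examples.
#
#   policy_requestor_name: my-charm
#   policy_requestor_type: charm
#   blocked_actions:
#     apache2: [restart, stop]
#     haproxy.service: [restart]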
def get_policies(policy_config_dir):
"""Return all system policies in policy_config_dir.
:param policy_config_dir: Name of file to read.
:type policy_config_dir: str
:returns: Policy
:rtype: List[SystemPolicy]
"""
_policy = []
for f in glob.glob('{}/*.policy'.format(policy_config_dir)):
_policy.extend(read_policy_file(f))
return _policy
def record_blocked_action(service, action, blocking_policies, policy_log_dir):
"""Record that an action was requested but deniedl
:param service: Service that was blocked
:type service: str
:param action: Action that was blocked.
:type action: str
:param blocking_policies: Policies that blocked the action on the service.
:type blocking_policies: List[SystemPolicy]
:param policy_log_dir: Directory to place the blocking action record.
:type policy_log_dir: str
"""
if not os.path.exists(policy_log_dir):
os.mkdir(policy_log_dir)
seconds = round(time.time())
for policy in blocking_policies:
if not os.path.exists(policy_log_dir):
os.mkdir(policy_log_dir)
file_name = '{}/{}-{}-{}.deferred'.format(
policy_log_dir,
policy.policy_requestor_type,
policy.policy_requestor_name,
uuid.uuid1())
with open(file_name, 'w') as f:
data = {
'timestamp': seconds,
'service': service,
'action': action,
'reason': 'Package update',
'policy_requestor_type': policy.policy_requestor_type,
'policy_requestor_name': policy.policy_requestor_name}
yaml.dump(data, f)
def METHOD_NAME(service, action, policy_config_dir):
"""Record that an action was requested but deniedl
:param service: Service that action is requested against.
:type service: str
:param action: Action that is requested.
:type action: str
:param policy_config_dir: Directory that stores policy files.
:type policy_config_dir: str
:returns: Policies
:rtype: List[SystemPolicy]
"""
service = service.replace('.service', '')
blocking_policies = [
policy
for policy in get_policies(policy_config_dir)
if policy.service == service and action in policy.blocked_actions]
return blocking_policies
def process_action_request(service, action, policy_config_dir, policy_log_dir):
"""Take the requested action against service and check if it is permitted.
:param service: Service that action is requested against.
:type service: str
:param action: Action that is requested.
:type action: str
:param policy_config_dir: Directory that stores policy files.
:type policy_config_dir: str
:param policy_log_dir: Directory to place the blocking action record.
:type policy_log_dir: str
:returns: Tuple of whether the action is permitted and explanation.
:rtype: (boolean, str)
"""
blocking_policies = METHOD_NAME(
service,
action,
policy_config_dir)
if blocking_policies:
policy_msg = [
'{} {}'.format(p.policy_requestor_type, p.policy_requestor_name)
for p in sorted(blocking_policies)]
message = '{} of {} blocked by {}'.format(
action,
service,
', '.join(policy_msg))
record_blocked_action(
service,
action,
blocking_policies,
policy_log_dir)
action_permitted = False
else:
message = "Permitting {} {}".format(service, action)
action_permitted = True
return action_permitted, message
def main():
logging.basicConfig(
filename='/var/log/policy-rc.d.log',
level=logging.DEBUG,
format='%(asctime)s %(message)s')
service = sys.argv[1]
action = sys.argv[2]
permitted, message = process_action_request(
service,
action,
DEFAULT_POLICY_CONFIG_DIR,
DEFAULT_POLICY_LOG_DIR)
logging.info(message)
# https://people.debian.org/~hmh/invokerc.d-policyrc.d-specification.txt
# Exit status codes:
# 0 - action allowed
# 1 - unknown action (therefore, undefined policy)
# 100 - unknown initscript id
# 101 - action forbidden by policy
# 102 - subsystem error
# 103 - syntax error
# 104 - [reserved]
# 105 - behaviour uncertain, policy undefined.
# 106 - action not allowed. Use the returned fallback actions
# (which are implied to be "allowed") instead.
if permitted:
return 0
else:
return 101
if __name__ == "__main__":
rc = main()
sys.exit(rc)
| null |
5,154 |
from __future__ import annotations
import io
from textwrap import dedent
import pytest
from ruamel.yaml.comments import CommentedMap
from meltano.core.behavior.canonical import Canonical
from meltano.core.yaml import yaml
definition = {
# {'a': None, 'b': 1, 'c': None, 'd': 3, 'e': None, ...}
chr(ord("a") + i): i if i % 2 else None
for i in range(10)
}
class TestCanonical:
@pytest.fixture()
def subject(self):
return Canonical(**definition)
def test_canonical(self, subject):
# make sure the Nones are removed
assert len(list(subject)) == 5
subject.test = "hello"
yaml_definition = "\n".join(f"{k}: {v}" for k, v in iter(subject))
buf = io.StringIO()
yaml.dump(subject, buf)
buf.seek(0)
assert buf.read().strip() == yaml_definition
def test_false(self, subject):
subject.false_value = False
assert subject.canonical()["false_value"] is False
def test_nested(self, subject):
nested = Canonical(test="value")
subject.nested = nested
assert Canonical.as_canonical(subject)["nested"] == Canonical.as_canonical(
nested,
)
def test_nested_empty(self, subject):
nested = Canonical(test="")
subject.nested = nested
assert "nested" not in Canonical.as_canonical(subject)
def test_update_canonical(self, subject):
subject.update(Canonical(test="value"))
assert subject.test == "value"
def METHOD_NAME(self, subject):
subject.update({"test": "value"})
assert subject.test == "value"
def test_update_kwargs(self, subject):
subject.update(test="value")
assert subject.test == "value"
def test_with_attrs(self, subject):
subject.test = "value"
assert subject.with_attrs().canonical() == subject.canonical()
new = subject.with_attrs(test="other_value")
assert new.test == "other_value"
assert new.canonical() == {**subject.canonical(), "test": "other_value"}
new = subject.with_attrs(new_test="new_value")
assert new.new_test == "new_value"
assert new.canonical() == {**subject.canonical(), "new_test": "new_value"}
def test_defaults(self, subject):
with pytest.raises(AttributeError):
subject.test # noqa: B018, WPS428
subject.test = None
assert subject.test is None
# This would typically be set from a Canonical subclass
subject._defaults["test"] = lambda _: "default"
# Default values show up when getting an attr
assert subject.test == "default"
# But they're not included in the canonical representation
assert "test" not in subject.canonical()
subject.test = "changed"
assert subject.test == "changed"
assert subject.canonical()["test"] == "changed"
def test_fallbacks(self, subject):
# Calling an unknown attribute is not supported
with pytest.raises(AttributeError):
subject.unknown # noqa: B018, WPS428
fallback = Canonical(unknown="value", known="value")
# This would typically be set from a Canonical subclass
subject._fallback_to = fallback
# Unknown attributes fall back
assert subject.unknown == "value"
assert "unknown" not in subject.canonical()
# Known attributes don't fall back
subject.known = None
assert subject.known is None
# Unless we make them
subject._fallbacks.add("known")
assert subject.known == "value"
assert "known" not in subject.canonical()
# Unless there is nothing to fallback to
subject._fallback_to = None
assert subject.known is None
# Defaults are still applied
subject._defaults["known"] = lambda _: "default"
assert subject.known == "default"
assert "known" not in subject.canonical()
# Until a value is set
subject.known = "value"
assert subject.known == "value"
assert subject.canonical()["known"] == "value"
def test_preserve_comments(self):
contents = """\
# This is a top-level comment
test: value
object:
# Comment in an object
key: value # Comment in a nested value
array:
# Comment in an array
- value # Comment in an array element
"""
contents = dedent(contents)
mapping = yaml.load(io.StringIO(contents))
subject = Canonical.parse(mapping)
assert subject.test == "value"
assert subject.object["key"] == "value"
assert subject.array[0] == "value"
obj = subject.canonical()
assert isinstance(obj, CommentedMap)
out_stream = io.StringIO()
yaml.dump(obj, out_stream)
out_stream.seek(0)
new_contents = out_stream.read()
assert new_contents == contents
def test_annotations(self):
original = CommentedMap(
{"a": 1, "annotations": {"cloud": {"data": 123}}, "z": -1},
)
obj = Canonical.parse(original)
assert obj.a == 1
with pytest.raises(AttributeError):
assert obj.annotations
assert obj.z == -1
assert obj.canonical() == original
| null |
5,155 |
from random import choice
import numpy
from cogent3.maths.stats.special import igam
try:
from math import factorial
except ImportError: # python version < 2.6
from cogent3.maths.stats.special import Gamma
factorial = lambda x: Gamma(x + 1)
def chi_square(x, p, df=1):
"""returns the chisquare statistic and it's probability"""
N = len(x)
end = N
sim = numpy.logical_not(numpy.logical_xor(x[0 : end - p], x[p:end])) * 1
s = ((numpy.ones((N - p,), float) - sim) ** 2).sum()
D = s / (N - p)
p_val = 1 - igam(df / 2.0, D / 2)
return D, p_val
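# Mechanical illustration (my own numbers, not from the original module): a
# signal that repeats exactly every p positions has no mismatches between
# x[0:N-p] and x[p:N], so s = 0 and the statistic D is 0.0.
# >>> chi_square(numpy.array([1, 0, 1, 0, 1, 0]), p=2)[0]
# 0.0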
def g_statistic(X, p=None, idx=None):
"""
return g statistic and p value
arguments:
X - the periodicity profile (e.g. DFT magnitudes, autocorrelation etc)
X needs to contain only those period values being considered,
i.e. only periods in the range [llim, ulim]
"""
# X should be real
X = abs(numpy.array(X))
if p is None:
power = X.max(0)
idx = X.argmax(0)
else:
assert idx is not None
power = X[idx]
g_obs = power / X.sum()
M = numpy.floor(1 / g_obs)
pmax = len(X)
result = numpy.zeros((int(M + 1),), float)
pmax_fact = factorial(pmax)
for index in range(1, min(pmax, int(M)) + 1):
v = (-1) ** (index - 1) * pmax_fact / factorial(pmax - index) / factorial(index)
v *= (1 - index * g_obs) ** (pmax - 1)
result[index] = v
p_val = result.sum()
return g_obs, p_val
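# For reference, the loop above implements the usual Fisher g-test tail
# probability (my reading of the code, stated as an assumption rather than a
# citation):
#     P(g >= g_obs) = sum_{k=1..min(N, floor(1/g_obs))}
#                     (-1)**(k - 1) * C(N, k) * (1 - k * g_obs)**(N - 1)
# where N = len(X) (``pmax`` above) and g_obs = max(X) / sum(X).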
def seq_to_symbols(seq, motifs, motif_length, result=None):
"""return symbolic represetation of the sequence
Parameters
----------
seq
a sequence
motifs
a list of sequence motifs
motif_length
length of first motif
result : ndarray
working array. Contents are reset to zero before encoding
"""
if result is None:
result = numpy.zeros(len(seq), numpy.uint8)
else:
result.fill(0)
if motif_length is None:
motif_length = len(motifs[0])
for i in range(len(seq) - motif_length + 1):
if seq[i : i + motif_length] in motifs:
result[i] = 1
return result
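# Hedged usage sketch (illustrative values of my own): positions where a
# motif starts are flagged with 1, everything else stays 0.
# >>> seq_to_symbols(b"AATTAA", [b"AT"], 2)
# array([0, 1, 0, 0, 0, 0], dtype=uint8)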
class SeqToSymbols(object):
"""class for converting all occurrences of motifs in passed sequence
to 1/0 otherwise"""
def __init__(self, motifs, length=None, motif_length=None):
super(SeqToSymbols, self).__init__()
if type(motifs) == str:
motifs = [motifs]
for i in range(len(motifs)):
try:
motifs[i] = motifs[i].encode("utf8")
except AttributeError:
pass
self.motifs = motifs
self.length = length
self.motif_length = motif_length or len(motifs[0])
self.working = None
if length is not None:
self.METHOD_NAME(length)
def METHOD_NAME(self, length):
"""sets a result array for length"""
self.working = numpy.zeros(length, numpy.uint8)
self.length = length
def __call__(self, seq, result=None):
if result is None and self.working is None:
self.METHOD_NAME(len(seq))
elif self.working is not None:
if len(seq) != self.working.shape[0]:
self.METHOD_NAME(len(seq))
result = self.working
result.fill(0)
if type(seq) == str:
seq = seq.encode("utf8")
elif type(seq) != bytes:
seq = b"".join(seq)
return seq_to_symbols(seq, self.motifs, self.motif_length, result)
def circular_indices(vector, start, length, num):
"""
Parameters
----------
vector : list[int]
sequential integers
start : int
index to start sampling from
length : int
length of returned vector
num : int
k-mer size to support
"""
if start > length:
start = start - length
if start + num < length:
return vector[start : start + num]
# get all till end, then from beginning
return vector[start:] + vector[: start + num - length]
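# Worked example (my own, for illustration): with a 10-position vector a draw
# of num=4 starting at index 8 wraps around to the beginning.
# >>> circular_indices(list(range(10)), 8, 10, 4)
# [8, 9, 0, 1]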
def sampled_places(block_size, length):
"""returns randomly sampled positions with block_size to make a new vector
with length
"""
# The main condition is to identify when a draw would run off the end; in
# that case we wrap around and draw from the beginning.
num_seg, remainder = divmod(length, block_size)
vector = list(range(length))
result = []
for _ in range(num_seg):
i = choice(vector)
result += circular_indices(vector, i, length, block_size)
if remainder:
result += circular_indices(vector, i + block_size, length, remainder)
assert len(result) == length, len(result)
return result
def blockwise_bootstrap(
signal, calc, block_size, num_reps, seq_to_symbols=None, num_stats=None
):
"""returns observed statistic and the probability from the bootstrap
test of observing more `power' by chance than that estimated from the
observed signal
Parameters
----------
signal
a series, can be a sequence object
calc
function to calculate the period power, e.g. ipdft, hybrid,
auto_corr or any other statistic.
block_size
size of contiguous values for resampling
num_reps
number of randomly generated permutations
seq_to_symbols
function to convert a sequence to 1/0. If not
provided, the raw data is used.
num_stats
the number of statistics being evaluated for each
iteration. Defaults to 1.
"""
signal_length = len(signal)
dtype = "c" if seq_to_symbols is not None else None
signal = numpy.array(list(signal), dtype=dtype)
if seq_to_symbols is not None:
symbolic = seq_to_symbols(signal)
data = symbolic
else:
data = signal
obs_stat = calc(data)
if seq_to_symbols is not None and sum(data) == 0:
p = [numpy.array([1.0, 1.0, 1.0]), 1.0][num_stats == 1]
return obs_stat, p
if num_stats is None:
try:
num_stats = calc.get_num_stats()
except AttributeError:
num_stats = 1
count = 0 if num_stats == 1 else numpy.zeros(num_stats)
for _ in range(num_reps):
# get sample positions
sampled_indices = sampled_places(block_size, signal_length)
new_signal = signal.take(sampled_indices)
if seq_to_symbols is not None:
symbolic = seq_to_symbols(new_signal)
data = symbolic
else:
data = new_signal
sim_stat = calc(data)
# count if > than observed
if num_stats > 1:
count[sim_stat >= obs_stat] += 1
elif sim_stat >= obs_stat:
count += 1
return obs_stat, count / num_reps
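# Illustrative usage sketch (hypothetical names, not part of the original
# module): `period_stat` stands in for any callable that maps a 1/0 array to
# a single period-power statistic.
#   seq = "ATGGCAAT" * 20
#   to_symbols = SeqToSymbols(["AT"], length=len(seq))
#   obs, p = blockwise_bootstrap(seq, calc=period_stat, block_size=10,
#                                num_reps=1000, seq_to_symbols=to_symbols)
# `obs` is the statistic for the observed signal; `p` is the fraction of
# block-resampled signals whose statistic is at least as large.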
| null |
5,156 |
#!/usr/bin/env python
# Copyright (c) 2012- The University of Notre Dame.
# This software is distributed under the GNU General Public License.
# See the file COPYING for details.
""" Weaver test script """
from weaver.function import PythonFunction
import os
import shutil
import subprocess
import sys
EXAMPLES_DIR = 'examples'
OUTPUT_DIR = os.path.join('/tmp', os.environ['USER'] + '-weaver-tests')
NESTED_ABSTRACTIONS = False
INLINE_TASKS = 1
def run_weaver(script_path, execute=False, engine_arguments=None, engine_wrapper=None, workflow_arguments=None):
script_name = os.path.splitext(os.path.basename(script_path))[0]
output_path = os.path.join(OUTPUT_DIR, '{0}'.format(script_name))
log_path = os.path.join(OUTPUT_DIR, '{0}.log'.format(script_name))
command = './weaver.py {0} {1} {2} {3} {4} -d all -l {5} -o {6} {7} {8}'.format(
'-a' if NESTED_ABSTRACTIONS else '',
'-t ' + str(INLINE_TASKS),
'-x' if execute else '',
'-w ' + engine_wrapper if engine_wrapper else '',
'-e "' + engine_arguments + '"' if engine_arguments else '',
log_path, output_path, script_path,
' '.join(map(str, workflow_arguments or [])))
command = PythonFunction.PYTHON_VERSION + ' ' + command
process = subprocess.Popen(command, shell=True,
stdout=subprocess.PIPE,
stderr=open(os.devnull, 'w'))
result = process.communicate()[0].decode()
if process.returncode != 0:
raise RuntimeError
return result
def _test_execution(script_name, **kwds):
try:
kwds['execute'] = True
run_weaver(os.path.join(EXAMPLES_DIR, script_name), **kwds)
return True
except (OSError, RuntimeError):
return False
def test_allpairs():
return _test_execution('allpairs.py')
def test_arguments():
try:
result = run_weaver(os.path.join(EXAMPLES_DIR, 'arguments.py'), execute=False, workflow_arguments=['hello', 'world'])
return result.strip() == "['hello', 'world']"
except (OSError, RuntimeError):
return False
def METHOD_NAME():
return _test_execution('batch.py')
def test_bxgrid():
return _test_execution('bxgrid.py', engine_wrapper='parrot_run')
def test_collect():
return _test_execution('collect.py', engine_arguments='-g ref_count')
def test_functions():
return _test_execution('functions.py')
def test_iterate():
return _test_execution('iterate.py')
def test_group():
return _test_execution('group.py')
def test_map():
return _test_execution('map.py')
def test_merge():
return _test_execution('merge.py')
def test_nests():
return _test_execution('nests.py')
def test_options():
try:
result = run_weaver(os.path.join(EXAMPLES_DIR, 'options.py'), execute=False)
return result == """Options(cpu=2, memory=512M, disk=10G, batch=None, local=None, collect=None, environment={})
Options(cpu=4, memory=512M, disk=10G, batch=None, local=None, collect=None, environment={})
Options(cpu=4, memory=512M, disk=10G, batch=None, local=None, collect=None, environment={})
Options(cpu=4, memory=512M, disk=1G, batch=None, local=None, collect=None, environment={})
"""
except (OSError, RuntimeError):
return False
def test_stash():
return _test_execution('stash.py')
def test_scripts():
return _test_execution('scripts.py')
def test_subnests():
return _test_execution('subnests.py')
TESTS = [
('allpairs', test_allpairs),
('arguments', test_arguments),
('batch', METHOD_NAME),
#('bxgrid', test_bxgrid),
('collect', test_collect),
('functions', test_functions),
('group', test_group),
('iterate', test_iterate),
('map', test_map),
('merge', test_merge),
('nests', test_nests),
('options', test_options),
('stash', test_stash),
('scripts', test_scripts),
('subnests', test_subnests),
]
def run_tests():
if os.path.exists(OUTPUT_DIR):
print('Clearing {0} directory'.format(OUTPUT_DIR))
shutil.rmtree(OUTPUT_DIR)
os.makedirs(OUTPUT_DIR)
print('Running tests {0} Nested Abstractions and Inlined Tasks ({1})'.format(
'with' if NESTED_ABSTRACTIONS else 'without', INLINE_TASKS))
for test_name, test_func in TESTS:
sys.stdout.write('{0:>10} ... '.format(test_name))
sys.stdout.flush()
if test_func():
sys.stdout.write('success\n')
else:
sys.stdout.write('failure\n')
print('')
if __name__ == '__main__':
if len(sys.argv) > 1:
_TESTS = []
for arg in sys.argv[1:]:
_TESTS.append((arg, eval('test_' + arg)))
TESTS = _TESTS
NESTED_ABSTRACTIONS = False
INLINE_TASKS = 1
run_tests()
NESTED_ABSTRACTIONS = False
INLINE_TASKS = 4
run_tests()
NESTED_ABSTRACTIONS = True
INLINE_TASKS = 1
run_tests()
NESTED_ABSTRACTIONS = True
INLINE_TASKS = 4
run_tests()
# vim: set sts=4 sw=4 ts=8 expandtab ft=python:
| null |
5,157 |
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Test SST2 dataset operators
"""
import mindspore.dataset as ds
DATA_DIR = '../data/dataset/testSST2/'
def test_sst2_dataset_basic():
"""
Feature: SST2Dataset
Description: Read data from train file
Expectation: The data is processed successfully
"""
buffer = []
data = ds.SST2Dataset(DATA_DIR, usage="train", shuffle=False)
data = data.repeat(2)
data = data.skip(3)
for d in data.create_dict_iterator(num_epochs=1, output_numpy=True):
buffer.append(d)
assert len(buffer) == 7
def test_sst2_dataset_quoted():
"""
Feature: SST2Dataset
Description: Read the data and compare it to expectations
Expectation: The data is processed successfully
"""
data = ds.SST2Dataset(DATA_DIR, usage="test", shuffle=False)
buffer = []
for d in data.create_dict_iterator(num_epochs=1, output_numpy=True):
buffer.extend([d['sentence']])
assert buffer == ["test read SST2dataset 1 .",
"test read SST2dataset 2 .",
"test read SST2dataset 3 ."]
def test_sst2_dataset_usage():
"""
    Feature: SST2Dataset
    Description: Read data from dev file
    Expectation: The data is processed successfully
"""
buffer = []
data = ds.SST2Dataset(DATA_DIR, usage="dev", shuffle=False)
for d in data.create_dict_iterator(num_epochs=1, output_numpy=True):
buffer.append(d)
assert len(buffer) == 4
def METHOD_NAME():
"""
Feature: SST2Dataset
Description: Test get_dataset_size function
Expectation: The data is processed successfully
"""
data = ds.SST2Dataset(DATA_DIR, usage="dev", shuffle=False)
size = data.get_dataset_size()
assert size == 4
def test_sst2_dataset_distribution():
"""
Feature: SST2Dataset
Description: Test in a distributed state
Expectation: The data is processed successfully
"""
data = ds.SST2Dataset(DATA_DIR, usage="train", shuffle=False, num_shards=2, shard_id=0)
count = 0
for _ in data.create_dict_iterator(num_epochs=1, output_numpy=True):
count += 1
assert count == 3
def test_sst2_dataset_num_samples():
"""
Feature: SST2Dataset
Description: Test num_samples parameter
Expectation: The data is processed successfully
"""
data = ds.SST2Dataset(DATA_DIR, usage="test", shuffle=False, num_samples=2)
count = 0
for _ in data.create_dict_iterator(num_epochs=1, output_numpy=True):
count += 1
assert count == 2
def test_sst2_dataset_exception():
"""
Feature: SST2Dataset
Description: Test the wrong input
Expectation: Unable to read data properly
"""
def exception_func(item):
raise Exception("Error occur!")
try:
data = ds.SST2Dataset(DATA_DIR, usage="test", shuffle=False)
data = data.map(operations=exception_func, input_columns=["sentence"], num_parallel_workers=1)
for _ in data.create_dict_iterator():
pass
assert False
except RuntimeError as e:
assert "map operation: [PyFunc] failed. The corresponding data file" in str(e)
try:
data = ds.SST2Dataset(DATA_DIR, usage="test", shuffle=False)
data = data.map(operations=exception_func, input_columns=["sentence"], num_parallel_workers=1)
for _ in data.create_dict_iterator():
pass
assert False
except RuntimeError as e:
assert "map operation: [PyFunc] failed. The corresponding data file" in str(e)
if __name__ == "__main__":
test_sst2_dataset_basic()
test_sst2_dataset_quoted()
test_sst2_dataset_usage()
METHOD_NAME()
test_sst2_dataset_distribution()
test_sst2_dataset_num_samples()
test_sst2_dataset_exception()
| null |
5,158 |
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor, Parameter
from mindspore.ops import operations as P
class AssignSub(nn.Cell):
def __init__(self, value):
super(AssignSub, self).__init__()
self.var = Parameter(value, name="var")
self.sub = P.AssignSub()
def METHOD_NAME(self, y):
res = self.sub(self.var, y)
return res
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_assign_sub():
"""
Feature: assign sub kernel
Description: test assignsub
Expectation: just test
"""
expect1 = np.zeros([1, 3, 3, 3])
expect2 = np.array([[[[0, -1, -2],
[-3, -4, -5],
[-6, -7, -8]],
[[-9, -10, -11],
[-12, -13, -14],
[-15, -16, -17]],
[[-18, -19, -20],
[-21, -22, -23],
[-24, -25, -26]]]])
x1 = Tensor(np.arange(1 * 3 * 3 * 3).reshape(1, 3, 3, 3).astype(np.float32))
y1 = Tensor(np.arange(1 * 3 * 3 * 3).reshape(1, 3, 3, 3).astype(np.float32))
x2 = Tensor(np.arange(1 * 3 * 3 * 3).reshape(1, 3, 3, 3).astype(np.float32))
y2 = Tensor(np.arange(1 * 3 * 3 * 3).reshape(1, 3, 3, 3).astype(np.float32))
context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU')
sub = AssignSub(x1)
output1 = sub(y1)
assert (output1.asnumpy() == expect1).all()
sub = AssignSub(output1)
output2 = sub(y1)
assert (output2.asnumpy() == expect2).all()
context.set_context(mode=context.GRAPH_MODE, device_target='GPU')
sub = AssignSub(x2)
output1 = sub(y2)
assert (output1.asnumpy() == expect1).all()
sub = AssignSub(output1)
output2 = sub(y2)
assert (output2.asnumpy() == expect2).all()
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_assign_sub_float16():
"""
    Feature: assign sub kernel
Description: test assignsub float16
Expectation: just test
"""
expect3 = np.zeros([1, 3, 3, 3])
expect4 = np.array([[[[0, -1, -2],
[-3, -4, -5],
[-6, -7, -8]],
[[-9, -10, -11],
[-12, -13, -14],
[-15, -16, -17]],
[[-18, -19, -20],
[-21, -22, -23],
[-24, -25, -26]]]])
x1 = Tensor(np.arange(1 * 3 * 3 * 3).reshape(1, 3, 3, 3).astype(np.float16))
y1 = Tensor(np.arange(1 * 3 * 3 * 3).reshape(1, 3, 3, 3).astype(np.float16))
x2 = Tensor(np.arange(1 * 3 * 3 * 3).reshape(1, 3, 3, 3).astype(np.float16))
y2 = Tensor(np.arange(1 * 3 * 3 * 3).reshape(1, 3, 3, 3).astype(np.float16))
context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU')
sub = AssignSub(x1)
output1 = sub(y1)
assert (output1.asnumpy() == expect3).all()
sub = AssignSub(output1)
output2 = sub(y1)
assert (output2.asnumpy() == expect4).all()
context.set_context(mode=context.GRAPH_MODE, device_target='GPU')
sub = AssignSub(x2)
output1 = sub(y2)
assert (output1.asnumpy() == expect3).all()
sub = AssignSub(output1)
output2 = sub(y2)
assert (output2.asnumpy() == expect4).all()
| null |
5,159 |
import gzip
from datetime import timedelta
from typing import Any
import pytest
from django.utils import timezone
from freezegun.api import FrozenDateTimeFactory
from storages.backends.s3boto3 import S3Boto3Storage
from thunderstore.cache.storage import get_cache_storage
from thunderstore.community.factories import CommunityFactory
from thunderstore.community.models import Community
from thunderstore.repository.models.cache import APIV1PackageCache
from thunderstore.utils.makemigrations import StubStorage
@pytest.mark.django_db
def test_api_v1_package_cache_get_latest_for_community_without_community(
community: Community,
) -> None:
# Make sure a community is in the DB to ensure a random one isn't returned
assert community.pk
assert APIV1PackageCache.get_latest_for_community(community_identifier=None) is None
@pytest.mark.django_db
def test_api_v1_package_cache_get_latest_for_community(settings: Any) -> None:
settings.DISABLE_TRANSACTION_CHECKS = True
community_a = CommunityFactory()
community_b = CommunityFactory()
assert (
APIV1PackageCache.get_latest_for_community(
community_identifier=community_a.identifier
)
is None
)
assert (
APIV1PackageCache.get_latest_for_community(
community_identifier=community_b.identifier
)
is None
)
APIV1PackageCache.update_for_community(community_a, b"")
APIV1PackageCache.update_for_community(community_b, b"")
assert APIV1PackageCache.get_latest_for_community(community_identifier=None) is None
cache_a = APIV1PackageCache.get_latest_for_community(
community_identifier=community_a.identifier
)
cache_b = APIV1PackageCache.get_latest_for_community(
community_identifier=community_b.identifier
)
assert cache_a.pk != cache_b.pk
assert cache_a.community == community_a
assert cache_b.community == community_b
APIV1PackageCache.update_for_community(community_a, b"")
cache_a2 = APIV1PackageCache.get_latest_for_community(
community_identifier=community_a.identifier
)
assert cache_a2.pk != cache_a.pk
cache_b.delete()
assert (
APIV1PackageCache.get_latest_for_community(
community_identifier=community_b.identifier
)
is None
)
@pytest.mark.django_db
def test_api_v1_packge_cache_update_for_community(community: Community) -> None:
content = b"this is a test message"
assert (
APIV1PackageCache.get_latest_for_community(
community_identifier=community.identifier
)
is None
)
latest = APIV1PackageCache.update_for_community(community, content=content)
assert latest.content_type == "application/json"
assert latest.content_encoding == "gzip"
assert latest.community.pk == community.pk
assert (
APIV1PackageCache.get_latest_for_community(
community_identifier=community.identifier
).pk
== latest.pk
)
with gzip.GzipFile(fileobj=latest.data, mode="r") as f:
result = f.read()
assert result == content
@pytest.mark.django_db
def test_api_v1_package_cache_drop_stale_cache(
freezer: FrozenDateTimeFactory, settings: Any
) -> None:
settings.DISABLE_TRANSACTION_CHECKS = True
start = timezone.now()
community_a = CommunityFactory()
community_b = CommunityFactory()
cache_a1 = APIV1PackageCache.update_for_community(community_a, b"")
cache_b1 = APIV1PackageCache.update_for_community(community_b, b"")
communityless_cache = APIV1PackageCache.update_for_community(community_a, b"")
communityless_cache.community = None
communityless_cache.save()
    # B1 is within 1 hour of B2 so should not be dropped
# TODO: Use freezegun once https://github.com/spulec/freezegun/issues/331 is fixed
# freezer.move_to(start + timedelta(minutes=30))
cache_b2 = APIV1PackageCache.update_for_community(community_b, b"")
cache_b2.last_modified = start + timedelta(minutes=30)
cache_b2.save()
# A1 is over 60 minutes older than A2 and should be dropped
# TODO: Use freezegun once https://github.com/spulec/freezegun/issues/331 is fixed
# freezer.move_to(start + timedelta(minutes=61))
cache_a2 = APIV1PackageCache.update_for_community(community_a, b"")
cache_a2.last_modified = start + timedelta(minutes=61)
cache_a2.save()
assert APIV1PackageCache.objects.filter(pk=communityless_cache.pk).count() == 1
APIV1PackageCache.drop_stale_cache()
assert APIV1PackageCache.objects.filter(pk=communityless_cache.pk).count() == 0
assert APIV1PackageCache.objects.filter(pk=cache_a1.pk).count() == 0
assert APIV1PackageCache.objects.filter(pk=cache_a2.pk).count() == 1
assert APIV1PackageCache.objects.filter(pk=cache_b1.pk).count() == 1
assert APIV1PackageCache.objects.filter(pk=cache_b2.pk).count() == 1
@pytest.mark.django_db
def test_api_v1_package_cache_drop_stale_cache_none(settings: Any) -> None:
settings.DISABLE_TRANSACTION_CHECKS = True
CommunityFactory() # Create a community without a community site
assert APIV1PackageCache.drop_stale_cache() is None # Ensure no crash
@pytest.mark.django_db
def test_api_v1_package_cache_delete_file_transactions_disabled(community: Community):
cache = APIV1PackageCache.update_for_community(community, b"")
with pytest.raises(RuntimeError, match="Must not be called during a transaction"):
cache.delete_file()
@pytest.mark.django_db(transaction=True)
def test_api_v1_package_cache_delete_file_transactionless_allowed(community: Community):
cache = APIV1PackageCache.update_for_community(community, b"")
cache.delete_file()
@pytest.mark.django_db
def METHOD_NAME(community: Community, settings: Any):
settings.DISABLE_TRANSACTION_CHECKS = True
cache = APIV1PackageCache.update_for_community(community, b"")
storage: S3Boto3Storage = cache.data.storage
assert isinstance(storage, S3Boto3Storage)
name = cache.data.name
assert storage.exists(name)
cache.delete_file()
assert not storage.exists(name)
cache.refresh_from_db()
assert cache.is_deleted is True
assert bool(cache.data) is False
@pytest.mark.django_db
def test_api_v1_package_cache_delete(community: Community, settings: Any):
settings.DISABLE_TRANSACTION_CHECKS = True
cache = APIV1PackageCache.update_for_community(community, b"")
storage: S3Boto3Storage = cache.data.storage
assert isinstance(storage, S3Boto3Storage)
name = cache.data.name
assert storage.exists(name)
cache.delete()
assert not storage.exists(name)
@pytest.mark.django_db
def test_api_v1_package_cache_queryset_delete_disallowed():
with pytest.raises(NotImplementedError, match="Delete is not supported for"):
APIV1PackageCache.objects.all().delete()
def test_api_v1_packge_cache_storage_is_stub_during_makemigrations(mocker):
mocker.patch("sys.argv", ["manage.py", "makemigrations"])
storage = get_cache_storage()
assert isinstance(storage, StubStorage)
def test_api_v1_packge_cache_storage_is_s3_during_run(mocker):
mocker.patch("sys.argv", ["manage.py", "runserver"])
storage = get_cache_storage()
assert isinstance(storage, S3Boto3Storage)
| null |
5,160 |
import os
import unittest
import uuid
from unittest.mock import PropertyMock, patch
import boto3
from backend.common.utils.aws import AwsSecret
from backend.common.utils.secret_config import SecretConfig
from tests.unit.backend.fixtures.environment_setup import EnvironmentSetup, fixture_file_path
from tests.unit.backend.fixtures.existing_aws_secret_test_fixture import ExistingAwsSecretTestFixture
class BogoComponentConfig(SecretConfig):
def __init__(self, *args, **kwargs):
super(BogoComponentConfig, self).__init__("bogo_component", **kwargs)
class TestSecretConfig(unittest.TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
# AwsSecret.debug_logging = True
# To reduce eventual consistency issues, get everyone using the same Secrets Manager session
cls.secrets_mgr = boto3.client("secretsmanager", endpoint_url=os.getenv("BOTO_ENDPOINT_URL") or None)
cls.patcher = patch("backend.common.utils.aws.boto3.client")
boto3_client = cls.patcher.start()
boto3_client.return_value = cls.secrets_mgr
@classmethod
def tearDownClass(cls):
AwsSecret.debug_logging = False
cls.patcher.stop()
super().tearDownClass()
def setUp(self):
super().setUp()
self.deployment_env = "bogo_env_{}".format(uuid.uuid4())
self.secret_name = f"corpora/bogo_component/{self.deployment_env}/secrets"
BogoComponentConfig.reset()
def test_from_file(self):
with EnvironmentSetup({"CONFIG_SOURCE": fixture_file_path("bogo_config.json")}):
config = BogoComponentConfig(deployment=self.deployment_env)
self.assertEqual("value_from_file", config.secret1)
def test_from_aws(self):
with ExistingAwsSecretTestFixture(
secret_name=self.secret_name, secret_value='{"secret1":"secret1_from_cloud"}'
), EnvironmentSetup({"CONFIG_SOURCE": None}):
config = BogoComponentConfig(deployment=self.deployment_env, source="aws")
self.assertEqual("secret1_from_cloud", config.secret1)
def test_custom_secret_name(self):
custom_secret_name = f"corpora/bogo_component/{self.deployment_env}/custom-secret-name"
with ExistingAwsSecretTestFixture(secret_name=custom_secret_name, secret_value='{"secret1":"custom"}'):
class BogoComponentCustomConfig(SecretConfig):
def __init__(self, *args, **kwargs):
super(BogoComponentCustomConfig, self).__init__(
"bogo_component", secret_name="custom-secret-name", **kwargs
)
config = BogoComponentCustomConfig(deployment=self.deployment_env, source="aws")
self.assertEqual("custom", config.secret1)
def test_singletonness(self):
with patch("backend.common.utils.aws.AwsSecret.value", new_callable=PropertyMock) as mock_aws_secret_value:
mock_aws_secret_value.return_value = '{"secret2": "foo"}'
config1 = BogoComponentConfig(deployment=self.deployment_env, source="aws")
self.assertEqual("foo", config1.secret2)
config2 = BogoComponentConfig(deployment=self.deployment_env, source="aws")
self.assertEqual("foo", config2.secret2)
mock_aws_secret_value.assert_called_once()
# TRUTH TABLE
# ITEM IS IN CONFIG | ITEM IS IN ENV | use_env IS SET | RESULT
# no | no | no | exception
# no | no | yes | exception
# no | yes | no | exception
# no | yes | yes | return env value
# yes | no | no | return config value
# yes | no | yes | return config value
# yes | yes | no | return config value
# yes | yes | yes | return env value
def test_when_item_is_not_in_config_not_in_env_we_raise(self):
with EnvironmentSetup({"CONFIG_SOURCE": None}):
BogoComponentConfig.use_env = True
with self.assertRaises(RuntimeError):
config = BogoComponentConfig(deployment=self.deployment_env)
print(config.secret_that_we_never_put_into_config)
def test_when_item_is_not_in_config_but_is_in_env_and_use_env_is_not_set_we_raise(self):
with ExistingAwsSecretTestFixture(secret_name=self.secret_name, secret_value="{}"):
BogoComponentConfig.use_env = False
with EnvironmentSetup({"CONFIG_SOURCE": None, "SECRET1": "secret1_from_env"}), self.assertRaises(
RuntimeError
):
config = BogoComponentConfig(deployment=self.deployment_env)
print(config.secret1)
def METHOD_NAME(self):
with ExistingAwsSecretTestFixture(secret_name=self.secret_name, secret_value="{}"):
BogoComponentConfig.use_env = True
with EnvironmentSetup({"CONFIG_SOURCE": None, "SECRET1": "secret1_from_env"}):
config = BogoComponentConfig(deployment=self.deployment_env)
self.assertEqual("secret1_from_env", config.secret1)
def test_when_item_is_in_config_but_not_in_env_and_use_env_is_not_set_we_use_config(self):
with ExistingAwsSecretTestFixture(
secret_name=self.secret_name, secret_value='{"secret1":"secret1_from_cloud"}'
):
BogoComponentConfig.use_env = False
with EnvironmentSetup({"CONFIG_SOURCE": None}):
config = BogoComponentConfig(deployment=self.deployment_env)
self.assertEqual("secret1_from_cloud", config.secret1)
def test_when_item_is_in_config_but_not_in_env_and_use_env_is_set_we_use_config(self):
with ExistingAwsSecretTestFixture(
secret_name=self.secret_name, secret_value='{"secret1":"secret1_from_cloud"}'
):
BogoComponentConfig.use_env = True
with EnvironmentSetup({"CONFIG_SOURCE": None, "SECRET1": "secret1_from_env"}):
config = BogoComponentConfig(deployment=self.deployment_env)
self.assertEqual("secret1_from_env", config.secret1)
def test_when_item_is_in_config_and_is_in_env_and_use_env_is_set_we_use_env(self):
with ExistingAwsSecretTestFixture(
secret_name=self.secret_name, secret_value='{"secret1":"secret1_from_cloud"}'
):
BogoComponentConfig.use_env = True
with EnvironmentSetup({"CONFIG_SOURCE": None, "SECRET1": "secret1_from_env"}):
config = BogoComponentConfig(deployment=self.deployment_env)
self.assertEqual("secret1_from_env", config.secret1)
| null |
5,161 |
# Copyright 2020-2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import os
import argparse
import mindspore.context as context
import mindspore.dataset as ds
import mindspore.dataset.transforms as C
import mindspore.dataset.vision as CV
import mindspore.nn as nn
from mindspore.common import dtype as mstype
from mindspore.dataset.vision import Inter
from mindspore.train import Model, LossMonitor, Accuracy
from mindspore.common.initializer import TruncatedNormal
from mindspore.communication.management import init
parser = argparse.ArgumentParser(description='test_ps_lenet')
parser.add_argument("--device_target", type=str, default="GPU")
parser.add_argument("--dataset_path", type=str, default="/home/workspace/mindspore_dataset/mnist")
args, _ = parser.parse_known_args()
device_target = args.device_target
dataset_path = args.dataset_path
context.set_context(mode=context.GRAPH_MODE, device_target=device_target)
context.set_ps_context(enable_ps=True)
def conv(in_channels, out_channels, kernel_size, stride=1, padding=0):
"""weight initial for conv layer"""
weight = weight_variable()
return nn.Conv2d(in_channels, out_channels,
kernel_size=kernel_size, stride=stride, padding=padding,
weight_init=weight, has_bias=False, pad_mode="valid")
def METHOD_NAME(input_channels, out_channels):
"""weight initial for fc layer"""
weight = weight_variable()
bias = weight_variable()
return nn.Dense(input_channels, out_channels, weight, bias)
def weight_variable():
"""weight initial"""
return TruncatedNormal(0.02)
class LeNet5(nn.Cell):
def __init__(self, num_class=10, channel=1):
super(LeNet5, self).__init__()
self.num_class = num_class
self.conv1 = conv(channel, 6, 5)
self.conv2 = conv(6, 16, 5)
self.fc1 = METHOD_NAME(16 * 5 * 5, 120)
self.fc2 = METHOD_NAME(120, 84)
self.fc3 = METHOD_NAME(84, self.num_class)
self.relu = nn.ReLU()
self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
self.flatten = nn.Flatten()
def construct(self, x):
x = self.conv1(x)
x = self.relu(x)
x = self.max_pool2d(x)
x = self.conv2(x)
x = self.relu(x)
x = self.max_pool2d(x)
x = self.flatten(x)
x = self.fc1(x)
x = self.relu(x)
x = self.fc2(x)
x = self.relu(x)
x = self.fc3(x)
return x
def create_dataset(data_path, batch_size=32, repeat_size=1,
num_parallel_workers=1):
"""
create dataset for train or test
"""
# define dataset
mnist_ds = ds.MnistDataset(data_path)
resize_height, resize_width = 32, 32
rescale = 1.0 / 255.0
shift = 0.0
rescale_nml = 1 / 0.3081
shift_nml = -1 * 0.1307 / 0.3081
# define map operations
resize_op = CV.Resize((resize_height, resize_width), interpolation=Inter.LINEAR) # Bilinear mode
rescale_nml_op = CV.Rescale(rescale_nml, shift_nml)
rescale_op = CV.Rescale(rescale, shift)
hwc2chw_op = CV.HWC2CHW()
type_cast_op = C.TypeCast(mstype.int32)
# apply map operations on images
mnist_ds = mnist_ds.map(operations=type_cast_op, input_columns="label", num_parallel_workers=num_parallel_workers)
mnist_ds = mnist_ds.map(operations=resize_op, input_columns="image", num_parallel_workers=num_parallel_workers)
mnist_ds = mnist_ds.map(operations=rescale_op, input_columns="image", num_parallel_workers=num_parallel_workers)
mnist_ds = mnist_ds.map(operations=rescale_nml_op, input_columns="image", num_parallel_workers=num_parallel_workers)
mnist_ds = mnist_ds.map(operations=hwc2chw_op, input_columns="image", num_parallel_workers=num_parallel_workers)
# apply DatasetOps
buffer_size = 10000
mnist_ds = mnist_ds.shuffle(buffer_size=buffer_size) # 10000 as in LeNet train script
mnist_ds = mnist_ds.batch(batch_size, drop_remainder=True)
mnist_ds = mnist_ds.repeat(repeat_size)
return mnist_ds
if __name__ == "__main__":
init()
network = LeNet5(10)
network.set_param_ps()
net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
net_opt = nn.Momentum(network.trainable_params(), 0.01, 0.9)
model = Model(network, net_loss, net_opt, metrics={"Accuracy": Accuracy()})
ds_train = create_dataset(os.path.join(dataset_path, "train"), 32, 1)
model.train(3, ds_train, callbacks=[LossMonitor()], dataset_sink_mode=False)
ds_eval = create_dataset(os.path.join(dataset_path, "test"), 32, 1)
acc = model.eval(ds_eval, dataset_sink_mode=False)
print("Accuracy:", acc['Accuracy'])
assert acc['Accuracy'] > 0.7
| null |
5,162 |
from fastapi import APIRouter, FastAPI
from fastapi.responses import HTMLResponse, JSONResponse, PlainTextResponse
from fastapi.testclient import TestClient
class OverrideResponse(JSONResponse):
media_type = "application/x-override"
app = FastAPI()
router_a = APIRouter()
router_a_a = APIRouter()
router_a_b_override = APIRouter() # Overrides default class
router_b_override = APIRouter() # Overrides default class
router_b_a = APIRouter()
router_b_a_c_override = APIRouter() # Overrides default class again
@app.get("/")
def get_root():
return {"msg": "Hello World"}
@app.get("/override", response_class=PlainTextResponse)
def get_path_override():
return "Hello World"
@router_a.get("/")
def get_a():
return {"msg": "Hello A"}
@router_a.get("/override", response_class=PlainTextResponse)
def get_a_path_override():
return "Hello A"
@router_a_a.get("/")
def get_a_a():
return {"msg": "Hello A A"}
@router_a_a.get("/override", response_class=PlainTextResponse)
def get_a_a_path_override():
return "Hello A A"
@router_a_b_override.get("/")
def get_a_b():
return "Hello A B"
@router_a_b_override.get("/override", response_class=HTMLResponse)
def get_a_b_path_override():
return "Hello A B"
@router_b_override.get("/")
def get_b():
return "Hello B"
@router_b_override.get("/override", response_class=HTMLResponse)
def get_b_path_override():
return "Hello B"
@router_b_a.get("/")
def get_b_a():
return "Hello B A"
@router_b_a.get("/override", response_class=HTMLResponse)
def get_b_a_path_override():
return "Hello B A"
@router_b_a_c_override.get("/")
def get_b_a_c():
return "Hello B A C"
@router_b_a_c_override.get("/override", response_class=OverrideResponse)
def get_b_a_c_path_override():
return {"msg": "Hello B A C"}
router_b_a.include_router(
router_b_a_c_override, prefix="/c", default_response_class=HTMLResponse
)
router_b_override.include_router(router_b_a, prefix="/a")
router_a.include_router(router_a_a, prefix="/a")
router_a.include_router(
router_a_b_override, prefix="/b", default_response_class=PlainTextResponse
)
app.include_router(router_a, prefix="/a")
app.include_router(
router_b_override, prefix="/b", default_response_class=PlainTextResponse
)
client = TestClient(app)
json_type = "application/json"
text_type = "text/plain; charset=utf-8"
html_type = "text/html; charset=utf-8"
override_type = "application/x-override"
def test_app():
with client:
response = client.get("/")
assert response.json() == {"msg": "Hello World"}
assert response.headers["content-type"] == json_type
def test_app_override():
with client:
response = client.get("/override")
assert response.content == b"Hello World"
assert response.headers["content-type"] == text_type
def test_router_a():
with client:
response = client.get("/a")
assert response.json() == {"msg": "Hello A"}
assert response.headers["content-type"] == json_type
def test_router_a_override():
with client:
response = client.get("/a/override")
assert response.content == b"Hello A"
assert response.headers["content-type"] == text_type
def test_router_a_a():
with client:
response = client.get("/a/a")
assert response.json() == {"msg": "Hello A A"}
assert response.headers["content-type"] == json_type
def test_router_a_a_override():
with client:
response = client.get("/a/a/override")
assert response.content == b"Hello A A"
assert response.headers["content-type"] == text_type
def test_router_a_b():
with client:
response = client.get("/a/b")
assert response.content == b"Hello A B"
assert response.headers["content-type"] == text_type
def test_router_a_b_override():
with client:
response = client.get("/a/b/override")
assert response.content == b"Hello A B"
assert response.headers["content-type"] == html_type
def test_router_b():
with client:
response = client.get("/b")
assert response.content == b"Hello B"
assert response.headers["content-type"] == text_type
def test_router_b_override():
with client:
response = client.get("/b/override")
assert response.content == b"Hello B"
assert response.headers["content-type"] == html_type
def test_router_b_a():
with client:
response = client.get("/b/a")
assert response.content == b"Hello B A"
assert response.headers["content-type"] == text_type
def test_router_b_a_override():
with client:
response = client.get("/b/a/override")
assert response.content == b"Hello B A"
assert response.headers["content-type"] == html_type
def test_router_b_a_c():
with client:
response = client.get("/b/a/c")
assert response.content == b"Hello B A C"
assert response.headers["content-type"] == html_type
def METHOD_NAME():
with client:
response = client.get("/b/a/c/override")
assert response.json() == {"msg": "Hello B A C"}
assert response.headers["content-type"] == override_type
| null |
5,163 |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for depthwise conv2d op fusing."""
import os
import shutil
import tempfile
import tensorflow.compat.v2 as tf
from tensorflowjs.converters import fuse_depthwise_conv2d
from tensorflowjs.converters import graph_rewrite_util
from tensorflowjs.converters import tf_saved_model_conversion_v2
class FuseDepthwiseConv2dTest(tf.test.TestCase):
def setUp(self):
super(FuseDepthwiseConv2dTest, self).setUp()
self._tmp_dir = tempfile.mkdtemp()
def tearDown(self):
if os.path.isdir(self._tmp_dir):
shutil.rmtree(self._tmp_dir)
super(FuseDepthwiseConv2dTest, self).tearDown()
def testFuseDepthwiseConv2dNativeWithBias(self):
layers = [
tf.keras.layers.DepthwiseConv2D(
1, bias_initializer=tf.initializers.constant(0.25))
]
model = tf.keras.Sequential(layers)
tf.keras.backend.set_learning_phase(0)
input_tensor = tf.constant([1.0, 1.0], shape=[1, 1, 1, 2])
@tf.function
def execute_model(tensor):
return model(tensor)
graph = tf_saved_model_conversion_v2._freeze_saved_model_v2(
execute_model.get_concrete_function(input_tensor))
graph_def = graph.as_graph_def()
optimized_graph_def = fuse_depthwise_conv2d.fuse_depthwise_conv2d(graph_def)
depthwise_conv2d_count = 0
depthwise_conv2d = None
for node in optimized_graph_def.node:
self.assertNotEqual("BiasAdd", node.op)
self.assertNotEqual("DepthwiseConv2dNative", node.op)
if node.op == graph_rewrite_util.FUSED_DEPTHWISE_CONV2D:
depthwise_conv2d_count += 1
depthwise_conv2d = node
self.assertEqual(depthwise_conv2d_count, 1)
self.assertEqual(depthwise_conv2d.attr['fused_ops'].list.s, [b'BiasAdd'])
self.assertEqual(depthwise_conv2d.attr['num_args'].i, 1)
def testFuseDepthwiseConv2dNativeWithBiasAndActivation(self):
layers = [
tf.keras.layers.DepthwiseConv2D(
1, bias_initializer=tf.initializers.constant(0.25)),
tf.keras.layers.ReLU()
]
model = tf.keras.Sequential(layers)
tf.keras.backend.set_learning_phase(0)
input_tensor = tf.constant([1.0, 1.0], shape=[1, 1, 1, 2])
@tf.function
def execute_model(tensor):
return model(tensor)
graph = tf_saved_model_conversion_v2._freeze_saved_model_v2(
execute_model.get_concrete_function(input_tensor))
graph_def = graph.as_graph_def()
optimized_graph_def = fuse_depthwise_conv2d.fuse_depthwise_conv2d(graph_def)
depthwise_conv2d_count = 0
depthwise_conv2d = None
for node in optimized_graph_def.node:
self.assertNotEqual("BiasAdd", node.op)
self.assertNotEqual("DepthwiseConv2dNative", node.op)
self.assertNotEqual("Relu", node.op)
if node.op == graph_rewrite_util.FUSED_DEPTHWISE_CONV2D:
depthwise_conv2d_count += 1
depthwise_conv2d = node
self.assertEqual(depthwise_conv2d_count, 1)
self.assertEqual(
depthwise_conv2d.attr['fused_ops'].list.s, [b'BiasAdd', b'Relu'])
self.assertEqual(depthwise_conv2d.attr['num_args'].i, 1)
def METHOD_NAME(self):
layers = [
tf.keras.layers.DepthwiseConv2D(1, use_bias=False),
tf.keras.layers.ReLU()
]
model = tf.keras.Sequential(layers)
tf.keras.backend.set_learning_phase(0)
input_tensor = tf.constant([1.0, 1.0], shape=[1, 1, 1, 2])
@tf.function
def execute_model(tensor):
return model(tensor)
graph = tf_saved_model_conversion_v2._freeze_saved_model_v2(
execute_model.get_concrete_function(input_tensor))
graph_def = graph.as_graph_def()
optimized_graph_def = fuse_depthwise_conv2d.fuse_depthwise_conv2d(graph_def)
depthwise_conv2d_count = 0
depthwise_conv2d = None
for node in optimized_graph_def.node:
self.assertNotEqual("BiasAdd", node.op)
self.assertNotEqual("DepthwiseConv2dNative", node.op)
self.assertNotEqual("Relu", node.op)
if node.op == graph_rewrite_util.FUSED_DEPTHWISE_CONV2D:
depthwise_conv2d_count += 1
depthwise_conv2d = node
self.assertEqual(depthwise_conv2d_count, 1)
self.assertEqual(
depthwise_conv2d.attr['fused_ops'].list.s, [b'NoOp', b'Relu'])
self.assertEqual(depthwise_conv2d.attr['num_args'].i, 0)
if __name__ == '__main__':
tf.test.main()
| null |
5,164 |
class Pagination:
def __init__(self, record, pagination_config):
#: the pagination config
self.config = pagination_config
#: the current page's record
self.current = record
#: the current page number (1 indexed)
self.page = record.page_num
#: the number of items to be displayed on a page.
self.per_page = pagination_config.per_page
#: the total number of items matching the query
self.total = pagination_config.count_total_items(record)
@property
def METHOD_NAME(self):
"""The children for this page."""
return self.config.slice_query_for_page(self.current, self.page)
@property
def pages(self):
"""The total number of pages."""
pages = (self.total + self.per_page - 1) // self.per_page
# Even when there are no children, we want at least one page
return max(pages, 1)
@property
def prev_num(self):
"""The page number of the previous page."""
if self.page > 1:
return self.page - 1
return None
@property
def has_prev(self):
"""True if a previous page exists."""
return self.page > 1
@property
def prev(self):
"""The record for the previous page."""
if not self.has_prev:
return None
return self.config.get_record_for_page(self.current, self.page - 1)
@property
def has_next(self):
"""True if a following page exists."""
return self.page < self.pages
@property
def next_num(self):
"""The page number of the following page."""
if self.page < self.pages:
return self.page + 1
return None
@property
def next(self):
"""The record for the following page."""
if not self.has_next:
return None
return self.config.get_record_for_page(self.current, self.page + 1)
def for_page(self, page):
"""Returns the record for a specific page."""
if 1 <= page <= self.pages:
return self.config.get_record_for_page(self.current, page)
return None
def iter_pages(self, left_edge=2, left_current=2, right_current=5, right_edge=2):
"""Iterate over the page numbers in the pagination, with elision.
In the general case, this returns the concatenation of three ranges:
1. A range (always starting at page one) at the beginning
of the page number sequence. The length of the this
range is specified by the ``left_edge`` argument (which
may be zero).
2. A range around the current page. This range will
include ``left_current`` pages before, and
``right_current`` pages after the current page. This
range always includes the current page.
3. Finally, a range (always ending at the last page) at
the end of the page sequence. The length of this range
is specified by the ``right_edge`` argument.
If any of these ranges overlap, they will be merged. A
``None`` will be inserted between non-overlapping ranges to
signify that pages have been elided.
This is how you could render such a pagination in the templates:
.. sourcecode:: html+jinja
{% macro render_pagination(pagination, endpoint) %}
<div class=pagination>
{%- for page in pagination.iter_pages() %}
{% if page %}
{% if page != pagination.page %}
<a href="{{ url_for(endpoint, page=page) }}">{{ page }}</a>
{% else %}
<strong>{{ page }}</strong>
{% endif %}
{% else %}
<span class=ellipsis>...</span>
{% endif %}
{%- endfor %}
</div>
{% endmacro %}
"""
last = 0
for num in range(1, self.pages + 1):
# pylint: disable=chained-comparison
if (
num <= left_edge
or (
num >= self.page - left_current and num <= self.page + right_current
)
or num > self.pages - right_edge
):
if last + 1 != num:
yield None
yield num
last = num
if last != self.pages:
yield None
| null |
5,165 |
# Copyright (C) 2011 Canonical Ltd.
#
# Author: Scott Moser <[email protected]>
#
# This file is part of cloud-init. See LICENSE file for license information.
"""Power State Change: Change power state"""
import errno
import logging
import os
import re
import subprocess
import time
from textwrap import dedent
from cloudinit import subp, util
from cloudinit.cloud import Cloud
from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.distros import ALL_DISTROS
from cloudinit.settings import PER_INSTANCE
frequency = PER_INSTANCE
EXIT_FAIL = 254
MODULE_DESCRIPTION = """\
This module handles shutdown/reboot after all config modules have been run. By
default it will take no action, and the system will keep running unless a
package installation/upgrade requires a system reboot (e.g. installing a new
kernel) and ``package_reboot_if_required`` is true.
Using this module ensures that cloud-init is entirely finished with
modules that would be executed.
An example to distinguish delay from timeout:
If you delay 5 (5 minutes) and have a timeout of
120 (2 minutes), then the max time until shutdown will be 7 minutes, though
it could be as soon as 5 minutes. Cloud-init will invoke 'shutdown +5' after
the process finishes, or when 'timeout' seconds have elapsed.
.. note::
With Alpine Linux any message value specified is ignored as Alpine's halt,
poweroff, and reboot commands do not support broadcasting a message.
"""
meta: MetaSchema = {
"id": "cc_power_state_change",
"name": "Power State Change",
"title": "Change power state",
"description": MODULE_DESCRIPTION,
"distros": [ALL_DISTROS],
"frequency": PER_INSTANCE,
"examples": [
dedent(
"""\
power_state:
delay: now
mode: poweroff
message: Powering off
timeout: 2
condition: true
"""
),
dedent(
"""\
power_state:
delay: 30
mode: reboot
message: Rebooting machine
condition: test -f /var/tmp/reboot_me
"""
),
],
"activate_by_schema_keys": ["power_state"],
}
__doc__ = get_meta_doc(meta)
LOG = logging.getLogger(__name__)
def givecmdline(pid):
# Returns the cmdline for the given process id. In Linux we can use procfs
# for this but on BSD there is /usr/bin/procstat.
try:
# Example output from procstat -c 1
# PID COMM ARGS
# 1 init /bin/init --
if util.is_FreeBSD():
(output, _err) = subp.subp(["procstat", "-c", str(pid)])
line = output.splitlines()[1]
m = re.search(r"\d+ (\w|\.|-)+\s+(/\w.+)", line)
return m.group(2)
else:
return util.load_file("/proc/%s/cmdline" % pid)
except IOError:
return None
def METHOD_NAME(cond):
if isinstance(cond, bool):
LOG.debug("Static Condition: %s", cond)
return cond
pre = "check_condition command (%s): " % cond
try:
proc = subprocess.Popen(cond, shell=not isinstance(cond, list))
proc.communicate()
ret = proc.returncode
if ret == 0:
LOG.debug("%sexited 0. condition met.", pre)
return True
elif ret == 1:
LOG.debug("%sexited 1. condition not met.", pre)
return False
else:
LOG.warning("%sunexpected exit %s. do not apply change.", pre, ret)
return False
except Exception as e:
LOG.warning("%sUnexpected error: %s", pre, e)
return False
def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
try:
(args, timeout, condition) = load_power_state(cfg, cloud.distro)
if args is None:
LOG.debug("no power_state provided. doing nothing")
return
except Exception as e:
LOG.warning("%s Not performing power state change!", str(e))
return
if condition is False:
LOG.debug("Condition was false. Will not perform state change.")
return
mypid = os.getpid()
cmdline = givecmdline(mypid)
if not cmdline:
LOG.warning("power_state: failed to get cmdline of current process")
return
devnull_fp = open(os.devnull, "w")
LOG.debug("After pid %s ends, will execute: %s", mypid, " ".join(args))
util.fork_cb(
run_after_pid_gone,
mypid,
cmdline,
timeout,
condition,
execmd,
[args, devnull_fp],
)
def load_power_state(cfg, distro):
# returns a tuple of shutdown_command, timeout
# shutdown_command is None if no config found
pstate = cfg.get("power_state")
if pstate is None:
return (None, None, None)
if not isinstance(pstate, dict):
raise TypeError("power_state is not a dict.")
modes_ok = ["halt", "poweroff", "reboot"]
mode = pstate.get("mode")
if mode not in distro.shutdown_options_map:
raise TypeError(
"power_state[mode] required, must be one of: %s. found: '%s'."
% (",".join(modes_ok), mode)
)
args = distro.shutdown_command(
mode=mode,
delay=pstate.get("delay", "now"),
message=pstate.get("message"),
)
try:
timeout = float(pstate.get("timeout", 30.0))
except ValueError as e:
raise ValueError(
"failed to convert timeout '%s' to float." % pstate["timeout"]
) from e
condition = pstate.get("condition", True)
if not isinstance(condition, (str, list, bool)):
raise TypeError("condition type %s invalid. must be list, bool, str")
return (args, timeout, condition)
def doexit(sysexit):
os._exit(sysexit)
def execmd(exe_args, output=None, data_in=None):
ret = 1
try:
proc = subprocess.Popen(
exe_args,
stdin=subprocess.PIPE,
stdout=output,
stderr=subprocess.STDOUT,
)
proc.communicate(data_in)
ret = proc.returncode
except Exception:
doexit(EXIT_FAIL)
doexit(ret)
def run_after_pid_gone(pid, pidcmdline, timeout, condition, func, args):
# wait until pid, with /proc/pid/cmdline contents of pidcmdline
# is no longer alive. After it is gone, or timeout has passed
# execute func(args)
msg = None
end_time = time.time() + timeout
def fatal(msg):
LOG.warning(msg)
doexit(EXIT_FAIL)
known_errnos = (errno.ENOENT, errno.ESRCH)
while True:
if time.time() > end_time:
msg = "timeout reached before %s ended" % pid
break
try:
cmdline = givecmdline(pid)
if cmdline != pidcmdline:
msg = "cmdline changed for %s [now: %s]" % (pid, cmdline)
break
except IOError as ioerr:
if ioerr.errno in known_errnos:
msg = "pidfile gone [%d]" % ioerr.errno
else:
fatal("IOError during wait: %s" % ioerr)
break
except Exception as e:
fatal("Unexpected Exception: %s" % e)
time.sleep(0.25)
if not msg:
fatal("Unexpected error in run_after_pid_gone")
LOG.debug(msg)
try:
if not METHOD_NAME(condition):
return
except Exception as e:
fatal("Unexpected Exception when checking condition: %s" % e)
func(*args)
# vi: ts=4 expandtab
| null |
5,166 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import os
import unittest
from mantid.simpleapi import *
from mantid.api import MatrixWorkspace, WorkspaceGroup
from mantid import config
class IndirectILLEnergyTransferTest(unittest.TestCase):
_runs = dict(
[
("one_wing_QENS", "090661"),
("one_wing_EFWS", "083072"),
("one_wing_IFWS", "083073"),
("two_wing_QENS", "136558-136559"),
("two_wing_EFWS", "143720"),
("two_wing_IFWS", "170300"),
("bats", "215962"),
("3_single_dets", "318724"),
]
)
# cache the def instrument and data search dirs
_def_fac = config["default.facility"]
_def_inst = config["default.instrument"]
_data_dirs = config["datasearch.directories"]
def setUp(self):
# set instrument and append datasearch directory
config["default.facility"] = "ILL"
config["default.instrument"] = "IN16B"
config.appendDataSearchSubDir("ILL/IN16B/")
def tearDown(self):
# set cached facility and datasearch directory
config["default.facility"] = self._def_fac
config["default.instrument"] = self._def_inst
config["datasearch.directories"] = self._data_dirs
def test_complete_options(self):
# Tests for map file, no verbose, multiple runs, and crop dead channels for two wing QENS data
# manually get name of grouping file from parameter file
idf = os.path.join(config["instrumentDefinition.directory"], "IN16B_Definition.xml")
ipf = os.path.join(config["instrumentDefinition.directory"], "IN16B_Parameters.xml")
ws = LoadEmptyInstrument(Filename=idf)
LoadParameterFile(ws, Filename=ipf)
instrument = ws.getInstrument()
grouping_filename = instrument.getStringParameter("Workflow.GroupingFile")[0]
DeleteWorkspace(ws)
args = {
"Run": self._runs["two_wing_QENS"],
"MapFile": os.path.join(config["groupingFiles.directory"], grouping_filename),
"CropDeadMonitorChannels": True,
"OutputWorkspace": "red",
}
IndirectILLEnergyTransfer(**args)
self._check_workspace_group(mtd["red"], 2, 18, 1017)
deltaE = mtd["red"][0].readX(0)
bsize = mtd["red"][0].blocksize()
self.assertAlmostEqual(deltaE[bsize // 2], 0, 4)
self.assertTrue(deltaE[-1] > -deltaE[0])
def test_one_wing_QENS(self):
# tests one wing QENS with PSD range
args = {"Run": self._runs["one_wing_QENS"], "ManualPSDIntegrationRange": [20, 100], "OutputWorkspace": "res"}
res = IndirectILLEnergyTransfer(**args)
self._check_workspace_group(res, 1, 18, 1024)
deltaE = res[0].readX(0)
bsize = res[0].blocksize()
self.assertEqual(deltaE[bsize // 2], 0)
self.assertTrue(deltaE[-1] > -deltaE[0])
def test_one_wing_EFWS(self):
args = {"Run": self._runs["one_wing_EFWS"], "OutputWorkspace": "res"}
res = IndirectILLEnergyTransfer(**args)
self._check_workspace_group(res, 1, 18, 256)
def test_one_wing_IFWS(self):
args = {"Run": self._runs["one_wing_IFWS"], "OutputWorkspace": "res"}
res = IndirectILLEnergyTransfer(**args)
self._check_workspace_group(res, 1, 18, 256)
def test_two_wing_EFWS(self):
args = {"Run": self._runs["two_wing_EFWS"], "OutputWorkspace": "res"}
res = IndirectILLEnergyTransfer(**args)
self._check_workspace_group(res, 2, 18, 8)
def test_two_wing_IFWS(self):
args = {"Run": self._runs["two_wing_IFWS"], "OutputWorkspace": "res"}
res = IndirectILLEnergyTransfer(**args)
self._check_workspace_group(res, 2, 18, 512)
def test_spectrum_axis(self):
args = {"Run": self._runs["one_wing_EFWS"], "SpectrumAxis": "2Theta", "OutputWorkspace": "res"}
res = IndirectILLEnergyTransfer(**args)
self.assertTrue(res.getItem(0).getAxis(1).getUnit().unitID(), "Theta")
def METHOD_NAME(self):
args = {"Run": self._runs["bats"], "PulseChopper": "34", "GroupDetectors": False, "OutputWorkspace": "res"}
res = IndirectILLEnergyTransfer(**args)
self._check_workspace_group(res, 1, 2050, 1121)
def test_bats_monitor(self):
args = {
"Run": self._runs["bats"],
"PulseChopper": "34",
"GroupDetectors": False,
"DeleteMonitorWorkspace": False,
"OutputWorkspace": "res",
}
res = IndirectILLEnergyTransfer(**args)
mon_ws = "res_215962_mon"
self.assertTrue(mtd.doesExist(mon_ws))
self.assertTrue(mtd[mon_ws])
self.assertTrue(isinstance(mtd[mon_ws], MatrixWorkspace))
self.assertEqual(mtd[mon_ws].getAxis(0).getUnit().unitID(), "DeltaE")
self._check_workspace_group(res, 1, 2050, 1121)
def test_bats_grouped(self):
args = {"Run": self._runs["bats"], "PulseChopper": "34", "OutputWorkspace": "res"}
res = IndirectILLEnergyTransfer(**args)
self._check_workspace_group(res, 1, 18, 1121)
def test_psd_tubes_only(self):
args = {"Run": self._runs["one_wing_QENS"], "DiscardSingleDetectors": True, "OutputWorkspace": "res"}
res = IndirectILLEnergyTransfer(**args)
self._check_workspace_group(res, 1, 16, 1024)
def test_3_sd(self):
args = {
"Run": self._runs["3_single_dets"],
"DiscardSingleDetectors": False,
"GroupDetectors": False,
"OutputWorkspace": "res",
}
res = IndirectILLEnergyTransfer(**args)
self._check_workspace_group(res, 1, 2051, 984)
def test_equatorial_fit(self):
args = {
"Run": self._runs["3_single_dets"],
"OutputWorkspace": "res",
"DiscardSingleDetectors": False,
"GroupDetectors": False,
"ElasticPeakFitting": "FitEquatorialOnly",
"OutputElasticChannelWorkspace": "out_epp_ws",
}
IndirectILLEnergyTransfer(**args)
self._check_workspace_group(mtd["res"], 1, 2051, 984)
epp_ws = mtd["out_epp_ws"]
self.assertEqual(epp_ws.rowCount(), 4)
def test_fit_all(self):
args = {
"Run": self._runs["3_single_dets"],
"OutputWorkspace": "res",
"DiscardSingleDetectors": False,
"GroupDetectors": False,
"ElasticPeakFitting": "FitAllPixelGroups",
"OutputElasticChannelWorkspace": "out_epp_ws",
}
IndirectILLEnergyTransfer(**args)
self._check_workspace_group(mtd["res"], 1, 2051, 984)
epp_ws = mtd["out_epp_ws"]
self.assertEqual(epp_ws.rowCount(), 516)
def _check_workspace_group(self, wsgroup, nentries, nspectra, nbins):
self.assertTrue(isinstance(wsgroup, WorkspaceGroup))
self.assertEqual(wsgroup.getNumberOfEntries(), nentries)
item = wsgroup.getItem(0)
self.assertTrue(isinstance(item, MatrixWorkspace))
self.assertEqual(item.getAxis(0).getUnit().unitID(), "DeltaE")
self.assertEqual(item.getNumberHistograms(), nspectra)
self.assertEqual(item.blocksize(), nbins)
self.assertTrue(item.getSampleDetails())
self.assertTrue(item.getHistory().lastAlgorithm())
if __name__ == "__main__":
unittest.main()
| null |
5,167 |
"""sqlite3 CLI tests."""
import sqlite3 as sqlite
import subprocess
import sys
import unittest
from test.support import SHORT_TIMEOUT#, requires_subprocess
from test.support.os_helper import TESTFN, unlink
# TODO: RUSTPYTHON
#@requires_subprocess()
class CommandLineInterface(unittest.TestCase):
def _do_test(self, *args, expect_success=True):
with subprocess.Popen(
[sys.executable, "-Xutf8", "-m", "sqlite3", *args],
encoding="utf-8",
bufsize=0,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
) as proc:
proc.wait()
if expect_success == bool(proc.returncode):
self.fail("".join(proc.stderr))
stdout = proc.stdout.read()
stderr = proc.stderr.read()
if expect_success:
self.assertEqual(stderr, "")
else:
self.assertEqual(stdout, "")
return stdout, stderr
def expect_success(self, *args):
out, _ = self._do_test(*args)
return out
def expect_failure(self, *args):
_, err = self._do_test(*args, expect_success=False)
return err
def test_cli_help(self):
out = self.expect_success("-h")
self.assertIn("usage: python -m sqlite3", out)
def test_cli_version(self):
out = self.expect_success("-v")
self.assertIn(sqlite.sqlite_version, out)
def test_cli_execute_sql(self):
out = self.expect_success(":memory:", "select 1")
self.assertIn("(1,)", out)
def test_cli_execute_too_much_sql(self):
stderr = self.expect_failure(":memory:", "select 1; select 2")
err = "ProgrammingError: You can only execute one statement at a time"
self.assertIn(err, stderr)
def test_cli_execute_incomplete_sql(self):
stderr = self.expect_failure(":memory:", "sel")
self.assertIn("OperationalError (SQLITE_ERROR)", stderr)
def test_cli_on_disk_db(self):
self.addCleanup(unlink, TESTFN)
out = self.expect_success(TESTFN, "create table t(t)")
self.assertEqual(out, "")
out = self.expect_success(TESTFN, "select count(t) from t")
self.assertIn("(0,)", out)
# TODO: RUSTPYTHON
#@requires_subprocess()
class InteractiveSession(unittest.TestCase):
TIMEOUT = SHORT_TIMEOUT / 10.
MEMORY_DB_MSG = "Connected to a transient in-memory database"
PS1 = "sqlite> "
PS2 = "... "
def start_cli(self, *args):
return subprocess.Popen(
[sys.executable, "-Xutf8", "-m", "sqlite3", *args],
encoding="utf-8",
bufsize=0,
stdin=subprocess.PIPE,
# Note: the banner is printed to stderr, the prompt to stdout.
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
def expect_success(self, proc):
proc.wait()
if proc.returncode:
self.fail("".join(proc.stderr))
def test_interact(self):
with self.start_cli() as proc:
out, err = proc.communicate(timeout=self.TIMEOUT)
self.assertIn(self.MEMORY_DB_MSG, err)
self.assertIn(self.PS1, out)
self.expect_success(proc)
def test_interact_quit(self):
with self.start_cli() as proc:
out, err = proc.communicate(input=".quit", timeout=self.TIMEOUT)
self.assertIn(self.MEMORY_DB_MSG, err)
self.assertIn(self.PS1, out)
self.expect_success(proc)
def test_interact_version(self):
with self.start_cli() as proc:
out, err = proc.communicate(input=".version", timeout=self.TIMEOUT)
self.assertIn(self.MEMORY_DB_MSG, err)
self.assertIn(sqlite.sqlite_version, out)
self.expect_success(proc)
def test_interact_valid_sql(self):
with self.start_cli() as proc:
out, err = proc.communicate(input="select 1;",
timeout=self.TIMEOUT)
self.assertIn(self.MEMORY_DB_MSG, err)
self.assertIn("(1,)", out)
self.expect_success(proc)
def test_interact_valid_multiline_sql(self):
with self.start_cli() as proc:
out, err = proc.communicate(input="select 1\n;",
timeout=self.TIMEOUT)
self.assertIn(self.MEMORY_DB_MSG, err)
self.assertIn(self.PS2, out)
self.assertIn("(1,)", out)
self.expect_success(proc)
def test_interact_invalid_sql(self):
with self.start_cli() as proc:
out, err = proc.communicate(input="sel;", timeout=self.TIMEOUT)
self.assertIn(self.MEMORY_DB_MSG, err)
self.assertIn("OperationalError (SQLITE_ERROR)", err)
self.expect_success(proc)
def METHOD_NAME(self):
self.addCleanup(unlink, TESTFN)
with self.start_cli(TESTFN) as proc:
out, err = proc.communicate(input="create table t(t);",
timeout=self.TIMEOUT)
self.assertIn(TESTFN, err)
self.assertIn(self.PS1, out)
self.expect_success(proc)
with self.start_cli(TESTFN, "select count(t) from t") as proc:
out = proc.stdout.read()
err = proc.stderr.read()
self.assertIn("(0,)", out)
self.expect_success(proc)
if __name__ == "__main__":
unittest.main()
| null |
5,168 |
import http
from typing import FrozenSet, Optional
from fastapi import FastAPI, Path, Query
app = FastAPI()
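# The routes below exercise path and query parameter handling: type coercion,
# length and numeric bound constraints, defaults, and required parameters.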
@app.api_route("/api_route")
def non_operation():
return {"message": "Hello World"}
def non_decorated_route():
return {"message": "Hello World"}
app.add_api_route("/non_decorated_route", non_decorated_route)
@app.get("/text")
def get_text():
return "Hello World"
@app.get("/path/{item_id}")
def get_id(item_id):
return item_id
@app.get("/path/str/{item_id}")
def get_str_id(item_id: str):
return item_id
@app.get("/path/int/{item_id}")
def METHOD_NAME(item_id: int):
return item_id
@app.get("/path/float/{item_id}")
def get_float_id(item_id: float):
return item_id
@app.get("/path/bool/{item_id}")
def get_bool_id(item_id: bool):
return item_id
@app.get("/path/param/{item_id}")
def get_path_param_id(item_id: Optional[str] = Path()):
return item_id
@app.get("/path/param-minlength/{item_id}")
def get_path_param_min_length(item_id: str = Path(min_length=3)):
return item_id
@app.get("/path/param-maxlength/{item_id}")
def get_path_param_max_length(item_id: str = Path(max_length=3)):
return item_id
@app.get("/path/param-min_maxlength/{item_id}")
def get_path_param_min_max_length(item_id: str = Path(max_length=3, min_length=2)):
return item_id
@app.get("/path/param-gt/{item_id}")
def get_path_param_gt(item_id: float = Path(gt=3)):
return item_id
@app.get("/path/param-gt0/{item_id}")
def get_path_param_gt0(item_id: float = Path(gt=0)):
return item_id
@app.get("/path/param-ge/{item_id}")
def get_path_param_ge(item_id: float = Path(ge=3)):
return item_id
@app.get("/path/param-lt/{item_id}")
def get_path_param_lt(item_id: float = Path(lt=3)):
return item_id
@app.get("/path/param-lt0/{item_id}")
def get_path_param_lt0(item_id: float = Path(lt=0)):
return item_id
@app.get("/path/param-le/{item_id}")
def get_path_param_le(item_id: float = Path(le=3)):
return item_id
@app.get("/path/param-lt-gt/{item_id}")
def get_path_param_lt_gt(item_id: float = Path(lt=3, gt=1)):
return item_id
@app.get("/path/param-le-ge/{item_id}")
def get_path_param_le_ge(item_id: float = Path(le=3, ge=1)):
return item_id
@app.get("/path/param-lt-int/{item_id}")
def get_path_param_lt_int(item_id: int = Path(lt=3)):
return item_id
@app.get("/path/param-gt-int/{item_id}")
def get_path_param_gt_int(item_id: int = Path(gt=3)):
return item_id
@app.get("/path/param-le-int/{item_id}")
def get_path_param_le_int(item_id: int = Path(le=3)):
return item_id
@app.get("/path/param-ge-int/{item_id}")
def get_path_param_ge_int(item_id: int = Path(ge=3)):
return item_id
@app.get("/path/param-lt-gt-int/{item_id}")
def get_path_param_lt_gt_int(item_id: int = Path(lt=3, gt=1)):
return item_id
@app.get("/path/param-le-ge-int/{item_id}")
def get_path_param_le_ge_int(item_id: int = Path(le=3, ge=1)):
return item_id
@app.get("/query")
def get_query(query):
return f"foo bar {query}"
@app.get("/query/optional")
def get_query_optional(query=None):
if query is None:
return "foo bar"
return f"foo bar {query}"
@app.get("/query/int")
def get_query_type(query: int):
return f"foo bar {query}"
@app.get("/query/int/optional")
def get_query_type_optional(query: Optional[int] = None):
if query is None:
return "foo bar"
return f"foo bar {query}"
@app.get("/query/int/default")
def get_query_type_int_default(query: int = 10):
return f"foo bar {query}"
@app.get("/query/param")
def get_query_param(query=Query(default=None)):
if query is None:
return "foo bar"
return f"foo bar {query}"
@app.get("/query/param-required")
def get_query_param_required(query=Query()):
return f"foo bar {query}"
@app.get("/query/param-required/int")
def get_query_param_required_type(query: int = Query()):
return f"foo bar {query}"
@app.get("/enum-status-code", status_code=http.HTTPStatus.CREATED)
def get_enum_status_code():
return "foo bar"
@app.get("/query/frozenset")
def get_query_type_frozenset(query: FrozenSet[int] = Query(...)):
return ",".join(map(str, sorted(query)))
| null |
5,169 |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import pytest
import numpy as np
import mindspore as ms
import mindspore.nn as nn
from mindspore.common.api import _cell_graph_executor
from mindspore.ops import operations as P
from mindspore.ops import composite as C
from mindspore import Tensor, context
from mindspore.nn import TrainOneStepCell, Adam
from tests.ut.python.ops.test_math_ops import VirtualLoss
def setup_function():
context.set_auto_parallel_context(dataset_strategy="full_batch")
grad_all = C.GradOperation(get_all=True)
@pytest.fixture(name="test_context")
def _test_context():
context.set_context(mode=context.GRAPH_MODE)
yield
context.reset_auto_parallel_context()
class GradWrap(nn.Cell):
def __init__(self, network):
super(GradWrap, self).__init__()
self.network = network
def construct(self, x, y, z):
return grad_all(self.network)(x, y, z)
class NetWithLoss(nn.Cell):
def __init__(self, network):
super(NetWithLoss, self).__init__()
self.loss = VirtualLoss()
self.network = network
def construct(self, x, y, z):
predict = self.network(x, y, z)
return self.loss(predict)
class Net(nn.Cell):
def __init__(self, shape, field_size=10, slice_mode=nn.EmbeddingLookup.BATCH_SLICE, target="Device",
operator='SUM'):
super().__init__()
self.embedding = nn.MultiFieldEmbeddingLookup(vocab_size=32, embedding_size=64, target=target,
field_size=field_size, slice_mode=slice_mode, operator=operator)
self.reshape = P.Reshape()
self.batch_size = shape[0]
def construct(self, x, y, z):
out = self.embedding(x, y, z)
out = self.reshape(out, (self.batch_size, -1))
return out
def compile_net(net, shape):
x = Tensor(np.ones(shape), dtype=ms.int32)
y = Tensor(np.ones(shape), dtype=ms.float32)
z = Tensor(np.ones(shape), dtype=ms.int32)
optimizer = Adam(net.trainable_params(), learning_rate=0.1)
train_net = TrainOneStepCell(net, optimizer)
train_net.set_train()
_cell_graph_executor.compile(train_net, x, y, z)
context.reset_auto_parallel_context()
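# The tests below cover MultiFieldEmbeddingLookup under semi-auto parallel with the
# BATCH_SLICE, TABLE_ROW_SLICE and TABLE_COLUMN_SLICE modes and SUM/MEAN/MAX operators.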
def test_embeddinglookup_batch_parallel_sum(test_context):
context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
shape = [64, 64]
net = NetWithLoss(Net(shape, field_size=10, target='DEVICE'))
compile_net(net, shape)
def METHOD_NAME(test_context):
context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
shape = [64, 64]
net = NetWithLoss(Net(shape, field_size=9, slice_mode=nn.EmbeddingLookup.TABLE_ROW_SLICE, target='DEVICE'))
compile_net(net, shape)
def test_embeddinglookup_column_parallel_sum(test_context):
context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
shape = [64, 64]
net = NetWithLoss(Net(shape, field_size=10, slice_mode=nn.EmbeddingLookup.TABLE_COLUMN_SLICE, target='DEVICE'))
compile_net(net, shape)
def test_embeddinglookup_batch_parallel_mean(test_context):
context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
shape = [64, 64]
net = NetWithLoss(Net(shape, field_size=1, target='DEVICE', operator='MEAN'))
compile_net(net, shape)
def test_embeddinglookup_column_parallel_mean(test_context):
context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
shape = [64, 64]
net = NetWithLoss(Net(shape, target='DEVICE', slice_mode=nn.EmbeddingLookup.TABLE_COLUMN_SLICE, operator='MEAN'))
compile_net(net, shape)
def test_embeddinglookup_row_parallel_mean(test_context):
context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
shape = [64, 64]
net = NetWithLoss(Net(shape, target='DEVICE', slice_mode=nn.EmbeddingLookup.TABLE_ROW_SLICE, operator='MEAN'))
compile_net(net, shape)
def test_embeddinglookup_batch_parallel_max(test_context):
context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
shape = [64, 64]
net = NetWithLoss(Net(shape, target='DEVICE', operator='MAX'))
compile_net(net, shape)
def test_embeddinglookup_column_parallel_max(test_context):
context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
shape = [64, 64]
net = NetWithLoss(Net(shape, target='DEVICE', slice_mode=nn.EmbeddingLookup.TABLE_COLUMN_SLICE, operator='MAX'))
compile_net(net, shape)
def test_embeddinglookup_row_parallel_max(test_context):
context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
shape = [64, 64]
net = NetWithLoss(Net(shape, target='DEVICE', slice_mode=nn.EmbeddingLookup.TABLE_ROW_SLICE, operator='MAX'))
compile_net(net, shape)
| null |
5,170 |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" test distribute predict """
import numpy as np
import pytest
import mindspore.nn as nn
from mindspore import Tensor, Model
from mindspore.ops import operations as P
from mindspore import context
from mindspore.parallel._utils import _infer_rank_list
class Net(nn.Cell):
"""Net definition"""
def __init__(self):
super(Net, self).__init__()
self.fc1 = nn.Dense(128, 768, activation='relu')
self.fc2 = nn.Dense(128, 768, activation='relu')
self.fc3 = nn.Dense(128, 768, activation='relu')
self.fc4 = nn.Dense(768, 768, activation='relu')
self.relu4 = nn.ReLU()
self.relu5 = nn.ReLU()
self.transpose = P.Transpose()
self.matmul1 = P.MatMul()
self.matmul2 = P.MatMul()
def METHOD_NAME(self, x):
q = self.fc1(x)
k = self.fc2(x)
v = self.fc3(x)
k = self.transpose(k, (1, 0))
c = self.relu4(self.matmul1(q, k))
s = self.relu5(self.matmul2(c, v))
s = self.fc4(s)
return s
def test_distribute_predict():
context.set_context(mode=context.GRAPH_MODE)
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, full_batch=True,
enable_parallel_optimizer=True)
inputs = Tensor(np.ones([32, 128]).astype(np.float32))
net = Net()
model = Model(net)
predict_map = model.infer_predict_layout(inputs)
output = model.predict(inputs)
context.reset_auto_parallel_context()
return predict_map, output
def test_edge_case():
context.set_context(mode=context.GRAPH_MODE)
inputs = Tensor(np.ones([32, 48]).astype(np.float32))
net = Net()
model = Model(net)
with pytest.raises(RuntimeError):
model.infer_predict_layout(inputs)
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
with pytest.raises(ValueError):
model.infer_predict_layout(inputs)
# standalone predict
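# Layouts are [device_arrangement, tensor_map] per parameter; _infer_rank_list returns,
# for each parameter, the candidate source ranks plus a layout-compatibility flag.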
def test_infer_rank_list1():
train_map = {'weight': [[4, 8], [-1, 0]]}
predict_map = None
rank_list = _infer_rank_list(train_map, predict_map)["weight"]
assert list(rank_list[0]) == [0, 1, 2, 3, 4, 5, 6, 7]
assert rank_list[1] is False
# similar layout: gpt3 prediction mode
def test_infer_rank_list2():
train_map = {'weight': [[4, 8], [-1, 0]]}
predict_map = {'weight': [[8], [-1, 0]]}
rank_list = _infer_rank_list(train_map, predict_map)
expect_map = {'weight': ([0], True)}
assert rank_list == expect_map
# same layout
def test_infer_rank_list3():
train_map = {'weight': [[4, 8], [-1, 0]]}
predict_map = {'weight': [[4, 8], [-1, 0]]}
rank_list = _infer_rank_list(train_map, predict_map)
expect_map = {'weight': ([0], True)}
assert rank_list == expect_map
# totally different layout
def test_infer_rank_list4():
train_map = {'weight': [[4, 8], [-1, 0]]}
predict_map = {'weight': [[2, 2], [1, 0]]}
rank_list = _infer_rank_list(train_map, predict_map)["weight"]
assert list(rank_list[0]) == [0, 1, 2, 3, 4, 5, 6, 7]
assert rank_list[1] is False
# full shape ckpt
def test_infer_rank_list5():
train_map = {'weight': [[8], [-1, -1]]}
predict_map = {'weight': [[2, 2], [1, 0]]}
rank_list = _infer_rank_list(train_map, predict_map)
expect_map = {'weight': ([0], False)}
assert rank_list == expect_map
| null |
5,171 |
from django.template import Template, Context
from django.utils.translation import gettext_lazy
from devilry.apps.core import models as core_models
class UserInfo(object):
def __init__(self, groupuserlookup, user):
self.groupuserlookup = groupuserlookup
self.user = user
@property
def candidate(self):
if not hasattr(self, '_candidate'):
try:
self._candidate = core_models.Candidate.objects.get(
assignment_group=self.groupuserlookup.group,
relatedstudent__user=self.user)
except core_models.Candidate.DoesNotExist:
self._candidate = None
return self._candidate
@property
def relatedexaminer(self):
if not hasattr(self, '_relatedexaminer'):
try:
self._relatedexaminer = core_models.RelatedExaminer.objects.get(
period_id=self.groupuserlookup.assignment.parentnode_id,
user=self.user)
except core_models.RelatedExaminer.DoesNotExist:
self._relatedexaminer = None
return self._relatedexaminer
@property
def relatedstudent(self):
if not hasattr(self, '_relatedstudent'):
try:
self._relatedstudent = core_models.RelatedStudent.objects.get(
period_id=self.groupuserlookup.assignment.parentnode_id,
user=self.user)
except core_models.RelatedStudent.DoesNotExist:
self._relatedstudent = None
return self._relatedstudent
def _render_template(self, templatestring, **contextdata):
return Template(templatestring).render(Context(contextdata))
def _render_span(self, cssclass, content):
return self._render_template("""
<span class="{{ cssclass }}">
{{ content }}
</span>
""", cssclass=cssclass, content=content)
def get_unanonymized_long_name_from_user(self, user, html=False):
if user is None:
fallback = gettext_lazy('Deleted user')
if html:
return self._render_span(cssclass='text-danger', content=fallback)
else:
return fallback
if html:
return self._render_template('{% load devilry_account_tags %}{% devilry_user_verbose_inline user %}', user=user)
else:
return user.get_displayname()
def METHOD_NAME(self, user, html=False):
if user is None:
fallback = gettext_lazy('Deleted user')
if html:
return self._render_span(cssclass='text-danger', content=fallback)
else:
return fallback
return user.get_short_name()
def __get_anonymized_name_from_user(self, user, user_role):
if user_role == 'student':
if self.groupuserlookup.assignment.uses_custom_candidate_ids:
return self.candidate.get_anonymous_name(assignment=self.groupuserlookup.assignment)
elif self.relatedstudent:
return self.relatedstudent.get_anonymous_name()
elif user_role == 'examiner':
if self.relatedexaminer:
return self.relatedexaminer.get_anonymous_name()
else:
raise ValueError('Can only call __get_anonymized_name_from_user '
'with user_role "examiner" or "student".')
return gettext_lazy('User removed from semester')
def get_anonymized_name_from_user(self, user, user_role, html=False):
name = self.__get_anonymized_name_from_user(user=user, user_role=user_role)
if html:
if user_role == 'student':
return self._render_span(cssclass='devilry-core-candidate-anonymous-name',
content=name)
else:
return self._render_span(cssclass='devilry-core-examiner-anonymous-name',
content=name)
return name
class GroupUserLookup(object):
"""
"""
def __init__(self, assignment, group, requestuser_devilryrole, requestuser=None):
"""
Args:
group:
requestuser:
requestuser_devilryrole:
"""
assert assignment.id == group.parentnode_id
self.assignment = assignment
self.group = group
self.requestuser = requestuser
self.requestuser_devilryrole = requestuser_devilryrole
self._usercache = {}
def is_requestuser(self, user):
"""
"""
if not self.requestuser:
return False
return self.requestuser == user
def _get_userinfo(self, user):
if user.id not in self._usercache:
self._usercache[user.id] = UserInfo(groupuserlookup=self, user=user)
return self._usercache[user.id]
def get_long_name_from_user(self, user, user_role, html=False):
userinfo = self._get_userinfo(user=user)
if not self.is_requestuser(user=user):
if user_role == 'student' and self.assignment.students_must_be_anonymized_for_devilryrole(devilryrole=self.requestuser_devilryrole):
return userinfo.get_anonymized_name_from_user(user=user, user_role=user_role, html=html)
elif user_role == 'examiner' and self.assignment.examiners_must_be_anonymized_for_devilryrole(devilryrole=self.requestuser_devilryrole):
return userinfo.get_anonymized_name_from_user(user=user, user_role=user_role, html=html)
return userinfo.get_unanonymized_long_name_from_user(user=user, html=html)
def get_plaintext_short_name_from_user(self, user, user_role, html=False):
userinfo = self._get_userinfo(user=user)
if not self.is_requestuser(user=user):
if user_role == 'student' and self.assignment.students_must_be_anonymized_for_devilryrole(devilryrole=self.requestuser_devilryrole):
return userinfo.get_anonymized_name_from_user(user=user, user_role=user_role, html=html)
elif user_role == 'examiner' and self.assignment.examiners_must_be_anonymized_for_devilryrole(devilryrole=self.requestuser_devilryrole):
return userinfo.get_anonymized_name_from_user(user=user, user_role=user_role, html=html)
return userinfo.METHOD_NAME(user=user)
| null |
5,172 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
"""
Mantid
======
http://www.mantidproject.org
The Mantid project provides a platform that supports high-performance computing
on neutron and muon data. The framework provides a set of common services,
algorithms and data objects that are:
- Instrument or technique independent;
- Supported on multiple target platforms (Windows, Linux, Mac OS X);
- Easily extensible by Instruments Scientists/Users;
- Open source and freely redistributable to visiting scientists;
- Provides functionalities for Scripting, Visualization, Data transformation,
Implementing Algorithms, Virtual Instrument Geometry.
"""
import os
import sys
import site
from .buildconfig import check_python_version
check_python_version()
def apiVersion():
"""Indicates that this is version 2
of the API
"""
return 2
def METHOD_NAME():
"""
Generate a list of possible paths that contain the Mantid.properties file
"""
_moduledir = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))
# standard packaged install
yield _moduledir
# conda layout
yield os.path.dirname(sys.executable)
# conda windows layout
yield os.path.join(os.path.dirname(sys.executable), "Library", "bin")
# iterate over the PYTHONPATH, to scan all possible bin dirs
for path in sys.path:
yield path
# Bail out early if a Mantid.properties file is not found in
# one of the expected places - it indicates a broken installation or build.
_bindir = None
for path in METHOD_NAME():
if os.path.exists(os.path.join(path, "Mantid.properties")):
_bindir = path
break
if _bindir is None:
raise ImportError(
"Broken installation! Unable to find Mantid.properties file.\n" "Directories searched: {}".format(", ".join(METHOD_NAME()))
)
# Windows doesn't have rpath settings so make sure the C-extensions can find the rest of the
# mantid dlls. We assume they will be next to the properties file.
if sys.platform == "win32":
os.environ["PATH"] = _bindir + ";" + os.environ.get("PATH", "")
# Make sure the config service loads this properties file
os.environ["MANTIDPATH"] = _bindir
# Add directory as a site directory to process the .pth files
site.addsitedir(_bindir)
try:
# Flag indicating whether mantidplot layer is loaded.
import _qti # noqa: F401
__gui__ = True
except ImportError:
__gui__ = False
# Set deprecation warnings back to default (they are ignored in 2.7)
import warnings as _warnings
# Default we see everything
_warnings.filterwarnings("default", category=DeprecationWarning, module="mantid.*")
# We can't do anything about numpy.oldnumeric being deprecated but
# still used in other libraries, e.g scipy, so just ignore those
_warnings.filterwarnings("ignore", category=DeprecationWarning, module="numpy.oldnumeric")
###############################################################################
# Load all non-plugin subpackages that contain a C-extension. The boost.python
# registry will be missing entries if all are not loaded.
###############################################################################
from mantid import kernel as _kernel # noqa: F401
from mantid import api as _api # noqa: F401
from mantid import geometry as _geometry # noqa: F401
from mantid import dataobjects as _dataobjects # noqa: F401
# Make the aliases from each module accessible in the mantid namespace
from mantid.kernel._aliases import *
from mantid.api._aliases import *
# Make the version string and info accessible in the standard way
from mantid.kernel import version_str as _version_str
from mantid.kernel import version # noqa: F401
__version__ = _version_str()
| null |
5,173 |
"""Builder Base Classes"""
import os
from xonsh.lib import subprocess
from glob import glob
from itertools import groupby
from jinja2 import Environment, FileSystemLoader
try:
from bibtexparser.bwriter import BibTexWriter
from bibtexparser.bibdatabase import BibDatabase
HAVE_BIBTEX_PARSER = True
except ImportError:
HAVE_BIBTEX_PARSER = False
from regolith.sorters import doc_date_key, category_val, level_val, date_key
from regolith.tools import (
date_to_rfc822,
rfc822now,
gets,
LATEX_OPTS,
month_and_year,
latex_safe,
latex_safe_url)
class BuilderBase(object):
"""Base class for builders"""
def __init__(self, rc):
self.rc = rc
self.bldir = os.path.join(rc.builddir, self.btype)
# allow subclasses to override
if not hasattr(self, "env"):
self.env = Environment(
loader=FileSystemLoader(
[
"templates",
os.path.join(
os.path.dirname(os.path.dirname(__file__)),
"templates",
),
]
)
)
self.gtx = {}
self.construct_global_ctx()
self.cmds = []
def construct_global_ctx(self):
"""Constructs the global context"""
gtx = self.gtx
gtx["len"] = len
gtx["True"] = True
gtx["False"] = False
gtx["None"] = None
gtx["sorted"] = sorted
gtx["groupby"] = groupby
gtx["gets"] = gets
gtx["date_key"] = date_key
gtx["doc_date_key"] = doc_date_key
gtx["level_val"] = level_val
gtx["category_val"] = category_val
gtx["rfc822now"] = rfc822now
gtx["date_to_rfc822"] = date_to_rfc822
def render(self, tname, fname, **kwargs):
"""Render the template into a file using the kwargs and global context
Parameters
----------
tname : str
Template name
fname : str
Resulting file name
kwargs : dict
Additional kwargs to the renderer
"""
template = self.env.get_template(tname)
ctx = dict(self.gtx)
ctx.update(kwargs)
ctx["rc"] = ctx.get("rc", self.rc)
ctx["static"] = ctx.get(
"static", os.path.relpath("static", os.path.dirname(fname))
)
ctx["root"] = ctx.get(
"root", os.path.relpath("/", os.path.dirname(fname))
)
result = template.render(ctx)
with open(os.path.join(self.bldir, fname), "wt", encoding='utf-8'
) as f:
f.write(result)
def build(self):
"""Build the thing that is being built, note this runs all commands
listed in ``self.cmds``"""
os.makedirs(self.bldir, exist_ok=True)
for cmd in self.cmds:
getattr(self, cmd)()
class LatexBuilderBase(BuilderBase):
"""Base class for Latex builders"""
def __init__(self, rc):
super().__init__(rc)
self.cmds = ["latex", "clean"]
if HAVE_BIBTEX_PARSER:
self.bibdb = BibDatabase()
self.bibwriter = BibTexWriter()
def construct_global_ctx(self):
super().construct_global_ctx()
gtx = self.gtx
gtx["month_and_year"] = month_and_year
gtx["latex_safe"] = latex_safe
gtx["latex_safe_url"] = latex_safe_url
def METHOD_NAME(self, cmd):
"""Run command in build dir"""
subprocess.METHOD_NAME(cmd, cwd=self.bldir, check=True)
def pdf(self, base):
"""Compiles latex files to PDF"""
if self.rc.pdf:
if os.name == 'nt':
self.METHOD_NAME(["pdflatex"] + LATEX_OPTS + [base + ".tex"])
else:
self.METHOD_NAME(["latex"] + LATEX_OPTS + [base + ".tex"])
self.METHOD_NAME(["dvipdf", base])
def clean(self):
"""Remove files created by latex"""
postfixes = [
"*.dvi",
"*.toc",
"*.aux",
"*.out",
"*.log",
"*.bbl",
"*.blg",
"*.log",
"*.spl",
"*~",
"*.spl",
"*.run.xml",
"*-blx.bib",
]
to_rm = []
for pst in postfixes:
to_rm += glob(os.path.join(self.bldir, pst))
for f in set(to_rm):
os.remove(f)
| null |
5,174 |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Invert Bijector"""
from mindspore import _checkparam as validator
from .bijector import Bijector
class Invert(Bijector):
r"""
Invert Bijector. Compute the inverse function of the input bijector. If the function of the forward mapping,
namely the input of `bijector` below, is :math:`Y = g(X)`,
then the function of corresponding inverse mapping Bijector is :math:`Y = h(X) = g^{-1}(X)`.
Args:
bijector (Bijector): Base Bijector.
name (str): The name of the Bijector. Default: ``""`` . When name is set to "", it is actually
'Invert' + bijector.name.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import numpy as np
>>> import mindspore
>>> import mindspore.nn as nn
>>> import mindspore.nn.probability.bijector as msb
>>> from mindspore import Tensor
>>> class Net(nn.Cell):
... def __init__(self):
... super(Net, self).__init__()
... self.origin = msb.ScalarAffine(scale=2.0, shift=1.0)
... self.invert = msb.Invert(self.origin)
...
... def construct(self, x_):
... return self.invert.forward(x_)
>>> forward = Net()
>>> x = np.array([2.0, 3.0, 4.0, 5.0]).astype(np.float32)
>>> ans = forward(Tensor(x, dtype=mindspore.float32))
>>> print(ans.shape)
(4,)
"""
def __init__(self,
bijector,
name=""):
param = dict(locals())
validator.check_value_type('bijector', bijector, [Bijector], "Invert")
name = name or ('Invert' + bijector.name)
param["name"] = name
super(Invert, self).__init__(is_constant_jacobian=bijector.is_constant_jacobian,
is_injective=bijector.is_injective,
name=name,
dtype=bijector.dtype,
param=param)
self._bijector = bijector
self._batch_shape = self.bijector.batch_shape
self._is_scalar_batch = self.bijector.is_scalar_batch
@property
def bijector(self):
"""Return base bijector."""
return self._bijector
def inverse(self, y):
"""
Perform the inverse transformation of the inverse bijector,
namely the forward transformation of the underlying bijector.
Args:
y (Tensor): the value of the transformed random variable.
Output:
Tensor, the value of the input random variable.
"""
return self.bijector("forward", y)
def METHOD_NAME(self, x):
"""
Perform the forward transformation of the inverse bijector,
namely the inverse transformation of the underlying bijector.
Args:
x (Tensor): the value of the input random variable.
Output:
Tensor, the value of the transformed random variable.
"""
return self.bijector("inverse", x)
def inverse_log_jacobian(self, y):
"""
Logarithm of the derivative of the inverse transformation of the inverse bijector,
namely logarithm of the derivative of the forward transformation of the underlying bijector.
Args:
y (Tensor): the value of the transformed random variable.
Output:
Tensor, logarithm of the derivative of the inverse transformation of the inverse bijector.
"""
return self.bijector("forward_log_jacobian", y)
def forward_log_jacobian(self, x):
"""
Logarithm of the derivative of the forward transformation of the inverse bijector,
namely logarithm of the derivative of the inverse transformation of the underlying bijector.
Args:
x (Tensor): the value of the input random variable.
Output:
Tensor, logarithm of the derivative of the forward transformation of the inverse bijector.
"""
return self.bijector("inverse_log_jacobian", x)
| null |
5,175 |
# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import asyncio
from collections import deque
import logging
import pytest
from cylc.flow import CYLC_LOG
from cylc.flow.exceptions import CylcError
from cylc.flow.main_loop import (
CoroTypes,
MainLoopPluginException,
_wrapper,
get_runners,
load,
)
from cylc.flow.main_loop.health_check import health_check as hc_during
def test_load_plugins_blank():
"""Test that log_plugins works when no plugins are requested."""
conf = {
'plugins': []
}
assert load(conf) == {
'config': conf,
'state': {},
'timings': {}
}
def test_load_plugins():
"""Test the loading of a built-in plugin."""
conf = {
'plugins': ['health check'],
'health check': {
'interval': 1234
}
}
assert load(conf) == {
CoroTypes.Periodic: {
('health check', 'health_check'): hc_during
},
'state': {
'health check': {
}
},
'config': conf,
'timings': {
('health check', 'health_check'): deque([], maxlen=1)
}
}
def test_wrapper_calls_function():
"""Ensure the wrapper calls coroutines."""
flag = False
async def test_coro(arg1, arg2):
assert arg1 == 'arg1'
assert arg2 == 'arg2'
nonlocal flag
flag = True
coro = _wrapper(
test_coro,
'arg1',
'arg2'
)
asyncio.run(coro)
assert flag
def test_wrapper_logging(caplog):
"""Ensure the wrapper logs each coroutine call."""
async def test_coro(*_):
pass
coro = _wrapper(
test_coro,
None,
None
)
with caplog.at_level(logging.DEBUG, logger=CYLC_LOG):
asyncio.run(coro)
assert len(caplog.record_tuples) == 2
(
(run_log, run_level, run_msg),
(end_log, end_level, end_msg)
) = caplog.record_tuples
# we should have two messages, one sent before and one after
# the function
assert 'run' in run_msg
assert 'end' in end_msg
# both should contain the name of the function
assert 'test_coro' in run_msg
assert 'test_coro' in end_msg
# and should be sent to the cylc logger at the debug level
assert run_log == end_log == CYLC_LOG
assert run_level == end_level == logging.DEBUG
def test_wrapper_catches_exceptions(caplog):
"""Ensure the wrapper catches Exception instances and logs them."""
async def test_coro(*_):
raise Exception('foo')
coro = _wrapper(
test_coro,
None,
None
)
with caplog.at_level(logging.DEBUG, logger=CYLC_LOG):
asyncio.run(coro)
assert len(caplog.record_tuples) == 4
run, error, traceback, completed = caplog.record_tuples
assert 'run' in run[2]
assert error[1] == logging.ERROR
assert traceback[1] == logging.ERROR
assert 'foo' in traceback[2]
assert completed[1] == logging.DEBUG
def METHOD_NAME():
"""Ensure the wrapper does not catch CylcError instances."""
async def test_coro(*_):
raise CylcError('foo')
coro = _wrapper(
test_coro,
None,
None
)
with pytest.raises(MainLoopPluginException):
asyncio.run(coro)
@pytest.fixture
def basic_plugins():
calls = []
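# capture() stands in for a plugin coroutine: it records the (scheduler, state)
# arguments it is called with so the tests can assert on them.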
def capture(*args):
nonlocal calls
calls.append(args)
plugins = {
'config': {
'periodic plugin': {
'interval': 10
}
},
'timings': {
('periodic plugin', 'periodic_coro'): [],
('startup plugin', 'startup_coro'): [],
},
'state': {
'periodic plugin': {
'a': 1
},
'startup plugin': {
'b': 2
}
},
CoroTypes.Periodic: {
('periodic plugin', 'periodic_coro'): capture
},
CoroTypes.StartUp: {
('startup plugin', 'startup_coro'): capture
}
}
return (plugins, calls, capture)
def test_get_runners_startup(basic_plugins):
"""IT should return runners for startup functions."""
plugins, calls, capture = basic_plugins
runners = get_runners(
plugins,
CoroTypes.StartUp,
'scheduler object'
)
assert len(runners) == 1
asyncio.run(runners[0])
assert calls == [('scheduler object', {'b': 2})]
def test_get_runners_periodic(basic_plugins):
"""It should return runners for periodic functions."""
plugins, calls, capture = basic_plugins
runners = get_runners(
plugins,
CoroTypes.Periodic,
'scheduler object'
)
assert len(runners) == 1
asyncio.run(runners[0])
assert calls == [('scheduler object', {'a': 1})]
def test_get_runners_periodic_debounce(basic_plugins):
"""It should run periodic functions based on the configured interval."""
plugins, calls, capture = basic_plugins
# we should start with a blank timings object
assert len(plugins['timings'][('periodic plugin', 'periodic_coro')]) == 0
runners = get_runners(
plugins,
CoroTypes.Periodic,
'scheduler object'
)
assert len(runners) == 1
asyncio.run(runners[0])
assert calls == [('scheduler object', {'a': 1})]
# the timings object should now contain the previous run
assert len(plugins['timings'][('periodic plugin', 'periodic_coro')]) == 1
# the next run should be skipped because of the interval
runners = get_runners(
plugins,
CoroTypes.Periodic,
'scheduler object'
)
assert len(runners) == 0
# if we remove the interval the next run will not get skipped
plugins['config']['periodic plugin']['interval'] = 0
runners = get_runners(
plugins,
CoroTypes.Periodic,
'scheduler object'
)
assert len(runners) == 1
assert calls[-1] == ('scheduler object', {'a': 1})
# Clean up coroutines we didn't run
for coro in runners:
coro.close()
def test_state(basic_plugins):
"""It should pass the same state object with each function call.
* Run the same plugin function twice.
* Ensure that the state object received by each call is the same object.
"""
plugins, calls, capture = basic_plugins
runners = get_runners(
plugins,
CoroTypes.StartUp,
'scheduler object'
)
assert len(runners) == 1
asyncio.run(*runners)
assert len(calls) == 1
runners = get_runners(
plugins,
CoroTypes.StartUp,
'scheduler object'
)
assert len(runners) == 1
asyncio.run(*runners)
assert len(calls) == 2
(_, state1), (_, state2) = calls
assert id(state1) == id(state2)
| null |
5,176 |
# Copyright 2020-2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common import dtype as mstype
from mindspore.common.api import jit
from mindspore.ops.operations import _grad_ops as G
from mindspore.ops import operations as P
context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
class SliceGrad(nn.Cell):
def __init__(self):
super(SliceGrad, self).__init__()
self.slice_grad = G.SliceGrad()
@jit
def construct(self, dy, x):
return self.slice_grad(dy, x, (0, 1, 0), (2, 1, 3))
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_slice_grad():
x = Tensor(np.array([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]], [[5, 5, 5], [6, 6, 6]]]), mstype.float32)
dy = Tensor(np.array([[[3., 1., 2.]], [[4., 1., 4.]]]), mstype.float32)
slice_grad = SliceGrad()
output = slice_grad(dy, x)
expect = [[[0., 0., 0.],
[3., 1., 2.]],
[[0., 0., 0.],
[4., 1., 4.]],
[[0., 0., 0.],
[0., 0., 0.]]]
print("output:\n", output)
assert (output.asnumpy() == expect).all()
class SliceGrad2(nn.Cell):
def __init__(self):
super(SliceGrad2, self).__init__()
self.slice_grad = G.SliceGrad()
def construct(self, dy, x):
return self.slice_grad(dy, x, (0, 1, 0), (2, 2, 2))
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_slice_grad2():
dy = Tensor(np.array([[[2., 3.], [4., 5.]], [[8., 9.], [10., 11.]]]), mstype.float32)
x = Tensor(np.arange(2 * 3 * 2).reshape(2, 3, 2), mstype.float32)
grad = SliceGrad2()
output = grad(dy, x)
print("output:\n", output)
expect = [[[0., 0.], [2., 3.], [4., 5.]],
[[0., 0.], [8., 9.], [10., 11.]]]
assert (output.asnumpy() == expect).all()
def test_slice_grad3():
x = Tensor(np.array([[[1.0, 3.5, 5.8], [2.5, 4, 1]], [[3.5, 15.3, 3.1], [2.2, 4.0, 1.1]],
[[43.4, 1.1, 12.1], [2.4, 6.5, 6.3]]]), mstype.float64)
dy = Tensor(np.array([[[3.1, 1.1, 2.2]], [[4.4, 1.2, 4.2]]]), mstype.float64)
slice_grad = SliceGrad()
output = slice_grad(dy, x)
expect = [[[0., 0., 0.],
[3.1, 1.1, 2.2]],
[[0., 0., 0.],
[4.4, 1.2, 4.2]],
[[0., 0., 0.],
[0., 0., 0.]]]
print("output:\n", output)
assert (output.asnumpy() == expect).all()
class SliceGrad8D(nn.Cell):
def __init__(self):
super(SliceGrad8D, self).__init__()
self.slice_grad = G.SliceGrad()
@jit
def construct(self, dy, x):
return self.slice_grad(dy, x, (1, 0, 2, 0, 0, 0, 0, 0), (1, 2, 1, 1, 1, 1, 1, 2))
def METHOD_NAME():
"""
Feature: SliceGrad
Description: test SliceGrad with 8D input
Expectation: the output is as expected
"""
x = Tensor(np.array([[[[[[[[6, 5]]]]], [[[[[4, 1]]]]], [[[[[7, 2]]]]]],
[[[[[[1, 5]]]]], [[[[[4, 8]]]]], [[[[[7, 5]]]]]]],
[[[[[[[4, 8]]]]], [[[[[1, 8]]]]], [[[[[0, 0]]]]]],
[[[[[[4, 8]]]]], [[[[[3, 3]]]]], [[[[[3, 9]]]]]]]]), mstype.int32)
dy = Tensor(np.arange(1 * 2 * 1 * 1 * 1 * 1 * 1 * 2).reshape(1, 2, 1, 1, 1, 1, 1, 2), mstype.int32)
slice_grad = SliceGrad8D()
output = slice_grad(dy, x)
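# The expected gradient is dy scattered into a zero tensor of x's shape at the
# region selected by begin=(1, 0, 2, 0, ...) and size=(1, 2, 1, ..., 2).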
expect = np.zeros((2, 2, 3, 1, 1, 1, 1, 2))
expect[1:2, 0:2, 2:3, 0:1, 0:1, 0:1, 0:1, 0:2] = dy
print("output:\n", output)
assert (output.asnumpy() == expect).all()
class StridedSliceGrad(nn.Cell):
def __init__(self, x, begin, end, stride):
super(StridedSliceGrad, self).__init__()
self.shape_op = P.Shape()
self.shapex = self.shape_op(x)
self.begin = begin
self.end = end
self.stride = stride
self.stride_slice = G.StridedSliceGrad()
def construct(self, dy):
return self.stride_slice(dy, self.shapex, self.begin, self.end, self.stride)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_strided_slice_grad_bool_type():
x = Tensor([[[False, False, True], [False, True, False]], [[False, True, False], [True, False, False]],
[[False, True, True], [True, False, True]]], mstype.bool_)
dy = Tensor([False, True, False], mstype.bool_)
begin = (1, 0, 0)
end = (2, 1, 3)
stride = (1, 1, 1)
slice_op = StridedSliceGrad(x, begin, end, stride)
output = slice_op(dy)
expected_output = np.array([[[False, False, False], [False, False, False]],
[[False, True, False], [False, False, False]],
[[False, False, False], [False, False, False]]])
assert (output.asnumpy() == expected_output).all()
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_strided_slice_grad_float32_type():
x = Tensor([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]], [[5, 5, 5], [6, 6, 6]]], mstype.float32)
dy = Tensor([3, 3, 3], mstype.float32)
begin = (1, 0, 0)
end = (2, 1, 3)
stride = (1, 1, 1)
slice_op = StridedSliceGrad(x, begin, end, stride)
output = slice_op(dy)
expected_output = np.array([[[0, 0, 0], [0, 0, 0]], [[3, 3, 3], [0, 0, 0]], [[0, 0, 0], [0, 0, 0]]])
assert (output.asnumpy() == expected_output).all()
| null |
5,177 |
# Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""test_image_summary"""
import logging
import os
import numpy as np
import mindspore.nn as nn
from mindspore import context
from mindspore.train import Model, Callback
from mindspore import Tensor
from mindspore.nn.optim import Momentum
from mindspore.train.summary.summary_record import SummaryRecord, _cache_summary_tensor_data
from tests.security_utils import security_off_wrap
from .....dataset_mock import MindData
CUR_DIR = os.getcwd()
SUMMARY_DIR = CUR_DIR + "/test_temp_summary_event_file/"
log = logging.getLogger("test")
log.setLevel(level=logging.ERROR)
def make_image_tensor(shape, dtype=float):
""" make_image_tensor """
number = np.prod(shape)
x = (np.arange(number, dtype=dtype)).reshape(shape)
return x
def METHOD_NAME(step):
""" get_test_data """
test_data_list = []
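# The "[:Image]" suffix in the tag tells the summary record to treat the cached
# tensor as image data.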
tag1 = "x1[:Image]"
tag2 = "x2[:Image]"
np1 = make_image_tensor([2, 3, 8, 8])
np2 = make_image_tensor([step, 3, 8, 8])
dict1 = {}
dict1["name"] = tag1
dict1["data"] = Tensor(np1)
dict2 = {}
dict2["name"] = tag2
dict2["data"] = Tensor(np2)
test_data_list.append(dict1)
test_data_list.append(dict2)
return test_data_list
# Test: call method on parse graph code
@security_off_wrap
def test_image_summary_sample():
""" test_image_summary_sample """
with SummaryRecord(SUMMARY_DIR, file_suffix="_MS_IMAGE") as test_writer:
for i in range(1, 5):
test_data = METHOD_NAME(i)
_cache_summary_tensor_data(test_data)
test_writer.record(i)
test_writer.flush()
class Net(nn.Cell):
""" Net definition """
def __init__(self):
super(Net, self).__init__()
self.conv = nn.Conv2d(3, 64, 3, has_bias=False, weight_init='normal',
pad_mode='valid')
self.bn = nn.BatchNorm2d(64)
self.relu = nn.ReLU()
self.flatten = nn.Flatten()
self.fc = nn.Dense(64 * 222 * 222, 3) # padding=0
def construct(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
x = self.flatten(x)
out = self.fc(x)
return out
class LossNet(nn.Cell):
""" LossNet definition """
def __init__(self):
super(LossNet, self).__init__()
self.conv = nn.Conv2d(3, 64, 3, has_bias=False, weight_init='normal',
pad_mode='valid')
self.bn = nn.BatchNorm2d(64)
self.relu = nn.ReLU()
self.flatten = nn.Flatten()
self.fc = nn.Dense(64 * 222 * 222, 3) # padding=0
self.loss = nn.SoftmaxCrossEntropyWithLogits()
def construct(self, x, y):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
x = self.flatten(x)
x = self.fc(x)
out = self.loss(x, y)
return out
def get_model():
""" get_model """
net = Net()
loss = nn.SoftmaxCrossEntropyWithLogits()
optim = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
context.set_context(mode=context.GRAPH_MODE)
model = Model(net, loss_fn=loss, optimizer=optim, metrics=None)
return model
def get_dataset():
""" get_dataset """
dataset_types = (np.float32, np.float32)
dataset_shapes = ((2, 3, 224, 224), (2, 3))
dataset = MindData(size=2, batch_size=2,
np_types=dataset_types,
output_shapes=dataset_shapes,
input_indexs=(0, 1))
return dataset
class ImageSummaryCallback(Callback):
"""Image summary callback."""
def __init__(self, summary_record):
self._summary_record = summary_record
def __enter__(self):
return self
def __exit__(self, *err):
self._summary_record.close()
def record(self, step, train_network=None):
"""record data."""
self._summary_record.record(step, train_network)
self._summary_record.flush()
@security_off_wrap
def test_image_summary_train():
""" test_image_summary_train """
dataset = get_dataset()
with SummaryRecord(SUMMARY_DIR, file_suffix="_MS_IMAGE") as test_writer:
model = get_model()
callback = ImageSummaryCallback(test_writer)
model.train(2, dataset, callbacks=[callback])
@security_off_wrap
def test_image_summary_data():
""" test_image_summary_data """
dataset = get_dataset()
test_data_list = []
i = 1
for next_element in dataset:
tag = "image_" + str(i) + "[:Image]"
dct = {}
dct["name"] = tag
dct["data"] = Tensor(next_element[0])
test_data_list.append(dct)
i += 1
with SummaryRecord(SUMMARY_DIR, file_suffix="_MS_IMAGE") as test_writer:
_cache_summary_tensor_data(test_data_list)
test_writer.record(1)
| null |
5,178 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2021 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
"""
Presenter for DNS path panel.
"""
import unittest
from unittest import mock
from mantidqtinterfaces.dns_powder_tof.data_structures.dns_observer import DNSObserver
from mantidqtinterfaces.dns_powder_tof.paths.path_model import DNSPathModel
from mantidqtinterfaces.dns_powder_tof.paths.path_presenter import DNSPathPresenter
from mantidqtinterfaces.dns_powder_tof.paths.path_view import DNSPathView
class DNSPathPresenterTest(unittest.TestCase):
# pylint: disable=protected-access, too-many-public-methods
@classmethod
def setUpClass(cls):
# cls.parent = mock.patch(DNSReductionGUI_presenter)
cls.view = mock.create_autospec(DNSPathView)
cls.model = mock.create_autospec(DNSPathModel)
# view signals
cls.view.sig_data_path_set = mock.Mock(return_value="dummypath")
cls.view.sig_clear_cache = mock.Mock()
cls.view.sig_file_dialog_requested = mock.Mock(return_value="data")
cls.view.sig_data_dir_editing_finished = mock.Mock()
cls.view.within_mantid = False
# view functions
cls.view.get_path.return_value = ""
cls.view.open_file_dialog.return_value = "C:/dummy/test.py"
# model functions
cls.model.get_start_path_for_dialog.return_value = "C:/dummy"
cls.model.get_user_and_proposal_number.return_value = ["Thomas", "p123456"]
# create presenter
cls.presenter = DNSPathPresenter(view=cls.view, model=cls.model)
def setUp(self):
self.model.get_user_and_proposal_number.reset_mock()
self.view.set_data_path.reset_mock()
self.view.set_prop_number.reset_mock()
self.view.set_user.reset_mock()
self.view.get_path.reset_mock()
self.view.set_path.reset_mock()
def test__init__(self):
self.assertIsInstance(self.presenter, DNSObserver)
self.view.set_data_path.assert_not_called()
self.model.get_current_directory.assert_called()
def test_data_path_editing_finished(self):
self.view.get_state.return_value = {"auto_set_other_dir": True}
self.presenter._data_path_set(dir_name="C:/test")
self.assertEqual(self.view.set_path.call_count, 4)
def test_data_path_set(self):
self.view.get_state.return_value = {"auto_set_other_dir": True}
self.presenter._data_path_set(dir_name="C:/test")
self.assertEqual(self.view.set_path.call_count, 4)
self.view.set_path.assert_called_with("export_dir", "C:/test/export")
self.view.set_path.reset_mock()
self.view.get_state.return_value = {"auto_set_other_dir": False}
self.presenter._data_path_set(dir_name="C:/test")
self.view.set_path.assert_not_called()
def METHOD_NAME(self):
self.presenter._set_user_prop_from_datafile(dir_name="C:/test")
self.model.get_user_and_proposal_number.assert_called_once()
self.view.set_prop_number.assert_called_once()
self.view.set_user.assert_called_once()
self.view.show_status_message.assert_not_called()
def test_clear_cache(self):
self.presenter._clear_cache()
self.model.clear_cache.assert_not_called()
self.presenter.own_dict = {"data_dir": "123"}
self.presenter._clear_cache()
self.model.clear_cache.assert_called_once()
def test_filedialog_requested(self):
self.presenter._file_dialog_requested(sender="data")
self.view.get_path.assert_called_once_with("data_dir")
self.model.get_start_path_for_dialog.assert_called_once()
self.view.open_file_dialog.assert_called_once()
self.view.set_data_path.assert_called_once()
self.view.set_path.assert_not_called()
if __name__ == "__main__":
unittest.main()
| null |
5,179 |
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import mindspore as ms
import mindspore.nn as nn
from mindspore import Tensor
from mindspore import context
from mindspore.common.api import _cell_graph_executor
from mindspore.context import set_auto_parallel_context
from mindspore.ops import composite as C
from mindspore.ops import operations as P
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
from tests.ut.python.ops.test_math_ops import VirtualLoss
def setup_function():
context.set_auto_parallel_context(dataset_strategy="full_batch")
grad_all = C.GradOperation(get_all=True)
class NetWithLoss(nn.Cell):
def __init__(self, network):
super(NetWithLoss, self).__init__()
self.loss = VirtualLoss()
self.network = network
def construct(self, x):
predict = self.network(x)
return self.loss(predict)
class GradWrap(nn.Cell):
def __init__(self, network):
super(GradWrap, self).__init__()
self.network = network
def construct(self, x):
return grad_all(self.network)(x)
def METHOD_NAME(net, x):
net.set_train()
_cell_graph_executor.compile(net, x)
class Net(nn.Cell):
def __init__(self, strategy1, strategy2, strategy3, strategy4, strategy5):
super().__init__()
self.query_w = Parameter(initializer(
"normal", [8, 16], ms.float32), name='query')
self.query = P.MatMul().shard(strategy1)
self.key_w = Parameter(initializer(
"normal", [8, 16], ms.float32), name='key')
self.key = P.MatMul().shard(strategy2)
self.value_w = Parameter(initializer(
"normal", [8, 16], ms.float32), name='value')
self.value = P.MatMul().shard(strategy3)
self.score = P.MatMul().shard(strategy4)
self.context = P.MatMul().shard(strategy5)
self.transpose1 = P.Transpose()
self.transpose2 = P.Transpose()
self.relu = P.ReLU()
def construct(self, x):
q = self.query(x, self.query_w)
k = self.key(x, self.key_w)
v = self.value(x, self.value_w)
k = self.transpose1(k, (1, 0))
s = self.score(q, k)
v = self.transpose2(v, (1, 0))
c = self.context(v, s)
out = self.relu(c)
return out
def test_self_attention_standalone():
context.reset_auto_parallel_context()
set_auto_parallel_context(device_num=8, global_rank=0)
context.set_auto_parallel_context(parallel_mode="stand_alone")
net = GradWrap(NetWithLoss(
Net(None, None, None, None, None)))
x = Tensor(np.ones([32, 8]), dtype=ms.float32)
METHOD_NAME(net, x)
def test_self_attention_semi():
set_auto_parallel_context(device_num=8, global_rank=0)
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
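# Each shard strategy lists, per MatMul input, how many slices to cut along each
# dimension across the 8 devices.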
strategy1 = ((2, 2), (2, 2))
strategy2 = ((2, 2), (2, 2))
strategy3 = ((2, 2), (2, 2))
strategy4 = ((2, 4), (4, 1))
strategy5 = ((2, 1), (1, 4))
net = GradWrap(NetWithLoss(
Net(strategy1, strategy2, strategy3, strategy4, strategy5)))
x = Tensor(np.ones([32, 8]), dtype=ms.float32)
METHOD_NAME(net, x)
def test_self_attention_dp():
set_auto_parallel_context(device_num=8, global_rank=0)
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
strategy1 = ((8, 1), (1, 1))
strategy2 = ((8, 1), (1, 1))
strategy3 = ((8, 1), (1, 1))
strategy4 = ((8, 1), (1, 1))
strategy5 = ((8, 1), (1, 1))
net = GradWrap(NetWithLoss(
Net(strategy1, strategy2, strategy3, strategy4, strategy5)))
x = Tensor(np.ones([32, 8]), dtype=ms.float32)
METHOD_NAME(net, x)
def test_self_attention_auto():
set_auto_parallel_context(device_num=8, global_rank=0)
context.set_auto_parallel_context(parallel_mode="auto_parallel")
net = GradWrap(NetWithLoss(
Net(None, None, None, None, None)))
x = Tensor(np.ones([32, 8]), dtype=ms.float32)
METHOD_NAME(net, x)
| null |
5,180 |
import numpy as np
import torch
from gymnasium import spaces
from torch import nn as nn
from torch.nn import functional as F
def loss_function_factory(loss_function, **kwargs):
if loss_function == "l2":
return torch.nn.MSELoss(**kwargs)
elif loss_function == "l1":
return torch.nn.L1Loss(**kwargs)
elif loss_function == "smooth_l1":
return torch.nn.SmoothL1Loss(**kwargs)
elif loss_function == "bce":
return torch.nn.BCELoss(**kwargs)
else:
raise ValueError("Unknown loss function : {}".format(loss_function))
def METHOD_NAME(params, optimizer_type="ADAM", **kwargs):
if optimizer_type == "ADAM":
return torch.optim.Adam(params=params, **kwargs)
elif optimizer_type == "RMS_PROP":
return torch.optim.RMSprop(params=params, **kwargs)
else:
raise ValueError("Unknown optimizer type: {}".format(optimizer_type))
def model_factory_from_env(env, **kwargs):
"""Returns a torch module after setting up input/output dimensions according to an env.
Parameters
----------
env: gym.Env
Environment
**kwargs: Dict
Parameters to be updated, used to call :func:`~rlberry.agents.torch.utils.training.model_factory`.
"""
kwargs = size_model_config(env, **kwargs)
return model_factory(**kwargs)
def model_factory(type="MultiLayerPerceptron", **kwargs) -> nn.Module:
"""Build a neural net of a given type.
Parameters
----------
type: {"MultiLayerPerceptron",
"ConvolutionalNetwork",
"DuelingNetwork",
"Table"}, default = "MultiLayerPerceptron"
Type of neural network.
**kwargs: dict
Parameters that vary according to each neural net type, see
* :class:`~rlberry.agents.torch.utils.models.MultiLayerPerceptron`
* :class:`~rlberry.agents.torch.utils.models.ConvolutionalNetwork`
* :class:`~rlberry.agents.torch.utils.models.DuelingNetwork`
* :class:`~rlberry.agents.torch.utils.models.Table`
"""
from rlberry.agents.torch.utils.models import (
MultiLayerPerceptron,
DuelingNetwork,
ConvolutionalNetwork,
Table,
)
if type == "MultiLayerPerceptron":
return MultiLayerPerceptron(**kwargs)
elif type == "DuelingNetwork":
return DuelingNetwork(**kwargs)
elif type == "ConvolutionalNetwork":
return ConvolutionalNetwork(**kwargs)
elif type == "Table":
return Table(**kwargs)
else:
raise ValueError("Unknown model type")
def size_model_config(env, **model_config):
"""
Setup input/output dimensions for the configuration of
a model depending on the environment observation/action spaces.
Parameters
----------
env : gym.Env
An environment.
model_config : dict
Parameters to be updated, used to call :func:`~rlberry.agents.torch.utils.training.model_factory`.
If "out_size" is not given in model_config, assumes
that the output dimension of the neural net is equal to the number
of actions in the environment.
"""
if isinstance(env.observation_space, spaces.Box):
obs_shape = env.observation_space.shape
elif isinstance(env.observation_space, spaces.Tuple):
obs_shape = env.observation_space.spaces[0].shape
elif isinstance(env.observation_space, spaces.Discrete):
return model_config
# Assume CHW observation space
if "type" in model_config and model_config["type"] == "ConvolutionalNetwork":
if "transpose_obs" in model_config and not model_config["transpose_obs"]:
# Assume CHW observation space
if "in_channels" not in model_config:
model_config["in_channels"] = int(obs_shape[0])
if "in_height" not in model_config:
model_config["in_height"] = int(obs_shape[1])
if "in_width" not in model_config:
model_config["in_width"] = int(obs_shape[2])
else:
# Assume WHC observation space to transpose
if "in_channels" not in model_config:
model_config["in_channels"] = int(obs_shape[2])
if "in_height" not in model_config:
model_config["in_height"] = int(obs_shape[1])
if "in_width" not in model_config:
model_config["in_width"] = int(obs_shape[0])
else:
model_config["in_size"] = int(np.prod(obs_shape))
if "out_size" not in model_config:
if isinstance(env.action_space, spaces.Discrete):
model_config["out_size"] = env.action_space.n
elif isinstance(env.action_space, spaces.Tuple):
model_config["out_size"] = env.action_space.spaces[0].n
return model_config
def activation_factory(activation_type):
if activation_type == "RELU":
return F.relu
elif activation_type == "TANH":
return torch.tanh
elif activation_type == "ELU":
return nn.ELU()
else:
raise ValueError("Unknown activation_type: {}".format(activation_type))
def trainable_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
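# Illustrative usage sketch (not part of the original module): combining the
# factories above on a plain torch module. nn.Linear(4, 2) has 4 * 2 weights
# plus 2 biases, i.e. 10 trainable parameters.
if __name__ == "__main__":
    demo_model = nn.Linear(4, 2)
    demo_loss = loss_function_factory("smooth_l1")
    demo_activation = activation_factory("RELU")
    print(trainable_parameters(demo_model), demo_loss, demo_activation)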
| null |
5,181 |
"""Pinproc object for tests."""
class FakePinProcModule:
DriverCount = 256
EventTypeAccelerometerIRQ = 11
EventTypeAccelerometerX = 8
EventTypeAccelerometerY = 9
EventTypeAccelerometerZ = 10
EventTypeBurstSwitchClosed = 7
EventTypeBurstSwitchOpen = 6
EventTypeDMDFrameDisplayed = 5
EventTypeSwitchClosedDebounced = 1
EventTypeSwitchClosedNondebounced = 3
EventTypeSwitchOpenDebounced = 2
EventTypeSwitchOpenNondebounced = 4
MachineTypeCustom = 1
MachineTypeInvalid = 0
MachineTypePDB = 7
MachineTypeSternSAM = 6
MachineTypeSternWhitestar = 5
MachineTypeWPC = 3
MachineTypeWPC95 = 4
MachineTypeWPCAlphanumeric = 2
SwitchCount = 255
SwitchNeverDebounceFirst = 192
SwitchNeverDebounceLast = 255
def __init__(self):
self.pinproc = FakePinProc()
def driver_state_pulse(self, driver, milliseconds):
driver["state"] = 1
driver["timeslots"] = 0
driver["waitForFirstTimeSlot"] = False
driver["outputDriveTime"] = milliseconds
driver["patterOnTime"] = 0
driver["patterOffTime"] = 0
driver["patterEnable"] = False
driver["futureEnable"] = False
return driver
def driver_state_disable(self, driver):
driver["state"] = 0
driver["timeslots"] = 0
driver["waitForFirstTimeSlot"] = False
driver["outputDriveTime"] = 0
driver["patterOnTime"] = 0
driver["patterOffTime"] = 0
driver["patterEnable"] = False
driver["futureEnable"] = False
return driver
def driver_state_patter(self, driver, millisecondsOn, millisecondsOff, originalOnTime, now):
driver["state"] = True
driver["timeslots"] = 0
driver["waitForFirstTimeSlot"] = not now
driver["outputDriveTime"] = originalOnTime
driver["patterOnTime"] = millisecondsOn
driver["patterOffTime"] = millisecondsOff
driver["patterEnable"] = True
driver["futureEnable"] = False
return driver
def driver_pulsed_patter(self, driver, millisecondsOn, millisecondsOff, milliseconds_overall_patter_time, now):
driver["state"] = True
driver["timeslots"] = 0
driver["waitForFirstTimeSlot"] = not now
driver["outputDriveTime"] = milliseconds_overall_patter_time
driver["patterOnTime"] = millisecondsOn
driver["patterOffTime"] = millisecondsOff
driver["patterEnable"] = True
driver["futureEnable"] = False
return driver
def normalize_machine_type(self, type):
return 7
def PinPROC(self, machine_type):
return self.pinproc
class FakePinProc:
"""Behaves like pypinproc."""
def __init__(self):
self._memory = {
0x00: { # manager
0x00: 0, # chip id
0x01: 0x00020006, # version
0x03: 0x01FF, # dip switches
},
0x02: { # switch controller
0x1000: 0xA3, # SW-16 Address 0 Reg 0
0x1001: 0x00, # SW-16 Address 0 Reg 1
0x1040: 0xA3, # SW-16 Address 1 Reg 0
0x1041: 0x13, # SW-16 Address 1 Reg 1
0x1080: 0xA4, # SW-16 Address 2 Reg 0
0x1081: 0x00, # SW-16 Address 2 Reg 1
}
}
self._switches = [0, 1] + [0] * 100
self._events = []
def read_data(self, module, address):
if module not in self._memory or address not in self._memory[module]:
return 0
return self._memory[module][address]
def write_data(self, module, address, data):
if module not in self._memory:
self._memory[module] = {}
self._memory[module][address] = data
def switch_get_states(self):
return self._switches
def flush(self):
pass
def switch_update_rule(self, *args):
return True
def driver_update_group_config(self, *args):
return True
def driver_update_global_config(self, *args):
return True
def driver_update_state(self, *args):
return True
def driver_pulse(self, *args):
return True
def driver_schedule(self, *args):
return True
def driver_patter(self, *args):
return True
def driver_disable(self, *args):
return True
def METHOD_NAME(self, *args):
self._events = []
return True
def watchdog_tickle(self):
pass
def close(self):
pass
def get_events(self):
events = self._events
self._events = []
return events
| null |
5,182 |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" test_staging """
import numpy as np
import pytest
import mindspore as ms
import mindspore.nn as nn
from mindspore import Tensor
from mindspore import context
from mindspore._c_expression import MetaTensor
from mindspore.common import dtype
from mindspore.common.api import jit
from mindspore.ops import functional as F
from mindspore.ops import operations as P
from ..ut_filter import non_graph_engine
def setup_module(module):
context.set_context(mode=context.PYNATIVE_MODE)
@jit
def tensor_add_func_inner(x, y):
""" tensor_add_func_inner """
z = F.tensor_add(x, y)
return z
@jit
def tensor_add_func(x, y):
""" tensor_add_func """
z = tensor_add_func_inner(x, y)
z = F.tensor_add(z, x)
return z
@jit
def scalar_add(x, y):
""" scalar_add """
return x + y
@jit
def scalar_add_if(x, y):
""" scalar_add_if """
if x > y:
return x + y + 10
return x + y + 20
@jit
def scalar_mul_while(x):
""" scalar_mul_while """
rv = x
while rv < 100:
rv = rv * rv
return rv
@jit(input_signature=(MetaTensor(dtype.float32, (1, 1, 3, 3)),
MetaTensor(dtype.float32, (1, 1, 3, 3))))
def tensor_add_test(x, y):
""" tensor_add_test """
z = F.tensor_add(x, y)
return z
class TensorAddMulNet(nn.Cell):
""" TensorAddMulNet definition """
def __init__(self):
super(TensorAddMulNet, self).__init__()
self.add = P.Add()
@jit
def METHOD_NAME(self, x, y):
z = self.add(x, y)
z = self.add(x, z)
return z
@jit
def add_stage1(self, x, y):
z = self.add(x, y)
z = self.add(x, z)
return z
def construct(self, x, y):
z = self.add(x, y) # PyNative mode
z = self.METHOD_NAME(x, z) # Graph mode
z = self.add(x, z) # PyNative mode
z = self.add_stage1(y, z) # Graph mode
return z
class TensorAddNet(nn.Cell):
""" TensorAddNet definition """
def __init__(self):
super(TensorAddNet, self).__init__()
self.add = P.Add()
@jit
def compute(self, x, y):
return self.add(x, y)
def construct(self, x, y):
z = self.compute(x, y)
return z
def test_control_func():
""" test_control_func """
res = scalar_add(3, 4)
assert res == 7
res = scalar_add_if(3, 4)
assert res == 27
res = scalar_mul_while(2)
assert res == 256
@non_graph_engine
def test_staging_call_func():
""" test_staging_call_func """
x = Tensor(np.ones([1, 1, 3, 3]).astype(np.float32))
y = Tensor(np.ones([1, 1, 3, 3]).astype(np.float32))
output = tensor_add_func(x, y)
assert (output.asnumpy() == (np.ones([1, 1, 3, 3]) * 3)).all()
@non_graph_engine
def test_class_method_staging():
""" test_class_method_staging """
x = Tensor(np.ones([1, 1, 3, 3]).astype(np.float32))
y = Tensor(np.ones([1, 1, 3, 3]).astype(np.float32))
net = TensorAddNet()
output = net.construct(x, y)
assert (output.asnumpy() == (np.ones([1, 1, 3, 3]) * 2)).all()
@non_graph_engine
def test_class_method_composite_staging():
""" test_class_method_composite_staging """
x = Tensor(np.ones([3, 3]).astype(np.float32))
y = Tensor(np.ones([3, 3]).astype(np.float32))
net = TensorAddMulNet()
output = net.construct(x, y)
assert (output.asnumpy() == (np.ones([3, 3]) * 7)).astype(np.float32).all()
@non_graph_engine
def test_input_signature():
""" test_input_signature """
x1 = Tensor(np.ones([1, 1, 3, 3], dtype=np.float32))
y1 = Tensor(np.ones([1, 1, 3, 3], dtype=np.float32))
output = tensor_add_test(x1, y1)
assert (output.asnumpy() == (np.ones([1, 1, 3, 3]) * 2)).all()
# test input type signature
x2 = Tensor(np.ones([1, 1, 3, 3], dtype=np.float64))
y2 = Tensor(np.ones([1, 1, 3, 3], dtype=np.float64))
with pytest.raises(ValueError):
tensor_add_test(x2, y2)
# test input shape signature
x3 = Tensor(np.ones([1, 1, 4, 4], dtype=np.float64))
y3 = Tensor(np.ones([1, 1, 4, 4], dtype=np.float64))
with pytest.raises(ValueError):
tensor_add_test(x3, y3)
def test_scalar_cast():
""" test_scalar_cast """
input_x = 8.5
input_t = ms.int64
@jit
def fn_cast(x, t):
output = F.scalar_cast(x, t)
return output
expect_value = 8
z = fn_cast(input_x, input_t)
assert z == expect_value
| null |
5,183 |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common.api import jit
from mindspore.common.parameter import Parameter
from mindspore.ops import operations as P
import mindspore as ms
class RLBufferAppend(nn.Cell):
    def __init__(self, capacity, shapes, types):
super(RLBufferAppend, self).__init__()
        self._capacity = capacity
self.count = Parameter(Tensor(0, ms.int32), name="count")
self.head = Parameter(Tensor(0, ms.int32), name="head")
self.buffer_append = P.BufferAppend(self._capacity, shapes, types)
@jit
def construct(self, buffer, exps):
return self.buffer_append(buffer, exps, self.count, self.head)
class RLBufferGet(nn.Cell):
    def __init__(self, capacity, shapes, types):
super(RLBufferGet, self).__init__()
        self._capacity = capacity
self.count = Parameter(Tensor(5, ms.int32), name="count")
self.head = Parameter(Tensor(0, ms.int32), name="head")
self.buffer_get = P.BufferGetItem(self._capacity, shapes, types)
@jit
def construct(self, buffer, index):
return self.buffer_get(buffer, self.count, self.head, index)
class RLBufferSample(nn.Cell):
    def __init__(self, capacity, batch_size, shapes, types):
super(RLBufferSample, self).__init__()
        self._capacity = capacity
self.count = Parameter(Tensor(5, ms.int32), name="count")
self.head = Parameter(Tensor(0, ms.int32), name="head")
self.buffer_sample = P.BufferSample(self._capacity, batch_size, shapes, types)
@jit
def construct(self, buffer):
return self.buffer_sample(buffer, self.count, self.head)
states = Tensor(np.arange(4*5).reshape(5, 4).astype(np.float32)/10.0)
actions = Tensor(np.arange(2*5).reshape(5, 2).astype(np.int32))
rewards = Tensor(np.ones((5, 1)).astype(np.int32))
states_ = Tensor(np.arange(4*5).reshape(5, 4).astype(np.float32))
b = [states, actions, rewards, states_]
s = Tensor(np.array([2, 2, 2, 2]), ms.float32)
a = Tensor(np.array([0, 0]), ms.int32)
r = Tensor(np.array([0]), ms.int32)
s_ = Tensor(np.array([3, 3, 3, 3]), ms.float32)
exp = [s, a, r, s_]
exp1 = [s_, a, r, s]
c = [Tensor(np.array([[6, 6, 6, 6], [6, 6, 6, 6]]), ms.float32),
Tensor(np.array([[6, 6], [6, 6]]), ms.int32),
Tensor(np.array([[6], [6]]), ms.int32),
Tensor(np.array([[6, 6, 6, 6], [6, 6, 6, 6]]), ms.float32)]
@ pytest.mark.level0
@ pytest.mark.platform_x86_cpu
@ pytest.mark.env_onecard
def test_BufferSample():
context.set_context(mode=context.PYNATIVE_MODE, device_target='CPU')
    buffer_sample = RLBufferSample(capacity=5, batch_size=3, shapes=[(4,), (2,), (1,), (4,)], types=[
ms.float32, ms.int32, ms.int32, ms.float32])
ss, aa, rr, ss_ = buffer_sample(b)
print(ss, aa, rr, ss_)
@ pytest.mark.level0
@ pytest.mark.platform_x86_cpu
@ pytest.mark.env_onecard
def test_BufferGet():
context.set_context(mode=context.PYNATIVE_MODE, device_target='CPU')
    buffer_get = RLBufferGet(capacity=5, shapes=[(4,), (2,), (1,), (4,)], types=[
ms.float32, ms.int32, ms.int32, ms.float32])
ss, aa, rr, ss_ = buffer_get(b, 1)
expect_s = [0.4, 0.5, 0.6, 0.7]
expect_a = [2, 3]
expect_r = [1]
expect_s_ = [4, 5, 6, 7]
np.testing.assert_almost_equal(ss.asnumpy(), expect_s)
np.testing.assert_almost_equal(aa.asnumpy(), expect_a)
np.testing.assert_almost_equal(rr.asnumpy(), expect_r)
np.testing.assert_almost_equal(ss_.asnumpy(), expect_s_)
@ pytest.mark.level0
@ pytest.mark.platform_x86_cpu
@ pytest.mark.env_onecard
def METHOD_NAME():
context.set_context(mode=context.PYNATIVE_MODE, device_target='CPU')
    buffer_append = RLBufferAppend(capacity=5, shapes=[(4,), (2,), (1,), (4,)], types=[
ms.float32, ms.int32, ms.int32, ms.float32])
buffer_append(b, exp)
buffer_append(b, exp)
buffer_append(b, exp)
buffer_append(b, exp)
buffer_append(b, exp)
buffer_append(b, exp1)
expect_s = [[3, 3, 3, 3], [2, 2, 2, 2], [2, 2, 2, 2], [2, 2, 2, 2], [2, 2, 2, 2]]
expect_a = [[0, 0], [0, 0], [0, 0], [0, 0], [0, 0]]
expect_r = [[0], [0], [0], [0], [0]]
expect_s_ = [[2, 2, 2, 2], [3, 3, 3, 3], [3, 3, 3, 3], [3, 3, 3, 3], [3, 3, 3, 3]]
np.testing.assert_almost_equal(b[0].asnumpy(), expect_s)
np.testing.assert_almost_equal(b[1].asnumpy(), expect_a)
np.testing.assert_almost_equal(b[2].asnumpy(), expect_r)
np.testing.assert_almost_equal(b[3].asnumpy(), expect_s_)
buffer_append(b, exp1)
buffer_append(b, c)
buffer_append(b, c)
expect_s2 = [[6, 6, 6, 6], [3, 3, 3, 3], [6, 6, 6, 6], [6, 6, 6, 6], [6, 6, 6, 6]]
expect_a2 = [[6, 6], [0, 0], [6, 6], [6, 6], [6, 6]]
expect_r2 = [[6], [0], [6], [6], [6]]
expect_s2_ = [[6, 6, 6, 6], [2, 2, 2, 2], [6, 6, 6, 6], [6, 6, 6, 6], [6, 6, 6, 6]]
np.testing.assert_almost_equal(b[0].asnumpy(), expect_s2)
np.testing.assert_almost_equal(b[1].asnumpy(), expect_a2)
np.testing.assert_almost_equal(b[2].asnumpy(), expect_r2)
np.testing.assert_almost_equal(b[3].asnumpy(), expect_s2_)
| null |
5,184 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# pylint: disable=invalid-name
from .l2q import *
from mantid.simpleapi import *
from mantid.api import WorkspaceGroup
def combineDataMulti(
wksp_list,
output_wksp,
beg_overlap,
end_overlap,
Qmin,
Qmax,
binning,
scale_high=1,
scale_factor=-1.0,
which_period=1,
keep=0,
scale_right=True,
):
"""
Function stitches multiple workspaces together. Workspaces should have an X-axis in mod Q, and the Spectrum axis as I/I0
wksp_list: A list of workspaces to stitch together
    output_wksp: output workspace name
beg_overlap: The beginning of the overlap region. List argument, each entry matches the entry in the wksp_list
end_overlap: The end of the overlap region. List argument, each entry matches the entry in the wksp_list
Qmin: Q minimum of the final output workspace
Qmax: Q maximum of the input workspace
which_period: Which period to use if multiperiod workspaces are provided.
keep=1: keep individual workspaces in Mantid, otherwise delete wksp_list
scale_right: Scales the rhs workspace as part of stitching, if False scales the lhs workspace.
"""
# check if overlaps have correct number of entries
defaultoverlaps = False
if not isinstance(beg_overlap, list):
beg_overlap = [beg_overlap]
if not isinstance(end_overlap, list):
end_overlap = [end_overlap]
if len(wksp_list) != len(beg_overlap):
print("Using default values!")
defaultoverlaps = True
# copy first workspace into temporary wksp 'currentSum'
currentSum = CloneWorkspace(InputWorkspace=wksp_list[0])
print("Length: ", len(wksp_list), wksp_list)
for i in range(0, len(wksp_list) - 1):
w1 = currentSum
w2 = METHOD_NAME(wksp_list[i + 1])
        # TODO: distinguishing between a group and an individual workspace is unnecessary for an algorithm.
# But custom group behavior WILL be required.
if defaultoverlaps:
overlapLow = w2.readX(0)[0]
overlapHigh = 0.5 * max(w1.readX(0))
else:
overlapLow = beg_overlap[i + 1]
overlapHigh = end_overlap[i]
print("Iteration", i)
currentSum, scale_factor = stitch2(
currentSum,
mtd[wksp_list[i + 1]],
currentSum.name(),
overlapLow,
overlapHigh,
Qmin,
Qmax,
binning,
scale_high,
scale_right=scale_right,
)
RenameWorkspace(InputWorkspace=currentSum.name(), OutputWorkspace=output_wksp)
# Remove any existing workspaces from the workspace list.
if not keep:
names = mtd.getObjectNames()
for ws in wksp_list:
candidate = ws
if candidate in names:
DeleteWorkspace(candidate)
return mtd[output_wksp]
def stitch2(ws1, ws2, output_ws_name, begoverlap, endoverlap, Qmin, Qmax, binning, scalehigh=True, scalefactor=-1.0, scale_right=True):
"""
Function stitches two workspaces together and returns a stitched workspace along with the scale factor
ws1: First workspace to stitch
ws2: Second workspace to stitch
    output_ws_name: The name to give the output workspace
begoverlap: The beginning of the overlap region
endoverlap: The end of the overlap region
Qmin: Final minimum Q in the Q range
Qmax: Final maximum Q in the Q range
binning: Binning stride to use
scalehigh: if True, scale ws2, otherwise scale ws1
scalefactor: Use the manual scaling factor provided if > 0
scale_right: Scales the rhs workspace as part of stitching, if False scales the lhs workspace.
"""
if scalefactor > 0.0:
manual_scalefactor = True
else:
manual_scalefactor = False
scalefactor = 1.0
# Internally use the Stitch1D algorithm.
outputs = Stitch1D(
LHSWorkspace=ws1,
RHSWorkspace=ws2,
OutputWorkspace=output_ws_name,
StartOverlap=begoverlap,
EndOverlap=endoverlap,
UseManualScaleFactor=manual_scalefactor,
ManualScaleFactor=scalefactor,
Params="%f,%f,%f" % (Qmin, binning, Qmax),
ScaleRHSWorkspace=scale_right,
)
return outputs
def combine2(wksp1, wksp2, outputwksp, begoverlap, endoverlap, Qmin, Qmax, binning, scalehigh=True, scalefactor=-1.0, scale_right=True):
"""
Function stitches two workspaces together and returns a stitched workspace name along with the scale factor
wksp1: First workspace name to stitch
wksp2: Second workspace name to stitch
    outputwksp: The name to give the output workspace
begoverlap: The beginning of the overlap region
endoverlap: The end of the overlap region
Qmin: Final minimum Q in the Q range
Qmax: Final maximum Q in the Q range
binning: Binning stride to use
scalehigh: if True, scale ws2, otherwise scale ws1
scalefactor: Use the manual scaling factor provided if > 0
scale_right: Scales the rhs workspace as part of stitching, if False scales the lhs workspace.
"""
if scalefactor > 0.0:
manual_scalefactor = True
else:
manual_scalefactor = False
scalefactor = 1.0
# Internally use the Stitch1D algorithm.
outputs = Stitch1D(
LHSWorkspace=mtd[wksp1],
RHSWorkspace=mtd[wksp2],
OutputWorkspace=outputwksp,
StartOverlap=begoverlap,
EndOverlap=endoverlap,
UseManualScaleFactor=manual_scalefactor,
ManualScaleFactor=scalefactor,
Params="%f,%f,%f" % (Qmin, binning, Qmax),
ScaleRHSWorkspace=scale_right,
)
outscalefactor = outputs[1]
return (outputwksp, outscalefactor)
def METHOD_NAME(wksp):
"""
Get the workspace if it is not a group workspace. If it is a group workspace, get the first period.
"""
if isinstance(mtd[wksp], WorkspaceGroup):
wout = mtd[wksp][0]
else:
wout = mtd[wksp]
return wout
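# Illustrative sketch (hypothetical workspace names, not from the original source):
# stitching two reduced runs into one output workspace might look like
#   combineDataMulti(["run1_IvsQ", "run2_IvsQ"], "run1_2_stitched",
#                    beg_overlap=[0.02, 0.03], end_overlap=[0.06, 0.08],
#                    Qmin=0.01, Qmax=0.3, binning=-0.02)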
| null |
5,185 |
from sympy.core.singleton import S
from sympy.printing.tableform import TableForm
from sympy.printing.latex import latex
from sympy.abc import x
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.trigonometric import sin
from sympy.testing.pytest import raises
from textwrap import dedent
def test_TableForm():
s = str(TableForm([["a", "b"], ["c", "d"], ["e", 0]],
headings="automatic"))
assert s == (
' | 1 2\n'
'-------\n'
'1 | a b\n'
'2 | c d\n'
'3 | e '
)
s = str(TableForm([["a", "b"], ["c", "d"], ["e", 0]],
headings="automatic", wipe_zeros=False))
assert s == dedent('''\
| 1 2
-------
1 | a b
2 | c d
3 | e 0''')
s = str(TableForm([[x**2, "b"], ["c", x**2], ["e", "f"]],
headings=("automatic", None)))
assert s == (
'1 | x**2 b \n'
'2 | c x**2\n'
'3 | e f '
)
s = str(TableForm([["a", "b"], ["c", "d"], ["e", "f"]],
headings=(None, "automatic")))
assert s == dedent('''\
1 2
---
a b
c d
e f''')
s = str(TableForm([[5, 7], [4, 2], [10, 3]],
headings=[["Group A", "Group B", "Group C"], ["y1", "y2"]]))
assert s == (
' | y1 y2\n'
'---------------\n'
'Group A | 5 7 \n'
'Group B | 4 2 \n'
'Group C | 10 3 '
)
raises(
ValueError,
lambda:
TableForm(
[[5, 7], [4, 2], [10, 3]],
headings=[["Group A", "Group B", "Group C"], ["y1", "y2"]],
alignments="middle")
)
s = str(TableForm([[5, 7], [4, 2], [10, 3]],
headings=[["Group A", "Group B", "Group C"], ["y1", "y2"]],
alignments="right"))
assert s == dedent('''\
| y1 y2
---------------
Group A | 5 7
Group B | 4 2
Group C | 10 3''')
# other alignment permutations
d = [[1, 100], [100, 1]]
s = TableForm(d, headings=(('xxx', 'x'), None), alignments='l')
assert str(s) == (
'xxx | 1 100\n'
' x | 100 1 '
)
s = TableForm(d, headings=(('xxx', 'x'), None), alignments='lr')
assert str(s) == dedent('''\
xxx | 1 100
x | 100 1''')
s = TableForm(d, headings=(('xxx', 'x'), None), alignments='clr')
assert str(s) == dedent('''\
xxx | 1 100
x | 100 1''')
s = TableForm(d, headings=(('xxx', 'x'), None))
assert str(s) == (
'xxx | 1 100\n'
' x | 100 1 '
)
raises(ValueError, lambda: TableForm(d, alignments='clr'))
#pad
s = str(TableForm([[None, "-", 2], [1]], pad='?'))
assert s == dedent('''\
? - 2
1 ? ?''')
def test_TableForm_latex():
s = latex(TableForm([[0, x**3], ["c", S.One/4], [sqrt(x), sin(x**2)]],
wipe_zeros=True, headings=("automatic", "automatic")))
assert s == (
'\\begin{tabular}{r l l}\n'
' & 1 & 2 \\\\\n'
'\\hline\n'
'1 & & $x^{3}$ \\\\\n'
'2 & $c$ & $\\frac{1}{4}$ \\\\\n'
'3 & $\\sqrt{x}$ & $\\sin{\\left(x^{2} \\right)}$ \\\\\n'
'\\end{tabular}'
)
s = latex(TableForm([[0, x**3], ["c", S.One/4], [sqrt(x), sin(x**2)]],
wipe_zeros=True, headings=("automatic", "automatic"), alignments='l'))
assert s == (
'\\begin{tabular}{r l l}\n'
' & 1 & 2 \\\\\n'
'\\hline\n'
'1 & & $x^{3}$ \\\\\n'
'2 & $c$ & $\\frac{1}{4}$ \\\\\n'
'3 & $\\sqrt{x}$ & $\\sin{\\left(x^{2} \\right)}$ \\\\\n'
'\\end{tabular}'
)
s = latex(TableForm([[0, x**3], ["c", S.One/4], [sqrt(x), sin(x**2)]],
wipe_zeros=True, headings=("automatic", "automatic"), alignments='l'*3))
assert s == (
'\\begin{tabular}{l l l}\n'
' & 1 & 2 \\\\\n'
'\\hline\n'
'1 & & $x^{3}$ \\\\\n'
'2 & $c$ & $\\frac{1}{4}$ \\\\\n'
'3 & $\\sqrt{x}$ & $\\sin{\\left(x^{2} \\right)}$ \\\\\n'
'\\end{tabular}'
)
s = latex(TableForm([["a", x**3], ["c", S.One/4], [sqrt(x), sin(x**2)]],
headings=("automatic", "automatic")))
assert s == (
'\\begin{tabular}{r l l}\n'
' & 1 & 2 \\\\\n'
'\\hline\n'
'1 & $a$ & $x^{3}$ \\\\\n'
'2 & $c$ & $\\frac{1}{4}$ \\\\\n'
'3 & $\\sqrt{x}$ & $\\sin{\\left(x^{2} \\right)}$ \\\\\n'
'\\end{tabular}'
)
s = latex(TableForm([["a", x**3], ["c", S.One/4], [sqrt(x), sin(x**2)]],
formats=['(%s)', None], headings=("automatic", "automatic")))
assert s == (
'\\begin{tabular}{r l l}\n'
' & 1 & 2 \\\\\n'
'\\hline\n'
'1 & (a) & $x^{3}$ \\\\\n'
'2 & (c) & $\\frac{1}{4}$ \\\\\n'
'3 & (sqrt(x)) & $\\sin{\\left(x^{2} \\right)}$ \\\\\n'
'\\end{tabular}'
)
def METHOD_NAME(x, i, j):
if i % 2:
return ('(%s)' if x < 0 else '%s') % x
else:
pass # use default print
s = latex(TableForm([[-1, 2], [-3, 4]],
formats=[METHOD_NAME]*2, headings=("automatic", "automatic")))
assert s == (
'\\begin{tabular}{r l l}\n'
' & 1 & 2 \\\\\n'
'\\hline\n'
'1 & -1 & 2 \\\\\n'
'2 & (-3) & 4 \\\\\n'
'\\end{tabular}'
)
s = latex(TableForm([["a", x**3], ["c", S.One/4], [sqrt(x), sin(x**2)]]))
assert s == (
'\\begin{tabular}{l l}\n'
'$a$ & $x^{3}$ \\\\\n'
'$c$ & $\\frac{1}{4}$ \\\\\n'
'$\\sqrt{x}$ & $\\sin{\\left(x^{2} \\right)}$ \\\\\n'
'\\end{tabular}'
)
| null |
5,186 |
"""
Filter field by field by applying one or several filters
on target string like account@domain@device:service
For active the Target Accurate Filter mode, user MUST type on
selector research a string form beginning by '?' character like :
"?keyword1=value1&?keyword2=value2&?keyword3=value3"
About syntax, THREE SPECIAL characters are defined :
- '?' in front of a word is considered as keyword
- '&' between two filters is considered as filter separator
- '=' between keyword and value is considered as filter key-value separator
Each wanted filter must following the "?keyword=value"
format for be applicable.
For example, the following target filter string will filter on
"account", "domain", "device" and "service" keyword fields according to
the passed values :
"?account=my_account&?domain=my_domain&?device=my_device&?service=my_service"
We can combine 2 filters or more like above but we
can also execute only one filter on target.
The following target filter strings are accepted :
"?account=my_account"
"?domain=my_domain"
"?device=my_device"
"?service=my_service"
There is no specifc order for apply filters.
The following target filter string is also valid :
"?device=my_device&?account=my_account&?domain=my_domain"
BE CAREFUL, some peculiarities are possible about syntax.
The following target filter strings is considered as valid :
"?account=&&to&to&&" (account value : "&&to&to&&")
"?domain===to=to==" (domain value : "==to=to==")
"?device=&=&to=to&=&=" (device value : "&=&to=to&=&=")
"?service=&=&=&=&" (service value : "&=&=&=&")
"?device=toto=tata&&?account=titi&" (device value : "toto=tata&",
account value : "titi&")
The last one can be very misleading because device value isn't equal
to "toto=tata&&" but "toto=tata&" because '&' is both a special character
AND a valid character for target string !!!
However, target filter string badly formatted like containing
duplicated filter keywords, missing values and others aren't
allowed and will be skipped and considered as no valid.
The following examples are forbidden :
"?account=my_account&?account=my_account"
"?&domain=my_domain"
"?my_device=my_device"
"?service="
"?account?&my_account"
"?abcd=my_domain"
"""
from typing import Optional, List, Dict
FILTER_KEYWORD_LIST = (
"account",
"domain",
"device",
"service",
)
KEYWORD_PREFIX = '?'
FILTER_SEPARATOR = '&?'
FILTER_KV_SEPARATOR = '='
class SelectorFilterMode:
NONE = 0
NORMAL = 1
ADVANCED = 2
class FilterKeywordSyntaxError(RuntimeError):
def __init__(self, keyword):
super().__init__(
self, f"unknown filter keyword with '{keyword}'")
class FilterKeywordDuplicateError(RuntimeError):
def __init__(self, keyword):
super().__init__(
self, f"duplicated filter keyword with '{keyword}'")
class ParsingError(RuntimeError):
def __init__(self, bad_string):
super().__init__(
self,
f"'keyword{FILTER_KV_SEPARATOR}value' format parsing error with '{bad_string}'")
def get_selector_filter_mode(pattern: str) -> int:
if not pattern:
return SelectorFilterMode.NONE
elif not pattern.startswith(KEYWORD_PREFIX):
return SelectorFilterMode.NORMAL
else:
return SelectorFilterMode.ADVANCED
def METHOD_NAME(filter_keyword: str) -> Optional[str]:
if filter_keyword.startswith(KEYWORD_PREFIX):
filter_keyword_without_kprefix = filter_keyword[1:]
if filter_keyword_without_kprefix in FILTER_KEYWORD_LIST:
return filter_keyword_without_kprefix
return None
def filter_patterns_splitting(filter_patterns: str) -> List[str]:
word_list = filter_patterns.split(FILTER_SEPARATOR)
return [KEYWORD_PREFIX + filter_pattern if i != 0 else filter_pattern
for i, filter_pattern in enumerate(word_list)]
def get_filter_pattern_dict(filter_patterns: str) -> Dict[str, str]:
filter_pattern_dict = {}
for filter_kv in filter_patterns_splitting(filter_patterns):
token_list = filter_kv.split(FILTER_KV_SEPARATOR, 1)
if len(token_list) != 2:
raise ParsingError(filter_kv)
filter_keyword, filter_value = token_list
if not filter_value:
raise ParsingError(filter_kv)
filter_keyword_without_kprefix = (
METHOD_NAME(filter_keyword))
if not filter_keyword_without_kprefix:
raise FilterKeywordSyntaxError(filter_keyword)
if filter_keyword_without_kprefix not in filter_pattern_dict:
filter_pattern_dict[
filter_keyword_without_kprefix] = filter_value
else:
raise FilterKeywordDuplicateError(filter_keyword)
return filter_pattern_dict
def is_filterable(filter_pattern_dict: Dict[str, str],
target_field_dict: Dict[str, str]) -> bool:
return all(kw in target_field_dict and value in target_field_dict[kw]
for kw, value in filter_pattern_dict.items())
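# Minimal demonstration sketch (not part of the original module), assuming the
# target fields are exposed as a plain dict:
if __name__ == "__main__":
    pattern = "?account=my_account&?device=my_device"
    if get_selector_filter_mode(pattern) == SelectorFilterMode.ADVANCED:
        filters = get_filter_pattern_dict(pattern)
        # filters == {"account": "my_account", "device": "my_device"}
        target = {"account": "my_account_admin", "domain": "prod",
                  "device": "my_device", "service": "SSH"}
        # Prints True: every filter value is a substring of the matching target field
        print(is_filterable(filters, target))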
| null |
5,187 |
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is adapted from:
https://github.com/WenmuZhou/DBNet.pytorch/blob/master/data_loader/modules/random_crop_data.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import cv2
import random
def is_poly_in_rect(poly, x, y, w, h):
poly = np.array(poly)
if poly[:, 0].min() < x or poly[:, 0].max() > x + w:
return False
if poly[:, 1].min() < y or poly[:, 1].max() > y + h:
return False
return True
def METHOD_NAME(poly, x, y, w, h):
poly = np.array(poly)
if poly[:, 0].max() < x or poly[:, 0].min() > x + w:
return True
if poly[:, 1].max() < y or poly[:, 1].min() > y + h:
return True
return False
def split_regions(axis):
regions = []
min_axis = 0
for i in range(1, axis.shape[0]):
if axis[i] != axis[i - 1] + 1:
region = axis[min_axis:i]
min_axis = i
regions.append(region)
return regions
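# Illustrative example (not in the original source):
# split_regions(np.array([0, 1, 2, 5, 6, 9])) returns [array([0, 1, 2]), array([5, 6])];
# note that the trailing run ([9]) is not emitted by this helper.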
def random_select(axis, max_size):
xx = np.random.choice(axis, size=2)
xmin = np.min(xx)
xmax = np.max(xx)
xmin = np.clip(xmin, 0, max_size - 1)
xmax = np.clip(xmax, 0, max_size - 1)
return xmin, xmax
def region_wise_random_select(regions, max_size):
selected_index = list(np.random.choice(len(regions), 2))
selected_values = []
for index in selected_index:
axis = regions[index]
xx = int(np.random.choice(axis, size=1))
selected_values.append(xx)
xmin = min(selected_values)
xmax = max(selected_values)
return xmin, xmax
def crop_area(im, text_polys, min_crop_side_ratio, max_tries):
h, w, _ = im.shape
h_array = np.zeros(h, dtype=np.int32)
w_array = np.zeros(w, dtype=np.int32)
for points in text_polys:
points = np.round(points, decimals=0).astype(np.int32)
minx = np.min(points[:, 0])
maxx = np.max(points[:, 0])
w_array[minx:maxx] = 1
miny = np.min(points[:, 1])
maxy = np.max(points[:, 1])
h_array[miny:maxy] = 1
    # ensure the cropped area does not cross a text region
h_axis = np.where(h_array == 0)[0]
w_axis = np.where(w_array == 0)[0]
if len(h_axis) == 0 or len(w_axis) == 0:
return 0, 0, w, h
h_regions = split_regions(h_axis)
w_regions = split_regions(w_axis)
for i in range(max_tries):
if len(w_regions) > 1:
xmin, xmax = region_wise_random_select(w_regions, w)
else:
xmin, xmax = random_select(w_axis, w)
if len(h_regions) > 1:
ymin, ymax = region_wise_random_select(h_regions, h)
else:
ymin, ymax = random_select(h_axis, h)
if xmax - xmin < min_crop_side_ratio * w or ymax - ymin < min_crop_side_ratio * h:
# area too small
continue
num_poly_in_rect = 0
for poly in text_polys:
if not METHOD_NAME(poly, xmin, ymin, xmax - xmin,
ymax - ymin):
num_poly_in_rect += 1
break
if num_poly_in_rect > 0:
return xmin, ymin, xmax - xmin, ymax - ymin
return 0, 0, w, h
class EastRandomCropData(object):
def __init__(self,
size=(640, 640),
max_tries=10,
min_crop_side_ratio=0.1,
keep_ratio=True,
**kwargs):
self.size = size
self.max_tries = max_tries
self.min_crop_side_ratio = min_crop_side_ratio
self.keep_ratio = keep_ratio
def __call__(self, data):
img = data['image']
text_polys = data['polys']
ignore_tags = data['ignore_tags']
texts = data['texts']
all_care_polys = [
text_polys[i] for i, tag in enumerate(ignore_tags) if not tag
]
        # compute the crop region
crop_x, crop_y, crop_w, crop_h = crop_area(
img, all_care_polys, self.min_crop_side_ratio, self.max_tries)
        # crop the image, keeping the aspect ratio and padding to the target size
scale_w = self.size[0] / crop_w
scale_h = self.size[1] / crop_h
scale = min(scale_w, scale_h)
h = int(crop_h * scale)
w = int(crop_w * scale)
if self.keep_ratio:
padimg = np.zeros((self.size[1], self.size[0], img.shape[2]),
img.dtype)
padimg[:h, :w] = cv2.resize(
img[crop_y:crop_y + crop_h, crop_x:crop_x + crop_w], (w, h))
img = padimg
else:
img = cv2.resize(
img[crop_y:crop_y + crop_h, crop_x:crop_x + crop_w],
tuple(self.size))
        # crop the text polygons
text_polys_crop = []
ignore_tags_crop = []
texts_crop = []
for poly, text, tag in zip(text_polys, texts, ignore_tags):
poly = ((poly - (crop_x, crop_y)) * scale).tolist()
if not METHOD_NAME(poly, 0, 0, w, h):
text_polys_crop.append(poly)
ignore_tags_crop.append(tag)
texts_crop.append(text)
data['image'] = img
data['polys'] = np.array(text_polys_crop)
data['ignore_tags'] = ignore_tags_crop
data['texts'] = texts_crop
return data
class RandomCropImgMask(object):
def __init__(self, size, main_key, crop_keys, p=3 / 8, **kwargs):
self.size = size
self.main_key = main_key
self.crop_keys = crop_keys
self.p = p
def __call__(self, data):
image = data['image']
h, w = image.shape[0:2]
th, tw = self.size
if w == tw and h == th:
return data
mask = data[self.main_key]
if np.max(mask) > 0 and random.random() > self.p:
# make sure to crop the text region
tl = np.min(np.where(mask > 0), axis=1) - (th, tw)
tl[tl < 0] = 0
br = np.max(np.where(mask > 0), axis=1) - (th, tw)
br[br < 0] = 0
br[0] = min(br[0], h - th)
br[1] = min(br[1], w - tw)
i = random.randint(tl[0], br[0]) if tl[0] < br[0] else 0
j = random.randint(tl[1], br[1]) if tl[1] < br[1] else 0
else:
i = random.randint(0, h - th) if h - th > 0 else 0
j = random.randint(0, w - tw) if w - tw > 0 else 0
# return i, j, th, tw
for k in data:
if k in self.crop_keys:
if len(data[k].shape) == 3:
if np.argmin(data[k].shape) == 0:
img = data[k][:, i:i + th, j:j + tw]
if img.shape[1] != img.shape[2]:
a = 1
elif np.argmin(data[k].shape) == 2:
img = data[k][i:i + th, j:j + tw, :]
if img.shape[1] != img.shape[0]:
a = 1
else:
img = data[k]
else:
img = data[k][i:i + th, j:j + tw]
if img.shape[0] != img.shape[1]:
a = 1
data[k] = img
return data
| null |
5,188 |
import logging
from paradox.config import config as cfg
from paradox.event import Event, EventLevel, Notification
from paradox.interfaces import ThreadQueueInterface
from paradox.lib import ps
from paradox.lib.event_filter import (EventFilter, EventTagFilter,
LiveEventRegexpFilter)
logger = logging.getLogger("PAI").getChild(__name__)
class AbstractTextInterface(ThreadQueueInterface):
"""Interface Class using any Text interface"""
def __init__(self, alarm, event_filter: EventFilter, min_level=EventLevel.INFO):
super().__init__(alarm)
self.event_filter = event_filter
self.min_level = min_level
self.alarm = alarm
def stop(self):
super().stop()
def _run(self):
super(AbstractTextInterface, self)._run()
ps.subscribe(self.handle_panel_event, "events")
ps.subscribe(self.handle_notify, "notifications")
def send_message(self, message: str, level: EventLevel):
pass
def notification_filter(self, notification: Notification):
return notification.level >= self.min_level and notification.sender != self.name
def handle_notify(self, notification: Notification):
if self.notification_filter(notification):
self.send_message(notification.message, notification.level)
def handle_panel_event(self, event: Event):
if self.event_filter.match(event):
self.send_message(event.message, event.level)
async def METHOD_NAME(self, message_raw):
message = cfg.COMMAND_ALIAS.get(message_raw, message_raw)
tokens = message.split(" ")
if len(tokens) != 3:
m = "Invalid: {}".format(message_raw)
logger.warning(m)
return m
if self.alarm is None:
m = "No alarm registered"
logger.error(m)
return m
element_type = tokens[0].lower()
element = tokens[1]
command = self.normalize_payload(tokens[2].lower())
# Process a Zone Command
if element_type == "zone":
if not await self.alarm.control_zone(element, command):
m = "Zone command error: {}={}".format(element, command)
logger.warning(m)
return m
# Process a Partition Command
elif element_type == "partition":
if not await self.alarm.control_partition(element, command):
m = "Partition command error: {}={}".format(element, command)
logger.warning(m)
return m
# Process an Output Command
elif element_type == "output":
if not await self.alarm.control_output(element, command):
m = "Output command error: {}={}".format(element, command)
logger.warning(m)
return m
else:
m = "Invalid control element: {}".format(element)
logger.error(m)
return m
logger.info("OK: {}".format(message_raw))
return "OK"
# TODO: Remove this (to panels?)
@staticmethod
def normalize_payload(message):
message = message.strip().lower()
if message in ["true", "on", "1", "enable"]:
return "on"
elif message in ["false", "off", "0", "disable"]:
return "off"
elif message in [
"pulse",
"arm",
"disarm",
"arm_stay",
"arm_sleep",
"bypass",
"clear_bypass",
]:
return message
return None
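    # Illustrative examples (not in the original source):
    #   normalize_payload("ON")      -> "on"
    #   normalize_payload("Disarm")  -> "disarm"
    #   normalize_payload("toggle")  -> None  (unrecognised payloads are rejected)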
class ConfiguredAbstractTextInterface(AbstractTextInterface):
def __init__(
self, alarm, EVENT_FILTERS, ALLOW_EVENTS, IGNORE_EVENTS, MIN_EVENT_LEVEL
):
if EVENT_FILTERS and (ALLOW_EVENTS or IGNORE_EVENTS):
raise AssertionError(
"You can not use *_EVENT_FILTERS and *_ALLOW_EVENTS+*_IGNORE_EVENTS simultaneously"
)
min_level = EventLevel.from_name(MIN_EVENT_LEVEL)
if ALLOW_EVENTS or IGNORE_EVENTS: # Use if defined, else use TAGS as default
logger.debug("Using REGEXP Filter")
event_filter = LiveEventRegexpFilter(ALLOW_EVENTS, IGNORE_EVENTS, min_level)
else:
logger.debug("Using Tag Filter")
event_filter = EventTagFilter(EVENT_FILTERS, min_level)
super().__init__(alarm, event_filter=event_filter, min_level=min_level)
| null |
5,189 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2022 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
from mantid.simpleapi import SimpleShapeDiscusInelastic, Load, ConvertUnits, DeleteWorkspace
import unittest
class SimpleShapeDiscusInelasticTest(unittest.TestCase):
@classmethod
def setUpClass(self):
red_ws = Load("irs26176_graphite002_red.nxs")
red_ws.run().addProperty("deltaE-mode", "Indirect", True)
red_ws.run().addProperty("Ei", 1.845, True)
self._red_ws = red_ws
sqw_ws = Load("iris26176_graphite002_sqw.nxs")
self._sqw_ws = sqw_ws
self._arguments = {
"SampleChemicalFormula": "H2-O",
"SampleMassDensity": 1.0,
"NeutronPathsSingle": 50,
"NeutronPathsMultiple": 50,
"Height": 2.0,
"NumberScatterings": 2,
}
self._annulus_arguments = self._arguments.copy()
self._annulus_arguments.update({"Shape": "Annulus", "SampleOuterRadius": 2.0})
@classmethod
def tearDownClass(self):
DeleteWorkspace(self._red_ws)
DeleteWorkspace(self._sqw_ws)
def _test_corrections_workspace(self, corr_ws_grp):
number_ws = corr_ws_grp.getNumberOfEntries()
# Scatter_1, Scatter_1_NoAbs, Scatter_2, Scatter_1_2_Summed, Scatter_2_2_Summed
# Scatter_1_Integrated, Scatter_2_Integrated, Ratio x 2
self.assertEqual(number_ws, 9)
for i in range(number_ws):
x_unit = corr_ws_grp[i].getAxis(0).getUnit().unitID()
y_unit = corr_ws_grp[i].YUnitLabel()
blocksize = corr_ws_grp[i].blocksize()
if corr_ws_grp[i].name().endswith("Integrated"):
self.assertEqual(blocksize, 1)
else:
self.assertEqual(blocksize, 1905)
self.assertEqual(x_unit, "DeltaE")
self.assertEqual(y_unit, "Scattered Weight")
num_hists = corr_ws_grp[i].getNumberHistograms()
self.assertEqual(num_hists, 10)
def test_flat_plate(self):
# Test flat plate shape
kwargs = self._arguments
results = SimpleShapeDiscusInelastic(
ReducedWorkspace=self._red_ws, SqwWorkspace=self._sqw_ws, Shape="FlatPlate", Width=2.0, Thickness=2.0, **kwargs
)
self._test_corrections_workspace(results)
def test_cylinder(self):
# Test cylinder shape
kwargs = self._arguments
results = SimpleShapeDiscusInelastic(
ReducedWorkspace=self._red_ws, SqwWorkspace=self._sqw_ws, Shape="Cylinder", SampleRadius=2.0, **kwargs
)
self._test_corrections_workspace(results)
def test_annulus(self):
# Test annulus shape
kwargs = self._annulus_arguments
results = SimpleShapeDiscusInelastic(ReducedWorkspace=self._red_ws, SqwWorkspace=self._sqw_ws, SampleInnerRadius=1.0, **kwargs)
self._test_corrections_workspace(results)
def test_annulus_with_container(self):
kwargs = self._annulus_arguments
results = SimpleShapeDiscusInelastic(
ReducedWorkspace=self._red_ws,
SqwWorkspace=self._sqw_ws,
SampleInnerRadius=1.0,
CanInnerRadius=0.9,
CanOuterRadius=2.1,
Container=True,
**kwargs,
)
self._test_corrections_workspace(results)
# ------------------------------------- Failure Cases --------------------
def test_no_chemical_formula_or_cross_sections_causes_an_error(self):
kwargs = {
"ReducedWorkspace": self._red_ws,
"SqwWorkspace": self._sqw_ws,
"SampleMassDensity": 1.0,
"NeutronPathsSingle": 50,
"NeutronPathsMultiple": 50,
"Height": 2.0,
"Shape": "FlatPlate",
"Width": 1.4,
"Thickness": 2.1,
}
with self.assertRaisesRegex(RuntimeError, "Please enter a chemical formula."):
SimpleShapeDiscusInelastic(**kwargs)
def test_flat_plate_no_params(self):
# If the shape is flat plate but the relevant parameters haven't been entered this should throw
# relevant params are Height, Width, Thickness
params = ["Height", "Width", "Thickness"]
for param in params:
kwargs = {
"ReducedWorkspace": self._red_ws,
"SqwWorkspace": self._sqw_ws,
"SampleChemicalFormula": "H2-O",
"SampleMassDensity": 1.0,
"NeutronPathsSingle": 50,
"NeutronPathsMultiple": 50,
param: 0,
"Shape": "FlatPlate",
}
with self.assertRaisesRegex(RuntimeError, f"Please enter a non-zero number for {param.lower()}"):
SimpleShapeDiscusInelastic(**kwargs)
def METHOD_NAME(self):
red_ws_not_deltaE = Load("irs26176_graphite002_red.nxs")
red_ws_not_deltaE = ConvertUnits(InputWorkspace=self._red_ws, Target="Wavelength", EMode="Indirect", EFixed=1.845)
kwargs = {
"ReducedWorkspace": red_ws_not_deltaE,
"SqwWorkspace": self._sqw_ws,
"SampleChemicalFormula": "H2-O",
"SampleMassDensity": 1.0,
"NeutronPathsSingle": 50,
"NeutronPathsMultiple": 50,
"Height": 2.0,
"Shape": "FlatPlate",
"Width": 1.4,
"Thickness": 2.1,
}
with self.assertRaisesRegex(RuntimeError, "Input workspace must have units of DeltaE for inelastic instrument"):
SimpleShapeDiscusInelastic(**kwargs)
DeleteWorkspace(red_ws_not_deltaE)
if __name__ == "__main__":
unittest.main()
| null |
5,190 |
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import pytest
import mindspore.dataset as ds
import mindspore.dataset.audio as audio
from mindspore import log as logger
def gen(shape):
np.random.seed(0)
data = np.random.random(shape)
yield (np.array(data, dtype=np.float32),)
def count_unequal_element(data_expected, data_me, rtol, atol):
assert data_expected.shape == data_me.shape
total_count = len(data_expected.flatten())
error = np.abs(data_expected - data_me)
greater = np.greater(error, atol + np.abs(data_expected) * rtol)
loss_count = np.count_nonzero(greater)
assert (loss_count / total_count) < rtol, \
"\ndata_expected_std:{0}\ndata_me_error:{1}\nloss:{2}". \
format(data_expected[greater], data_me[greater], error[greater])
def allclose_nparray(data_expected, data_me, rtol, atol, equal_nan=True):
if np.any(np.isnan(data_expected)):
assert np.allclose(data_me, data_expected, rtol, atol, equal_nan=equal_nan)
elif not np.allclose(data_me, data_expected, rtol, atol, equal_nan=equal_nan):
count_unequal_element(data_expected, data_me, rtol, atol)
def test_phase_vocoder_compare():
"""
Feature: PhaseVocoder
Description: Mindspore eager mode checking precision
Expectation: The returned result is as expected
"""
indata_0 = np.array([[[[0.43189, 2.3049924],
[-0.01202229, 0.9176453],
[-0.6258611, 0.66475236],
[0.13541847, 1.2829605],
[0.9725325, 1.1669061]],
[[-0.35001752, -1.0989336],
[-1.4930767, 0.86829656],
[0.3355314, -0.41216415],
[-1.1828239, 1.0075365],
[-0.19343425, 0.38364533]]]]).astype('float32')
indata_1 = np.array([[[[0.43189, 2.3049924],
[-0.01202229, 0.9176453],
[-0.6258611, 0.66475236],
[0.13541847, 1.2829605],
[0.9725325, 1.1669061]],
[[-0.35001752, -1.0989336],
[-1.4930767, 0.86829656],
[0.3355314, -0.41216415],
[-1.1828239, 1.0075365],
[-0.19343425, 0.38364533]]]]).astype('float64')
rate = 2.
phase_advance_0 = np.array([[0.0000], [3.9270]]).astype('float32')
op_0 = audio.PhaseVocoder(rate, phase_advance_0)
phase_advance_1 = np.array([[0.0000], [3.9270]]).astype('float64')
op_1 = audio.PhaseVocoder(rate, phase_advance_1)
outdata_0 = op_0(indata_0)
outdata_1 = op_1(indata_1)
stand_outdata = np.array([[[[0.43189007, 2.3049924],
[-0.01196056, 0.9129374],
[1.1385509, 1.00558]],
[[-0.35001755, -1.0989336],
[-0.4594292, 0.26718047],
[0.404371, -0.14520557]]]]).astype('float32')
allclose_nparray(outdata_0, stand_outdata, 0.0001, 0.0001)
allclose_nparray(outdata_1, stand_outdata, 0.0001, 0.0001)
def test_phase_vocoder_eager():
"""
Feature: PhaseVocoder
Description: Mindspore eager mode with normal testcase
Expectation: The returned result is as expected
"""
logger.info("test PhaseVocoder op in eager mode")
stft = next(gen([10, 10, 10, 2]))[0]
out_put = audio.PhaseVocoder(1.3, np.random.randn(10, 1).astype('float32'))(stft)
assert out_put.shape == (10, 10, 8, 2)
def test_phase_vocoder_pipeline():
"""
Feature: PhaseVocoder
Description: Mindspore pipeline mode with normal testcase
Expectation: The returned result is as expected
"""
logger.info("test PhaseVocoder op in pipeline mode")
generator = gen([32, 33, 333, 2])
data1 = ds.GeneratorDataset(source=generator, column_names=["input"])
transforms = [audio.PhaseVocoder(0.8, np.random.randn(33, 1).astype('float32'))]
data1 = data1.map(operations=transforms, input_columns=["input"])
for item in data1.create_dict_iterator(num_epochs=1, output_numpy=True):
out_put = item["input"]
assert out_put.shape == (32, 33, 417, 2)
def test_phase_vocoder_invalid_input():
"""
Feature: PhaseVocoder
Description: Mindspore eager mode with invalid input
Expectation: The returned result is as expected
"""
def METHOD_NAME(test_name, rate, phase_advance, error, error_msg):
logger.info("Test PhaseVocoder with wrong params: {0}".format(test_name))
with pytest.raises(error) as error_info:
_ = audio.PhaseVocoder(rate, phase_advance)
assert error_msg in str(error_info.value)
def test_invalid_input(test_name, spec, rate, phase_advance, error, error_msg):
logger.info("Test PhaseVocoder with wrong params: {0}".format(test_name))
with pytest.raises(error) as error_info:
_ = audio.PhaseVocoder(rate, phase_advance)(spec)
assert error_msg in str(error_info.value)
METHOD_NAME("invalid phase_advance", 2, None, TypeError,
"Argument phase_advance with value None is not of type")
METHOD_NAME("invalid phase_advance", 0, np.random.randn(4, 1), ValueError,
"Input rate is not within the required interval of (0, 16777216].")
spec = next(gen([1, 2, 2]))[0]
test_invalid_input("invalid phase_advance", spec, 1.23, np.random.randn(4), RuntimeError,
"PhaseVocoder: invalid parameter, 'phase_advance' should be in shape of <freq, 1>.")
test_invalid_input("invalid phase_advance", spec, 1.1, np.random.randn(4, 4, 1), RuntimeError,
"PhaseVocoder: invalid parameter, 'phase_advance' should be in shape of <freq, 1>.")
test_invalid_input("invalid input tensor", spec, 2, np.random.randn(3, 1), RuntimeError,
"PhaseVocoder: invalid parameter, 'first dimension of 'phase_advance'' should be equal")
input_tensor = np.random.randn(4, 4, 2).astype('float32')
input_phase_advance = np.random.randn(4, 1).astype('float64')
test_invalid_input("invalid input tensor", input_tensor, 2, input_phase_advance, RuntimeError,
"PhaseVocoder: invalid parameter, data type of phase_advance should be equal to data")
if __name__ == "__main__":
test_phase_vocoder_compare()
test_phase_vocoder_eager()
test_phase_vocoder_pipeline()
test_phase_vocoder_invalid_input()
| null |
5,191 |
# SPDX-FileCopyrightText: Copyright DB Netz AG and the capellambse contributors
# SPDX-License-Identifier: Apache-2.0
# pylint: disable=redefined-outer-name
import pathlib
import shutil
import tempfile
import pytest
from capellambse import loader, pvmt
TEST_ROOT = pathlib.Path(__file__).parent / "data" / "pvmt"
MODEL_FILE = "PVMTTest.aird"
@pytest.fixture
def model():
global TEST_ROOT
orig_test_root = TEST_ROOT
with tempfile.TemporaryDirectory() as tempdir:
TEST_ROOT = pathlib.Path(tempdir, "model")
shutil.copytree(orig_test_root, TEST_ROOT)
yield loader.MelodyLoader(TEST_ROOT / MODEL_FILE)
TEST_ROOT = orig_test_root
@pytest.fixture
def pvext(model):
return pvmt.load_pvmt_from_model(model)
class TestPVMTBase:
"""Tests for basic PVMT functionality."""
domain_uuid = "02e0c435-f085-471f-9f6e-e12fe5f27687"
domain_name = "Computer"
enum_names = {
"03e5e5ae-0a61-473d-a792-e003ce601ff4": "Cable Type",
"ae279311-6a75-40b7-9bf3-a19c136d0573": "ComponentType",
}
enum_values = {
"03e5e5ae-0a61-473d-a792-e003ce601ff4": ["UNSET", "Trace", "Cable"],
"ae279311-6a75-40b7-9bf3-a19c136d0573": [
"UNSET",
"Core Component",
"External",
],
}
def test_domains(self, pvext):
assert {k: v.name for k, v in pvext.items()} == {
self.domain_uuid: self.domain_name,
"12a02f1b-8b97-4188-95ea-2afd377c4c41": "Out of scope",
"ae15f861-2f15-40d2-a1b9-57dfdf7fabae": "In scope",
"c2d3e3a3-f9bb-4ff7-964d-2109c7065bc2": "External Data",
}
def METHOD_NAME(self, pvext):
model_enums = pvext[self.domain_uuid].enums
assert {k: v.name for k, v in model_enums.items()} == self.enum_names
for enum_uuid, vals in self.enum_values.items():
actual_vals = set(
v["name"] for _, v in model_enums[enum_uuid].items()
)
assert actual_vals == set(vals)
def test_enum_defaults(self, pvext):
group = "0fb1fbfb-43a6-4f96-9ec3-2cb83d80c702"
pv_id = "f3f9b7ec-f00d-4b2f-8a48-900a22c43853"
epv = pvext[self.domain_uuid][group][pv_id]
assert epv.default_value["name"] == "UNSET"
def test_groups(self, pvext):
model_groups = pvext[self.domain_uuid].groups
assert set(g.name for g in model_groups) == {
"Components",
"Cables",
"Physical Cables",
}
def test_int(self, pvext):
group = "cdc24b8b-3c4c-4513-9aad-99c0f7d884dc"
pv_id = "7130121b-0242-49b7-885c-44b868c4d7b7"
ipv = pvext[self.domain_uuid][group][pv_id]
assert ipv.name == "Price"
assert ipv.unit == "€"
def test_apply_outofscope(self, model, pvext):
elem = model["d32caffc-b9a1-448e-8e96-65a36ba06292"]
domain = pvext["12a02f1b-8b97-4188-95ea-2afd377c4c41"]
for group in domain.groups:
with pytest.raises(pvmt.ScopeError):
pvext.get_element_pv(
elem, f"{domain.name}.{group.name}", create=True
)
def test_apply_inscope(self, model, pvext):
elem = model["d32caffc-b9a1-448e-8e96-65a36ba06292"]
domain = pvext["ae15f861-2f15-40d2-a1b9-57dfdf7fabae"]
for group in domain.groups:
pvext.get_element_pv(
elem, f"{domain.name}.{group.name}", create=True
)
class TestAppliedPropertyValueGroup:
"""Tests for all methods of the ``AppliedPropertyValueGroup`` object."""
elem_uuid = "d32caffc-b9a1-448e-8e96-65a36ba06292"
pvg_name = "Computer.Physical Cables"
def test___iter__(self, model, pvext):
elem_pv = pvext.get_element_pv(model[self.elem_uuid], self.pvg_name)
keys = set(elem_pv)
assert keys == {"In Stock", "Label", "Plugs per side", "Price"}
def test___len__(self, model, pvext):
elem_pv = pvext.get_element_pv(model[self.elem_uuid], self.pvg_name)
assert len(elem_pv) == 4
def test___getitem__(self, model, pvext):
elem_pv = pvext.get_element_pv(model[self.elem_uuid], self.pvg_name)
assert elem_pv["In Stock"] is True
assert elem_pv["Label"] == "DisplayPort_1"
assert elem_pv["Plugs per side"] == 1
assert elem_pv["Price"] == 14.99
with pytest.raises(KeyError):
elem_pv["price"] # pylint: disable=pointless-statement
def test___setitem___modify(self, model, pvext):
elem = model[self.elem_uuid]
elem_pv = pvext.get_element_pv(elem, self.pvg_name)
pv_cable = pvext.get_element_pv(elem, "Computer.Cables")
pv_cable["CableType"] = "Trace"
elem_pv["Price"] = 10
elem_pv["In Stock"] = False
assert pv_cable["CableType"] == "Trace"
assert elem_pv["Price"] == 10.0
assert elem_pv["In Stock"] is False
with pytest.raises(KeyError):
elem_pv["price"] = 10.0
with pytest.raises(ValueError):
elem_pv["Price"] = "Not a float"
def test___delitem_____setitem__(self, model, pvext):
elem_pv = pvext.get_element_pv(model[self.elem_uuid], self.pvg_name)
del elem_pv["Price"]
assert "Price" not in elem_pv
with pytest.raises(KeyError):
del elem_pv["Price"]
with pytest.raises(KeyError):
del elem_pv["nonexistent_key"]
with pytest.raises(KeyError):
elem_pv["price"] = 10.0
elem_pv["Price"] = 25.0
assert "Price" in elem_pv
assert elem_pv["Price"] == 25.0
def test___contains__(self, model, pvext):
elem_pv = pvext.get_element_pv(model[self.elem_uuid], self.pvg_name)
assert "Price" in elem_pv
assert "In Stock" in elem_pv
assert "price" not in elem_pv
assert "InStock" not in elem_pv
def test_copy(self, model, pvext):
elem_pv = pvext.get_element_pv(model[self.elem_uuid], self.pvg_name)
copy = elem_pv.copy()
expected = {
"In Stock": True,
"Label": "DisplayPort_1",
"Plugs per side": 1,
"Price": 14.99,
}
assert expected == copy
assert elem_pv is not copy
def test_get(self, model, pvext):
elem_pv = pvext.get_element_pv(model[self.elem_uuid], self.pvg_name)
assert elem_pv.get("Price") == 14.99
assert elem_pv.get("In Stock") is True
sentinel = object()
assert elem_pv.get("price", sentinel) is sentinel
class TestAppliedPropertyValueGroupXML:
"""Tests that rely on writing back and comparing the output XML."""
expect_root = TEST_ROOT / "expected-output"
elem_uuid = "d32caffc-b9a1-448e-8e96-65a36ba06292"
def compare_xml(self, model, expected_file):
pvmt_model_path = (TEST_ROOT / MODEL_FILE).with_suffix(".capella")
expected_model_path = self.expect_root / expected_file
model.save()
actual = pvmt_model_path.read_text(encoding="utf-8")
expected = expected_model_path.read_text(encoding="utf-8")
assert actual == expected
def test_apply(self, model, pvext, monkeypatch):
call_count = 0
def mock_generate_uuid(*__, **_):
nonlocal call_count
call_count += 1
return f"00000000-0000-0000-0000-{call_count:012x}"
monkeypatch.setattr(
"capellambse.loader.MelodyLoader.generate_uuid",
mock_generate_uuid,
)
elem = model[self.elem_uuid]
obj_ids = pvext.get_element_pv(
elem, "External Data.Object IDs", create=True
)
# By this point, the group and its child must both already
# exist, each with their own UUID.
assert call_count == 2
obj_ids["Object ID"] = "CABLE-0001"
self.compare_xml(model, "apply.capella")
| null |
5,192 |
from ctypes import *
import array
import gc
import unittest
class X(Structure):
_fields_ = [("c_int", c_int)]
init_called = False
def __init__(self):
self._init_called = True
class Test(unittest.TestCase):
def test_from_buffer(self):
a = array.array("i", range(16))
x = (c_int * 16).from_buffer(a)
y = X.from_buffer(a)
self.assertEqual(y.c_int, a[0])
self.assertFalse(y.init_called)
self.assertEqual(x[:], a.tolist())
a[0], a[-1] = 200, -200
self.assertEqual(x[:], a.tolist())
self.assertRaises(BufferError, a.append, 100)
self.assertRaises(BufferError, a.pop)
del x; del y; gc.collect(); gc.collect(); gc.collect()
a.append(100)
a.pop()
x = (c_int * 16).from_buffer(a)
self.assertIn(a, [obj.obj if isinstance(obj, memoryview) else obj
for obj in x._objects.values()])
expected = x[:]
del a; gc.collect(); gc.collect(); gc.collect()
self.assertEqual(x[:], expected)
with self.assertRaisesRegex(TypeError, "not writable"):
(c_char * 16).from_buffer(b"a" * 16)
with self.assertRaisesRegex(TypeError, "not writable"):
(c_char * 16).from_buffer(memoryview(b"a" * 16))
with self.assertRaisesRegex(TypeError, "not C contiguous"):
(c_char * 16).from_buffer(memoryview(bytearray(b"a" * 16))[::-1])
msg = "bytes-like object is required"
with self.assertRaisesRegex(TypeError, msg):
(c_char * 16).from_buffer("a" * 16)
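    # A quick sketch of the sharing semantics exercised above (illustrative
    # only, not an additional test case): from_buffer() aliases the source
    # buffer, while from_buffer_copy() snapshots it.
    #
    #     buf = array.array("i", [1, 2, 3, 4])
    #     shared = (c_int * 4).from_buffer(buf)
    #     copied = (c_int * 4).from_buffer_copy(buf)
    #     buf[0] = 99
    #     assert shared[0] == 99 and copied[0] == 1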
def test_fortran_contiguous(self):
try:
import _testbuffer
except ImportError as err:
self.skipTest(str(err))
flags = _testbuffer.ND_WRITABLE | _testbuffer.ND_FORTRAN
array = _testbuffer.ndarray(
[97] * 16, format="B", shape=[4, 4], flags=flags)
with self.assertRaisesRegex(TypeError, "not C contiguous"):
(c_char * 16).from_buffer(array)
array = memoryview(array)
self.assertTrue(array.f_contiguous)
self.assertFalse(array.c_contiguous)
with self.assertRaisesRegex(TypeError, "not C contiguous"):
(c_char * 16).from_buffer(array)
def METHOD_NAME(self):
a = array.array("i", range(16))
x = (c_int * 15).from_buffer(a, sizeof(c_int))
self.assertEqual(x[:], a.tolist()[1:])
with self.assertRaises(ValueError):
c_int.from_buffer(a, -1)
with self.assertRaises(ValueError):
(c_int * 16).from_buffer(a, sizeof(c_int))
with self.assertRaises(ValueError):
(c_int * 1).from_buffer(a, 16 * sizeof(c_int))
def test_from_buffer_memoryview(self):
a = [c_char.from_buffer(memoryview(bytearray(b'a')))]
a.append(a)
del a
gc.collect() # Should not crash
def test_from_buffer_copy(self):
a = array.array("i", range(16))
x = (c_int * 16).from_buffer_copy(a)
y = X.from_buffer_copy(a)
self.assertEqual(y.c_int, a[0])
self.assertFalse(y.init_called)
self.assertEqual(x[:], list(range(16)))
a[0], a[-1] = 200, -200
self.assertEqual(x[:], list(range(16)))
a.append(100)
self.assertEqual(x[:], list(range(16)))
self.assertEqual(x._objects, None)
del a; gc.collect(); gc.collect(); gc.collect()
self.assertEqual(x[:], list(range(16)))
x = (c_char * 16).from_buffer_copy(b"a" * 16)
self.assertEqual(x[:], b"a" * 16)
with self.assertRaises(TypeError):
(c_char * 16).from_buffer_copy("a" * 16)
def test_from_buffer_copy_with_offset(self):
a = array.array("i", range(16))
x = (c_int * 15).from_buffer_copy(a, sizeof(c_int))
self.assertEqual(x[:], a.tolist()[1:])
with self.assertRaises(ValueError):
c_int.from_buffer_copy(a, -1)
with self.assertRaises(ValueError):
(c_int * 16).from_buffer_copy(a, sizeof(c_int))
with self.assertRaises(ValueError):
(c_int * 1).from_buffer_copy(a, 16 * sizeof(c_int))
def test_abstract(self):
from ctypes import _Pointer, _SimpleCData, _CFuncPtr
self.assertRaises(TypeError, Array.from_buffer, bytearray(10))
self.assertRaises(TypeError, Structure.from_buffer, bytearray(10))
self.assertRaises(TypeError, Union.from_buffer, bytearray(10))
self.assertRaises(TypeError, _CFuncPtr.from_buffer, bytearray(10))
self.assertRaises(TypeError, _Pointer.from_buffer, bytearray(10))
self.assertRaises(TypeError, _SimpleCData.from_buffer, bytearray(10))
self.assertRaises(TypeError, Array.from_buffer_copy, b"123")
self.assertRaises(TypeError, Structure.from_buffer_copy, b"123")
self.assertRaises(TypeError, Union.from_buffer_copy, b"123")
self.assertRaises(TypeError, _CFuncPtr.from_buffer_copy, b"123")
self.assertRaises(TypeError, _Pointer.from_buffer_copy, b"123")
self.assertRaises(TypeError, _SimpleCData.from_buffer_copy, b"123")
if __name__ == '__main__':
unittest.main()
| null |
5,193 |
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Task class and related functionality.
Task instructs the work to be performed. A task is typically generated by the
core task generation loop based on the state of MLMD db.
"""
import abc
import enum
from typing import Dict, Hashable, List, Optional, Type, TypeVar
import attr
from tfx import types
from tfx.orchestration import node_proto_view
from tfx.orchestration.experimental.core import env
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import status as status_lib
from ml_metadata.proto import metadata_store_pb2
@attr.s(auto_attribs=True, frozen=True)
class PipelineUid:
"""Uniquely identifies a pipeline among pipelines being actively orchestrated.
Recommended to use `from_pipeline` or `from_pipeline_id_and_run_id` class
methods to create `PipelineUid` objects as they correctly account for
concurrent pipeline runs mode.
Attributes:
pipeline_id: Id of the pipeline containing the node. Corresponds to
`Pipeline.pipeline_info.id` in the pipeline IR.
pipeline_run_id: Run identifier for the pipeline if one is provided.
"""
pipeline_id: str
pipeline_run_id: Optional[str] = None
@classmethod
def from_pipeline(cls: Type['PipelineUid'],
pipeline: pipeline_pb2.Pipeline) -> 'PipelineUid':
"""Creates a PipelineUid object given a pipeline IR."""
if (env.get_env().concurrent_pipeline_runs_enabled() and
pipeline.execution_mode == pipeline_pb2.Pipeline.SYNC):
pipeline_run_id = pipeline.runtime_spec.pipeline_run_id.field_value.string_value
if not pipeline_run_id:
raise ValueError(
'pipeline_run_id unexpectedly missing for a sync pipeline.')
else:
pipeline_run_id = None
return cls(
pipeline_id=pipeline.pipeline_info.id, pipeline_run_id=pipeline_run_id)
@classmethod
def METHOD_NAME(
cls: Type['PipelineUid'], pipeline_id: str,
pipeline_run_id: Optional[str]) -> 'PipelineUid':
# If concurrent runs are not enabled, pipeline_run_id is not part of the
# PipelineUid.
if env.get_env().concurrent_pipeline_runs_enabled():
return cls(
pipeline_id=pipeline_id, pipeline_run_id=pipeline_run_id or None)
return cls(pipeline_id=pipeline_id)
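# A minimal construction sketch (the pipeline proto below is a hypothetical
# stand-in; whether pipeline_run_id is retained depends on the orchestrator
# Env, as implemented in from_pipeline above):
#
#     pipeline = pipeline_pb2.Pipeline()
#     pipeline.pipeline_info.id = 'my_pipeline'
#     uid = PipelineUid.from_pipeline(pipeline)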
@attr.s(auto_attribs=True, frozen=True)
class NodeUid:
"""Uniquely identifies a node across all pipelines being actively orchestrated.
Attributes:
pipeline_uid: The pipeline UID.
node_id: Node id. Corresponds to `PipelineNode.node_info.id` in the pipeline
IR.
"""
pipeline_uid: PipelineUid
node_id: str
@classmethod
def from_node(cls: Type['NodeUid'], pipeline: pipeline_pb2.Pipeline,
node: node_proto_view.NodeProtoView) -> 'NodeUid':
return cls(
pipeline_uid=PipelineUid.from_pipeline(pipeline),
node_id=node.node_info.id)
# Task id can be any hashable type.
TaskId = TypeVar('TaskId', bound=Hashable)
_TaskT = TypeVar('_TaskT', bound='Task')
class Task(abc.ABC):
"""Task instructs the work to be performed."""
@property
@abc.abstractmethod
def task_id(self) -> TaskId:
"""Returns a unique identifier for this task.
The concrete implementation must ensure that the returned task id is unique
across all task types.
"""
@classmethod
def task_type_id(cls: Type[_TaskT]) -> str:
"""Returns task type id."""
return cls.__name__
class CancelTask(Task):
"""Base class for cancellation task types."""
pass
@enum.unique
class NodeCancelType(enum.Enum):
# The node is being cancelled with no intention to reuse the same execution.
CANCEL_EXEC = 1
# The node is being paused with the intention of resuming the same execution
# after restart.
PAUSE_EXEC = 2
@attr.s(auto_attribs=True, frozen=True)
class ExecNodeTask(Task):
"""Task to instruct execution of a node in the pipeline.
Attributes:
node_uid: Uid of the node to be executed.
execution_id: Id of the MLMD execution associated with the current node.
contexts: List of contexts associated with the execution.
exec_properties: Execution properties of the execution.
input_artifacts: Input artifacts dict.
output_artifacts: Output artifacts dict.
executor_output_uri: URI for the executor output.
stateful_working_dir: Working directory for the node execution.
tmp_dir: Temporary directory for the node execution.
pipeline: The pipeline IR proto containing the node to be executed.
cancel_type: Indicates whether this is a cancelled execution, and the type
of the cancellation. The task scheduler is expected to gracefully exit
after doing any necessary cleanup.
"""
node_uid: NodeUid
execution_id: int
contexts: List[metadata_store_pb2.Context]
exec_properties: Dict[str, types.ExecPropertyTypes]
input_artifacts: Dict[str, List[types.Artifact]]
output_artifacts: Dict[str, List[types.Artifact]]
executor_output_uri: str
stateful_working_dir: str
tmp_dir: str
pipeline: pipeline_pb2.Pipeline
cancel_type: Optional[NodeCancelType] = None
@property
def task_id(self) -> TaskId:
return _exec_node_task_id(self.task_type_id(), self.node_uid)
def get_node(self) -> node_proto_view.NodeProtoView:
for pipeline_or_node in self.pipeline.nodes:
view = node_proto_view.get_view(pipeline_or_node)
if view.node_info.id == self.node_uid.node_id:
return view
raise ValueError(
f'Node not found in pipeline IR; node uid: {self.node_uid}')
@attr.s(auto_attribs=True, frozen=True)
class CancelNodeTask(CancelTask):
"""Task to instruct cancellation of an ongoing node execution.
Attributes:
node_uid: Uid of the node to be cancelled.
cancel_type: Indicates the type of this cancellation.
"""
node_uid: NodeUid
cancel_type: NodeCancelType = NodeCancelType.CANCEL_EXEC
@property
def task_id(self) -> TaskId:
return (self.task_type_id(), self.node_uid)
@attr.s(auto_attribs=True, frozen=True)
class FinalizePipelineTask(Task):
"""Task to instruct finalizing a pipeline run."""
pipeline_uid: PipelineUid
status: status_lib.Status
@property
def task_id(self) -> TaskId:
return (self.task_type_id(), self.pipeline_uid)
@attr.s(auto_attribs=True, frozen=True)
class UpdateNodeStateTask(Task):
"""Task to instruct updating node states.
This is useful for task generators to defer actually updating node states in
MLMD to the caller, where node state updates can be bundled together with
  other pipeline state changes and committed to MLMD in a single transaction for
efficiency.
"""
node_uid: NodeUid
state: str
status: Optional[status_lib.Status] = None
backfill_token: str = ''
@property
def task_id(self) -> TaskId:
return (self.task_type_id(), self.node_uid)
def exec_node_task_id_from_node(pipeline: pipeline_pb2.Pipeline,
node: node_proto_view.NodeProtoView) -> TaskId:
"""Returns task id of an `ExecNodeTask` from pipeline and node."""
return _exec_node_task_id(ExecNodeTask.task_type_id(),
NodeUid.from_node(pipeline, node))
def _exec_node_task_id(task_type_id: str, node_uid: NodeUid) -> TaskId:
return (task_type_id, node_uid)
| null |
5,194 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# pylint: disable=invalid-name,no-name-in-module,too-many-public-methods
from qtpy import QtCore, QtGui, QtWidgets
import sys
import mantid
from mantidqtinterfaces.DGSPlanner.ValidateOL import ValidateUB
from mantidqtinterfaces.DGSPlanner.LoadNexusUB import LoadNexusUB
try:
from qtpy.QtCore import QString
except ImportError:
QString = type("")
class UBTableModel(QtCore.QAbstractTableModel):
changed = QtCore.Signal(mantid.geometry.OrientedLattice)
def __init__(self, lattice, parent=None):
QtCore.QAbstractTableModel.__init__(self, parent)
self.__lattice = lattice
self.__UB = self.__lattice.getUB().copy()
self.sendSignal()
def rowCount(self, dummy_parent):
return 3
def columnCount(self, dummy_parent):
return 3
def flags(self, dummy_index):
return QtCore.Qt.ItemIsEditable | QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable
def data(self, index, role):
if role == QtCore.Qt.EditRole:
row = index.row()
column = index.column()
return QString(format(self.__UB[row][column], ".4f"))
elif role == QtCore.Qt.DisplayRole:
row = index.row()
column = index.column()
value = QString(format(self.__UB[row][column], ".4f"))
return value
elif role == QtCore.Qt.BackgroundRole:
if ValidateUB(self.__UB):
return QtGui.QBrush(QtCore.Qt.white)
else:
return QtGui.QBrush(QtCore.Qt.red)
def setData(self, index, value, role=QtCore.Qt.EditRole):
if role == QtCore.Qt.EditRole:
row = index.row()
column = index.column()
try:
val = float(value)
except ValueError:
return False
self.__UB[row][column] = val
self.dataChanged.emit(index, index)
if ValidateUB(self.__UB):
self.__lattice.setUB(self.__UB)
self.sendSignal()
return True
return False
def sendSignal(self):
self.changed.emit(self.__lattice)
def updateOL(self, ol):
self.beginResetModel()
self.__lattice = ol
self.__UB = self.__lattice.getUB().copy()
self.endResetModel()
class MatrixUBInputWidget(QtWidgets.QWidget):
# pylint: disable=too-few-public-methods
def __init__(self, ol, parent=None):
# pylint: disable=unused-argument,super-on-old-class
super(MatrixUBInputWidget, self).__init__(parent)
self.setLayout(QtWidgets.QVBoxLayout())
self._tableView = QtWidgets.QTableView(self)
self._tableView.horizontalHeader().hide()
self._tableView.verticalHeader().hide()
self._tableView.horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.Stretch)
self._tableView.verticalHeader().setSectionResizeMode(QtWidgets.QHeaderView.Stretch)
self.LoadIsawUBButton = QtWidgets.QPushButton("LoadIsawUB")
self.LoadNexusUBButton = QtWidgets.QPushButton("LoadNexusUB")
self.layout().addWidget(QtWidgets.QLabel("UB matrix"))
self.layout().addWidget(self._tableView)
self.hbox = QtWidgets.QHBoxLayout()
self.hbox.addStretch(1)
self.hbox.addWidget(self.LoadIsawUBButton)
self.hbox.addWidget(self.LoadNexusUBButton)
self.layout().addLayout(self.hbox)
self.ol = ol
self.UBmodel = UBTableModel(self.ol, self)
self._tableView.setModel(self.UBmodel)
self._tableView.update()
self._tableView.setMinimumSize(self._tableView.sizeHintForColumn(0) * 6, self._tableView.sizeHintForRow(0) * 4)
self._tableView.setMaximumSize(self._tableView.sizeHintForColumn(0) * 6, self._tableView.sizeHintForRow(0) * 4)
self.LoadIsawUBButton.clicked.connect(self.loadIsawUBDialog)
self.LoadNexusUBButton.clicked.connect(self.METHOD_NAME)
self.layout().addStretch(1)
def loadIsawUBDialog(self):
# pylint: disable=bare-except
try:
fname = QtWidgets.QFileDialog.getOpenFileName(self, "Open ISAW UB file", filter=QString("Mat file (*.mat);;All Files (*)"))
if isinstance(fname, tuple):
fname = fname[0]
if not fname:
return
__tempws = mantid.simpleapi.CreateSingleValuedWorkspace(0.0)
mantid.simpleapi.LoadIsawUB(__tempws, str(fname))
ol = mantid.geometry.OrientedLattice(__tempws.sample().getOrientedLattice())
ol.setU(__tempws.sample().getOrientedLattice().getU())
self.UBmodel.updateOL(ol)
self.UBmodel.sendSignal()
mantid.simpleapi.DeleteWorkspace(__tempws)
except Exception as e:
mantid.logger.error("Could not open the file, or not a valid UB matrix: {}".format(e))
def METHOD_NAME(self):
# pylint: disable=bare-except
try:
fname = QtWidgets.QFileDialog.getOpenFileName(
self, "Open Nexus file to extract UB matrix", filter=QString("Nexus file (*.nxs.h5);;All Files (*)")
)
if isinstance(fname, tuple):
fname = fname[0]
if not fname:
return
__tempUB = LoadNexusUB(str(fname))
ol = mantid.geometry.OrientedLattice()
ol.setUB(__tempUB)
self.UBmodel.updateOL(ol)
self.UBmodel.sendSignal()
except Exception as e:
mantid.logger.error("Could not open the Nexus file, or could not find UB matrix: {}".format(e))
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
inputol = mantid.geometry.OrientedLattice(2, 3, 4, 90, 90, 90)
mainForm = MatrixUBInputWidget(inputol)
mainForm.show()
sys.exit(app.exec_())
| null |
5,195 |
from __future__ import annotations
import pathlib
from typing import List, Any, Optional, TYPE_CHECKING
from ..locale import list_timezones, list_keyboard_languages
from ..menu import MenuSelectionType, Menu, TextInput
from ..models.audio_configuration import Audio, AudioConfiguration
from ..output import warn
from ..packages.packages import validate_package_list
from ..storage import storage
from ..translationhandler import Language
if TYPE_CHECKING:
_: Any
def ask_ntp(preset: bool = True) -> bool:
prompt = str(_('Would you like to use automatic time synchronization (NTP) with the default time servers?\n'))
prompt += str(_('Hardware time and other post-configuration steps might be required in order for NTP to work.\nFor more information, please check the Arch wiki'))
if preset:
preset_val = Menu.yes()
else:
preset_val = Menu.no()
choice = Menu(prompt, Menu.yes_no(), skip=False, preset_values=preset_val, default_option=Menu.yes()).run()
return False if choice.value == Menu.no() else True
def ask_hostname(preset: str = '') -> str:
while True:
hostname = TextInput(
str(_('Desired hostname for the installation: ')),
preset
).run().strip()
if hostname:
return hostname
def METHOD_NAME(preset: Optional[str] = None) -> Optional[str]:
timezones = list_timezones()
default = 'UTC'
choice = Menu(
_('Select a timezone'),
list(timezones),
preset_values=preset,
default_option=default
).run()
match choice.type_:
case MenuSelectionType.Skip: return preset
case MenuSelectionType.Selection: return choice.single_value
return None
def ask_for_audio_selection(
current: Optional[AudioConfiguration] = None
) -> Optional[AudioConfiguration]:
choices = [
Audio.Pipewire.name,
Audio.Pulseaudio.name,
Audio.no_audio_text()
]
preset = current.audio.name if current else None
choice = Menu(
_('Choose an audio server'),
choices,
preset_values=preset
).run()
match choice.type_:
case MenuSelectionType.Skip: return current
case MenuSelectionType.Selection:
value = choice.single_value
if value == Audio.no_audio_text():
return None
else:
return AudioConfiguration(Audio[value])
return None
def select_language(preset: Optional[str] = None) -> Optional[str]:
"""
Asks the user to select a language
Usually this is combined with :ref:`archinstall.list_keyboard_languages`.
:return: The language/dictionary key of the selected language
:rtype: str
"""
kb_lang = list_keyboard_languages()
# sort alphabetically and then by length
sorted_kb_lang = sorted(sorted(list(kb_lang)), key=len)
choice = Menu(
_('Select keyboard layout'),
sorted_kb_lang,
preset_values=preset,
sort=False
).run()
match choice.type_:
case MenuSelectionType.Skip: return preset
case MenuSelectionType.Selection: return choice.single_value
return None
def select_archinstall_language(languages: List[Language], preset: Language) -> Language:
# these are the displayed language names which can either be
# the english name of a language or, if present, the
# name of the language in its own language
options = {lang.display_name: lang for lang in languages}
title = 'NOTE: If a language can not displayed properly, a proper font must be set manually in the console.\n'
title += 'All available fonts can be found in "/usr/share/kbd/consolefonts"\n'
title += 'e.g. setfont LatGrkCyr-8x16 (to display latin/greek/cyrillic characters)\n'
choice = Menu(
title,
list(options.keys()),
default_option=preset.display_name,
preview_size=0.5
).run()
match choice.type_:
case MenuSelectionType.Skip: return preset
case MenuSelectionType.Selection: return options[choice.single_value]
raise ValueError('Language selection not handled')
def ask_additional_packages_to_install(preset: List[str] = []) -> List[str]:
# Additional packages (with some light weight error handling for invalid package names)
print(_('Only packages such as base, base-devel, linux, linux-firmware, efibootmgr and optional profile packages are installed.'))
print(_('If you desire a web browser, such as firefox or chromium, you may specify it in the following prompt.'))
def read_packages(p: List = []) -> list:
display = ' '.join(p)
input_packages = TextInput(_('Write additional packages to install (space separated, leave blank to skip): '), display).run().strip()
return input_packages.split() if input_packages else []
preset = preset if preset else []
packages = read_packages(preset)
if not storage['arguments']['offline'] and not storage['arguments']['no_pkg_lookups']:
while True:
if len(packages):
# Verify packages that were given
print(_("Verifying that additional packages exist (this might take a few seconds)"))
valid, invalid = validate_package_list(packages)
if invalid:
warn(f"Some packages could not be found in the repository: {invalid}")
packages = read_packages(valid)
continue
break
return packages
def add_number_of_parrallel_downloads(input_number :Optional[int] = None) -> Optional[int]:
max_recommended = 5
print(_(f"This option enables the number of parallel downloads that can occur during package downloads"))
print(_("Enter the number of parallel downloads to be enabled.\n\nNote:\n"))
print(str(_(" - Maximum recommended value : {} ( Allows {} parallel downloads at a time )")).format(max_recommended, max_recommended))
print(_(" - Disable/Default : 0 ( Disables parallel downloading, allows only 1 download at a time )\n"))
while True:
try:
input_number = int(TextInput(_("[Default value: 0] > ")).run().strip() or 0)
if input_number <= 0:
input_number = 0
break
except:
print(str(_("Invalid input! Try again with a valid input [or 0 to disable]")).format(max_recommended))
pacman_conf_path = pathlib.Path("/etc/pacman.conf")
with pacman_conf_path.open() as f:
pacman_conf = f.read().split("\n")
with pacman_conf_path.open("w") as fwrite:
for line in pacman_conf:
if "ParallelDownloads" in line:
fwrite.write(f"ParallelDownloads = {input_number}\n") if not input_number == 0 else fwrite.write("#ParallelDownloads = 0\n")
else:
fwrite.write(f"{line}\n")
return input_number
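# Sketch of the pacman.conf rewrite performed above (lines are illustrative,
# not captured from a real system):
#
#     before:                          "#ParallelDownloads = 5"
#     after (user entered 3):          "ParallelDownloads = 3"
#     after (user entered 0 or blank): "#ParallelDownloads = 0"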
def select_additional_repositories(preset: List[str]) -> List[str]:
"""
Allows the user to select additional repositories (multilib, and testing) if desired.
:return: The string as a selected repository
:rtype: string
"""
repositories = ["multilib", "testing"]
choice = Menu(
_('Choose which optional additional repositories to enable'),
repositories,
sort=False,
multi=True,
preset_values=preset,
allow_reset=True
).run()
match choice.type_:
case MenuSelectionType.Skip: return preset
case MenuSelectionType.Reset: return []
case MenuSelectionType.Selection: return choice.single_value
return []
| null |
5,196 |
# Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
from mindspore import Tensor
from mindspore.ops import operations as P
import mindspore.nn as nn
import mindspore.context as context
class GatherNdNet(nn.Cell):
def __init__(self):
super(GatherNdNet, self).__init__()
self.gathernd = P.GatherNd()
def construct(self, x, indices):
return self.gathernd(x, indices)
def gathernd0(nptype):
x = Tensor(np.arange(3 * 2, dtype=nptype).reshape(3, 2))
indices = Tensor(np.array([[1, 1], [0, 1]]).astype(np.int32))
expect = np.array([3, 1]).astype(nptype)
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
gathernd = GatherNdNet()
output = gathernd(x, indices)
assert np.array_equal(output.asnumpy(), expect)
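# The expected result follows plain NumPy fancy indexing; a rough equivalent
# of the 2-D case above (illustrative only):
#
#     x_np = np.arange(3 * 2).reshape(3, 2)
#     idx = np.array([[1, 1], [0, 1]])
#     assert list(x_np[idx[:, 0], idx[:, 1]]) == [3, 1]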
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gathernd0_float64():
gathernd0(np.float64)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gathernd0_float32():
gathernd0(np.float32)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gathernd0_float16():
gathernd0(np.float16)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gathernd0_int32():
gathernd0(np.int32)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def METHOD_NAME():
gathernd0(np.int16)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gathernd0_uint8():
gathernd0(np.uint8)
def gathernd1(nptype):
x = Tensor(np.arange(2 * 3 * 4 * 5, dtype=nptype).reshape(2, 3, 4, 5))
indices = Tensor(np.array([[[[[l, k, j, i] for i in [1, 3, 4]] for j in range(4)]
for k in range(3)] for l in range(2)], dtype='i4'))
expect = np.array([[[[1., 3., 4.],
[6., 8., 9.],
[11., 13., 14.],
[16., 18., 19.]],
[[21., 23., 24.],
[26., 28., 29.],
[31., 33., 34.],
[36., 38., 39.]],
[[41., 43., 44.],
[46., 48., 49.],
[51., 53., 54.],
[56., 58., 59.]]],
[[[61., 63., 64.],
[66., 68., 69.],
[71., 73., 74.],
[76., 78., 79.]],
[[81., 83., 84.],
[86., 88., 89.],
[91., 93., 94.],
[96., 98., 99.]],
[[101., 103., 104.],
[106., 108., 109.],
[111., 113., 114.],
[116., 118., 119.]]]]).astype(nptype)
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
gather = GatherNdNet()
output = gather(x, indices)
assert np.array_equal(output.asnumpy(), expect)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gathernd1_float64():
gathernd1(np.float64)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gathernd1_float32():
gathernd1(np.float32)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gathernd1_float16():
gathernd1(np.float16)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gathernd1_int32():
gathernd1(np.int32)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gathernd1_int16():
gathernd1(np.int16)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gathernd1_uint8():
gathernd1(np.uint8)
def gathernd2(nptype):
x = Tensor(np.array([[4., 5., 4., 1., 5.],
[4., 9., 5., 6., 4.],
[9., 8., 4., 3., 6.],
[0., 4., 2., 2., 8.],
[1., 8., 6., 2., 8.],
[8., 1., 9., 7., 3.],
[7., 9., 2., 5., 7.],
[9., 8., 6., 8., 5.],
[3., 7., 2., 7., 4.],
                         [4., 2., 8., 2., 9.]]).astype(nptype))
indices = Tensor(np.array([[0], [1], [3]]).astype(np.int32))
expect = np.array([[4., 5., 4., 1., 5.],
[4., 9., 5., 6., 4.],
[0., 4., 2., 2., 8.]])
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
gathernd = GatherNdNet()
output = gathernd(x, indices)
assert np.array_equal(output.asnumpy(), expect)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gathernd2_float64():
gathernd2(np.float64)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gathernd2_float32():
gathernd2(np.float32)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gathernd2_float16():
gathernd2(np.float16)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gathernd2_int32():
gathernd2(np.int32)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gathernd2_int16():
gathernd2(np.int16)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gathernd2_uint8():
gathernd2(np.uint8)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gathernd_bool():
x = Tensor(np.array([[True, False], [False, False]]).astype(np.bool))
indices = Tensor(np.array([[0, 0], [0, 1], [1, 0], [1, 1]]).astype(np.int32))
expect = np.array([True, False, False, False]).astype(np.bool)
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
gathernd = GatherNdNet()
output = gathernd(x, indices)
assert np.array_equal(output.asnumpy(), expect)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gathernd_indices_int64():
x = Tensor(np.array([[True, False], [False, False]]).astype(np.bool))
indices = Tensor(np.array([[0, 0], [0, 1], [1, 0], [1, 1]]).astype(np.int64))
expect = np.array([True, False, False, False]).astype(np.bool)
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
gathernd = GatherNdNet()
output = gathernd(x, indices)
assert np.array_equal(output.asnumpy(), expect)
| null |
5,197 |
import json
from django.contrib import admin
from django.utils.html import format_html
from devilry.apps.core.models import AssignmentGroup, Subject, Period, Assignment, PeriodTag, \
CandidateAssignmentGroupHistory, ExaminerAssignmentGroupHistory, Examiner, RelatedStudent, RelatedExaminer, \
AssignmentGroupHistory, GroupInvite
from django.utils.translation import gettext_lazy
class ExaminerAdmin(admin.ModelAdmin):
pass
admin.site.register(Examiner, ExaminerAdmin)
class RelatedExaminerAdmin(admin.ModelAdmin):
pass
admin.site.register(RelatedExaminer, RelatedExaminerAdmin)
class RelatedStudentAdmin(admin.ModelAdmin):
pass
admin.site.register(RelatedStudent, RelatedStudentAdmin)
class BaseNodeAdmin(admin.ModelAdmin):
filter_horizontal = ['admins']
raw_id_fields = [
'parentnode',
]
    # Added between id, name and admins in :meth:`.get_list_display`.
list_display_middle = []
# Added to search_fields in :meth:`.get_search_fields`.
extra_search_fields = []
def get_search_fields(self, request):
return [
'id',
'short_name',
'long_name',
'admins__shortname',
'admins__fullname',
] + self.extra_search_fields
def METHOD_NAME(self, request):
return [
'id',
'short_name',
'long_name',
] + self.list_display_middle + [
'admins_as_string',
]
def admins_as_string(self, obj):
return ', '.join([user.shortname for user in obj.admins.all()])
admins_as_string.short_description = gettext_lazy("Admins")
def get_queryset(self, request):
return super(BaseNodeAdmin, self).get_queryset(request) \
.prefetch_related('admins')
class SubjectAdmin(BaseNodeAdmin):
raw_id_fields = []
admin.site.register(Subject, SubjectAdmin)
class PeriodAdmin(BaseNodeAdmin):
extra_search_fields = [
'parentnode__long_name',
'parentnode__short_name',
]
list_display_middle = [
'get_subject',
'start_time',
'end_time',
]
list_filter = [
'start_time',
'end_time',
]
def get_subject(self, obj):
return obj.subject.short_name
get_subject.short_description = gettext_lazy('Subject')
get_subject.admin_order_field = 'parentnode__short_name'
admin.site.register(Period, PeriodAdmin)
class AssignmentAdmin(BaseNodeAdmin):
extra_search_fields = [
'parentnode__long_name',
'parentnode__short_name',
'parentnode__parentnode__long_name',
'parentnode__parentnode__short_name',
]
list_display_middle = [
'get_subject',
'get_period',
'publishing_time',
'first_deadline',
]
list_filter = [
'anonymizationmode',
'publishing_time',
'first_deadline',
]
def get_subject(self, obj):
return obj.subject.short_name
get_subject.short_description = gettext_lazy('Subject')
get_subject.admin_order_field = 'parentnode__parentnode__short_name'
def get_period(self, obj):
return obj.period.short_name
get_period.short_description = gettext_lazy('Period')
get_period.admin_order_field = 'parentnode__short_name'
admin.site.register(Assignment, AssignmentAdmin)
class AssignmentGroupHistoryInline(admin.StackedInline):
model = AssignmentGroupHistory
extra = 0
exclude = ['merge_history_json']
readonly_fields = [
'get_merge_history_json_pretty',
]
def get_merge_history_json_pretty(self, obj):
return format_html(
'<pre>{}</pre>',
json.dumps(obj.merge_history, indent=2, sort_keys=True)
)
class AssignmentGroupAdmin(admin.ModelAdmin):
list_display = [
'id',
'get_subject',
'get_period',
'get_assignment',
'short_displayname',
'created_datetime',
]
search_fields = [
'id',
'parentnode__long_name',
'parentnode__short_name',
'parentnode__parentnode__long_name',
'parentnode__parentnode__short_name',
'parentnode__parentnode__parentnode__long_name',
'parentnode__parentnode__parentnode__short_name',
]
readonly_fields = [
'parentnode',
'feedback',
]
list_filter = [
'created_datetime',
]
raw_id_fields = [
'last_deadline',
'batchoperation',
'copied_from'
]
inlines = [
AssignmentGroupHistoryInline
]
def get_subject(self, obj):
return obj.subject.short_name
get_subject.short_description = gettext_lazy('Subject')
get_subject.admin_order_field = 'parentnode__parentnode__parentnode__short_name'
def get_period(self, obj):
return obj.period.short_name
get_period.short_description = gettext_lazy('Period')
get_period.admin_order_field = 'parentnode__parentnode__short_name'
def get_assignment(self, obj):
return obj.assignment.short_name
get_assignment.short_description = gettext_lazy('Assignment')
get_assignment.admin_order_field = 'parentnode__short_name'
def get_queryset(self, request):
return super(AssignmentGroupAdmin, self).get_queryset(request) \
.select_related('parentnode',
'parentnode__parentnode',
'parentnode__parentnode__parentnode')
admin.site.register(AssignmentGroup, AssignmentGroupAdmin)
class PeriodTagAdmin(admin.ModelAdmin):
raw_id_fields = ['period']
list_display = [
'id',
'prefix',
'tag',
'is_hidden',
]
filter_horizontal = [
'relatedstudents',
'relatedexaminers',
]
list_filter = [
'prefix'
]
admin.site.register(PeriodTag, PeriodTagAdmin)
class GroupInviteAdmin(admin.ModelAdmin):
raw_id_fields = [
'group',
'sent_by',
'sent_to'
]
list_display = [
'group',
'sent_by',
'sent_to',
'accepted',
'responded_datetime'
]
readonly_fields = [
'group',
'sent_by',
'sent_to',
'accepted',
'responded_datetime'
]
admin.site.register(GroupInvite, GroupInviteAdmin)
class CandidateAssignmentGroupHistoryAdmin(admin.ModelAdmin):
raw_id_fields = [
'assignment_group',
'user'
]
list_display = [
'assignment_group',
'user',
'is_add',
'created_datetime'
]
readonly_fields = [
'assignment_group',
'user',
'is_add',
'created_datetime'
]
admin.site.register(CandidateAssignmentGroupHistory, CandidateAssignmentGroupHistoryAdmin)
class ExaminerAssignmentGroupHistoryAdmin(admin.ModelAdmin):
raw_id_fields = [
'assignment_group',
'user'
]
list_display = [
'assignment_group',
'user',
'is_add',
'created_datetime'
]
readonly_fields = [
'assignment_group',
'user',
'is_add',
'created_datetime'
]
admin.site.register(ExaminerAssignmentGroupHistory, ExaminerAssignmentGroupHistoryAdmin)
| null |
5,198 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# pylint: disable=invalid-name
from mantid.simpleapi import *
class DarkRunCorrection(object):
"""
This class performs the dark run correction for ISIS SANS instruments
"""
def __init__(self):
super(DarkRunCorrection, self).__init__()
self._normalization_extractor = DarkRunNormalizationExtractor()
# Should we look at a mean value of the dark count over all pixels.
# Only applicable if the data is uniform
self._use_mean = False
        # Should we use time logs or uamph logs to calculate the normalization ratio.
        # In the former case we treat the dark run signal as uniform, ie constant
        # (except for stat. fluctuations) over time. In the latter case it is treated
# as non-uniform
self._use_time = True
# Should we use the detectors
self._use_detectors = True
        # Should we use the monitors
self._use_monitors = False
# Which monitor numbers should be used
self._mon_numbers = []
def _reset_settings(self):
self._use_mean = False
self._use_time = True
self._use_detectors = True
self._use_monitors = False
self._mon_numbers = []
def set_use_mean(self, use_mean):
self._use_mean = use_mean
def set_use_time(self, use_time):
self._use_time = use_time
def set_use_detectors(self, use_detectors):
self._use_detectors = use_detectors
def set_use_monitors(self, use_monitors):
self._use_monitors = use_monitors
def set_mon_numbers(self, mon_numbers):
if mon_numbers is None:
self._mon_numbers = []
else:
self._mon_numbers = mon_numbers
def execute(self, scatter_workspace, dark_run):
"""
Perform the dark run correction.
@param scatter_workspace: the workspace which needs correcting
@param dark_run: the dark run
"""
# Get the normalization ratio from the workspaces
normalization_ratio = self._normalization_extractor.extract_normalization(scatter_workspace, dark_run, self._use_time)
# Run the correction algorithm with the user settings
corrected_ws_name = scatter_workspace.name() + "_dark_workspace_corrected"
alg_dark = AlgorithmManager.createUnmanaged("SANSDarkRunBackgroundCorrection")
alg_dark.initialize()
alg_dark.setChild(True)
alg_dark.setProperty("InputWorkspace", scatter_workspace)
alg_dark.setProperty("DarkRun", dark_run)
alg_dark.setProperty("Mean", self._use_mean)
alg_dark.setProperty("Uniform", self._use_time) # If we use time, then it is uniform
alg_dark.setProperty("NormalizationRatio", normalization_ratio)
alg_dark.setProperty("ApplyToDetectors", self._use_detectors)
alg_dark.setProperty("ApplyToMonitors", self._use_monitors)
alg_dark.setProperty("SelectedMonitors", self._mon_numbers)
alg_dark.setProperty("OutputWorkspace", corrected_ws_name)
alg_dark.execute()
# Make sure that we forget about the original settings
self._reset_settings()
return alg_dark.getProperty("OutputWorkspace").value
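# A minimal usage sketch (workspace handles are hypothetical; the defaults
# already normalise by measurement time and apply to detectors only):
#
#     correction = DarkRunCorrection()
#     correction.set_use_time(True)
#     correction.set_use_detectors(True)
#     corrected = correction.execute(scatter_workspace, dark_run_workspace)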
# pylint: disable=too-few-public-methods
class DarkRunNormalizationExtractor(object):
"""
    Extracts the normalization ratio from the scatter workspace
    and the dark run workspace. The normalization ratio
    can be calculated either as a ratio of good proton charges or
    as a ratio of measurement times.
"""
def __init__(self):
super(DarkRunNormalizationExtractor, self).__init__()
def extract_normalization(self, scatter_workspace, dark_run, use_time=True):
"""
Extract the normalization by either looking at the time duration of the measurement (good_frames)
or by looking at the time of the good charge (good_uah_log)
"""
if use_time:
normalization = self._extract_normalization_from_time(scatter_workspace, dark_run)
else:
normalization = self._extract_normalization_from_charge(scatter_workspace, dark_run)
return normalization
def _extract_normalization_from_charge(self, scatter_workspace, dark_run):
"""
        We get the normalization ratio from the gd_prtn_chrg entries.
@param scatter_workspace: the scatter workspace
@param dark_run: the dark run
@returns a normalization factor for good proton charges
"""
scatter_proton_charge = self._get_good_proton_charge(scatter_workspace)
dark_proton_charge = self._get_good_proton_charge(dark_run)
return scatter_proton_charge / dark_proton_charge
def _get_good_proton_charge(self, workspace):
"""
Get the good proton charge
@param workspace: the workspace from which to extract
@returns the proton charge
"""
log_entry = "gd_prtn_chrg"
run = workspace.getRun()
if not run.hasProperty(log_entry):
raise RuntimeError(
"DarkRunCorrection: The workspace does not have a "
+ log_entry
+ "log entry. This is required for calculating the noramlization"
"of the dark run."
)
entry = run.getProperty(log_entry)
return entry.value
def _extract_normalization_from_time(self, scatter_workspace, dark_run):
"""
Create a normalization ratio based on the duration.
@param scatter_workspace: the scatter workspace
@param dark_run: the dark run
@returns a normalization factor based on good frames
"""
scatter_time = self._get_duration_for_frames(scatter_workspace)
dark_time = self._get_duration_for_frames(dark_run)
return scatter_time / dark_time
def _get_duration_for_frames(self, workspace):
"""
Extract the time duration from the logs.
@param workspace: the workspace to extract from
@returns the duration
"""
log_entry = "good_frames"
run = workspace.getRun()
if not run.hasProperty(log_entry):
raise RuntimeError(
"DarkRunCorrection: The workspace does not have a "
+ log_entry
+ "log entry. This is required for calculating the noramlization"
"of the dark run."
)
prop = run.getProperty(log_entry)
frame_time = self.METHOD_NAME(workspace)
number_of_frames = self._get_number_of_good_frames(prop)
return frame_time * number_of_frames
def METHOD_NAME(self, workspace):
"""
Get the time of a frame. Look into the first histogram only.
@param workspace: the workspace from which extract the frame time
"""
return workspace.dataX(0)[-1] - workspace.dataX(0)[0]
def _get_number_of_good_frames(self, prop):
"""
Get the number of good frames.
@param prop: the property from which we extract the frames
@returns the number of good frames
"""
# Since we are dealing with a cumulative sample log, we can extract
# the total number of good frames by looking at the last frame
frames = prop.value
return frames[-1]
| null |
5,199 |
# Copyright 2019 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OpenStack Security Audit code"""
import collections
from enum import Enum
import traceback
from charmhelpers.core.host import cmp_pkgrevno
import charmhelpers.contrib.openstack.utils as openstack_utils
import charmhelpers.core.hookenv as hookenv
class AuditType(Enum):
OpenStackSecurityGuide = 1
_audits = {}
Audit = collections.namedtuple('Audit', 'func filters')
def audit(*args):
"""Decorator to register an audit.
These are used to generate audits that can be run on a
deployed system that matches the given configuration
:param args: List of functions to filter tests against
:type args: List[Callable[Dict]]
"""
def wrapper(f):
test_name = f.__name__
if _audits.get(test_name):
raise RuntimeError(
"Test name '{}' used more than once"
.format(test_name))
non_callables = [fn for fn in args if not callable(fn)]
if non_callables:
raise RuntimeError(
"Configuration includes non-callable filters: {}"
.format(non_callables))
_audits[test_name] = Audit(func=f, filters=args)
return f
return wrapper
def is_audit_type(*args):
"""This audit is included in the specified kinds of audits.
:param *args: List of AuditTypes to include this audit in
:type args: List[AuditType]
:rtype: Callable[Dict]
"""
def _is_audit_type(audit_options):
if audit_options.get('audit_type') in args:
return True
else:
return False
return _is_audit_type
def since_package(pkg, pkg_version):
"""This audit should be run after the specified package version (incl).
:param pkg: Package name to compare
:type pkg: str
    :param pkg_version: The package version
    :type pkg_version: str
:rtype: Callable[Dict]
"""
def _since_package(audit_options=None):
return cmp_pkgrevno(pkg, pkg_version) >= 0
return _since_package
def before_package(pkg, pkg_version):
"""This audit should be run before the specified package version (excl).
:param pkg: Package name to compare
:type pkg: str
    :param pkg_version: The package version
    :type pkg_version: str
:rtype: Callable[Dict]
"""
def _before_package(audit_options=None):
return not since_package(pkg, pkg_version)()
return _before_package
def since_openstack_release(pkg, release):
"""This audit should run after the specified OpenStack version (incl).
:param pkg: Package name to compare
:type pkg: str
:param release: The OpenStack release codename
:type release: str
:rtype: Callable[Dict]
"""
def _since_openstack_release(audit_options=None):
_release = openstack_utils.get_os_codename_package(pkg)
return openstack_utils.CompareOpenStackReleases(_release) >= release
return _since_openstack_release
def before_openstack_release(pkg, release):
"""This audit should run before the specified OpenStack version (excl).
:param pkg: Package name to compare
:type pkg: str
:param release: The OpenStack release codename
:type release: str
:rtype: Callable[Dict]
"""
def _before_openstack_release(audit_options=None):
return not since_openstack_release(pkg, release)()
return _before_openstack_release
def it_has_config(config_key):
"""This audit should be run based on specified config keys.
:param config_key: Config key to look for
:type config_key: str
:rtype: Callable[Dict]
"""
def _it_has_config(audit_options):
return audit_options.get(config_key) is not None
return _it_has_config
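# A minimal registration sketch (the check body, helper and package/version
# are hypothetical, not part of this module). Checks report failure by
# raising AssertionError; any other exception is recorded as an error by the
# runner below:
#
#     @audit(is_audit_type(AuditType.OpenStackSecurityGuide),
#            since_package('keystone', '2:17.0.0'))
#     def check_keystone_config_owned_by_root(audit_options):
#         assert _owned_by_root('/etc/keystone/keystone.conf'), \
#             "keystone.conf should be owned by root"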
def METHOD_NAME(audit_options):
"""Run the configured audits with the specified audit_options.
:param audit_options: Configuration for the audit
:type audit_options: Config
:rtype: Dict[str, str]
"""
errors = {}
results = {}
for name, audit in sorted(_audits.items()):
result_name = name.replace('_', '-')
if result_name in audit_options.get('excludes', []):
print(
"Skipping {} because it is"
"excluded in audit config"
.format(result_name))
continue
if all(p(audit_options) for p in audit.filters):
try:
audit.func(audit_options)
print("{}: PASS".format(name))
results[result_name] = {
'success': True,
}
except AssertionError as e:
print("{}: FAIL ({})".format(name, e))
results[result_name] = {
'success': False,
'message': e,
}
except Exception as e:
print("{}: ERROR ({})".format(name, e))
errors[name] = e
results[result_name] = {
'success': False,
'message': e,
}
for name, error in errors.items():
print("=" * 20)
print("Error in {}: ".format(name))
traceback.print_tb(error.__traceback__)
print()
return results
def action_parse_results(result):
"""Parse the result of `run` in the context of an action.
:param result: The result of running the security-checklist
action on a unit
:type result: Dict[str, Dict[str, str]]
:rtype: int
"""
passed = True
for test, result in result.items():
if result['success']:
hookenv.action_set({test: 'PASS'})
else:
hookenv.action_set({test: 'FAIL - {}'.format(result['message'])})
passed = False
if not passed:
hookenv.action_fail("One or more tests failed")
return 0 if passed else 1
| null |