id (int64, 0-6k) | code (stringlengths 4k-8k) | code_compressed (null)
---|---|---|
5,300 |
# Copyright 2023 IOTA Stiftung
# SPDX-License-Identifier: Apache-2.0
from iota_sdk.types.block import Block, BlockMetadata
from iota_sdk.types.common import HexStr
from iota_sdk.types.node_info import NodeInfo, NodeInfoWrapper
from iota_sdk.types.output import OutputWithMetadata, OutputMetadata
from iota_sdk.types.output_id import OutputId
from iota_sdk.types.payload import MilestonePayload
from iota_sdk.types.utxo_changes import UtxoChanges
from typing import List, Union
from dacite import from_dict
class NodeCoreAPI():
"""Node core API.
"""
def get_health(self, url: str):
""" Get node health.
Args:
url: The node's url.
"""
return self._call_method('getHealth', {
'url': url
})
def get_node_info(self, url: str, auth=None) -> NodeInfo:
"""Get node info.
Args:
url: The node's url.
auth: A JWT or username/password authentication object.
"""
return from_dict(NodeInfo, self._call_method('getNodeInfo', {
'url': url,
'auth': auth
}))
def get_info(self) -> NodeInfoWrapper:
"""Return node information together with the url of the used node.
"""
return from_dict(NodeInfoWrapper, self._call_method('getInfo'))
def get_peers(self):
"""Get the peers of the node.
"""
return self._call_method('getPeers')
def get_tips(self) -> List[HexStr]:
"""Request tips from the node.
"""
return self._call_method('getTips')
def post_block(self, block: Block) -> HexStr:
"""Post a block.
Args:
block: The block to post.
Returns:
The block id of the posted block.
"""
return self._call_method('postBlock', {
'block': block.__dict__
})
def get_block_data(self, block_id: HexStr) -> Block:
"""Get the block corresponding to the given block id.
"""
return Block.from_dict(self._call_method('getBlock', {
'blockId': block_id
}))
def get_block_metadata(self, block_id: HexStr) -> BlockMetadata:
"""Get the block metadata corresponding to the given block id.
"""
return BlockMetadata.from_dict(self._call_method('getBlockMetadata', {
'blockId': block_id
}))
def METHOD_NAME(self, block_id: HexStr) -> List[int]:
"""Get the raw bytes of the block corresponding to the given block id.
"""
return self._call_method('getBlockRaw', {
'blockId': block_id
})
def post_block_raw(self, block_bytes: List[int]) -> HexStr:
"""Post a block as raw bytes.
Returns:
The corresponding block id of the block.
"""
return self._call_method('postBlockRaw', {
'blockBytes': block_bytes
})
def get_output(
self, output_id: Union[OutputId, HexStr]) -> OutputWithMetadata:
"""Get the output corresponding to the given output id.
Returns:
The output itself with its metadata.
"""
output_id_str = output_id.output_id if isinstance(
output_id, OutputId) else output_id
return from_dict(OutputWithMetadata, self._call_method('getOutput', {
'outputId': output_id_str
}))
def get_output_metadata(
self, output_id: Union[OutputId, HexStr]) -> OutputMetadata:
"""Get the output metadata corresponding to the given output id.
Returns:
The output metadata.
"""
output_id_str = output_id.output_id if isinstance(
output_id, OutputId) else output_id
return from_dict(OutputMetadata, self._call_method('getOutputMetadata', {
'outputId': output_id_str
}))
def get_milestone_by_id(self, milestone_id: HexStr) -> MilestonePayload:
"""Get the milestone corresponding to the given milestone id.
Returns:
The milestone payload.
"""
result = self._call_method('getMilestoneById', {
'milestoneId': milestone_id
})
return MilestonePayload.from_dict(result)
def get_milestone_by_id_raw(self, milestone_id: HexStr) -> List[int]:
"""Get the raw bytes of the milestone corresponding to the given milestone id.
Returns:
The raw bytes of the milestone.
"""
return self._call_method('getMilestoneByIdRaw', {
'milestoneId': milestone_id
})
def get_milestone_by_index(self, index: int) -> MilestonePayload:
"""Get the milestone by the given milestone index.
Returns:
The milestone payload.
"""
result = self._call_method('getMilestoneByIndex', {
'index': index
})
return MilestonePayload.from_dict(result)
def get_milestone_by_index_raw(self, index: int) -> List[int]:
"""Get the raw bytes of the milestone corresponding to the given milestone index.
Returns:
The raw bytes of the milestone.
"""
return self._call_method('getMilestoneByIndexRaw', {
'index': index
})
def get_utxo_changes_by_id(self, milestone_id: HexStr) -> UtxoChanges:
"""Get the UTXO changes applied in the given milestone.
"""
return from_dict(UtxoChanges, self._call_method('getUtxoChangesById', {
'milestoneId': milestone_id
}))
def get_utxo_changes_by_index(self, index: int) -> UtxoChanges:
"""Get the UTXO changes applied at the given milestone index.
"""
return from_dict(UtxoChanges, self._call_method('getUtxoChangesByIndex', {
'index': index
}))
def get_receipts(self):
"""Get all receipts.
"""
return self._call_method('getReceipts')
def get_receipts_migrated_at(self, milestone_index: int):
"""Get the receipts that were migrated at the given milestone index.
"""
return self._call_method('getReceiptsMigratedAt', {
'milestoneIndex': milestone_index
})
def get_treasury(self):
"""Get the treasury output.
"""
return self._call_method('getTreasury')
def get_included_block(self, transaction_id: HexStr) -> Block:
"""Returns the included block of the given transaction.
Returns:
The included block.
"""
return Block.from_dict(self._call_method('getIncludedBlock', {
'transactionId': transaction_id
}))
def get_included_block_metadata(
self, transaction_id: HexStr) -> BlockMetadata:
"""Returns the metadata of the included block of the given transaction.
Returns:
The metadata of the included block.
"""
return BlockMetadata.from_dict(self._call_method('getIncludedBlockMetadata', {
'transactionId': transaction_id
}))
def call_plugin_route(self, base_plugin_path: str, method: str,
endpoint: str, query_params: [str] = None, request: str = None):
"""Extension method which provides request methods for plugins.
Args:
base_plugin_path: The base path of the routes provided by the plugin.
method: The HTTP method.
endpoint: The endpoint to query provided by the plugin.
query_params: The parameters of the query.
request: The request object sent to the endpoint of the plugin.
"""
if query_params is None:
query_params = []
return self._call_method('callPluginRoute', {
'basePluginPath': base_plugin_path,
'method': method,
'endpoint': endpoint,
'queryParams': query_params,
'request': request,
})
| null |
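The `get_output` and `get_output_metadata` methods above normalize a `Union[OutputId, HexStr]` argument before forwarding it to the node. A minimal standalone sketch of that normalization pattern, with a hypothetical stand-in for the real `OutputId` class:

```python
from typing import Union


class OutputId:
    """Hypothetical stand-in for iota_sdk.types.output_id.OutputId."""

    def __init__(self, output_id: str):
        self.output_id = output_id


def normalize_output_id(output_id: Union[OutputId, str]) -> str:
    # Accept either the wrapper object or a plain hex string, as get_output() does.
    return output_id.output_id if isinstance(output_id, OutputId) else output_id


assert normalize_output_id("0xdeadbeef0100") == "0xdeadbeef0100"
assert normalize_output_id(OutputId("0xdeadbeef0100")) == "0xdeadbeef0100"
```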
5,301 |
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for standard TFX Artifact types."""
import math
from typing import Any, Dict
from unittest import mock
import absl
import tensorflow as tf
from tfx.types import standard_artifacts
from tfx.utils import json_utils
# Define constant value for tests.
_TEST_BYTE_RAW = b'hello world'
_TEST_BYTE_DECODED = b'hello world'
_TEST_STRING_RAW = b'hello world'
_TEST_STRING_DECODED = u'hello world'
_TEST_BOOL_RAW = b'1'
_TEST_BOOL_DECODED = True
_TEST_INT_RAW = b'19260817'
_TEST_INT_DECODED = 19260817
_TEST_FLOAT_RAW = b'3.1415926535'
_TEST_FLOAT_DECODED = 3.1415926535
_TEST_FLOAT128_RAW = b'3.14159265358979323846264338327950288'
_TEST_FLOAT128 = 3.14159265358979323846264338327950288 # Too precise
_TEST_JSONVALUE_LIST_RAW = '[42, 42.0]'
_TEST_JSONVALUE_LIST_DECODED = [42, 42.0]
_TEST_JSONVALUE_DICT_RAW = '{\"x\": 42}'
_TEST_JSONVALUE_DICT_DECODED = {'x': 42}
class TestJsonableCls(json_utils.Jsonable):
"""A test class that implements the Jsonable interface."""
def __init__(self, x):
self._x = x
def METHOD_NAME(self) -> Dict[str, Any]:
return {'x': self._x}
@classmethod
def from_json_dict(cls, dict_data: Dict[str, Any]) -> 'TestJsonableCls':
return TestJsonableCls(dict_data['x'])
def __eq__(self, other):
return isinstance(other, TestJsonableCls) and other._x == self._x
_TEST_JSONVALUE_OBJ_RAW = (
'{\"__class__\": \"TestJsonableCls\", \"__module__\":'
' \"__main__\", \"__tfx_object_type__\": '
'\"jsonable\", \"x\": 42}')
_TEST_JSONVALUE_OBJ_DECODED = TestJsonableCls(42)
class StandardArtifactsTest(tf.test.TestCase):
def testUseTfxType(self):
instance = standard_artifacts.ExampleStatistics()
self.assertIsInstance(instance, standard_artifacts.ExampleStatistics)
def testBytesType(self):
instance = standard_artifacts.Bytes()
self.assertEqual(_TEST_BYTE_RAW, instance.encode(_TEST_BYTE_DECODED))
self.assertEqual(_TEST_BYTE_DECODED, instance.decode(_TEST_BYTE_RAW))
def testStringType(self):
instance = standard_artifacts.String()
self.assertEqual(_TEST_STRING_RAW, instance.encode(_TEST_STRING_DECODED))
self.assertEqual(_TEST_STRING_DECODED, instance.decode(_TEST_STRING_RAW))
def testBoolType(self):
instance = standard_artifacts.Boolean()
self.assertEqual(_TEST_BOOL_RAW, instance.encode(_TEST_BOOL_DECODED))
self.assertEqual(_TEST_BOOL_DECODED, instance.decode(_TEST_BOOL_RAW))
def testIntegerType(self):
instance = standard_artifacts.Integer()
self.assertEqual(_TEST_INT_RAW, instance.encode(_TEST_INT_DECODED))
self.assertEqual(_TEST_INT_DECODED, instance.decode(_TEST_INT_RAW))
def testFloatType(self):
instance = standard_artifacts.Float()
self.assertEqual(_TEST_FLOAT_RAW, instance.encode(_TEST_FLOAT_DECODED))
self.assertAlmostEqual(_TEST_FLOAT_DECODED,
instance.decode(_TEST_FLOAT_RAW))
def testJsonValueList(self):
instance = standard_artifacts.JsonValue()
self.assertEqual(_TEST_JSONVALUE_LIST_RAW,
instance.encode(_TEST_JSONVALUE_LIST_DECODED))
self.assertEqual(_TEST_JSONVALUE_LIST_DECODED,
instance.decode(_TEST_JSONVALUE_LIST_RAW))
def testJsonValueDict(self):
instance = standard_artifacts.JsonValue()
self.assertEqual(_TEST_JSONVALUE_DICT_RAW,
instance.encode(_TEST_JSONVALUE_DICT_DECODED))
self.assertEqual(_TEST_JSONVALUE_DICT_DECODED,
instance.decode(_TEST_JSONVALUE_DICT_RAW))
def testJsonValueObj(self):
instance = standard_artifacts.JsonValue()
self.assertEqual(_TEST_JSONVALUE_OBJ_RAW,
instance.encode(_TEST_JSONVALUE_OBJ_DECODED))
self.assertEqual(_TEST_JSONVALUE_OBJ_DECODED,
instance.decode(_TEST_JSONVALUE_OBJ_RAW))
@mock.patch('absl.logging.warning')
def testFloatTypePrecisionLossWarning(self, *unused_mocks):
instance = standard_artifacts.Float()
# TODO(b/156776413): with self.assertWarnsRegex('lost precision'):
self.assertAlmostEqual(
instance.decode(_TEST_FLOAT128_RAW), _TEST_FLOAT128)
# Lost precision warning
absl.logging.warning.assert_called_once()
@mock.patch('absl.logging.warning')
def testFloatInfNanEncodingWarning(self, *unused_mocks):
instance = standard_artifacts.Float()
instance.encode(float('inf'))
# Non-portable encoding warning
absl.logging.warning.assert_called_once()
def testSpecialFloatValues(self):
coder = standard_artifacts.Float()
positive_infinity_float = float('inf')
negative_infinity_float = float('-inf')
nan_float = float('nan')
encoded_positive_infinity = coder.encode(positive_infinity_float)
encoded_negative_infinity = coder.encode(negative_infinity_float)
encoded_nan = coder.encode(nan_float)
decoded_positive_infinity = coder.decode(encoded_positive_infinity)
decoded_negative_infinity = coder.decode(encoded_negative_infinity)
decoded_nan = coder.decode(encoded_nan)
self.assertEqual(encoded_positive_infinity, b'Infinity')
self.assertEqual(encoded_negative_infinity, b'-Infinity')
self.assertEqual(encoded_nan, b'NaN')
self.assertEqual(decoded_positive_infinity, positive_infinity_float)
self.assertEqual(decoded_negative_infinity, negative_infinity_float)
self.assertTrue(math.isinf(decoded_positive_infinity))
self.assertTrue(math.isinf(decoded_negative_infinity))
self.assertTrue(math.isnan(decoded_nan))
def testExamples(self):
with self.subTest('Initial state'):
examples = standard_artifacts.Examples()
self.assertEqual(examples.split_names, '')
self.assertEmpty(examples.splits)
with self.subTest('Empty splits'):
examples = standard_artifacts.Examples()
examples.splits = []
self.assertEqual(examples.split_names, '[]')
self.assertEmpty(examples.splits)
with self.subTest('Single split'):
examples = standard_artifacts.Examples()
examples.splits = ['train']
self.assertEqual(examples.split_names, '["train"]')
with self.subTest('Multiple splits'):
examples = standard_artifacts.Examples()
examples.splits = ['train', 'validation', 'test']
self.assertEqual(examples.split_names, '["train", "validation", "test"]')
with self.subTest('Invalid splits'):
examples = standard_artifacts.Examples()
with self.assertRaises(ValueError):
examples.splits = ['train', '_validation'] # Should not start with _.
with self.assertRaises(TypeError):
examples.splits = '["train", "validation"]' # Should be Sequence[str]
with self.subTest('Split path'):
examples = standard_artifacts.Examples()
examples.uri = '/test'
examples.splits = ['train']
self.assertEqual(examples.path(split='train'), '/test/Split-train')
with self.assertRaises(ValueError):
examples.path(split='non-existing')
if __name__ == '__main__':
tf.test.main()
| null |
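The special-float assertions above imply a coder that serializes floats as UTF-8 text, using JSON-style spellings for non-finite values. A minimal sketch of a coder with that observable behaviour (not the actual `standard_artifacts.Float` implementation):

```python
import math


def encode_float(value: float) -> bytes:
    # Non-finite values get the spellings asserted in testSpecialFloatValues.
    if math.isnan(value):
        return b'NaN'
    if math.isinf(value):
        return b'Infinity' if value > 0 else b'-Infinity'
    return str(value).encode('utf-8')


def decode_float(raw: bytes) -> float:
    # float() accepts the 'Infinity', '-Infinity' and 'NaN' spellings directly.
    return float(raw.decode('utf-8'))


assert encode_float(float('inf')) == b'Infinity'
assert math.isnan(decode_float(b'NaN'))
assert decode_float(encode_float(3.1415926535)) == 3.1415926535
```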
5,302 |
import mock
import textwrap
import uuid
import charmhelpers.contrib.network.ovs.ovsdb as ovsdb
import tests.utils as test_utils
VSCTL_BRIDGE_TBL = textwrap.dedent("""
{"data":[[["uuid","1e21ba48-61ff-4b32-b35e-cb80411da351"],
["set",[]],["set",[]],"0000a0369fdd3890","","<unknown>",
["map",[["charm-ovn-chassis","managed"],["other","value"]]],
["set",[]],["set",[]],["map",[]],["set",[]],false,["set",[]],
"br-test",["set",[]],["map",[]],["set",
[["uuid","617f9359-77e2-41be-8af6-4c44e7a6bcc3"],
["uuid","da840476-8809-4107-8733-591f4696f056"]]],
["set",["OpenFlow10","OpenFlow13","OpenFlow14"]],false,["map",[]],
["set",[]],["map",[]],false],
[["uuid","bb685b0f-a383-40a1-b7a5-b5c2066bfa42"],
["set",[]],["set",[]],"00000e5b68bba140","","<unknown>",
["map",[]],"secure",["set",[]],["map",[]],["set",[]],false,
["set",[]],"br-int",["set",[]],["map",[["disable-in-band","true"]]],
["set",[["uuid","07f4c231-9fd2-49b0-a558-5b69d657fdb0"],
["uuid","8bbd2441-866f-4317-a284-09491702776c"],
["uuid","d9e9c081-6482-4006-b7d6-239182b56c2e"]]],
["set",[]],false,["map",[]],["set",[]],["map",[]],false]],
"headings":["_uuid","auto_attach","controller","datapath_id",
"datapath_type","datapath_version","external_ids","fail_mode",
"flood_vlans","flow_tables","ipfix","mcast_snooping_enable",
"mirrors","name","netflow","other_config","ports","protocols",
"rstp_enable","rstp_status","sflow","status","stp_enable"]}
""")
VSCTL_BRIDGE_TBL_DESERIALIZED = {
'_uuid': uuid.UUID('1e21ba48-61ff-4b32-b35e-cb80411da351'),
'auto_attach': [],
'controller': [],
'datapath_id': '0000a0369fdd3890',
'datapath_type': '',
'datapath_version': '<unknown>',
'external_ids': {
'charm-ovn-chassis': 'managed',
'other': 'value',
},
'fail_mode': [],
'flood_vlans': [],
'flow_tables': {},
'ipfix': [],
'mcast_snooping_enable': False,
'mirrors': [],
'name': 'br-test',
'netflow': [],
'other_config': {},
'ports': [uuid.UUID('617f9359-77e2-41be-8af6-4c44e7a6bcc3'),
uuid.UUID('da840476-8809-4107-8733-591f4696f056')],
'protocols': ['OpenFlow10', 'OpenFlow13', 'OpenFlow14'],
'rstp_enable': False,
'rstp_status': {},
'sflow': [],
'status': {},
'stp_enable': False,
}
class TestSimpleOVSDB(test_utils.BaseTestCase):
def patch_target(self, attr, return_value=None):
mocked = mock.patch.object(self.target, attr)
self._patches[attr] = mocked
started = mocked.start()
started.return_value = return_value
self._patches_start[attr] = started
setattr(self, attr, started)
def METHOD_NAME(self):
with self.assertRaises(RuntimeError):
self.target = ovsdb.SimpleOVSDB('atool')
with self.assertRaises(AttributeError):
self.target = ovsdb.SimpleOVSDB('ovs-vsctl')
self.target.unknown_table.find()
def test__find_tbl(self):
self.target = ovsdb.SimpleOVSDB('ovs-vsctl')
self.patch_object(ovsdb.utils, '_run')
self._run.return_value = VSCTL_BRIDGE_TBL
self.maxDiff = None
# this in effect also tests the __iter__ front end method
for el in self.target.bridge:
self.assertDictEqual(el, VSCTL_BRIDGE_TBL_DESERIALIZED)
break
self._run.assert_called_once_with(
'ovs-vsctl', '-f', 'json', 'find', 'bridge')
self._run.reset_mock()
# this in effect also tests the find front end method
for el in self.target.bridge.find(condition='name=br-test'):
break
self._run.assert_called_once_with(
'ovs-vsctl', '-f', 'json', 'find', 'bridge', 'name=br-test')
# check the optional args parameter
self._run.reset_mock()
self.target = ovsdb.SimpleOVSDB('ovs-vsctl', args=['extra', 'args'])
for el in self.target.bridge.find(condition='name=br-test'):
break
self._run.assert_called_once_with(
'ovs-vsctl', 'extra', 'args',
'-f', 'json', 'find', 'bridge', 'name=br-test')
def test__list_tbl_record(self):
self.target = ovsdb.SimpleOVSDB('ovs-vsctl')
self.patch_object(ovsdb.utils, '_run')
self._run.return_value = VSCTL_BRIDGE_TBL
self.maxDiff = None
# this in effect also tests the __getitem__ front end method
self.assertEqual(
VSCTL_BRIDGE_TBL_DESERIALIZED,
self.target.bridge[
uuid.UUID('1e21ba48-61ff-4b32-b35e-cb80411da351')])
self._run.assert_called_once_with(
'ovs-vsctl', '-f', 'json', 'list', 'bridge',
'1e21ba48-61ff-4b32-b35e-cb80411da351')
def test_clear(self):
self.target = ovsdb.SimpleOVSDB('ovs-vsctl')
self.patch_object(ovsdb.utils, '_run')
self.target.interface.clear('1e21ba48-61ff-4b32-b35e-cb80411da351',
'external_ids')
self._run.assert_called_once_with(
'ovs-vsctl', 'clear', 'interface',
'1e21ba48-61ff-4b32-b35e-cb80411da351', 'external_ids')
def test_remove(self):
self.target = ovsdb.SimpleOVSDB('ovs-vsctl')
self.patch_object(ovsdb.utils, '_run')
self.target.interface.remove('1e21ba48-61ff-4b32-b35e-cb80411da351',
'external_ids', 'other')
self._run.assert_called_once_with(
'ovs-vsctl', 'remove', 'interface',
'1e21ba48-61ff-4b32-b35e-cb80411da351', 'external_ids', 'other')
def test_set(self):
self.target = ovsdb.SimpleOVSDB('ovs-vsctl')
self.patch_object(ovsdb.utils, '_run')
self.target.interface.set('1e21ba48-61ff-4b32-b35e-cb80411da351',
'external_ids:other', 'value')
self._run.assert_called_once_with(
'ovs-vsctl', 'set', 'interface',
'1e21ba48-61ff-4b32-b35e-cb80411da351', 'external_ids:other=value')
| null |
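`VSCTL_BRIDGE_TBL_DESERIALIZED` above shows how the `ovs-vsctl -f json` wire format is expected to decode: `["uuid", ...]` becomes a `uuid.UUID`, `["set", [...]]` a list, and `["map", [...]]` a dict. A rough standalone decoder for that convention (a sketch, not charmhelpers' actual implementation):

```python
import json
import uuid


def decode_value(value):
    """Decode one cell of `ovs-vsctl -f json` output."""
    if isinstance(value, list) and len(value) == 2:
        kind, payload = value
        if kind == 'uuid':
            return uuid.UUID(payload)
        if kind == 'set':
            return [decode_value(v) for v in payload]
        if kind == 'map':
            return {k: decode_value(v) for k, v in payload}
    return value


def decode_table(raw_json):
    tbl = json.loads(raw_json)
    for row in tbl['data']:
        yield dict(zip(tbl['headings'], (decode_value(v) for v in row)))


# e.g. next(decode_table(VSCTL_BRIDGE_TBL))['name'] == 'br-test'
```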
5,303 |
import sys
from collections import defaultdict
class Heap:
def __init__(self):
self.node_position = []
def get_position(self, vertex):
return self.node_position[vertex]
def METHOD_NAME(self, vertex, pos):
self.node_position[vertex] = pos
def top_to_bottom(self, heap, start, size, positions):
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
smallest_child = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
smallest_child = 2 * start + 1
else:
smallest_child = 2 * start + 2
if heap[smallest_child] < heap[start]:
temp, temp1 = heap[smallest_child], positions[smallest_child]
heap[smallest_child], positions[smallest_child] = (
heap[start],
positions[start],
)
heap[start], positions[start] = temp, temp1
temp = self.get_position(positions[smallest_child])
self.METHOD_NAME(
positions[smallest_child], self.get_position(positions[start])
)
self.METHOD_NAME(positions[start], temp)
self.top_to_bottom(heap, smallest_child, size, positions)
# Update function if value of any node in min-heap decreases
def bottom_to_top(self, val, index, heap, position):
temp = position[index]
while index != 0:
parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
if val < heap[parent]:
heap[index] = heap[parent]
position[index] = position[parent]
self.METHOD_NAME(position[parent], index)
else:
heap[index] = val
position[index] = temp
self.METHOD_NAME(temp, index)
break
index = parent
else:
heap[0] = val
position[0] = temp
self.METHOD_NAME(temp, 0)
def heapify(self, heap, positions):
start = len(heap) // 2 - 1
for i in range(start, -1, -1):
self.top_to_bottom(heap, i, len(heap), positions)
def delete_minimum(self, heap, positions):
temp = positions[0]
heap[0] = sys.maxsize
self.top_to_bottom(heap, 0, len(heap), positions)
return temp
def prisms_algorithm(adjacency_list):
"""
>>> adjacency_list = {0: [[1, 1], [3, 3]],
... 1: [[0, 1], [2, 6], [3, 5], [4, 1]],
... 2: [[1, 6], [4, 5], [5, 2]],
... 3: [[0, 3], [1, 5], [4, 1]],
... 4: [[1, 1], [2, 5], [3, 1], [5, 4]],
... 5: [[2, 2], [4, 4]]}
>>> prisms_algorithm(adjacency_list)
[(0, 1), (1, 4), (4, 3), (4, 5), (5, 2)]
"""
heap = Heap()
visited = [0] * len(adjacency_list)
nbr_tv = [-1] * len(adjacency_list) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
distance_tv = [] # Heap of Distance of vertices from their neighboring vertex
positions = []
for vertex in range(len(adjacency_list)):
distance_tv.append(sys.maxsize)
positions.append(vertex)
heap.node_position.append(vertex)
tree_edges = []
visited[0] = 1
distance_tv[0] = sys.maxsize
for neighbor, distance in adjacency_list[0]:
nbr_tv[neighbor] = 0
distance_tv[neighbor] = distance
heap.heapify(distance_tv, positions)
for _ in range(1, len(adjacency_list)):
vertex = heap.delete_minimum(distance_tv, positions)
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex))
visited[vertex] = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(neighbor)]
):
distance_tv[heap.get_position(neighbor)] = distance
heap.bottom_to_top(
distance, heap.get_position(neighbor), distance_tv, positions
)
nbr_tv[neighbor] = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
edges_number = int(input("Enter number of edges: ").strip())
adjacency_list = defaultdict(list)
for _ in range(edges_number):
edge = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| null |
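A short usage sketch of `prisms_algorithm`, reusing the doctest graph above; the result lists the MST edges as `(tree_vertex, added_vertex)` pairs:

```python
adjacency_list = {
    0: [[1, 1], [3, 3]],
    1: [[0, 1], [2, 6], [3, 5], [4, 1]],
    2: [[1, 6], [4, 5], [5, 2]],
    3: [[0, 3], [1, 5], [4, 1]],
    4: [[1, 1], [2, 5], [3, 1], [5, 4]],
    5: [[2, 2], [4, 4]],
}
mst_edges = prisms_algorithm(adjacency_list)
print(mst_edges)  # [(0, 1), (1, 4), (4, 3), (4, 5), (5, 2)]
total_weight = sum(weight for u, v in mst_edges
                   for nbr, weight in adjacency_list[u] if nbr == v)
print(total_weight)  # 9
```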
5,304 |
import sys
from collections import defaultdict
class Heap:
def __init__(self):
self.node_position = []
def get_position(self, vertex):
return self.node_position[vertex]
def set_position(self, vertex, pos):
self.node_position[vertex] = pos
def top_to_bottom(self, heap, start, size, positions):
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
smallest_child = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
smallest_child = 2 * start + 1
else:
smallest_child = 2 * start + 2
if heap[smallest_child] < heap[start]:
temp, temp1 = heap[smallest_child], positions[smallest_child]
heap[smallest_child], positions[smallest_child] = (
heap[start],
positions[start],
)
heap[start], positions[start] = temp, temp1
temp = self.get_position(positions[smallest_child])
self.set_position(
positions[smallest_child], self.get_position(positions[start])
)
self.set_position(positions[start], temp)
self.top_to_bottom(heap, smallest_child, size, positions)
# Update function if value of any node in min-heap decreases
def bottom_to_top(self, val, index, heap, position):
temp = position[index]
while index != 0:
parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
if val < heap[parent]:
heap[index] = heap[parent]
position[index] = position[parent]
self.set_position(position[parent], index)
else:
heap[index] = val
position[index] = temp
self.set_position(temp, index)
break
index = parent
else:
heap[0] = val
position[0] = temp
self.set_position(temp, 0)
def heapify(self, heap, positions):
start = len(heap) // 2 - 1
for i in range(start, -1, -1):
self.top_to_bottom(heap, i, len(heap), positions)
def delete_minimum(self, heap, positions):
temp = positions[0]
heap[0] = sys.maxsize
self.top_to_bottom(heap, 0, len(heap), positions)
return temp
def METHOD_NAME(adjacency_list):
"""
>>> adjacency_list = {0: [[1, 1], [3, 3]],
... 1: [[0, 1], [2, 6], [3, 5], [4, 1]],
... 2: [[1, 6], [4, 5], [5, 2]],
... 3: [[0, 3], [1, 5], [4, 1]],
... 4: [[1, 1], [2, 5], [3, 1], [5, 4]],
... 5: [[2, 2], [4, 4]]}
>>> prisms_algorithm(adjacency_list)
[(0, 1), (1, 4), (4, 3), (4, 5), (5, 2)]
"""
heap = Heap()
visited = [0] * len(adjacency_list)
nbr_tv = [-1] * len(adjacency_list) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
distance_tv = [] # Heap of Distance of vertices from their neighboring vertex
positions = []
for vertex in range(len(adjacency_list)):
distance_tv.append(sys.maxsize)
positions.append(vertex)
heap.node_position.append(vertex)
tree_edges = []
visited[0] = 1
distance_tv[0] = sys.maxsize
for neighbor, distance in adjacency_list[0]:
nbr_tv[neighbor] = 0
distance_tv[neighbor] = distance
heap.heapify(distance_tv, positions)
for _ in range(1, len(adjacency_list)):
vertex = heap.delete_minimum(distance_tv, positions)
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex))
visited[vertex] = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(neighbor)]
):
distance_tv[heap.get_position(neighbor)] = distance
heap.bottom_to_top(
distance, heap.get_position(neighbor), distance_tv, positions
)
nbr_tv[neighbor] = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
edges_number = int(input("Enter number of edges: ").strip())
adjacency_list = defaultdict(list)
for _ in range(edges_number):
edge = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(METHOD_NAME(adjacency_list))
| null |
5,305 |
#!/usr/bin/env python3
# SPDX-License-Identifier: LGPL-2.1-or-later
#
# Copyright © 2017 Michal Sekletar <[email protected]>
# ATTENTION: This uses the *installed* systemd, not the one from the built
# source tree.
import os
import subprocess
import sys
import time
import unittest
import uuid
from enum import Enum
class InstallChange(Enum):
NO_CHANGE = 0
LINES_SWAPPED = 1
COMMAND_ADDED_BEFORE = 2
COMMAND_ADDED_AFTER = 3
COMMAND_INTERLEAVED = 4
REMOVAL = 5
class ExecutionResumeTest(unittest.TestCase):
def setUp(self):
self.unit = 'test-issue-518.service'
self.unitfile_path = f'/run/systemd/system/{self.unit}'
self.output_file = f"/tmp/test-issue-518-{uuid.uuid4()}"
self.unit_files = {}
unit_file_content = f'''
[Service]
Type=oneshot
ExecStart=/bin/sleep 3
ExecStart=/bin/bash -c "echo foo >>{self.output_file}"
'''
self.unit_files[InstallChange.NO_CHANGE] = unit_file_content
unit_file_content = f'''
[Service]
Type=oneshot
ExecStart=/bin/bash -c "echo foo >>{self.output_file}"
ExecStart=/bin/sleep 3
'''
self.unit_files[InstallChange.LINES_SWAPPED] = unit_file_content
unit_file_content = f'''
[Service]
Type=oneshot
ExecStart=/bin/bash -c "echo bar >>{self.output_file}"
ExecStart=/bin/sleep 3
ExecStart=/bin/bash -c "echo foo >>{self.output_file}"
'''
self.unit_files[InstallChange.COMMAND_ADDED_BEFORE] = unit_file_content
unit_file_content = f'''
[Service]
Type=oneshot
ExecStart=/bin/sleep 3
ExecStart=/bin/bash -c "echo foo >>{self.output_file}"
ExecStart=/bin/bash -c "echo bar >>{self.output_file}"
'''
self.unit_files[InstallChange.COMMAND_ADDED_AFTER] = unit_file_content
unit_file_content = f'''
[Service]
Type=oneshot
ExecStart=/bin/bash -c "echo baz >>{self.output_file}"
ExecStart=/bin/sleep 3
ExecStart=/bin/bash -c "echo foo >>{self.output_file}"
ExecStart=/bin/bash -c "echo bar >>{self.output_file}"
'''
self.unit_files[InstallChange.COMMAND_INTERLEAVED] = unit_file_content
unit_file_content = f'''
[Service]
Type=oneshot
ExecStart=/bin/bash -c "echo bar >>{self.output_file}"
ExecStart=/bin/bash -c "echo baz >>{self.output_file}"
'''
self.unit_files[InstallChange.REMOVAL] = unit_file_content
def reload(self):
subprocess.check_call(['systemctl', 'daemon-reload'])
def write_unit_file(self, unit_file_change):
if not isinstance(unit_file_change, InstallChange):
raise ValueError('Unknown unit file change')
content = self.unit_files[unit_file_change]
with open(self.unitfile_path, 'w', encoding='utf-8') as f:
f.write(content)
self.reload()
def check_output(self, expected_output):
for _ in range(15):
# Wait until the unit finishes so we don't check an incomplete log
if subprocess.call(['systemctl', '-q', 'is-active', self.unit]) == 0:
continue
os.sync()
try:
with open(self.output_file, 'r', encoding='utf-8') as log:
output = log.read()
self.assertEqual(output, expected_output)
return
except IOError:
pass
time.sleep(1)
self.fail(f'Timed out while waiting for the output file {self.output_file} to appear')
def setup_unit(self):
self.write_unit_file(InstallChange.NO_CHANGE)
subprocess.check_call(['systemctl', '--job-mode=replace', '--no-block', 'start', self.unit])
time.sleep(1)
def test_no_change(self):
expected_output = 'foo\n'
self.setup_unit()
self.reload()
self.check_output(expected_output)
def test_swapped(self):
self.setup_unit()
self.write_unit_file(InstallChange.LINES_SWAPPED)
self.reload()
self.assertTrue(not os.path.exists(self.output_file))
def test_added_before(self):
expected_output = 'foo\n'
self.setup_unit()
self.write_unit_file(InstallChange.COMMAND_ADDED_BEFORE)
self.reload()
self.check_output(expected_output)
def test_added_after(self):
expected_output = 'foo\nbar\n'
self.setup_unit()
self.write_unit_file(InstallChange.COMMAND_ADDED_AFTER)
self.reload()
self.check_output(expected_output)
def test_interleaved(self):
expected_output = 'foo\nbar\n'
self.setup_unit()
self.write_unit_file(InstallChange.COMMAND_INTERLEAVED)
self.reload()
self.check_output(expected_output)
def METHOD_NAME(self):
self.setup_unit()
self.write_unit_file(InstallChange.REMOVAL)
self.reload()
self.assertTrue(not os.path.exists(self.output_file))
def test_issue_6533(self):
unit = "test-issue-6533.service"
unitfile_path = f"/run/systemd/system/{unit}"
content = '''
[Service]
ExecStart=/bin/sleep 5
'''
with open(unitfile_path, 'w', encoding='utf-8') as f:
f.write(content)
self.reload()
subprocess.check_call(['systemctl', '--job-mode=replace', '--no-block', 'start', unit])
time.sleep(2)
content = '''
[Service]
ExecStart=/bin/sleep 5
ExecStart=/bin/true
'''
with open(unitfile_path, 'w', encoding='utf-8') as f:
f.write(content)
self.reload()
time.sleep(5)
self.assertNotEqual(subprocess.call("journalctl -b _PID=1 | grep -q 'Freezing execution'", shell=True), 0)
def tearDown(self):
for f in [self.output_file, self.unitfile_path]:
try:
os.remove(f)
except OSError:
# ignore error if log file doesn't exist
pass
self.reload()
if __name__ == '__main__':
unittest.main(testRunner=unittest.TextTestRunner(stream=sys.stdout, verbosity=3))
| null |
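`check_output` above is an instance of a common poll-until-timeout pattern; a small generic version for reference (not part of the test suite):

```python
import time


def wait_for(predicate, timeout=15.0, interval=1.0):
    """Poll `predicate` until it returns a truthy value or `timeout` seconds elapse."""
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        result = predicate()
        if result:
            return result
        time.sleep(interval)
    raise TimeoutError(f'condition not met within {timeout} seconds')


# e.g. wait_for(lambda: os.path.exists(output_file)), with os imported by the caller
```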
5,306 |
# Copyright 2021-2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Testing RgbToBgr op in DE
"""
import numpy as np
from numpy.testing import assert_allclose
import mindspore.dataset as ds
import mindspore.dataset.transforms.py_transforms
import mindspore.dataset.vision.c_transforms as vision
import mindspore.dataset.vision.py_transforms as py_vision
DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"]
SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json"
def generate_numpy_random_rgb(shape):
# Only generate floating points that are fractions like n / 256, since they
# are RGB pixels. Some low-precision floating point types in this test can't
# handle arbitrary precision floating points well.
return np.random.randint(0, 256, shape) / 255.
def test_rgb_bgr_hwc_py():
"""
Feature: RgbToBgr Op
Description: Test Python op with HWC input shape in eager mode
Expectation: Output image shape from op is verified
"""
rgb_flat = generate_numpy_random_rgb((64, 3)).astype(np.float32)
rgb_np = rgb_flat.reshape((8, 8, 3))
rgb2bgr_op = py_vision.RgbToBgr(is_hwc=True)
bgr_np_pred = rgb2bgr_op(rgb_np)
r, g, b = rgb_np[:, :, 0], rgb_np[:, :, 1], rgb_np[:, :, 2]
bgr_np_gt = np.stack((b, g, r), axis=2)
assert bgr_np_pred.shape == rgb_np.shape
assert_allclose(bgr_np_pred.flatten(),
bgr_np_gt.flatten(),
rtol=1e-5,
atol=0)
def METHOD_NAME():
"""
Feature: RgbToBgr Op
Description: Test C++ op with HWC input shape in eager mode
Expectation: Output image shape from op is verified
"""
rgb_flat = generate_numpy_random_rgb((64, 3)).astype(np.float32)
rgb_np = rgb_flat.reshape((8, 8, 3))
rgb2bgr_op = vision.RgbToBgr()
bgr_np_pred = rgb2bgr_op(rgb_np)
r, g, b = rgb_np[:, :, 0], rgb_np[:, :, 1], rgb_np[:, :, 2]
bgr_np_gt = np.stack((b, g, r), axis=2)
assert bgr_np_pred.shape == rgb_np.shape
assert_allclose(bgr_np_pred.flatten(),
bgr_np_gt.flatten(),
rtol=1e-5,
atol=0)
def test_rgb_bgr_chw_py():
"""
Feature: RgbToBgr Op
Description: Test Python op with CHW input shape in eager mode
Expectation: Output image shape from op is verified
"""
rgb_flat = generate_numpy_random_rgb((64, 3)).astype(np.float32)
rgb_np = rgb_flat.reshape((3, 8, 8))
rgb2bgr_op = py_vision.RgbToBgr(is_hwc=False)
rgb_np_pred = rgb2bgr_op(rgb_np)
rgb_np_gt = rgb_np[::-1, :, :]
assert rgb_np_pred.shape == rgb_np.shape
assert_allclose(rgb_np_pred.flatten(),
rgb_np_gt.flatten(),
rtol=1e-5,
atol=0)
def test_rgb_bgr_pipeline_py():
"""
Feature: RgbToBgr Op
Description: Test Python op in dataset pipeline
Expectation: Dataset pipeline runs successfully and results are verified
"""
# First dataset
transforms1_list = [py_vision.Decode(), py_vision.Resize([64, 64]), py_vision.ToTensor()]
transforms1 = mindspore.dataset.transforms.py_transforms.Compose(
transforms1_list)
ds1 = ds.TFRecordDataset(DATA_DIR,
SCHEMA_DIR,
columns_list=["image"],
shuffle=False)
ds1 = ds1.map(operations=transforms1, input_columns=["image"])
# Second dataset
transforms2 = [
py_vision.Decode(),
py_vision.Resize([64, 64]),
py_vision.ToTensor(),
py_vision.RgbToBgr()
]
transforms2 = mindspore.dataset.transforms.py_transforms.Compose(
transforms2)
ds2 = ds.TFRecordDataset(DATA_DIR,
SCHEMA_DIR,
columns_list=["image"],
shuffle=False)
ds2 = ds2.map(operations=transforms2, input_columns=["image"])
num_iter = 0
for data1, data2 in zip(ds1.create_dict_iterator(num_epochs=1),
ds2.create_dict_iterator(num_epochs=1)):
num_iter += 1
ori_img = data1["image"].asnumpy()
cvt_img = data2["image"].asnumpy()
cvt_img_gt = ori_img[::-1, :, :]
assert_allclose(cvt_img_gt.flatten(),
cvt_img.flatten(),
rtol=1e-5,
atol=0)
assert ori_img.shape == cvt_img.shape
assert num_iter == 3
def test_rgb_bgr_pipeline_c():
"""
Feature: RgbToBgr Op
Description: Test C++ op in dataset pipeline
Expectation: Dataset pipeline runs successfully and results are verified
"""
# First dataset
transforms1 = [
# Set Decode(rgb=True) for test coverage of deprecated rgb arg
vision.Decode(rgb=True),
vision.Resize([60, 60])
]
transforms1 = mindspore.dataset.transforms.py_transforms.Compose(
transforms1)
ds1 = ds.TFRecordDataset(DATA_DIR,
SCHEMA_DIR,
columns_list=["image"],
shuffle=False)
ds1 = ds1.map(operations=transforms1, input_columns=["image"])
# Second dataset
transforms2_list = [
# Set Decode(True) for test coverage of deprecated rgb arg
vision.Decode(True),
vision.Resize([60, 60]),
vision.RgbToBgr()
]
transforms2 = mindspore.dataset.transforms.py_transforms.Compose(transforms2_list)
ds2 = ds.TFRecordDataset(DATA_DIR,
SCHEMA_DIR,
columns_list=["image"],
shuffle=False)
ds2 = ds2.map(operations=transforms2, input_columns=["image"])
for data1, data2 in zip(ds1.create_dict_iterator(num_epochs=1),
ds2.create_dict_iterator(num_epochs=1)):
ori_img = data1["image"].asnumpy()
cvt_img = data2["image"].asnumpy()
cvt_img_gt = ori_img[:, :, ::-1]
assert_allclose(cvt_img_gt.flatten(),
cvt_img.flatten(),
rtol=1e-5,
atol=0)
assert ori_img.shape == cvt_img.shape
if __name__ == "__main__":
test_rgb_bgr_hwc_py()
METHOD_NAME()
test_rgb_bgr_chw_py()
test_rgb_bgr_pipeline_py()
test_rgb_bgr_pipeline_c()
| null |
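The expected outputs in these tests reduce to a flip along the colour axis; in NumPy that is a single slice, and only the axis position differs between HWC and CHW layouts:

```python
import numpy as np

img_hwc = np.random.randint(0, 256, (8, 8, 3)).astype(np.float32) / 255.0
img_chw = img_hwc.transpose(2, 0, 1)

bgr_hwc = img_hwc[:, :, ::-1]  # flip the last axis for height-width-channel data
bgr_chw = img_chw[::-1, :, :]  # flip the first axis for channel-height-width data

assert np.allclose(bgr_hwc.transpose(2, 0, 1), bgr_chw)
```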
5,307 |
"""
Interface that allows 2D rendering.
"""
from abc import ABC, abstractmethod
from rlberry.rendering.opengl_render2d import OpenGLRender2D
from rlberry.rendering.pygame_render2d import PyGameRender2D
from rlberry.rendering.utils import video_write
import rlberry
logger = rlberry.logger
class RenderInterface(ABC):
"""
Common interface for rendering in rlberry.
"""
def __init__(self):
self._rendering_enabled = False
def is_render_enabled(self):
return self._rendering_enabled
def METHOD_NAME(self):
self._rendering_enabled = True
def disable_rendering(self):
self._rendering_enabled = False
def save_video(self, filename, **kwargs):
"""
Save video file.
"""
pass
def get_video(self, **kwargs):
"""
Get video data.
"""
pass
@abstractmethod
def render(self, **kwargs):
"""
Display on screen.
"""
pass
class RenderInterface2D(RenderInterface):
"""
Interface for 2D rendering in rlberry.
"""
def __init__(self):
RenderInterface.__init__(self)
self._rendering_enabled = False
self._rendering_type = "2d"
self._state_history_for_rendering = []
self._refresh_interval = 50 # in milliseconds
self._clipping_area = (-1.0, 1.0, -1.0, 1.0) # (left,right,bottom,top)
# rendering type, either 'pygame' or 'opengl'
self.renderer_type = "opengl"
def get_renderer(self):
if self.renderer_type == "opengl":
return OpenGLRender2D()
elif self.renderer_type == "pygame":
return PyGameRender2D()
else:
raise NotImplementedError("Unknown renderer type.")
@abstractmethod
def get_scene(self, state):
"""
Return scene (list of shapes) representing a given state
"""
pass
@abstractmethod
def get_background(self):
"""
Return a scene (list of shapes) representing the background
"""
pass
def append_state_for_rendering(self, state):
self._state_history_for_rendering.append(state)
def set_refresh_interval(self, interval):
self._refresh_interval = interval
def clear_render_buffer(self):
self._state_history_for_rendering = []
def set_clipping_area(self, area):
self._clipping_area = area
def _get_background_and_scenes(self):
# background
background = self.get_background()
# data: convert states to scenes
scenes = []
for state in self._state_history_for_rendering:
scene = self.get_scene(state)
scenes.append(scene)
return background, scenes
def render(self, loop=True, **kwargs):
"""
Function to render an environment that implements the interface.
"""
if self.is_render_enabled():
# background and data
background, data = self._get_background_and_scenes()
if len(data) == 0:
logger.info("No data to render.")
return
# render
renderer = self.get_renderer()
renderer.window_name = self.name
renderer.set_refresh_interval(self._refresh_interval)
renderer.set_clipping_area(self._clipping_area)
renderer.set_data(data)
renderer.set_background(background)
renderer.run_graphics(loop)
return 0
else:
logger.info("Rendering not enabled for the environment.")
return 1
def get_video(self, framerate=25, **kwargs):
# background and data
background, data = self._get_background_and_scenes()
if len(data) == 0:
logger.info("No data to save.")
return
# get video data from renderer
renderer = self.get_renderer()
renderer.window_name = self.name
renderer.set_refresh_interval(self._refresh_interval)
renderer.set_clipping_area(self._clipping_area)
renderer.set_data(data)
renderer.set_background(background)
return renderer.get_video_data()
def save_video(self, filename, framerate=25, **kwargs):
video_data = self.get_video(framerate=framerate, **kwargs)
video_write(filename, video_data, framerate=framerate)
| null |
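A minimal sketch of how an environment might plug into `RenderInterface2D`, using only the hooks defined above. The scene objects are left as placeholders, since the real shape classes live elsewhere in `rlberry.rendering`, and the class name here is hypothetical:

```python
class PointEnvRender(RenderInterface2D):
    """Hypothetical environment mixin that renders one 2D point per state."""

    name = "PointEnv"  # render() reads self.name for the window title

    def __init__(self):
        RenderInterface2D.__init__(self)
        self.set_clipping_area((-1.0, 1.0, -1.0, 1.0))

    def get_background(self):
        return []  # no static background shapes in this sketch

    def get_scene(self, state):
        return [state]  # placeholder: real code would build a scene of shapes


# During a rollout the environment appends each state with
# self.append_state_for_rendering(state); the user then calls
# env.render() or env.save_video("out.mp4") once rendering is enabled.
```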
5,308 |
from builtins import range
import numpy as np
from numba import jit, prange
import cubical.kernels
use_parallel = True if cubical.kernels.num_omp_threads > 1 else False
use_cache = cubical.kernels.use_cache
@jit(nopython=True, fastmath=True, parallel=use_parallel, cache=use_cache, nogil=True)
def METHOD_NAME(x, xinv, flags, eps, flagbit):
"""
Given an array x of dimensions (d,t,i,a,2,2), computes the inverse of every 2x2 block.
Takes flags of shape (d,t,f,a) into account, and will flag elements if the inverse is
too large.
Args:
x (np.complex64 or np.complex128):
Typed memoryview of X array with dimensions (d, ti, fi, a, c, c)
xinv (np.complex64 or np.complex128):
Typed memoryview of output inverse array with dimensions
(d, ti, fi, a, c, c)
flags (np.uint16_t):
Typed memoryview of flag array with dimensions (d, t, f, a)
eps (float):
Threshold beneath which the denominator is regarded as too small for inversion.
flagbit (int):
The bitflag which will be raised if flagging is required.
Returns:
int:
Number of elements flagged
"""
flag_count = 0
eps = eps**2
n_dir = x.shape[0]
n_tim = x.shape[1]
n_fre = x.shape[2]
n_ant = x.shape[3]
for aa in prange(n_ant):
for t in range(n_tim):
for f in range(n_fre):
for d in range(n_dir):
if flags[d,t,f,aa]:
xinv[d,t,f,aa,0,0] = 0
xinv[d,t,f,aa,1,1] = 0
xinv[d,t,f,aa,0,1] = 0
xinv[d,t,f,aa,1,0] = 0
else:
denom = x[d,t,f,aa,0,0] * x[d,t,f,aa,1,1] - \
x[d,t,f,aa,0,1] * x[d,t,f,aa,1,0]
if (denom*denom.conjugate()).real<=eps:
xinv[d,t,f,aa,0,0] = 0
xinv[d,t,f,aa,1,1] = 0
xinv[d,t,f,aa,0,1] = 0
xinv[d,t,f,aa,1,0] = 0
flags[d,t,f,aa] = flagbit
flag_count += 1
else:
xinv[d,t,f,aa,0,0] = x[d,t,f,aa,1,1]/denom
xinv[d,t,f,aa,1,1] = x[d,t,f,aa,0,0]/denom
xinv[d,t,f,aa,0,1] = -1 * x[d,t,f,aa,0,1]/denom
xinv[d,t,f,aa,1,0] = -1 * x[d,t,f,aa,1,0]/denom
return flag_count
@jit(nopython=True, fastmath=True, parallel=use_parallel, cache=use_cache, nogil=True)
def compute_diag_inverse(x, xinv, flags, eps, flagbit):
"""
Given an array x of dimensions (d,t,i,a,2,2), computes the inverse of every 2x2 block,
under the assumption that the off-diagonal entries are zero. Takes flags of shape
(d,t,f,a) into account, and will flag elements if the inverse is too large.
Args:
x (np.complex64 or np.complex128):
Typed memoryview of X array with dimensions (d, ti, fi, a, c, c)
xinv (np.complex64 or np.complex128):
Typed memoryview of output inverse array with dimensions
(d, ti, fi, a, c, c)
flags (np.uint16_t):
Typed memoryview of flag array with dimensions (d, t, f, a)
eps (float):
Threshold beneath which the denominator is regarded as too small for inversion.
flagbit (int):
The bitflag which will be raised if flagging is required.
Returns:
int:
Number of elements flagged
"""
flag_count = 0
eps = eps**2
n_dir = x.shape[0]
n_tim = x.shape[1]
n_fre = x.shape[2]
n_ant = x.shape[3]
for aa in prange(n_ant):
for t in range(n_tim):
for f in range(n_fre):
for d in range(n_dir):
xinv[d,t,f,aa,0,1] = xinv[d,t,f,aa,1,0] = 0
if flags[d,t,f,aa]:
xinv[d,t,f,aa,0,0] = xinv[d,t,f,aa,1,1] = 0
else:
denom = x[d,t,f,aa,0,0] * x[d,t,f,aa,1,1]
if (denom.real**2 + denom.imag**2)<=eps:
xinv[d,t,f,aa,0,0] = xinv[d,t,f,aa,1,1] = 0
flags[d,t,f,aa] = flagbit
flag_count += 1
else:
xinv[d,t,f,aa,0,0] = 1/x[d,t,f,aa,0,0]
xinv[d,t,f,aa,1,1] = 1/x[d,t,f,aa,1,1]
return flag_count
@jit(nopython=True, fastmath=True, parallel=use_parallel, cache=use_cache, nogil=True)
def compute_chisq(r, chisq):
"""
Compute chi-square over correlations, models, and one antenna axis.
Args:
r (np.complex64 or np.complex128):
Array with dimensions (i, t, f , a, a, c, c)
chisq (np.float64):
Array with dimensions (t, f, a)
"""
n_mod = r.shape[0]
n_tim = r.shape[1]
n_fre = r.shape[2]
n_ant = r.shape[3]
for aa in prange(n_ant):
for ab in range(n_ant):
for i in range(n_mod):
for t in range(n_tim):
for f in range(n_fre):
for c1 in range(2):
for c2 in range(2):
chisq[t,f,aa] += r[i,t,f,aa,ab,c1,c2].real**2 + r[i,t,f,aa,ab,c1,c2].imag**2
@jit(nopython=True, fastmath=True, parallel=use_parallel, cache=use_cache, nogil=True)
def compute_chisq_diag(r, chisq):
"""
Compute chi-square over diagonal correlations, models, and one antenna axis.
Args:
r (np.complex64 or np.complex128):
Array with dimensions (i, t, f , a, a, c, c)
chisq (np.float64):
Array with dimensions (t, f, a)
"""
n_mod = r.shape[0]
n_tim = r.shape[1]
n_fre = r.shape[2]
n_ant = r.shape[3]
for aa in prange(n_ant):
for ab in range(n_ant):
for i in range(n_mod):
for t in range(n_tim):
for f in range(n_fre):
for c in range(2):
chisq[t,f,aa] += r[i,t,f,aa,ab,c,c].real**2 + r[i,t,f,aa,ab,c,c].imag**2
@jit(nopython=True, fastmath=True, parallel=use_parallel, cache=use_cache, nogil=True)
def compute_chisq_offdiag(r, chisq):
"""
Compute chi-square over off-diagonal correlations, models, and one antenna axis.
Args:
r (np.complex64 or np.complex128):
Array with dimensions (i, t, f , a, a, c, c)
chisq (np.float64):
Array with dimensions (t, f, a)
"""
n_mod = r.shape[0]
n_tim = r.shape[1]
n_fre = r.shape[2]
n_ant = r.shape[3]
for aa in prange(n_ant):
for ab in range(n_ant):
for i in range(n_mod):
for t in range(n_tim):
for f in range(n_fre):
for c in range(2):
chisq[t,f,aa] += r[i,t,f,aa,ab,c,1-c].real**2 + r[i,t,f,aa,ab,c,1-c].imag**2
| null |
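For reference, the per-block arithmetic in the first kernel is the closed-form 2x2 inverse. A plain NumPy check of the same formula, useful when validating the numba kernels on a single well-conditioned block:

```python
import numpy as np


def inverse_2x2(block):
    """Closed-form inverse of one 2x2 complex block: adj(A) / det(A)."""
    (a, b), (c, d) = block
    det = a * d - b * c
    return np.array([[d, -b], [-c, a]]) / det


block = np.array([[1 + 1j, 0.2], [0.1j, 2.0]])
np.testing.assert_allclose(inverse_2x2(block) @ block, np.eye(2), atol=1e-12)
```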
5,309 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetIotHubResult',
'AwaitableGetIotHubResult',
'get_iot_hub',
'get_iot_hub_output',
]
@pulumi.output_type
class GetIotHubResult:
"""
A collection of values returned by getIotHub.
"""
def __init__(__self__, hostname=None, id=None, identities=None, name=None, resource_group_name=None, METHOD_NAME=None):
if hostname and not isinstance(hostname, str):
raise TypeError("Expected argument 'hostname' to be a str")
pulumi.set(__self__, "hostname", hostname)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if identities and not isinstance(identities, list):
raise TypeError("Expected argument 'identities' to be a list")
pulumi.set(__self__, "identities", identities)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if resource_group_name and not isinstance(resource_group_name, str):
raise TypeError("Expected argument 'resource_group_name' to be a str")
pulumi.set(__self__, "resource_group_name", resource_group_name)
if METHOD_NAME and not isinstance(METHOD_NAME, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", METHOD_NAME)
@property
@pulumi.getter
def hostname(self) -> str:
"""
The Hostname of the IoTHub.
"""
return pulumi.get(self, "hostname")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def identities(self) -> Sequence['outputs.GetIotHubIdentityResult']:
"""
A `identity` block as defined below.
"""
return pulumi.get(self, "identities")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> str:
return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter
def METHOD_NAME(self) -> Optional[Mapping[str, str]]:
return pulumi.get(self, "tags")
class AwaitableGetIotHubResult(GetIotHubResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetIotHubResult(
hostname=self.hostname,
id=self.id,
identities=self.identities,
name=self.name,
resource_group_name=self.resource_group_name,
METHOD_NAME=self.METHOD_NAME)
def get_iot_hub(name: Optional[str] = None,
resource_group_name: Optional[str] = None,
METHOD_NAME: Optional[Mapping[str, str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetIotHubResult:
"""
Use this data source to access information about an existing IoTHub.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example = azure.iot.get_iot_hub(name="existing",
resource_group_name="existing")
pulumi.export("id", example.id)
```
:param str name: The name of this IoTHub.
:param str resource_group_name: The name of the Resource Group where the IoTHub exists.
:param Mapping[str, str] tags: A mapping of tags which should be assigned to the IoTHub.
"""
__args__ = dict()
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
__args__['tags'] = METHOD_NAME
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure:iot/getIotHub:getIotHub', __args__, opts=opts, typ=GetIotHubResult).value
return AwaitableGetIotHubResult(
hostname=pulumi.get(__ret__, 'hostname'),
id=pulumi.get(__ret__, 'id'),
identities=pulumi.get(__ret__, 'identities'),
name=pulumi.get(__ret__, 'name'),
resource_group_name=pulumi.get(__ret__, 'resource_group_name'),
METHOD_NAME=pulumi.get(__ret__, 'tags'))
@_utilities.lift_output_func(get_iot_hub)
def get_iot_hub_output(name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
METHOD_NAME: Optional[pulumi.Input[Optional[Mapping[str, str]]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetIotHubResult]:
"""
Use this data source to access information about an existing IoTHub.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example = azure.iot.get_iot_hub(name="existing",
resource_group_name="existing")
pulumi.export("id", example.id)
```
:param str name: The name of this IoTHub.
:param str resource_group_name: The name of the Resource Group where the IoTHub exists.
:param Mapping[str, str] tags: A mapping of tags which should be assigned to the IoTHub.
"""
...
| null |
5,310 |
"""
Logging and Data Scaling Utilities
Added array saver/loader to save/resume training
"""
import numpy as np
import os
import shutil
import glob
import csv
class Scaler(object):
""" Generate scale and offset based on running mean and stddev along axis=0
offset = running mean
scale = 1 / (stddev + 0.1) / 3 (i.e. 3x stddev = +/- 1.0)
"""
def __init__(self, obs_dim, env_name):
"""
Args:
obs_dim: dimension of axis=1
"""
self.env_name = env_name
self.vars = np.zeros(obs_dim)
self.means = np.zeros(obs_dim)
self.m = 0
self.n = 0
self.first_pass = True
def update(self, x):
""" Update running mean and variance (this is an exact method)
Args:
x: NumPy array, shape = (N, obs_dim)
see: https://stats.stackexchange.com/questions/43159/how-to-calculate-pooled-
variance-of-two-groups-given-known-group-variances-mean
"""
if self.first_pass:
if os.path.isfile("./savedmodel/"+self.env_name+"/scaler.dat") :
resumed = np.load("./savedmodel/"+self.env_name+"/scaler.dat", allow_pickle=True)
self.means = resumed[0: self.means.size]
self.vars = resumed[self.means.size : self.means.size+self.vars.size]
self.m = resumed[self.means.size+self.vars.size: resumed.size]
else:
self.means = np.mean(x, axis=0)
self.vars = np.var(x, axis=0)
self.m = x.shape[0]
self.first_pass = False
else:
n = x.shape[0]
new_data_var = np.var(x, axis=0)
new_data_mean = np.mean(x, axis=0)
new_data_mean_sq = np.square(new_data_mean)
new_means = ((self.means * self.m) + (new_data_mean * n)) / (self.m + n)
self.vars = (((self.m * (self.vars + np.square(self.means))) +
(n * (new_data_var + new_data_mean_sq))) / (self.m + n) -
np.square(new_means))
self.vars = np.maximum(0.0, self.vars) # occasionally goes negative, clip
self.means = new_means
self.m += n
def resume(self):
if os.path.isfile("./savedmodel/" + self.env_name + "/scaler.dat"):
resumed = np.load("./savedmodel/"+self.env_name+"/scaler.dat")
self.means = resumed[0: self.means.size]
self.vars = resumed[self.means.size : self.means.size+self.vars.size]
self.m = resumed[self.means.size+self.vars.size: resumed.size]
self.first_pass = False
else:
return 0
def get(self):
""" returns 2-tuple: (scale, offset) """
return 1/(np.sqrt(self.vars) + 0.1)/3, self.means
def METHOD_NAME(self):
""" Saves states variance and mean to resume learning"""
path = os.path.join('savedmodel/'+self.env_name)
path = os.path.join(path, 'scaler.dat')
saved = np.concatenate ([self.means, self.vars])
saved = np.append(saved, [self.m])
saved.dump(path)
class Logger(object):
""" Simple training logger: saves to file and optionally prints to stdout """
def __init__(self, logname, now):
"""
Args:
logname: name for log (e.g. 'Hopper-v1')
now: unique sub-directory name (e.g. date/time string)
"""
path = os.path.join('log-files', logname, now)
os.makedirs(path)
#filenames = glob.glob('*.py') # put copy of all python files in log_dir
#for filename in filenames: # for reference
# shutil.copy(filename, path)
path = os.path.join(path, 'log.csv')
self.write_header = True
self.log_entry = {}
self.f = open(path, 'w')
self.writer = None # DictWriter created with first call to write() method
def write(self, display=True):
""" Write 1 log entry to file, and optionally to stdout
Log fields preceded by '_' will not be printed to stdout
Args:
display: boolean, print to stdout
"""
if display:
self.disp(self.log_entry)
if self.write_header:
fieldnames = [x for x in self.log_entry.keys()]
self.writer = csv.DictWriter(self.f, fieldnames=fieldnames)
self.writer.writeheader()
self.write_header = False
self.writer.writerow(self.log_entry)
self.log_entry = {}
@staticmethod
def disp(log):
"""Print metrics to stdout"""
log_keys = [k for k in log.keys()]
log_keys.sort()
print('***** Episode {}, Mean R = {:.1f} *****'.format(log['_Episode'],
log['_MeanReward']))
for key in log_keys:
if key[0] != '_': # don't display log items with leading '_'
print('{:s}: {:.3g}'.format(key, log[key]))
print('\n')
def log(self, items):
""" Update fields in log (does not write to file, used to collect updates.
Args:
items: dictionary of items to update
"""
self.log_entry.update(items)
def close(self):
""" Close log file - log cannot be written after this """
self.f.close()
| null |
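A short usage sketch of `Scaler`: feed it observation batches, then apply the returned scale and offset to normalize new observations. The environment name is hypothetical and no saved `scaler.dat` is assumed to exist:

```python
import numpy as np

obs_dim = 4
scaler = Scaler(obs_dim, env_name="Hopper-v1")
for _ in range(10):
    batch = np.random.randn(32, obs_dim)  # stand-in for rollout observations
    scaler.update(batch)
scale, offset = scaler.get()
normalized = (np.random.randn(1, obs_dim) - offset) * scale
print(normalized.shape)  # (1, 4)
```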
5,311 |
from sympy.combinatorics.permutations import Permutation
from sympy.core.symbol import symbols
from sympy.matrices import Matrix
from sympy.utilities.iterables import variations, rotate_left
def symmetric(n):
"""
Generates the symmetric group of order n, Sn.
Examples
========
>>> from sympy.combinatorics.generators import symmetric
>>> list(symmetric(3))
[(2), (1 2), (2)(0 1), (0 1 2), (0 2 1), (0 2)]
"""
for perm in variations(range(n), n):
yield Permutation(perm)
def cyclic(n):
"""
Generates the cyclic group of order n, Cn.
Examples
========
>>> from sympy.combinatorics.generators import cyclic
>>> list(cyclic(5))
[(4), (0 1 2 3 4), (0 2 4 1 3),
(0 3 1 4 2), (0 4 3 2 1)]
See Also
========
dihedral
"""
gen = list(range(n))
for i in range(n):
yield Permutation(gen)
gen = rotate_left(gen, 1)
def alternating(n):
"""
Generates the alternating group of order n, An.
Examples
========
>>> from sympy.combinatorics.generators import alternating
>>> list(alternating(3))
[(2), (0 1 2), (0 2 1)]
"""
for perm in variations(range(n), n):
p = Permutation(perm)
if p.is_even:
yield p
def METHOD_NAME(n):
"""
Generates the dihedral group of order 2n, Dn.
The result is given as a subgroup of Sn, except for the special cases n=1
(the group S2) and n=2 (the Klein 4-group) where that's not possible
and embeddings in S2 and S4 respectively are given.
Examples
========
>>> from sympy.combinatorics.generators import dihedral
>>> list(dihedral(3))
[(2), (0 2), (0 1 2), (1 2), (0 2 1), (2)(0 1)]
See Also
========
cyclic
"""
if n == 1:
yield Permutation([0, 1])
yield Permutation([1, 0])
elif n == 2:
yield Permutation([0, 1, 2, 3])
yield Permutation([1, 0, 3, 2])
yield Permutation([2, 3, 0, 1])
yield Permutation([3, 2, 1, 0])
else:
gen = list(range(n))
for i in range(n):
yield Permutation(gen)
yield Permutation(gen[::-1])
gen = rotate_left(gen, 1)
def rubik_cube_generators():
"""Return the permutations of the 3x3 Rubik's cube, see
https://www.gap-system.org/Doc/Examples/rubik.html
"""
a = [
[(1, 3, 8, 6), (2, 5, 7, 4), (9, 33, 25, 17), (10, 34, 26, 18),
(11, 35, 27, 19)],
[(9, 11, 16, 14), (10, 13, 15, 12), (1, 17, 41, 40), (4, 20, 44, 37),
(6, 22, 46, 35)],
[(17, 19, 24, 22), (18, 21, 23, 20), (6, 25, 43, 16), (7, 28, 42, 13),
(8, 30, 41, 11)],
[(25, 27, 32, 30), (26, 29, 31, 28), (3, 38, 43, 19), (5, 36, 45, 21),
(8, 33, 48, 24)],
[(33, 35, 40, 38), (34, 37, 39, 36), (3, 9, 46, 32), (2, 12, 47, 29),
(1, 14, 48, 27)],
[(41, 43, 48, 46), (42, 45, 47, 44), (14, 22, 30, 38),
(15, 23, 31, 39), (16, 24, 32, 40)]
]
return [Permutation([[i - 1 for i in xi] for xi in x], size=48) for x in a]
def rubik(n):
"""Return permutations for an nxn Rubik's cube.
    Permutations returned are for rotation of each of the slices
from the face up to the last face for each of the 3 sides (in this order):
front, right and bottom. Hence, the first n - 1 permutations are for the
slices from the front.
"""
if n < 2:
raise ValueError('dimension of cube must be > 1')
# 1-based reference to rows and columns in Matrix
def getr(f, i):
return faces[f].col(n - i)
def getl(f, i):
return faces[f].col(i - 1)
def getu(f, i):
return faces[f].row(i - 1)
def getd(f, i):
return faces[f].row(n - i)
def setr(f, i, s):
faces[f][:, n - i] = Matrix(n, 1, s)
def setl(f, i, s):
faces[f][:, i - 1] = Matrix(n, 1, s)
def setu(f, i, s):
faces[f][i - 1, :] = Matrix(1, n, s)
def setd(f, i, s):
faces[f][n - i, :] = Matrix(1, n, s)
# motion of a single face
def cw(F, r=1):
for _ in range(r):
face = faces[F]
rv = []
for c in range(n):
for r in range(n - 1, -1, -1):
rv.append(face[r, c])
faces[F] = Matrix(n, n, rv)
def ccw(F):
cw(F, 3)
# motion of plane i from the F side;
# fcw(0) moves the F face, fcw(1) moves the plane
# just behind the front face, etc...
def fcw(i, r=1):
for _ in range(r):
if i == 0:
cw(F)
i += 1
temp = getr(L, i)
setr(L, i, list(getu(D, i)))
setu(D, i, list(reversed(getl(R, i))))
setl(R, i, list(getd(U, i)))
setd(U, i, list(reversed(temp)))
i -= 1
def fccw(i):
fcw(i, 3)
# motion of the entire cube from the F side
def FCW(r=1):
for _ in range(r):
cw(F)
ccw(B)
cw(U)
t = faces[U]
cw(L)
faces[U] = faces[L]
cw(D)
faces[L] = faces[D]
cw(R)
faces[D] = faces[R]
faces[R] = t
def FCCW():
FCW(3)
# motion of the entire cube from the U side
def UCW(r=1):
for _ in range(r):
cw(U)
ccw(D)
t = faces[F]
faces[F] = faces[R]
faces[R] = faces[B]
faces[B] = faces[L]
faces[L] = t
def UCCW():
UCW(3)
# defining the permutations for the cube
U, F, R, B, L, D = names = symbols('U, F, R, B, L, D')
# the faces are represented by nxn matrices
faces = {}
count = 0
for fi in range(6):
f = []
for a in range(n**2):
f.append(count)
count += 1
faces[names[fi]] = Matrix(n, n, f)
# this will either return the value of the current permutation
    # (show=1) or else append the permutation to the group, g
def perm(show=0):
# add perm to the list of perms
p = []
for f in names:
p.extend(faces[f])
if show:
return p
g.append(Permutation(p))
g = [] # container for the group's permutations
I = list(range(6*n**2)) # the identity permutation used for checking
# define permutations corresponding to cw rotations of the planes
# up TO the last plane from that direction; by not including the
# last plane, the orientation of the cube is maintained.
# F slices
for i in range(n - 1):
fcw(i)
perm()
fccw(i) # restore
assert perm(1) == I
# R slices
# bring R to front
UCW()
for i in range(n - 1):
fcw(i)
# put it back in place
UCCW()
# record
perm()
# restore
# bring face to front
UCW()
fccw(i)
# restore
UCCW()
assert perm(1) == I
# D slices
# bring up bottom
FCW()
UCCW()
FCCW()
for i in range(n - 1):
# turn strip
fcw(i)
# put bottom back on the bottom
FCW()
UCW()
FCCW()
# record
perm()
# restore
# bring up bottom
FCW()
UCCW()
FCCW()
# turn strip
fccw(i)
# put bottom back on the bottom
FCW()
UCW()
FCCW()
assert perm(1) == I
return g
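# Illustrative sanity check added for exposition (not part of the sympy module):
# rubik(n) returns 3*(n - 1) slice rotations acting on the 6*n**2 facets.
def _rubik_slices_sketch():
    """
    >>> g = rubik(2)
    >>> len(g), g[0].size
    (3, 24)
    """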
| null |
5,312 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import unittest
from mantid.simpleapi import config, mtd, CloneWorkspace, D7YIGPositionCalibration, Load
from mantid.api import ITableWorkspace, WorkspaceGroup
import os.path
from os import path
import tempfile
class D7YIGPositionCalibrationTest(unittest.TestCase):
@classmethod
def METHOD_NAME(cls):
config.appendDataSearchSubDir("ILL/D7/")
Load("402652_403041.nxs", OutputWorkspace="shortWavelengthScan")
@classmethod
def tearDownClass(cls):
mtd.clear()
output_path = os.path.join(tempfile.gettempdir(), "test_shortWavelength.xml")
if path.exists(output_path):
os.remove(output_path)
def test_algorithm_with_no_input_workspace_raises_exception(self):
with self.assertRaisesRegex(
RuntimeError,
"Either a list of file names containing YIG scan or the workspace with the loaded scan is required for calibration.",
):
D7YIGPositionCalibration()
def test_no_fitting(self):
approximate_wavelength = "3.14" # Angstrom
self.assertTrue(mtd["shortWavelengthScan"])
CloneWorkspace(InputWorkspace="shortWavelengthScan", OutputWorkspace="shortWavelengthScan_clone")
D7YIGPositionCalibration(
InputWorkspace="shortWavelengthScan_clone",
ApproximateWavelength=approximate_wavelength,
YIGPeaksFile="D7_YIG_peaks.xml",
FitOutputWorkspace="test_no_fitting",
FittingMethod="None",
ClearCache=False,
)
self.assertTrue(mtd["peak_fits_test_no_fitting"])
self.assertTrue(isinstance(mtd["peak_fits_test_no_fitting"], WorkspaceGroup))
def test_shortWavelength(self):
approximate_wavelength = "3.14" # Angstrom
self.assertTrue(mtd["shortWavelengthScan"])
CloneWorkspace(InputWorkspace="shortWavelengthScan", OutputWorkspace="shortWavelengthScan_clone")
output_filename = os.path.join(tempfile.gettempdir(), "test_shortWavelength.xml")
D7YIGPositionCalibration(
InputWorkspace="shortWavelengthScan_clone",
ApproximateWavelength=approximate_wavelength,
YIGPeaksFile="D7_YIG_peaks.xml",
CalibrationOutputFile=output_filename,
MinimalDistanceBetweenPeaks=1.75,
BankOffsets=[3, 3, -1],
FitOutputWorkspace="test_shortWavelength",
FittingMethod="Individual",
)
self.assertTrue(path.exists(output_filename))
self.assertTrue(mtd["test_shortWavelength"])
self.assertTrue(isinstance(mtd["test_shortWavelength"], ITableWorkspace))
self._check_fit_output("test_shortWavelength")
def _check_fit_output(self, fitTableName):
"""Checks the TableWorkspace if the output values are reasonable,
then check if the output IPF can be read by the Loader"""
pixels_per_bank = 44
self.assertNotEqual(mtd[fitTableName], None)
self.assertTrue(isinstance(mtd[fitTableName], ITableWorkspace))
wavelength = float(mtd[fitTableName].column(1)[1])
self.assertAlmostEqual(wavelength, 1.0, delta=5e-2) # +/- 5 %
bank2_slope = 1.0 / float(mtd[fitTableName].column(1)[0])
self.assertAlmostEqual(bank2_slope, 1.0, delta=2e-2) # +/- 2 %
bank3_slope = 1.0 / float(mtd[fitTableName].column(1)[4 * pixels_per_bank])
self.assertAlmostEqual(bank3_slope, 1.0, delta=2e-2) # +/- 2%
bank4_slope = 1.0 / float(mtd[fitTableName].column(1)[8 * pixels_per_bank])
self.assertAlmostEqual(bank4_slope, 1.0, delta=2e-2) # +/- 2 %
for row_no in range(mtd[fitTableName].rowCount()):
row_data = mtd[fitTableName].row(row_no)
if ".offset" in row_data["Name"]:
offset = row_data["Value"]
self.assertAlmostEqual(offset, 0.0, delta=24.0) # +- 24 degrees
if __name__ == "__main__":
unittest.main()
| null |
5,313 |
from gemstone.common.configurable_model import ConfigurableModel
from hwtypes import BitVector
from enum import Enum
import magma as m
import fault
class GCRegAddr(Enum):
TST_ADDR = 0
STALL_ADDR = 1
CLK_SEL_ADDR = 2
RW_DELAY_SEL_ADDR = 3
CLK_SWITCH_DELAY_SEL_ADDR = 4
class GCOp(Enum):
NOP = 0
CONFIG_WRITE = 1
CONFIG_READ = 2
WRITE_A050 = 4
WRITE_TST = 5
READ_TST = 6
GLOBAL_RESET = 7
WRITE_STALL = 8
READ_STALL = 9
ADVANCE_CLK = 10
READ_CLK_DOMAIN = 11
SWITCH_CLK = 12
WRITE_RW_DELAY_SEL = 13
READ_RW_DELAY_SEL = 14
WRITE_CLK_SWITCH_DELAY_SEL = 15
READ_CLK_SWITCH_DELAY_SEL = 16
def gen_global_controller(config_data_width: int,
config_addr_width: int,
config_op_width: int):
class _GlobalController(ConfigurableModel(32, 32)):
def __init__(self):
super().__init__()
self.num_stall_domains = 4
self.reset()
def reset(self):
self.TST = [BitVector[config_data_width](0)]
self.stall = [BitVector[self.num_stall_domains](0)]
self.clk_sel = [BitVector[1](0)]
self.rw_delay_sel = [BitVector[config_data_width](2)]
self.clk_switch_delay_sel = [BitVector[1](0)]
self.reset_out = [0]
self.config_addr_out = [BitVector[config_addr_width](0)]
self.config_data_out = [BitVector[config_data_width](0)]
self.config_data_in = fault.UnknownValue
self.read = [0]
self.write = [0]
self.config_data_to_jtag = [BitVector[config_data_width](0)]
def config_read(self, addr):
rw_delay = self.rw_delay_sel[0]
duration = rw_delay.as_uint()
self.read = [1] * duration + [0]
self.write = [0] * (duration + 1)
self.config_addr_out = [BitVector[config_addr_width](addr)] \
* (duration + 1)
self.config_data_to_jtag = [self.config_data_to_jtag[-1]] \
+ [self.config_data_in] * duration
def config_write(self, addr, data):
rw_delay = self.rw_delay_sel[0]
duration = rw_delay.as_uint()
self.read = [0] * (duration + 1)
self.write = [1] * duration + [0]
self.config_addr_out = [BitVector[config_addr_width](addr)] \
* (duration + 1)
self.config_data_out = [BitVector[config_data_width](data)] \
* (duration + 1)
def read_gc_reg(self, addr):
if (addr == GCRegAddr.TST_ADDR):
out = self.TST[-1]
elif (addr == GCRegAddr.STALL_ADDR):
out = self.stall[-1]
elif (addr == GCRegAddr.CLK_SEL_ADDR):
out = self.clk_sel[-1]
elif (addr == GCRegAddr.RW_DELAY_SEL_ADDR):
out = self.rw_delay_sel[-1]
elif (addr == GCRegAddr.CLK_SWITCH_DELAY_SEL_ADDR):
out = self.clk_switch_delay_sel[-1]
else:
raise ValueError("Reading from invalid GC_reg address")
self.config_data_to_jtag = [BitVector[config_data_width](out)]
def write_gc_reg(self, addr, data):
if (addr == GCRegAddr.TST_ADDR):
self.TST = [BitVector[config_data_width](data)]
elif (addr == GCRegAddr.STALL_ADDR):
self.stall = [BitVector[self.num_stall_domains](data)]
elif (addr == GCRegAddr.CLK_SEL_ADDR):
self.clk_sel = [BitVector[1](data)]
elif (addr == GCRegAddr.RW_DELAY_SEL_ADDR):
self.rw_delay_sel = [BitVector[config_data_width](data)]
elif (addr == GCRegAddr.CLK_SWITCH_DELAY_SEL_ADDR):
self.clk_switch_delay_sel = [BitVector[1](data)]
else:
raise ValueError("Writing to invalid GC_reg address")
def global_reset(self, data):
if (data > 0):
self.reset_out = [1] * data + [0]
else:
self.reset_out = [1] * 20 + [0]
def wr_A050(self):
self.config_data_to_jtag = [BitVector[config_data_width](0xA050)]
def METHOD_NAME(self, addr, data):
save_stall_reg = self.stall[-1]
temp_stall_reg = BitVector[self.num_stall_domains](0)
mask = BitVector[self.num_stall_domains](addr)
for i in range(self.num_stall_domains):
if (mask[i] == 1):
temp_stall_reg[i] = 0
self.stall = [temp_stall_reg] * data + [save_stall_reg]
def set_config_data_in(self, data):
self.config_data_in = BitVector[config_data_width](data)
def __cleanup(self):
# Remove sequences from outputs/regs in preparation for the next
# op.
self.stall = [self.stall[-1]]
self.config_addr_out = [self.config_addr_out[-1]]
self.config_data_out = [self.config_data_out[-1]]
self.read = [self.read[-1]]
self.write = [self.write[-1]]
self.config_data_to_jtag = [self.config_data_to_jtag[-1]]
def __call__(self, **kwargs):
self.__cleanup()
# Op is mandatory. Other args are optional
op = kwargs['op']
if 'data' in kwargs:
data = kwargs['data']
if 'addr' in kwargs:
addr = kwargs['addr']
# Decode op
if (op == GCOp.CONFIG_WRITE):
self.config_write(addr, data)
if (op == GCOp.CONFIG_READ):
self.config_read(addr)
elif (op == GCOp.WRITE_A050):
self.wr_A050()
elif (op == GCOp.WRITE_TST):
self.write_gc_reg(GCRegAddr.TST_ADDR, data)
elif (op == GCOp.READ_TST):
self.read_gc_reg(GCRegAddr.TST_ADDR)
elif (op == GCOp.GLOBAL_RESET):
self.global_reset(data)
elif (op == GCOp.WRITE_STALL):
self.write_gc_reg(GCRegAddr.STALL_ADDR, data)
elif (op == GCOp.READ_STALL):
self.read_gc_reg(GCRegAddr.STALL_ADDR)
elif (op == GCOp.ADVANCE_CLK):
self.METHOD_NAME(addr, data)
elif (op == GCOp.READ_CLK_DOMAIN):
self.read_gc_reg(GCRegAddr.CLK_SEL_ADDR)
elif (op == GCOp.SWITCH_CLK):
self.write_gc_reg(GCRegAddr.CLK_SEL_ADDR, data)
elif (op == GCOp.WRITE_RW_DELAY_SEL):
self.write_gc_reg(GCRegAddr.RW_DELAY_SEL_ADDR, data)
elif (op == GCOp.READ_RW_DELAY_SEL):
self.read_gc_reg(GCRegAddr.RW_DELAY_SEL_ADDR)
elif (op == GCOp.WRITE_CLK_SWITCH_DELAY_SEL):
self.write_gc_reg(GCRegAddr.CLK_SWITCH_DELAY_SEL_ADDR, data)
elif (op == GCOp.READ_CLK_SWITCH_DELAY_SEL):
self.read_gc_reg(GCRegAddr.CLK_SWITCH_DELAY_SEL_ADDR)
return self
return _GlobalController
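# Minimal usage sketch (an illustration, not part of the original module): it
# assumes the gemstone/hwtypes/fault dependencies are installed; the widths
# below are arbitrary and config_op_width is not used by the model itself.
if __name__ == "__main__":
    GlobalController = gen_global_controller(config_data_width=32,
                                             config_addr_width=32,
                                             config_op_width=5)
    gc = GlobalController()
    gc(op=GCOp.WRITE_TST, data=42)   # write the TST register
    gc(op=GCOp.READ_TST)             # read it back onto the JTAG data path
    assert gc.config_data_to_jtag[-1].as_uint() == 42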
| null |
5,314 |
#!/usr/bin/env python3
# Copyright (c) 2015-2022 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Dummy Socks5 server for testing."""
import socket
import threading
import queue
import logging
logger = logging.getLogger("TestFramework.socks5")
# Protocol constants
class Command:
CONNECT = 0x01
class AddressType:
IPV4 = 0x01
DOMAINNAME = 0x03
IPV6 = 0x04
# Utility functions
def recvall(s, n):
"""Receive n bytes from a socket, or fail."""
rv = bytearray()
while n > 0:
d = s.recv(n)
if not d:
raise IOError('Unexpected end of stream')
rv.extend(d)
n -= len(d)
return rv
# Implementation classes
class Socks5Configuration():
"""Proxy configuration."""
def __init__(self):
self.addr = None # Bind address (must be set)
self.af = socket.AF_INET # Bind address family
self.unauth = False # Support unauthenticated
self.auth = False # Support authentication
class Socks5Command():
"""Information about an incoming socks5 command."""
def __init__(self, cmd, atyp, addr, port, username, password):
self.cmd = cmd # Command (one of Command.*)
self.atyp = atyp # Address type (one of AddressType.*)
self.addr = addr # Address
self.port = port # Port to connect to
self.username = username
self.password = password
def __repr__(self):
return 'Socks5Command(%s,%s,%s,%s,%s,%s)' % (self.cmd, self.atyp, self.addr, self.port, self.username, self.password)
class Socks5Connection():
def __init__(self, serv, conn):
self.serv = serv
self.conn = conn
def handle(self):
"""Handle socks5 request according to RFC192."""
try:
# Verify socks version
ver = recvall(self.conn, 1)[0]
if ver != 0x05:
raise IOError('Invalid socks version %i' % ver)
# Choose authentication method
nmethods = recvall(self.conn, 1)[0]
methods = bytearray(recvall(self.conn, nmethods))
method = None
if 0x02 in methods and self.serv.conf.auth:
method = 0x02 # username/password
elif 0x00 in methods and self.serv.conf.unauth:
method = 0x00 # unauthenticated
if method is None:
raise IOError('No supported authentication method was offered')
# Send response
self.conn.sendall(bytearray([0x05, method]))
# Read authentication (optional)
username = None
password = None
if method == 0x02:
ver = recvall(self.conn, 1)[0]
if ver != 0x01:
raise IOError('Invalid auth packet version %i' % ver)
ulen = recvall(self.conn, 1)[0]
username = str(recvall(self.conn, ulen))
plen = recvall(self.conn, 1)[0]
password = str(recvall(self.conn, plen))
# Send authentication response
self.conn.sendall(bytearray([0x01, 0x00]))
# Read connect request
ver, cmd, _, atyp = recvall(self.conn, 4)
if ver != 0x05:
raise IOError('Invalid socks version %i in connect request' % ver)
if cmd != Command.CONNECT:
raise IOError('Unhandled command %i in connect request' % cmd)
if atyp == AddressType.IPV4:
addr = recvall(self.conn, 4)
elif atyp == AddressType.DOMAINNAME:
n = recvall(self.conn, 1)[0]
addr = recvall(self.conn, n)
elif atyp == AddressType.IPV6:
addr = recvall(self.conn, 16)
else:
raise IOError('Unknown address type %i' % atyp)
port_hi,port_lo = recvall(self.conn, 2)
port = (port_hi << 8) | port_lo
# Send dummy response
self.conn.sendall(bytearray([0x05, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]))
cmdin = Socks5Command(cmd, atyp, addr, port, username, password)
self.serv.queue.put(cmdin)
logger.info('Proxy: %s', cmdin)
# Fall through to disconnect
except Exception as e:
logger.exception("socks5 request handling failed.")
self.serv.queue.put(e)
finally:
self.conn.close()
class Socks5Server():
def __init__(self, conf):
self.conf = conf
self.s = socket.socket(conf.af)
self.s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.s.bind(conf.addr)
self.s.listen(5)
self.running = False
self.thread = None
self.queue = queue.Queue() # report connections and exceptions to client
def run(self):
while self.running:
(sockconn, _) = self.s.accept()
if self.running:
conn = Socks5Connection(self, sockconn)
thread = threading.Thread(None, conn.handle)
thread.daemon = True
thread.METHOD_NAME()
def METHOD_NAME(self):
assert not self.running
self.running = True
self.thread = threading.Thread(None, self.run)
self.thread.daemon = True
self.thread.METHOD_NAME()
def stop(self):
self.running = False
# connect to self to end run loop
s = socket.socket(self.conf.af)
s.connect(self.conf.addr)
s.close()
self.thread.join()
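# Minimal usage sketch (an illustration, not part of the test framework): bind
# to an ephemeral localhost port, start the accept thread via the masked
# METHOD_NAME entry point, then shut down again.
if __name__ == "__main__":
    conf = Socks5Configuration()
    conf.addr = ("127.0.0.1", 0)       # let the OS pick a free port
    conf.unauth = True                 # accept unauthenticated clients
    serv = Socks5Server(conf)
    conf.addr = serv.s.getsockname()   # record the port actually bound
    serv.METHOD_NAME()                 # start the accept loop thread
    logger.info("SOCKS5 proxy listening on %s:%d", *conf.addr)
    serv.stop()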
| null |
5,315 |
import json
import base64
from django.test import TestCase
from django.urls import reverse
from django.conf import settings
from ..models import Agent
from adl_lrs.views import register
class AgentTests(TestCase):
@classmethod
def setUpClass(cls):
print("\n%s" % __name__)
super(AgentTests, cls).setUpClass()
def setUp(self):
self.username = "tester"
self.password = "test"
self.email = "[email protected]"
self.auth = "Basic %s" % base64.b64encode(
"%s:%s" % (self.username, self.password))
form = {'username': self.username, 'password': self.password,
'password2': self.password, 'email': self.email}
self.client.post(reverse(register), form,
X_Experience_API_Version=settings.XAPI_VERSION)
def test_get_no_agents(self):
agent = json.dumps({"name": "me", "mbox": "mailto:[email protected]"})
response = self.client.get(reverse('lrs:agents'), {
'agent': agent}, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 404)
self.assertEqual(
response.content, "Error with Agent. The agent partial did not match any agents on record")
def test_get(self):
a = json.dumps({"name": "me", "mbox": "mailto:[email protected]"})
Agent.objects.retrieve_or_create(**json.loads(a))
response = self.client.get(reverse('lrs:agents'), {
'agent': a}, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
r_data = json.loads(response.content)
self.assertTrue(isinstance(r_data['mbox'], list))
self.assertTrue(isinstance(r_data['name'], list))
self.assertEqual(r_data['mbox'], ['mailto:[email protected]'])
self.assertEqual(r_data['name'], ['me'])
self.assertEqual(r_data['objectType'], 'Person')
self.assertIn('content-length', response._headers)
def test_get_no_existing_agent(self):
a = json.dumps({"mbox": "mailto:[email protected]"})
response = self.client.get(reverse('lrs:agents'), {
'agent': a}, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(
response.content, 'Error with Agent. The agent partial did not match any agents on record')
self.assertEqual(response.status_code, 404)
def test_get_bad_agent(self):
a = json.dumps({})
response = self.client.get(reverse('lrs:agents'), {
'agent': a}, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(
response.content, 'One and only one of mbox, mbox_sha1sum, openid, account may be supplied with an Agent')
self.assertEqual(response.status_code, 400)
def test_head(self):
a = json.dumps({"name": "me", "mbox": "mailto:[email protected]"})
Agent.objects.retrieve_or_create(**json.loads(a))
response = self.client.head(reverse('lrs:agents'), {
'agent': a}, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.content, '')
self.assertIn('content-length', response._headers)
def test_get_no_agent(self):
response = self.client.get(reverse(
'lrs:agents'), Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 400)
def METHOD_NAME(self):
agent = json.dumps({"name": "me", "mbox": "mailto:[email protected]"})
response = self.client.post(reverse('lrs:agents'), {
'agent': agent}, content_type='application/x-www-form-urlencoded', Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 405)
| null |
5,316 |
import mock
from django.test import TestCase
from cradmin_legacy import cradmin_testhelpers
from cradmin_legacy import crinstance
from model_bakery import baker
from devilry.apps.core.baker_recipes import ACTIVE_PERIOD_START, ACTIVE_PERIOD_END
from devilry.devilry_admin.views.subject import overview
from devilry.utils import datetimeutils
class TestOverview(TestCase, cradmin_testhelpers.TestCaseMixin):
viewclass = overview.Overview
def test_title(self):
testsubject = baker.make('core.Subject',
short_name='testsubject')
mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=testsubject)
self.assertEqual('testsubject',
mockresponse.selector.one('title').alltext_normalized)
def test_h1(self):
testsubject = baker.make('core.Subject',
long_name='Test Subject')
mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=testsubject)
self.assertEqual('Test Subject',
mockresponse.selector.one('h1').alltext_normalized)
def test_createperiod_link_text(self):
testsubject = baker.make('core.Subject')
mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=testsubject)
self.assertEqual('Create new semester',
mockresponse.selector.one(
'#devilry_admin_period_createperiod_link').alltext_normalized)
def test_link_urls(self):
testsubject = baker.make('core.Subject')
mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=testsubject)
self.assertEqual(3, len(mockresponse.request.cradmin_instance.reverse_url.call_args_list))
self.assertEqual(
mock.call(appname='edit', args=(), viewname='INDEX', kwargs={}),
mockresponse.request.cradmin_instance.reverse_url.call_args_list[0])
self.assertEqual(
mock.call(appname='createperiod', args=(), viewname='INDEX', kwargs={}),
mockresponse.request.cradmin_instance.reverse_url.call_args_list[1])
self.assertEqual(
mock.call(appname='admins', args=(), viewname='INDEX', kwargs={}),
mockresponse.request.cradmin_instance.reverse_url.call_args_list[2])
def METHOD_NAME(self):
testsubject = baker.make('core.Subject')
mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=testsubject)
self.assertFalse(mockresponse.selector.exists('#devilry_admin_period_overview_periodlist'))
def test_periodlist_itemrendering_name(self):
testsubject = baker.make('core.Subject')
baker.make_recipe('devilry.apps.core.period_active',
parentnode=testsubject,
long_name='Test Period')
mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=testsubject)
self.assertEqual('Test Period',
mockresponse.selector.one(
'.cradmin-legacy-listbuilder-itemvalue-titledescription-title').alltext_normalized)
def test_periodlist_itemrendering_url(self):
testsubject = baker.make('core.Subject')
testperiod = baker.make_recipe('devilry.apps.core.period_active',
parentnode=testsubject,
long_name='Test Period')
mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=testsubject)
self.assertEqual(crinstance.reverse_cradmin_url(instanceid='devilry_admin_periodadmin',
appname='overview',
roleid=testperiod.id),
mockresponse.selector.one(
'.devilry-admin-period-overview-perioditemframe')['href'])
def test_periodlist_itemrendering_start_time(self):
testsubject = baker.make('core.Subject')
baker.make_recipe('devilry.apps.core.period_active', parentnode=testsubject)
with self.settings(DATETIME_FORMAT=datetimeutils.ISODATETIME_DJANGOFORMAT, USE_L10N=False):
mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=testsubject)
self.assertEqual(datetimeutils.isoformat_noseconds(ACTIVE_PERIOD_START),
mockresponse.selector.one(
'.devilry-cradmin-perioditemvalue-start-time-value').alltext_normalized)
def test_periodlist_itemrendering_end_time(self):
testsubject = baker.make('core.Subject')
baker.make_recipe('devilry.apps.core.period_active',
parentnode=testsubject)
with self.settings(DATETIME_FORMAT=datetimeutils.ISODATETIME_DJANGOFORMAT, USE_L10N=False):
mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=testsubject)
self.assertEqual(datetimeutils.isoformat_noseconds(ACTIVE_PERIOD_END),
mockresponse.selector.one(
'.devilry-cradmin-perioditemvalue-end-time-value').alltext_normalized)
def test_periodlist_ordering(self):
testsubject = baker.make('core.Subject')
baker.make_recipe('devilry.apps.core.period_active',
parentnode=testsubject,
long_name='Period 2')
baker.make_recipe('devilry.apps.core.period_old',
parentnode=testsubject,
long_name='Period 1')
baker.make_recipe('devilry.apps.core.period_future',
parentnode=testsubject,
long_name='Period 3')
mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=testsubject)
periodnames = [
element.alltext_normalized
for element in mockresponse.selector.list(
'.cradmin-legacy-listbuilder-itemvalue-titledescription-title')]
self.assertEqual([
'Period 3',
'Period 2',
'Period 1',
], periodnames)
def test_periodlist_only_periods_in_subject(self):
testsubject = baker.make('core.Subject')
othersubject = baker.make('core.Subject')
baker.make_recipe('devilry.apps.core.period_active',
parentnode=testsubject,
long_name='Testsubject Period 1')
baker.make_recipe('devilry.apps.core.period_active',
parentnode=othersubject,
long_name='Othersubject Period 1')
mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=testsubject)
self.assertEqual(
1,
mockresponse.selector.count('.cradmin-legacy-listbuilder-itemvalue-titledescription-title')
)
self.assertEqual(
'Testsubject Period 1',
mockresponse.selector.one(
'.cradmin-legacy-listbuilder-itemvalue-titledescription-title').alltext_normalized
)
| null |
5,317 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fuzzer helpers."""
import importlib
import os
import re
from typing import Optional
from common import fuzzer_config
from common import logs
from common import utils
DEFAULT_FUZZ_TARGET_NAME = 'fuzz-target'
FUZZ_TARGET_SEARCH_STRING = b'LLVMFuzzerTestOneInput'
# Must be a valid python module and docker tag.
VALID_FUZZER_REGEX = re.compile(r'^[a-z][a-z0-9_]*$')
FUZZERS_DIR = os.path.join(utils.ROOT_DIR, 'fuzzers')
COVERAGE_TOOLS = {'coverage', 'coverage_source_based'}
class FuzzerDirectory:
"""Class representing a fuzzer directory in fuzzers/."""
def __init__(self, name):
        # TODO(metzman): Use this class to represent fuzzers in general.
# For example, replace the dict format we use for variants with this.
self.name = name
@property
def directory(self):
"""Returns the path to the directory in fuzzers/."""
return os.path.join(FUZZERS_DIR, self.name)
@property
def fuzzer_py(self):
"""Returns the path to the fuzzer.py file in fuzzer directory."""
return os.path.join(self.directory, 'fuzzer.py')
@property
def METHOD_NAME(self):
"""Returns the path to the runner.Dockerfile file in fuzzer
directory."""
return os.path.join(self.directory, 'runner.Dockerfile')
@property
def builder_dockerfile(self):
"""Returns the path to the builder.Dockerfile file in fuzzer
directory."""
return os.path.join(self.directory, 'builder.Dockerfile')
@property
def dockerfiles(self):
"""Returns a list of paths to the runner and builder dockerfiles in the
fuzzer directory."""
return [self.METHOD_NAME, self.builder_dockerfile]
def get_fuzz_target_binary(search_directory: str,
fuzz_target_name: str) -> Optional[str]:
"""Return target binary path."""
if fuzz_target_name:
fuzz_target_binary = os.path.join(search_directory, fuzz_target_name)
if os.path.exists(fuzz_target_binary):
return fuzz_target_binary
return None
default_fuzz_target_binary = os.path.join(search_directory,
DEFAULT_FUZZ_TARGET_NAME)
if os.path.exists(default_fuzz_target_binary):
return default_fuzz_target_binary
for root, _, files in os.walk(search_directory):
if root == 'uninstrumented':
continue
for filename in files:
if filename.endswith('-uninstrumented'):
# Skip uninstrumented binaries (e.g. with QSYM).
continue
file_path = os.path.join(root, filename)
with open(file_path, 'rb') as file_handle:
if FUZZ_TARGET_SEARCH_STRING in file_handle.read():
return file_path
return None
def validate_name(fuzzer):
"""Return True if |fuzzer| is a valid fuzzbench fuzzer name."""
# Although importing probably allows a subset of what the regex allows, use
# the regex anyway to be safe. The regex is enforcing that the fuzzer is a
# valid path for GCS or a linux system.
if VALID_FUZZER_REGEX.match(fuzzer) is None:
logs.error('Fuzzer: %s does not conform to pattern: %s.', fuzzer,
VALID_FUZZER_REGEX.pattern)
return False
return True
def validate(fuzzer):
"""Return True if |fuzzer| is a valid fuzzbench fuzzer."""
if not validate_name(fuzzer):
return False
# Try importing the fuzzer module.
module_name = f'fuzzers.{fuzzer}.fuzzer'
try:
importlib.import_module(module_name)
return True
except Exception as error: # pylint: disable=broad-except
logs.error('Encountered "%s" while trying to import %s.', error,
module_name)
return False
def get_fuzzer_names():
"""Returns a list of names of all fuzzers."""
fuzzers_dir = os.path.join(utils.ROOT_DIR, 'fuzzers')
fuzzers = []
for fuzzer in os.listdir(fuzzers_dir):
if not os.path.isfile(os.path.join(fuzzers_dir, fuzzer, 'fuzzer.py')):
continue
if fuzzer in COVERAGE_TOOLS:
continue
fuzzers.append(fuzzer)
return fuzzers
def get_languages(fuzzer):
"""Returns the programming languages |fuzzer| can fuzz."""
config = fuzzer_config.get_config(fuzzer)
return config.get('languages', ['c++'])
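# Minimal usage sketch (an illustration, not part of the original module): it
# assumes it runs inside a fuzzbench checkout so the common.* imports resolve;
# 'afl' is just an example fuzzer name.
if __name__ == '__main__':
    print(validate_name('afl'))    # True: lowercase identifier
    print(validate_name('AFL++'))  # False: rejected by VALID_FUZZER_REGEX
    afl_dir = FuzzerDirectory('afl')
    print(afl_dir.fuzzer_py)       # .../fuzzers/afl/fuzzer.py
    print(afl_dir.dockerfiles)     # runner and builder Dockerfile paths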
| null |
5,318 |
# Copyright (c) 2023 Diego Gasco ([email protected]), Diegomangasco on GitHub
"""
Requirements:
- numpy version 1.21
- scipy version 1.3.3
Notes:
- Each column of the features matrix corresponds to a class item
"""
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def column_reshape(input_array: np.ndarray) -> np.ndarray:
"""Function to reshape a row Numpy array into a column Numpy array
>>> input_array = np.array([1, 2, 3])
>>> column_reshape(input_array)
array([[1],
[2],
[3]])
"""
return input_array.reshape((input_array.size, 1))
def covariance_within_classes(
features: np.ndarray, labels: np.ndarray, classes: int
) -> np.ndarray:
"""Function to compute the covariance matrix inside each class.
>>> features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> labels = np.array([0, 1, 0])
>>> covariance_within_classes(features, labels, 2)
array([[0.66666667, 0.66666667, 0.66666667],
[0.66666667, 0.66666667, 0.66666667],
[0.66666667, 0.66666667, 0.66666667]])
"""
covariance_sum = np.nan
for i in range(classes):
data = features[:, labels == i]
data_mean = data.mean(1)
# Centralize the data of class i
centered_data = data - column_reshape(data_mean)
if i > 0:
            # If covariance_sum has already been initialized (i > 0)
covariance_sum += np.dot(centered_data, centered_data.T)
else:
# If covariance_sum is np.nan (i.e. first loop)
covariance_sum = np.dot(centered_data, centered_data.T)
return covariance_sum / features.shape[1]
def METHOD_NAME(
features: np.ndarray, labels: np.ndarray, classes: int
) -> np.ndarray:
"""Function to compute the covariance matrix between multiple classes
>>> features = np.array([[9, 2, 3], [4, 3, 6], [1, 8, 9]])
>>> labels = np.array([0, 1, 0])
>>> covariance_between_classes(features, labels, 2)
array([[ 3.55555556, 1.77777778, -2.66666667],
[ 1.77777778, 0.88888889, -1.33333333],
[-2.66666667, -1.33333333, 2. ]])
"""
general_data_mean = features.mean(1)
covariance_sum = np.nan
for i in range(classes):
data = features[:, labels == i]
device_data = data.shape[1]
data_mean = data.mean(1)
if i > 0:
            # If covariance_sum has already been initialized (i > 0)
covariance_sum += device_data * np.dot(
column_reshape(data_mean) - column_reshape(general_data_mean),
(column_reshape(data_mean) - column_reshape(general_data_mean)).T,
)
else:
# If covariance_sum is np.nan (i.e. first loop)
covariance_sum = device_data * np.dot(
column_reshape(data_mean) - column_reshape(general_data_mean),
(column_reshape(data_mean) - column_reshape(general_data_mean)).T,
)
return covariance_sum / features.shape[1]
def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
"""
Principal Component Analysis.
For more details, see: https://en.wikipedia.org/wiki/Principal_component_analysis.
Parameters:
* features: the features extracted from the dataset
* dimensions: to filter the projected data for the desired dimension
>>> test_principal_component_analysis()
"""
# Check if the features have been loaded
if features.any():
data_mean = features.mean(1)
# Center the dataset
centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
_, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in reverse order (-1), then keep only the first 'dimensions' columns
filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
projected_data = np.dot(filtered_eigenvectors.T, features)
logging.info("Principal Component Analysis computed")
return projected_data
else:
logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
logging.error("Dataset empty")
raise AssertionError
def linear_discriminant_analysis(
features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
"""
Linear Discriminant Analysis.
For more details, see: https://en.wikipedia.org/wiki/Linear_discriminant_analysis.
Parameters:
* features: the features extracted from the dataset
* labels: the class labels of the features
* classes: the number of classes present in the dataset
* dimensions: to filter the projected data for the desired dimension
>>> test_linear_discriminant_analysis()
"""
# Check if the dimension desired is less than the number of classes
assert classes > dimensions
# Check if features have been already loaded
if features.any:
_, eigenvectors = eigh(
METHOD_NAME(features, labels, classes),
covariance_within_classes(features, labels, classes),
)
filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
filtered_svd_matrix = svd_matrix[:, 0:dimensions]
projected_data = np.dot(filtered_svd_matrix.T, features)
logging.info("Linear Discriminant Analysis computed")
return projected_data
else:
logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
logging.error("Dataset empty")
raise AssertionError
def test_linear_discriminant_analysis() -> None:
# Create dummy dataset with 2 classes and 3 features
features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
labels = np.array([0, 0, 0, 1, 1])
classes = 2
dimensions = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(AssertionError) as error_info:
projected_data = linear_discriminant_analysis(
features, labels, classes, dimensions
)
if isinstance(projected_data, np.ndarray):
raise AssertionError(
"Did not raise AssertionError for dimensions > classes"
)
assert error_info.type is AssertionError
def test_principal_component_analysis() -> None:
features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
dimensions = 2
expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])
with pytest.raises(AssertionError) as error_info:
output = principal_component_analysis(features, dimensions)
if not np.allclose(expected_output, output):
raise AssertionError
assert error_info.type is AssertionError
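# Illustrative check added for exposition (not part of the original module):
# 3 features x 5 samples reduced to 2 dimensions keeps one column per sample.
def test_principal_component_analysis_projection_shape() -> None:
    features = np.array(
        [[1.0, 2.0, 3.0, 4.0, 5.0], [2.0, 4.0, 6.0, 8.0, 10.0], [1.0, 1.0, 2.0, 2.0, 3.0]]
    )
    projected = principal_component_analysis(features, 2)
    assert projected.shape == (2, 5)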
if __name__ == "__main__":
import doctest
doctest.testmod()
| null |
5,319 |
from __future__ import annotations
from datetime import datetime
from typing import Any, Iterable, Union
from dcs import Mission
from dcs.planes import AJS37, F_14B, JF_17
from dcs.point import MovingPoint, PointAction
from dcs.unitgroup import FlyingGroup
from game.ato import Flight, FlightWaypoint
from game.ato.flightwaypointtype import FlightWaypointType
from game.ato.traveltime import GroundSpeed
from game.flightplan.waypointactions.taskcontext import TaskContext
from game.missiongenerator.missiondata import MissionData
from game.theater import MissionTarget, TheaterUnit
from game.unitmap import UnitMap
TARGET_WAYPOINTS = (
FlightWaypointType.TARGET_GROUP_LOC,
FlightWaypointType.TARGET_POINT,
FlightWaypointType.TARGET_SHIP,
)
class PydcsWaypointBuilder:
def __init__(
self,
waypoint: FlightWaypoint,
group: FlyingGroup[Any],
flight: Flight,
mission: Mission,
now: datetime,
mission_data: MissionData,
unit_map: UnitMap,
generated_waypoint_idx: int,
) -> None:
self.waypoint = waypoint
self.group = group
self.package = flight.package
self.flight = flight
self.mission = mission
self.now = now
self.mission_data = mission_data
self.unit_map = unit_map
self.generated_waypoint_idx = generated_waypoint_idx
def dcs_name_for_waypoint(self) -> str:
return self.waypoint.name
def METHOD_NAME(self) -> MovingPoint:
waypoint = self.group.add_waypoint(
self.waypoint.position,
self.waypoint.alt.meters,
# The speed we pass will be overridden for most waypoints because we'll set
# a TOT and leave the speed up to the AI, but for the few types of waypoints
# that don't have TOTs (e.g. nav points), we set a reasonable cruise speed
            # so pydcs doesn't assign the default of 600kph ground speed (which is very
# slow at most altitudes).
#
# Calling GroundSpeed.for_flight isn't really a correct fix here. We ought
# to be using FlightPlan.speed_between_waypoints, but the way the waypoint
# builder is called makes it difficult to track the previous waypoint. This
# is probably good enough for a stop gap, and most of the flight planning
# code is hopefully being rewritten soon anyway.
#
# https://github.com/dcs-liberation/dcs_liberation/issues/3113
speed=GroundSpeed.for_flight(self.flight, self.waypoint.alt).kph,
name=self.dcs_name_for_waypoint(),
)
if self.waypoint.flyover:
waypoint.action = PointAction.FlyOverPoint
# It seems we need to leave waypoint.type exactly as it is even
# though it's set to "Turning Point". If I set this to "Fly Over
# Point" and then save the mission in the ME DCS resets it.
if self.flight.client_count > 0:
                # Set altitude to 0 AGL for player flights so that they can slave target pods or weapons to the waypoint
waypoint.alt = 0
waypoint.alt_type = "RADIO"
waypoint.alt_type = self.waypoint.alt_type
tot = self.flight.flight_plan.tot_for_waypoint(self.waypoint)
if tot is not None:
self.set_waypoint_tot(waypoint, tot)
self.add_tasks(waypoint)
return waypoint
def add_tasks(self, waypoint: MovingPoint) -> None:
ctx = TaskContext(self.now)
for action in self.waypoint.actions:
for task in action.iter_tasks(ctx):
waypoint.add_task(task)
for option in self.waypoint.options.values():
for task in option.iter_tasks(ctx):
waypoint.add_task(task)
def set_waypoint_tot(self, waypoint: MovingPoint, tot: datetime) -> None:
self.waypoint.tot = tot
if not self._viggen_client_tot():
waypoint.ETA = int((tot - self.now).total_seconds())
waypoint.ETA_locked = True
waypoint.speed_locked = False
def _viggen_client_tot(self) -> bool:
"""Viggen player aircraft consider any waypoint with a TOT set to be a target ("M") waypoint.
If the flight is a player controlled Viggen flight, no TOT should be set on any waypoint except actual target waypoints.
"""
if (
self.flight.client_count > 0
and self.flight.unit_type.dcs_unit_type == AJS37
) and (self.waypoint.waypoint_type not in TARGET_WAYPOINTS):
return True
else:
return False
def register_special_waypoints(
self, targets: Iterable[Union[MissionTarget, TheaterUnit]]
) -> None:
"""Create special target waypoints for various aircraft"""
for i, t in enumerate(targets):
if self.group.units[0].unit_type == JF_17 and i < 4:
self.group.add_nav_target_point(t.position, "PP" + str(i + 1))
if self.group.units[0].unit_type == F_14B and i == 0:
self.group.add_nav_target_point(t.position, "ST")
| null |
5,320 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
model_position = 0
model_selected = 1
model_enabled = 2
view_item_name = 0
view_item_selected = 1
view_item_enabled = 2
class ListSelectorPresenter(object):
def __init__(self, view, model):
"""
Initialize with a view and model reference
:param view: A reference to a view to control
:param model: A reference to a dictionary to act as a model. The following structure is expected:
{'name': [index_position, ticked, enabled]}
"""
self.view = view
self.model = model
self.filter_string = ""
self.filter_type = "Include"
self.filter_list = []
self.show_only_selected = False
self.view.set_filter_line_edit_changed_action(self.METHOD_NAME)
self.view.set_item_selection_changed_action(self.handle_selection_changed)
self.view.set_filter_type_combo_changed_action(self.set_filter_type)
self.view.set_select_all_checkbox_action(self.handle_select_all_checkbox_changed)
self.view.set_row_moved_checkbox_action(self.handle_row_moved)
self.view.set_show_selected_checkbox_changed(self.handle_show_selected_checked)
def update_view_from_model(self):
filtered_list = self.get_filtered_list()
self.view.clearItems()
self.view.addItems(filtered_list)
number_selected = len([item for item in filtered_list if item[view_item_selected]])
number_of_selected_displayed = len([item for item in self.model.values() if item[model_selected]])
self.view.update_number_of_selected_label(number_selected, number_of_selected_displayed)
def METHOD_NAME(self, filter_string):
self.filter_string = filter_string
self.update_view_from_model()
def handle_selection_changed(self, name, state):
self.model[name][model_selected] = state
self.update_view_from_model()
def handle_select_all_checkbox_changed(self, state):
for item in self.get_filtered_list():
self.model[item[view_item_name]][model_selected] = state
self.update_view_from_model()
def get_selected_items(self):
selected_items = [name for name in self.model if self.model[name][model_selected]]
selected_items.sort(key=lambda selected_item: self.model[selected_item][model_position])
return selected_items
def get_selected_items_and_positions(self):
"""
:return: A list of 2-tuples containing the (name, list position)
of each selected item. They are ordered by the position
"""
selected_items = [(name, self.model[name][model_position]) for name in self.model if self.model[name][model_selected]]
selected_items.sort(key=lambda selected_item: self.model[selected_item[0]][model_position])
return selected_items
def set_filter_type(self):
self.filter_type = self.view.filter_type_combo_box.currentText()
self.update_view_from_model()
def handle_row_moved(self, insertion_index, rows_moved):
filtered_list = self.get_filtered_list()
if insertion_index >= len(filtered_list):
new_position = len(filtered_list)
else:
name_of_row_to_insert_before = self.get_filtered_list()[insertion_index][view_item_name]
new_position = self.model[name_of_row_to_insert_before][model_position]
names_of_rows_moved = [self.get_filtered_list()[index][view_item_name] for index in rows_moved]
for index, row in enumerate(names_of_rows_moved):
old_position = self.model[row][model_position] + index
new_position_temp = new_position + index
for name in self.model:
if new_position_temp <= self.model[name][model_position] < old_position:
self.model[name][model_position] += 1
self.model[row][model_position] = new_position_temp
def handle_show_selected_checked(self, check_state):
if check_state:
self.show_only_selected = True
self.view.disable_filtering_options()
else:
self.show_only_selected = False
self.view.enable_filtering_options()
self.update_view_from_model()
def get_filtered_list(self):
if self.show_only_selected:
filtered_list = [
[name, vals[model_selected], vals[model_enabled]] for (name, vals) in self.model.items() if vals[model_selected]
]
filtered_list.sort(key=lambda item: self.model[item[view_item_name]][model_position])
return filtered_list
if self.filter_type == "Include":
filtered_list = [
[name, vals[model_selected], vals[model_enabled]] for (name, vals) in self.model.items() if self.filter_string in name
]
else:
filtered_list = [
[name, vals[model_selected], vals[model_enabled]] for (name, vals) in self.model.items() if self.filter_string not in name
]
filtered_list.sort(key=lambda item: self.model[item[view_item_name]][model_position])
filtered_list = [item for item in filtered_list if item[view_item_name] not in self.filter_list]
return filtered_list
def update_filter_list(self, filter_list):
self.filter_list = filter_list
self.update_view_from_model()
def update_model(self, new_model):
self.model = new_model
self.update_view_from_model()
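# Minimal usage sketch (an illustration, not part of the original module): a
# stub object stands in for the Qt view, and the model maps each name to
# [position, selected, enabled] as documented in __init__.
if __name__ == "__main__":
    class _StubView(object):
        def __getattr__(self, _name):
            return lambda *args, **kwargs: None  # accept every hook registration
    model = {"alpha": [0, True, True], "beta": [1, False, True], "gamma": [2, True, True]}
    presenter = ListSelectorPresenter(_StubView(), model)
    print(presenter.get_selected_items())                # ['alpha', 'gamma']
    print(presenter.get_selected_items_and_positions())  # [('alpha', 0), ('gamma', 2)]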
| null |
5,321 |
from abc import ABC
from os import path
import rdflib as rl
from django.forms.widgets import TextInput
from django.template import loader
from api.error import APIError
from modelview.rdf import factory, handler, widget
from modelview.rdf.widget import DynamicFactoryArrayWidget
class Field(ABC):
"""
:ivar rdf_name: IRI of this property
:ivar verbose_name: A readable label (not `rdfs:label`)
:ivar handler: A handler used to parse this field.
Defaults to `handler.DefaultHandler`
:ivar help_text: Some helpful text
"""
_handler = handler.DefaultHandler
def __init__(
self,
rdf_name,
verbose_name=None,
handler: handler.Handler = None,
help_text: str = None,
widget_cls=TextInput,
widget_kwargs_gen=None,
filter=None,
subclass=False,
follow=True,
hidden=False,
inverse=False,
template=None,
):
self.rdf_name = rdf_name # Some IRI
self.verbose_name = verbose_name
self.handler = handler if handler else self._handler()
self.help_text = help_text
self.filter = filter
self.subclass = subclass
self.follow = follow
self.hidden = hidden
self._widget = DynamicFactoryArrayWidget(
subwidget_form=widget_cls, subwidget_form_kwargs_gen=widget_kwargs_gen
)
self.inverse = inverse
self.is_literal = True
@property
def template(self):
raise NotImplementedError
def widget(self):
return self._widget
def _build_query_parts(
self, subject, object, where=None, filter=None, options=None
):
if filter is None:
filter = []
if where is None:
where = []
filter = filter.copy()
filter.append(f"?p = <{self.rdf_name}>")
if self.filter:
where += [f"{object} {f} ." for f in self.filter]
if options is None:
options = []
if self.inverse:
where.append(f"{object} ?p {subject}.")
else:
where.append(f"{subject} ?p {object}.")
return where, options, filter
@property
def _label_option(self):
return "?o rdfs:label ?lo"
def METHOD_NAME(self, subject, object, where=None, filter=None, options=None):
where, options, filter = self._build_query_parts(
subject, object, where=where, filter=filter, options=options
)
query = " ".join(where)
for o in options:
query += f"OPTIONAL {{ {o} }} . "
if filter:
query += f"FILTER ({' && ' .join(filter)}) . "
return query
def combine_with_form(self, value, index=None):
FORM_TEMPLATE = loader.get_template(
path.join("modelview", "widgets", "dynamic_edit.html")
)
field = self._widget.subwidget_form(
**self._widget.subwidget_kwargs_gen()
).render(
"",
value,
{
"class": "form-control",
"id": "input__" + index,
"onfocusout": f"hide_field('{index}')",
},
)
return FORM_TEMPLATE.render(dict(value=value, id=index, field=field))
@property
def spec(self):
return dict(
verbose_name=self.verbose_name,
help_text=self.help_text,
template=self.template,
literal=self.is_literal,
)
def process_data(self, data):
raise NotImplementedError
class TextField(Field):
@property
def template(self):
return {"type": "text", "field": "literal"}
def process_data(self, data):
x = data.get("literal")
if not x:
return None
return f'"{x}"'
class IRIField(TextField):
def __init__(self, rdf_name, **kwargs):
super().__init__(rdf_name, **kwargs)
@property
def template(self):
return {"type": "text", "field": "iri"}
def process_data(self, data):
x = data.get("iri")
if not x:
return None
elif not handler.url_regex.match(x):
raise APIError("Invalid IRI")
else:
return f"<{x}>"
class TextAreaField(TextField):
@property
def template(self):
return {"type": "textarea"}
class YearField(Field):
def __init__(self, rdf_name, **kwargs):
super().__init__(rdf_name, **kwargs)
@property
def template(self):
return {"type": "text"}
def process_data(self, data):
x = data.get("literal")
if not x:
return None
try:
return int(x)
except ValueError:
raise APIError("Invalid year")
class PredefinedInstanceField(IRIField):
def __init__(self, rdf_name, cls_iri, subclass=False, **kwargs):
super().__init__(rdf_name, **kwargs)
self.is_literal = False
self.filter = [f"{'rdfs:subClassOf' if subclass else 'a'} <{cls_iri}>"]
self.cls_iri = cls_iri
self.subclass = subclass
self.handler = handler.IRIHandler()
@property
def template(self):
return {"type": "select", "class": self.cls_iri, "subclass": self.subclass}
class FactoryField(IRIField):
def __init__(self, factory, **kwargs):
super(FactoryField, self).__init__(**kwargs)
self.factory = factory
self._widget = DynamicFactoryArrayWidget(
subwidget_form=widget._factory_field(factory)
)
self.is_literal = False
self.handler = handler.IRIHandler()
@property
def spec(self):
return dict(factory=self.factory._factory_id, **super(FactoryField, self).spec)
@property
def template(self):
return None
@property
def _label_option(self):
custom = self.factory._label_option("?o", "?lo")
if custom:
return custom
return super(FactoryField, self)._label_option
class Container(handler.Rederable):
def __init__(self, field):
self.field = field
self.values = []
def to_json(self):
for v in self.values:
if isinstance(v, rl.Literal):
yield v
elif isinstance(v, (rl.URIRef, rl.BNode)):
yield dict(iri=v)
elif isinstance(v, factory.RDFFactory):
d = dict(iri=v.iri.values[0])
if v.label:
d["label"] = v.label
yield d
elif isinstance(v, handler.NamedIRI):
yield dict(iri=v.iri, label=v.label)
else:
raise Exception(v)
| null |
5,322 |
from __future__ import annotations
import os
import shutil
import subprocess
import pytest
from dxtbx.model.experiment_list import ExperimentListFactory
from libtbx import phil
from scitbx import matrix
from dials.algorithms.refinement import RefinerFactory
from dials.array_family import flex
def test1(dials_data, tmp_path):
data_dir = dials_data("refinement_test_data", pathlib=True)
result = subprocess.run(
[
shutil.which("dials.refine"),
data_dir / "multi_stills_combined.json",
data_dir / "multi_stills_combined.pickle",
],
cwd=tmp_path,
)
assert not result.returncode and not result.stderr
# load results
reg_exp = ExperimentListFactory.from_json_file(
data_dir / "multi_stills_regression.json", check_format=False
)
ref_exp = ExperimentListFactory.from_json_file(
tmp_path / "refined.expt", check_format=False
)
# compare results
tol = 1e-5
for b1, b2 in zip(reg_exp.beams(), ref_exp.beams()):
assert b1.is_similar_to(
b2,
wavelength_tolerance=tol,
direction_tolerance=tol,
polarization_normal_tolerance=tol,
polarization_fraction_tolerance=tol,
)
s0_1 = matrix.col(b1.get_unit_s0())
s0_2 = matrix.col(b2.get_unit_s0())
assert s0_1.accute_angle(s0_2, deg=True) < 0.0057 # ~0.1 mrad
for c1, c2 in zip(reg_exp.crystals(), ref_exp.crystals()):
assert c1.is_similar_to(c2)
for d1, d2 in zip(reg_exp.detectors(), ref_exp.detectors()):
assert d1.is_similar_to(
d2,
fast_axis_tolerance=1e-4,
slow_axis_tolerance=1e-4,
origin_tolerance=1e-2,
)
@pytest.mark.skipif(
os.name == "nt",
reason="Multiprocessing error on Windows: 'This class cannot be instantiated from Python'",
)
def test_multi_process_refinement_gives_same_results_as_single_process_refinement(
dials_data, tmp_path
):
data_dir = dials_data("refinement_test_data", pathlib=True)
cmd = [
shutil.which("dials.refine"),
data_dir / "multi_stills_combined.json",
data_dir / "multi_stills_combined.pickle",
"outlier.algorithm=null",
"engine=LBFGScurvs",
"output.reflections=None",
]
result = subprocess.run(
cmd + ["output.experiments=refined_nproc4.expt", "nproc=4"],
cwd=tmp_path,
)
assert not result.returncode and not result.stderr
result = subprocess.run(
cmd + ["output.experiments=refined_nproc1.expt", "nproc=1"],
cwd=tmp_path,
)
assert not result.returncode and not result.stderr
# load results
nproc1 = ExperimentListFactory.from_json_file(
tmp_path / "refined_nproc1.expt", check_format=False
)
nproc4 = ExperimentListFactory.from_json_file(
tmp_path / "refined_nproc4.expt", check_format=False
)
# compare results
for b1, b2 in zip(nproc1.beams(), nproc4.beams()):
assert b1.is_similar_to(b2)
for c1, c2 in zip(nproc1.crystals(), nproc4.crystals()):
assert c1.is_similar_to(c2)
for d1, d2 in zip(nproc1.detectors(), nproc4.detectors()):
assert d1.is_similar_to(
d2,
fast_axis_tolerance=5e-5,
slow_axis_tolerance=5e-5,
origin_tolerance=5e-5,
)
def METHOD_NAME(dials_data, tmp_path):
# Avoid a regression to https://github.com/dials/dials/issues/1142 by
# testing that refinement succeeds when some parameterisations are fixed
# by parameter auto reduction code, but restraints are requested for
# those parameterisations.
# The phil scope
from dials.algorithms.refinement.refiner import phil_scope
user_phil = phil.parse(
"""
refinement {
parameterisation {
auto_reduction {
min_nref_per_parameter = 90
action = fail *fix remove
}
crystal {
unit_cell {
restraints {
tie_to_target {
values = 95 95 132 90 90 120
sigmas = 1 1 1 0 0 0
id = 0 1 2 3 4 5 6 7 8 9
}
}
}
}
}
}
"""
)
working_phil = phil_scope.fetch(source=user_phil)
working_params = working_phil.extract()
# use the multi stills test data
data_dir = dials_data("refinement_test_data", pathlib=True)
experiments_path = data_dir / "multi_stills_combined.json"
pickle_path = data_dir / "multi_stills_combined.pickle"
experiments = ExperimentListFactory.from_json_file(
experiments_path, check_format=False
)
reflections = flex.reflection_table.from_file(pickle_path)
refiner = RefinerFactory.from_parameters_data_experiments(
working_params, reflections, experiments
)
history = refiner.run()
rmsd_limits = (0.2044, 0.2220, 0.0063)
for a, b in zip(history["rmsd"][-1], rmsd_limits):
assert a < b
| null |
5,323 |
#
# demo application for http3_server.py
#
import datetime
import os
from urllib.parse import urlencode
import httpbin
from asgiref.wsgi import WsgiToAsgi
from starlette.applications import Starlette
from starlette.responses import PlainTextResponse, Response
from starlette.routing import Mount, Route, WebSocketRoute
from starlette.staticfiles import StaticFiles
from starlette.templating import Jinja2Templates
from starlette.types import Receive, Scope, Send
from starlette.websockets import WebSocketDisconnect
ROOT = os.path.dirname(__file__)
STATIC_ROOT = os.environ.get("STATIC_ROOT", os.path.join(ROOT, "htdocs"))
STATIC_URL = "/"
LOGS_PATH = os.path.join(STATIC_ROOT, "logs")
QVIS_URL = "https://qvis.quictools.info/"
templates = Jinja2Templates(directory=os.path.join(ROOT, "templates"))
async def METHOD_NAME(request):
"""
Simple homepage.
"""
await request.send_push_promise("/style.css")
return templates.TemplateResponse("index.html", {"request": request})
async def echo(request):
"""
HTTP echo endpoint.
"""
content = await request.body()
media_type = request.headers.get("content-type")
return Response(content, media_type=media_type)
async def logs(request):
"""
Browsable list of QLOG files.
"""
logs = []
for name in os.listdir(LOGS_PATH):
if name.endswith(".qlog"):
s = os.stat(os.path.join(LOGS_PATH, name))
file_url = "https://" + request.headers["host"] + "/logs/" + name
logs.append(
{
"date": datetime.datetime.utcfromtimestamp(s.st_mtime).strftime(
"%Y-%m-%d %H:%M:%S"
),
"file_url": file_url,
"name": name[:-5],
"qvis_url": QVIS_URL
+ "?"
+ urlencode({"file": file_url})
+ "#/sequence",
"size": s.st_size,
}
)
return templates.TemplateResponse(
"logs.html",
{
"logs": sorted(logs, key=lambda x: x["date"], reverse=True),
"request": request,
},
)
async def padding(request):
"""
Dynamically generated data, maximum 50MB.
"""
size = min(50000000, request.path_params["size"])
return PlainTextResponse("Z" * size)
async def ws(websocket):
"""
WebSocket echo endpoint.
"""
if "chat" in websocket.scope["subprotocols"]:
subprotocol = "chat"
else:
subprotocol = None
await websocket.accept(subprotocol=subprotocol)
try:
while True:
message = await websocket.receive_text()
await websocket.send_text(message)
except WebSocketDisconnect:
pass
async def wt(scope: Scope, receive: Receive, send: Send) -> None:
"""
WebTransport echo endpoint.
"""
# accept connection
message = await receive()
assert message["type"] == "webtransport.connect"
await send({"type": "webtransport.accept"})
# echo back received data
while True:
message = await receive()
if message["type"] == "webtransport.datagram.receive":
await send(
{
"data": message["data"],
"type": "webtransport.datagram.send",
}
)
elif message["type"] == "webtransport.stream.receive":
await send(
{
"data": message["data"],
"stream": message["stream"],
"type": "webtransport.stream.send",
}
)
starlette = Starlette(
routes=[
Route("/", METHOD_NAME),
Route("/{size:int}", padding),
Route("/echo", echo, methods=["POST"]),
Mount("/httpbin", WsgiToAsgi(httpbin.app)),
Route("/logs", logs),
WebSocketRoute("/ws", ws),
Mount(STATIC_URL, StaticFiles(directory=STATIC_ROOT, html=True)),
]
)
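# WebTransport sessions are not routed through Starlette: the ASGI wrapper below
# intercepts scopes of type "webtransport" on the /wt path and hands them to wt(),
# while every other request falls through to the Starlette application above.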
async def app(scope: Scope, receive: Receive, send: Send) -> None:
if scope["type"] == "webtransport" and scope["path"] == "/wt":
await wt(scope, receive, send)
else:
await starlette(scope, receive, send)
| null |
5,324 |
import pytest
from pydantic import Field
from docarray import BaseDoc
from docarray.index import ElasticDocIndex
from tests.index.elastic.fixture import start_storage_v8, tmp_index_name # noqa: F401
pytestmark = [pytest.mark.slow, pytest.mark.index, pytest.mark.elasticv8]
def METHOD_NAME(tmp_index_name): # noqa: F811
class MyDoc(BaseDoc):
text: str
color: str = Field(col_type='keyword')
index = ElasticDocIndex[MyDoc](index_name=tmp_index_name)
index_docs = [
MyDoc(id='0', text='hello world', color='red'),
MyDoc(id='1', text='never gonna give you up', color='blue'),
MyDoc(id='2', text='we are the world', color='green'),
]
index.index(index_docs)
query = 'world'
docs, _ = index.text_search(query, search_field='text')
assert [doc.id for doc in docs] == ['0', '2']
filter_query = {'terms': {'color': ['red', 'blue']}}
docs = index.filter(filter_query)
assert [doc.id for doc in docs] == ['0', '1']
def test_field_object(tmp_index_name): # noqa: F811
class MyDoc(BaseDoc):
manager: dict = Field(
properties={
'age': {'type': 'integer'},
'name': {
'properties': {
'first': {'type': 'keyword'},
'last': {'type': 'keyword'},
}
},
}
)
index = ElasticDocIndex[MyDoc](index_name=tmp_index_name)
doc = [
MyDoc(manager={'age': 25, 'name': {'first': 'Rachel', 'last': 'Green'}}),
MyDoc(manager={'age': 30, 'name': {'first': 'Monica', 'last': 'Geller'}}),
MyDoc(manager={'age': 35, 'name': {'first': 'Phoebe', 'last': 'Buffay'}}),
]
index.index(doc)
id_ = doc[0].id
assert index[id_].id == id_
assert index[id_].manager == doc[0].manager
filter_query = {'range': {'manager.age': {'gte': 30}}}
docs = index.filter(filter_query)
assert [doc.id for doc in docs] == [doc[1].id, doc[2].id]
def test_field_geo_point(tmp_index_name): # noqa: F811
class MyDoc(BaseDoc):
location: dict = Field(col_type='geo_point')
index = ElasticDocIndex[MyDoc](index_name=tmp_index_name)
doc = [
MyDoc(location={'lat': 40.12, 'lon': -72.34}),
MyDoc(location={'lat': 41.12, 'lon': -73.34}),
MyDoc(location={'lat': 42.12, 'lon': -74.34}),
]
index.index(doc)
query = {
'query': {
'geo_bounding_box': {
'location': {
'top_left': {'lat': 42, 'lon': -74},
'bottom_right': {'lat': 40, 'lon': -72},
}
}
},
}
docs, _ = index.execute_query(query)
assert [doc['id'] for doc in docs] == [doc[0].id, doc[1].id]
def test_field_range(tmp_index_name): # noqa: F811
class MyDoc(BaseDoc):
expected_attendees: dict = Field(col_type='integer_range')
time_frame: dict = Field(col_type='date_range', format='yyyy-MM-dd')
index = ElasticDocIndex[MyDoc](index_name=tmp_index_name)
doc = [
MyDoc(
expected_attendees={'gte': 10, 'lt': 20},
time_frame={'gte': '2023-01-01', 'lt': '2023-02-01'},
),
MyDoc(
expected_attendees={'gte': 20, 'lt': 30},
time_frame={'gte': '2023-02-01', 'lt': '2023-03-01'},
),
MyDoc(
expected_attendees={'gte': 30, 'lt': 40},
time_frame={'gte': '2023-03-01', 'lt': '2023-04-01'},
),
]
index.index(doc)
query = {
'query': {
'bool': {
'should': [
{'term': {'expected_attendees': {'value': 15}}},
{
'range': {
'time_frame': {
'gte': '2023-02-05',
'lt': '2023-02-10',
'relation': 'contains',
}
}
},
]
}
},
}
docs, _ = index.execute_query(query)
assert [doc['id'] for doc in docs] == [doc[0].id, doc[1].id]
def test_index_name():
class TextDoc(BaseDoc):
text: str = Field()
class StringDoc(BaseDoc):
text: str = Field(col_type='text')
index = ElasticDocIndex[TextDoc]()
assert index.index_name == TextDoc.__name__.lower()
index = ElasticDocIndex[StringDoc]()
assert index.index_name == StringDoc.__name__.lower()
| null |
5,325 |
import ctypes
import datetime
import shutil
import struct
import warnings
from pathlib import Path
from pymobiledevice3.exceptions import PyMobileDevice3Exception
SIZE_FORMAT = '>I'
CODE_FORMAT = '>B'
CODE_FILE_DATA = 0xc
CODE_ERROR_REMOTE = 0xb
CODE_ERROR_LOCAL = 0x6
CODE_SUCCESS = 0
FILE_TRANSFER_TERMINATOR = b'\x00\x00\x00\x00'
BULK_OPERATION_ERROR = -13
APPLE_EPOCH = 978307200
ERRNO_TO_DEVICE_ERROR = {
2: -6,
17: -7,
20: -8,
21: -9,
62: -10,
5: -11,
28: -15,
}
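# File-transfer messages in download_files/upload_files are framed as a big-endian
# uint32 length (SIZE_FORMAT) followed by a one-byte status code (CODE_FORMAT) and
# the payload; a download reply is terminated with FILE_TRANSFER_TERMINATOR, and
# local errno values are translated to device error codes via ERRNO_TO_DEVICE_ERROR.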
class DeviceLink:
def __init__(self, service, root_path: Path):
self.service = service
self.root_path = root_path
self._dl_handlers = {
'DLMessageCreateDirectory': self.create_directory,
'DLMessageUploadFiles': self.upload_files,
'DLMessageGetFreeDiskSpace': self.get_free_disk_space,
'DLMessageMoveItems': self.METHOD_NAME,
'DLMessageRemoveItems': self.remove_items,
'DLMessageDownloadFiles': self.download_files,
'DLContentsOfDirectory': self.contents_of_directory,
'DLMessageCopyItem': self.copy_item,
}
def dl_loop(self, progress_callback=lambda x: None):
while True:
message = self.receive_message()
command = message[0]
if command in ('DLMessageDownloadFiles', 'DLMessageMoveFiles', 'DLMessageMoveItems', 'DLMessageRemoveFiles',
'DLMessageRemoveItems'):
progress_callback(message[3])
elif command == 'DLMessageUploadFiles':
progress_callback(message[2])
if command == 'DLMessageProcessMessage':
if not message[1]['ErrorCode']:
return message[1].get('Content')
else:
raise PyMobileDevice3Exception(f'Device link error: {message[1]}')
self._dl_handlers[command](message)
def version_exchange(self):
dl_message_version_exchange = self.receive_message()
version_major = dl_message_version_exchange[1]
self.service.send_plist(['DLMessageVersionExchange', 'DLVersionsOk', version_major])
dl_message_device_ready = self.receive_message()
if dl_message_device_ready[0] != 'DLMessageDeviceReady':
raise PyMobileDevice3Exception('Device link didn\'t return ready state')
def send_process_message(self, message):
self.service.send_plist(['DLMessageProcessMessage', message])
def download_files(self, message):
status = {}
for file in message[1]:
self.service.sendall(struct.pack(SIZE_FORMAT, len(file)))
self.service.sendall(file.encode())
try:
data = (self.root_path / file).read_bytes()
except IOError as e:
status[file] = {
'DLFileErrorString': e.strerror,
'DLFileErrorCode': ctypes.c_uint64(ERRNO_TO_DEVICE_ERROR[e.errno]).value
}
self.service.sendall(struct.pack(SIZE_FORMAT, len(e.strerror) + struct.calcsize(CODE_FORMAT)))
self.service.sendall(struct.pack(CODE_FORMAT, CODE_ERROR_LOCAL) + e.strerror.encode())
else:
self.service.sendall(struct.pack(SIZE_FORMAT, len(data) + struct.calcsize(CODE_FORMAT)))
self.service.sendall(struct.pack(CODE_FORMAT, CODE_FILE_DATA) + data)
buffer = struct.pack(SIZE_FORMAT, struct.calcsize(CODE_FORMAT)) + struct.pack(CODE_FORMAT, CODE_SUCCESS)
self.service.sendall(buffer)
self.service.sendall(FILE_TRANSFER_TERMINATOR)
if status:
self.status_response(BULK_OPERATION_ERROR, 'Multi status', status)
else:
self.status_response(0)
def contents_of_directory(self, message):
data = {}
path = self.root_path / message[1]
for file in path.iterdir():
ftype = 'DLFileTypeUnknown'
if file.is_dir():
ftype = 'DLFileTypeDirectory'
if file.is_file():
ftype = 'DLFileTypeRegular'
data[file.name] = {
'DLFileType': ftype,
'DLFileSize': file.stat().st_size,
'DLFileModificationDate': datetime.datetime.utcfromtimestamp(file.stat().st_mtime - APPLE_EPOCH)
}
self.status_response(0, status_dict=data)
def upload_files(self, message):
while True:
device_name = self._prefixed_recv()
if not device_name:
break
file_name = self._prefixed_recv()
size, = struct.unpack(SIZE_FORMAT, self.service.recvall(struct.calcsize(SIZE_FORMAT)))
code, = struct.unpack(CODE_FORMAT, self.service.recvall(struct.calcsize(CODE_FORMAT)))
size -= struct.calcsize(CODE_FORMAT)
with open(self.root_path / file_name, 'wb') as fd:
while size and code == CODE_FILE_DATA:
fd.write(self.service.recvall(size))
size, = struct.unpack(SIZE_FORMAT, self.service.recvall(struct.calcsize(SIZE_FORMAT)))
code, = struct.unpack(CODE_FORMAT, self.service.recvall(struct.calcsize(CODE_FORMAT)))
size -= struct.calcsize(CODE_FORMAT)
if code == CODE_ERROR_REMOTE:
# iOS 17 beta devices give this error for: backup_manifest.db
error_message = self.service.recvall(size).decode()
warnings.warn(f'Failed to fully upload: {file_name}. Device file name: {device_name}. Reason: '
f'{error_message}')
continue
assert code == CODE_SUCCESS
self.status_response(0)
def get_free_disk_space(self, message):
freespace = shutil.disk_usage(self.root_path).free
self.status_response(0, status_dict=freespace)
def METHOD_NAME(self, message):
for src, dst in message[1].items():
dest = self.root_path / dst
dest.parent.mkdir(parents=True, exist_ok=True)
shutil.move(self.root_path / src, dest)
self.status_response(0)
def copy_item(self, message):
src = self.root_path / message[1]
dest = self.root_path / message[2]
dest.parent.mkdir(parents=True, exist_ok=True)
if src.is_dir():
shutil.copytree(src, dest)
else:
shutil.copy(src, dest)
self.status_response(0)
def remove_items(self, message):
for path in message[1]:
rm_path = self.root_path / path
if rm_path.is_dir():
shutil.rmtree(rm_path)
else:
rm_path.unlink(missing_ok=True)
self.status_response(0)
def create_directory(self, message):
path = message[1]
(self.root_path / path).mkdir(parents=True, exist_ok=True)
self.status_response(0)
def status_response(self, status_code, status_str='', status_dict=None):
self.service.send_plist([
'DLMessageStatusResponse', ctypes.c_uint64(status_code).value,
status_str if status_str else '___EmptyParameterString___',
status_dict if status_dict is not None else {},
])
def receive_message(self):
return self.service.recv_plist()
def disconnect(self):
self.service.send_plist(['DLMessageDisconnect', '___EmptyParameterString___'])
def _prefixed_recv(self):
size, = struct.unpack(SIZE_FORMAT, self.service.recvall(struct.calcsize(SIZE_FORMAT)))
return self.service.recvall(size).decode()
| null |
5,326 |
"""AppIndicator based tray icon"""
from gettext import gettext as _
import gi
from gi.repository import Gtk
from lutris.database.games import get_games
from lutris.game import Game
try:
gi.require_version('AppIndicator3', '0.1')
from gi.repository import AppIndicator3 as AppIndicator
APP_INDICATOR_SUPPORTED = True
except (ImportError, ValueError):
APP_INDICATOR_SUPPORTED = False
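# When AppIndicator3 is unavailable, LutrisStatusIcon falls back to LutrisTray,
# a plain Gtk.StatusIcon defined at the bottom of this module.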
class LutrisStatusIcon:
def __init__(self, application):
self.application = application
self.icon = self.create()
self.menu = self.get_menu()
self.set_visible(True)
if APP_INDICATOR_SUPPORTED:
self.icon.set_menu(self.menu)
else:
self.icon.connect("activate", self.on_activate)
self.icon.connect("popup-menu", self.on_menu_popup)
def create(self):
"""Create an appindicator"""
if APP_INDICATOR_SUPPORTED:
return AppIndicator.Indicator.new(
"net.lutris.Lutris", "lutris", AppIndicator.IndicatorCategory.APPLICATION_STATUS
)
return LutrisTray(self.application)
def METHOD_NAME(self):
"""Whether the icon is visible"""
if APP_INDICATOR_SUPPORTED:
return self.icon.get_status() != AppIndicator.IndicatorStatus.PASSIVE
return self.icon.get_visible()
def set_visible(self, value):
"""Set the visibility of the icon"""
if APP_INDICATOR_SUPPORTED:
if value:
visible = AppIndicator.IndicatorStatus.ACTIVE
else:
                visible = AppIndicator.IndicatorStatus.PASSIVE
self.icon.set_status(visible)
else:
self.icon.set_visible(value)
def get_menu(self):
"""Instantiates the menu attached to the tray icon"""
menu = Gtk.Menu()
installed_games = self.add_games()
number_of_games_in_menu = 10
for game in installed_games[:number_of_games_in_menu]:
menu.append(self._make_menu_item_for_game(game))
menu.append(Gtk.SeparatorMenuItem())
self.present_menu = Gtk.ImageMenuItem()
self.present_menu.set_image(Gtk.Image.new_from_icon_name("lutris", Gtk.IconSize.MENU))
self.present_menu.set_label(_("Show Lutris"))
self.present_menu.connect("activate", self.on_activate)
menu.append(self.present_menu)
quit_menu = Gtk.MenuItem()
quit_menu.set_label(_("Quit"))
quit_menu.connect("activate", self.on_quit_application)
menu.append(quit_menu)
menu.show_all()
return menu
def update_present_menu(self):
app_window = self.application.window
if app_window:
if app_window.get_visible():
self.present_menu.set_label(_("Hide Lutris"))
else:
self.present_menu.set_label(_("Show Lutris"))
def on_activate(self, _status_icon, _event=None):
"""Callback to show or hide the window"""
app_window = self.application.window
if app_window.get_visible():
# If the window has any transients, hiding it will hide them too
# never to be shown again, which is broken. So we don't allow that.
windows = Gtk.Window.list_toplevels()
for w in windows:
if w.get_visible() and w.get_transient_for() == app_window:
return
app_window.hide()
else:
app_window.show()
def on_menu_popup(self, _status_icon, button, time):
"""Callback to show the contextual menu"""
self.menu.popup(None, None, None, None, button, time)
def on_quit_application(self, _widget):
"""Callback to quit the program"""
self.application.quit()
def _make_menu_item_for_game(self, game):
menu_item = Gtk.MenuItem()
menu_item.set_label(game["name"])
menu_item.connect("activate", self.on_game_selected, game["id"])
return menu_item
@staticmethod
def add_games():
"""Adds installed games in order of last use"""
installed_games = get_games(filters={"installed": 1})
installed_games.sort(
key=lambda game: max(game["lastplayed"] or 0, game["installed_at"] or 0),
reverse=True,
)
return installed_games
def on_game_selected(self, _widget, game_id):
launch_ui_delegate = self.application.get_launch_ui_delegate()
Game(game_id).launch(launch_ui_delegate)
class LutrisTray(Gtk.StatusIcon):
"""Lutris tray icon"""
def __init__(self, application, **_kwargs):
super().__init__()
self.set_tooltip_text(_("Lutris"))
self.set_visible(True)
self.application = application
self.set_from_icon_name("lutris")
| null |
5,327 |
import os
import mock
import pytest
import libensemble.tests.unit_tests.setup as setup
from libensemble.alloc_funcs.give_sim_work_first import give_sim_work_first
from libensemble.comms.logs import LogConfig
from libensemble.libE import libE
from libensemble.manager import LoggedException
from libensemble.resources.resources import Resources
from libensemble.tests.regression_tests.common import mpi_comm_excl
class MPIAbortException(Exception):
"""Raised when mock mpi abort is called"""
class MPISendException(Exception):
"""Raised when mock mpi abort is called"""
class Fake_MPI:
"""Explicit mocking of MPI communicator"""
def Get_size(self):
return 2
def Get_rank(self):
return 0
def Barrier(self):
return 0
def Dup(self):
return self
def Free(self):
return
def isend(self, msg, dest, tag):
raise MPISendException()
def Abort(self, flag):
assert flag == 1, "Aborting without exit code of 1"
raise MPIAbortException()
class Fake_MPI_1P(Fake_MPI):
def Get_size(self):
return 1
fake_mpi = Fake_MPI()
fake_mpi_1p = Fake_MPI_1P()
alloc_specs = {"alloc_f": give_sim_work_first}
hfile_abort = "libE_history_at_abort_0.npy"
pfile_abort = "libE_persis_info_at_abort_0.pickle"
# Run by pytest at end of module
def teardown_module(module):
try:
print(f"teardown_module module:{module.__name__}")
except AttributeError:
print(f"teardown_module (direct run) module:{module}")
if Resources.resources is not None:
del Resources.resources
Resources.resources = None
# Run by pytest before each function
def setup_function(function):
print(f"setup_function function:{function.__name__}")
if Resources.resources is not None:
del Resources.resources
Resources.resources = None
def remove_file_if_exists(filename):
try:
os.remove(filename)
except OSError:
pass
def test_manager_exception():
"""Checking dump of history and pickle file on abort"""
sim_specs, gen_specs, exit_criteria = setup.make_criteria_and_specs_0()
remove_file_if_exists(hfile_abort)
remove_file_if_exists(pfile_abort)
with mock.patch("libensemble.manager.manager_main") as managerMock:
managerMock.side_effect = Exception
# Collision between libE.py and libE() (after mods to __init__.py) means
# libensemble.libE.comms_abort tries to refer to the function, not file
with mock.patch("libensemble.libE.comms_abort") as abortMock:
abortMock.side_effect = Exception
# Need fake MPI to get past the Manager only check and dump history
with pytest.raises(Exception):
libE_specs = {"mpi_comm": fake_mpi, "disable_resource_manager": True}
libE(sim_specs, gen_specs, exit_criteria, libE_specs=libE_specs)
pytest.fail("Expected exception")
assert os.path.isfile(hfile_abort), "History file not dumped"
assert os.path.isfile(pfile_abort), "Pickle file not dumped"
os.remove(hfile_abort)
os.remove(pfile_abort)
# Test that History and Pickle files NOT created when disabled
with pytest.raises(Exception):
libE_specs = {"mpi_comm": fake_mpi, "save_H_and_persis_on_abort": False}
libE(sim_specs, gen_specs, exit_criteria, libE_specs=libE_specs)
pytest.fail("Expected exception")
assert not os.path.isfile(hfile_abort), "History file dumped"
assert not os.path.isfile(pfile_abort), "Pickle file dumped"
# Note: this could now be combined with the tests above, since fake_MPI removes the need for the mock module.
# The only difference is that this test simply hits the first code exception (when fake_MPI tries to isend),
# whereas the first test triggers on the call to the manager.
def METHOD_NAME():
"""Running until fake_MPI tries to send msg to test (mocked) comm.Abort is called
Manager should raise MPISendException when fakeMPI tries to send message, which
will be caught by libE and raise MPIAbortException from fakeMPI.Abort"""
with pytest.raises(MPIAbortException):
sim_specs, gen_specs, exit_criteria = setup.make_criteria_and_specs_0()
libE_specs = {"mpi_comm": fake_mpi, "disable_resource_manager": True}
libE(sim_specs, gen_specs, exit_criteria, libE_specs=libE_specs)
pytest.fail("Expected MPIAbortException exception")
def test_exception_raising_manager_no_abort():
"""Running until fake_MPI tries to send msg to test (mocked) comm.Abort is called
Manager should raise MPISendException when fakeMPI tries to send message, which
will be caught by libE and raise MPIAbortException from fakeMPI.Abort"""
libE_specs = {"abort_on_exception": False, "mpi_comm": fake_mpi, "disable_resource_manager": True}
with pytest.raises(LoggedException):
sim_specs, gen_specs, exit_criteria = setup.make_criteria_and_specs_0()
libE(sim_specs, gen_specs, exit_criteria, libE_specs=libE_specs)
pytest.fail("Expected MPISendException exception")
# This raises a KeyError rather than an AssertionError because the check does not test
# whether "in" is missing, only that it's a list - needs updating in future.
def test_exception_raising_check_inputs():
"""Intentionally running without sim_specs["in"] to test exception raising (Fails)"""
libE_specs = {"mpi_comm": fake_mpi, "disable_resource_manager": True}
with pytest.raises(Exception):
H, _, _ = libE({"out": [("f", float)]}, {"out": [("x", float)]}, {"sim_max": 1}, libE_specs=libE_specs)
pytest.fail("Expected ValidationError exception")
def test_proc_not_in_communicator():
"""Checking proc not in communicator returns exit status of 3"""
libE_specs = {}
libE_specs["mpi_comm"], mpi_comm_null = mpi_comm_excl()
H, _, flag = libE(
{"in": ["x"], "out": [("f", float)]}, {"out": [("x", float)]}, {"sim_max": 1}, libE_specs=libE_specs
)
assert flag == 3, "libE return flag should be 3. Returned: " + str(flag)
# def test_exception_raising_worker():
# # Intentionally running without sim_specs["in"] to test exception raising (Fails)
# H, _, _ = libE({"out": [("f", float)]}, {"out": [("x", float)]},
# {"sim_max": 1}, libE_specs={"mpi_comm": MPI.COMM_WORLD})
# assert H==[]
def rmfield(a, *fieldnames_to_remove):
return a[[name for name in a.dtype.names if name not in fieldnames_to_remove]]
@pytest.mark.extra
def test_logging_disabling():
remove_file_if_exists("ensemble.log")
remove_file_if_exists("libE_stats.txt")
sim_specs, gen_specs, exit_criteria = setup.make_criteria_and_specs_0()
libE_specs = {"mpi_comm": fake_mpi, "comms": "mpi", "disable_log_files": True}
logconfig = LogConfig.config
logconfig.logger_set = False
with mock.patch("libensemble.manager.manager_main") as managerMock:
managerMock.side_effect = Exception
with mock.patch("libensemble.libE.comms_abort") as abortMock:
abortMock.side_effect = Exception
with pytest.raises(Exception):
libE(sim_specs, gen_specs, exit_criteria, libE_specs=libE_specs)
pytest.fail("Expected exception")
assert not os.path.isfile("ensemble.log"), "ensemble.log file dumped"
assert not os.path.isfile("libE_stats.txt"), "libE_stats.txt file dumped"
if __name__ == "__main__":
test_manager_exception()
METHOD_NAME()
test_exception_raising_manager_no_abort()
test_exception_raising_check_inputs()
test_proc_not_in_communicator()
test_logging_disabling()
| null |
5,328 |
import mock
from django import test
from django.conf import settings
from model_bakery import baker
from devilry.devilry_dbcache.customsql import AssignmentGroupDbCacheCustomSql
from devilry.devilry_examiner.views.selfassign import crinstance_selfassign
class TestCradminInstanceAssignment(test.TestCase):
def METHOD_NAME(self):
AssignmentGroupDbCacheCustomSql().initialize()
def test_user_not_related_examiner_on_period_sanity(self):
examiner_user = baker.make(settings.AUTH_USER_MODEL)
assignment = baker.make_recipe(
'devilry.apps.core.assignment_activeperiod_start',
long_name='Assignment One',
examiners_can_self_assign=True
)
baker.make('core.AssignmentGroup', parentnode=assignment)
mockrequest = mock.MagicMock()
mockrequest.user = examiner_user
crinstance = crinstance_selfassign.CrAdminInstance(request=mockrequest)
self.assertEqual(crinstance.get_rolequeryset().count(), 0)
def test_period_not_active_ended(self):
examiner_user = baker.make(settings.AUTH_USER_MODEL)
assignment = baker.make_recipe(
'devilry.apps.core.assignment_oldperiod_start',
long_name='Assignment One',
examiners_can_self_assign=True
)
baker.make('core.RelatedExaminer', period=assignment.parentnode, user=examiner_user)
baker.make('core.AssignmentGroup', parentnode=assignment)
mockrequest = mock.MagicMock()
mockrequest.user = examiner_user
crinstance = crinstance_selfassign.CrAdminInstance(request=mockrequest)
self.assertEqual(crinstance.get_rolequeryset().count(), 0)
def test_period_not_active_not_started(self):
examiner_user = baker.make(settings.AUTH_USER_MODEL)
assignment = baker.make_recipe(
'devilry.apps.core.assignment_futureperiod_start',
long_name='Assignment One',
examiners_can_self_assign=True
)
baker.make('core.RelatedExaminer', period=assignment.parentnode, user=examiner_user)
baker.make('core.AssignmentGroup', parentnode=assignment)
mockrequest = mock.MagicMock()
mockrequest.user = examiner_user
crinstance = crinstance_selfassign.CrAdminInstance(request=mockrequest)
self.assertEqual(crinstance.get_rolequeryset().count(), 0)
def test_single_period_accessible_sanity(self):
examiner_user = baker.make(settings.AUTH_USER_MODEL)
assignment = baker.make_recipe(
'devilry.apps.core.assignment_activeperiod_start',
long_name='Assignment One',
examiners_can_self_assign=True
)
baker.make('core.RelatedExaminer', period=assignment.parentnode, user=examiner_user)
baker.make('core.AssignmentGroup', parentnode=assignment)
mockrequest = mock.MagicMock()
mockrequest.user = examiner_user
crinstance = crinstance_selfassign.CrAdminInstance(request=mockrequest)
self.assertEqual(crinstance.get_rolequeryset().count(), 1)
def test_multiple_periods_accessible_sanity(self):
examiner_user = baker.make(settings.AUTH_USER_MODEL)
assignment1 = baker.make_recipe(
'devilry.apps.core.assignment_activeperiod_start',
long_name='Assignment One',
examiners_can_self_assign=True
)
assignment2 = baker.make_recipe(
'devilry.apps.core.assignment_activeperiod_start',
long_name='Assignment Two',
examiners_can_self_assign=True
)
baker.make('core.RelatedExaminer', period=assignment1.parentnode, user=examiner_user)
baker.make('core.AssignmentGroup', parentnode=assignment1)
baker.make('core.RelatedExaminer', period=assignment2.parentnode, user=examiner_user)
baker.make('core.AssignmentGroup', parentnode=assignment2)
mockrequest = mock.MagicMock()
mockrequest.user = examiner_user
crinstance = crinstance_selfassign.CrAdminInstance(request=mockrequest)
self.assertEqual(crinstance.get_rolequeryset().count(), 2)
self.assertIn(assignment1.period, crinstance.get_rolequeryset())
self.assertIn(assignment2.period, crinstance.get_rolequeryset())
def test_period_accessible_user_already_examiner(self):
examiner_user = baker.make(settings.AUTH_USER_MODEL)
assignment = baker.make_recipe(
'devilry.apps.core.assignment_activeperiod_start',
long_name='Assignment One',
examiners_can_self_assign=True
)
relatedexaminer = baker.make('core.RelatedExaminer', period=assignment.parentnode, user=examiner_user)
group = baker.make('core.AssignmentGroup', parentnode=assignment)
baker.make('core.Examiner', assignmentgroup=group, relatedexaminer=relatedexaminer)
mockrequest = mock.MagicMock()
mockrequest.user = examiner_user
crinstance = crinstance_selfassign.CrAdminInstance(request=mockrequest)
self.assertEqual(crinstance.get_rolequeryset().count(), 1)
| null |
5,329 |
import os
from datetime import datetime
from typing import List
import pandas as pd
import pandas_market_calendars as mcal
import pytz
import requests
from meta.data_processors._base import _Base
# from _base import _Base
class Iexcloud(_Base):
@classmethod
def _get_base_url(self, mode: str) -> str:
as1 = "mode must be sandbox or production."
assert mode in {"sandbox", "production"}, as1
if mode == "sandbox":
return "https://sandbox.iexapis.com"
return "https://cloud.iexapis.com"
def __init__(
self,
data_source: str,
start_date: str,
end_date: str,
time_interval: str,
**kwargs,
):
super().__init__(data_source, start_date, end_date, time_interval, **kwargs)
self.base_url = self._get_base_url(mode=kwargs["mode"])
self.token = kwargs["token"] or os.environ.get("IEX_TOKEN")
def METHOD_NAME(
self, ticker_list: List[str], save_path: str = "./data/dataset.csv"
):
"""Returns end of day historical data for up to 15 years.
Args:
ticker_list (List[str]): List of the tickers to retrieve information.
start_date (str): Oldest date of the range.
end_date (str): Latest date of the range.
Returns:
pd.DataFrame: A pandas dataframe with end of day historical data
for the specified tickers with the following columns:
date, tic, open, high, low, close, adjusted_close, volume.
Examples:
kwargs['mode'] = 'sandbox'
kwargs['token'] = 'Tsk_d633e2ff10d463...'
>>> iex_dloader = Iexcloud(data_source='iexcloud', **kwargs)
>>> iex_dloader.download_data(ticker_list=["AAPL", "NVDA"],
start_date='2014-01-01',
end_date='2021-12-12',
time_interval = '1D')
"""
assert self.time_interval == "1D" # one day
price_data = pd.DataFrame()
query_params = {
"token": self.token,
}
if self.start_date and self.end_date:
query_params["from"] = self.start_date
query_params["to"] = self.end_date
for stock in ticker_list:
end_point = f"{self.base_url}/stable/time-series/HISTORICAL_PRICES/{stock}"
response = requests.get(
url=end_point,
params=query_params,
)
if response.status_code != 200:
raise requests.exceptions.RequestException(response.text)
temp = pd.DataFrame.from_dict(data=response.json())
temp["ticker"] = stock
            # DataFrame.append was removed in pandas 2.0; pd.concat is the supported equivalent
            price_data = pd.concat([price_data, temp])
price_data = price_data[
[
"date",
"ticker",
"open",
"high",
"low",
"close",
"fclose",
"volume",
]
]
price_data = price_data.rename(
columns={
"ticker": "tic",
"date": "time",
"fclose": "adjusted_close",
}
)
        # the "date" column was renamed to "time" above, so map the renamed column
        price_data["time"] = price_data["time"].map(
            lambda x: datetime.fromtimestamp(x / 1000, pytz.UTC).strftime("%Y-%m-%d")
        )
self.dataframe = price_data
self.save_data(save_path)
print(
f"Download complete! Dataset saved to {save_path}. \nShape of DataFrame: {self.dataframe.shape}"
)
def get_trading_days(self, start: str, end: str) -> List[str]:
"""Retrieves every trading day between two dates.
Args:
start (str): Oldest date of the range.
end (str): Latest date of the range.
Returns:
List[str]: List of all trading days in YYYY-dd-mm format.
Examples:
>>> iex_dloader = Iexcloud(data_source='iexcloud',
mode='sandbox',
token='Tsk_d633e2ff10d463...')
>>> iex_dloader.get_trading_days(start='2014-01-01',
end='2021-12-12')
['2021-12-15', '2021-12-16', '2021-12-17']
"""
nyse = mcal.get_calendar("NYSE")
df = nyse.schedule(
start_date=pd.Timestamp(start, tz=pytz.UTC),
end_date=pd.Timestamp(end, tz=pytz.UTC),
)
return df.applymap(lambda x: x.strftime("%Y-%m-%d")).market_open.to_list()
| null |
5,330 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2019 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantidqt package
from qtpy.QtCore import Qt
from qtpy import QtGui
from qtpy.QtWidgets import QWidget
from mantidqt.utils.qt import load_ui
class WorkspaceCalculatorView(QWidget):
"""The view for the workspace calculator widget."""
def __init__(self, parent=None):
super().__init__(parent)
self.ui = load_ui(__file__, "workspace_calculator.ui", baseinstance=self)
self.setAttribute(Qt.WA_DeleteOnClose, True)
self.label_validation_lhs.setVisible(False)
self.label_validation_rhs.setVisible(False)
scale_validator = ScaleValidator()
self.lhs_scaling.setValidator(scale_validator)
self.rhs_scaling.setValidator(scale_validator)
# setting these selectors to be optional allows to have an empty entry
self.lhs_ws.setOptional(True)
self.rhs_ws.setOptional(True)
self.output_ws.setOptional(True)
# connecting ADS observers
self.lhs_ws.focussed.connect(lambda: self.connectADS("lhs"))
self.rhs_ws.focussed.connect(lambda: self.connectADS("rhs"))
self.output_ws.focussed.connect(lambda: self.connectADS("output"))
# cases for disconnecting ADS observers
self.lhs_ws.activated.connect(lambda: self.disconnectADS("lhs"))
self.rhs_ws.activated.connect(lambda: self.disconnectADS("rhs"))
self.output_ws.activated.connect(lambda: self.disconnectADS("output"))
# by default the observers to the ADS should be disconnected, and connected only when user focuses on the widget
self.lhs_ws.disconnectObservers()
self.rhs_ws.disconnectObservers()
self.output_ws.disconnectObservers()
def setValidationLabel(self, ws, validationValue, tooltip=""):
"""Sets the visibility of the validity indicator (asterisk) next to the workspace selector."""
if ws == "LHS":
if isinstance(tooltip, list):
tooltip = tooltip[0]
self.label_validation_lhs.setVisible(not validationValue)
self.label_validation_lhs.setToolTip(tooltip)
else:
if isinstance(tooltip, list):
tooltip = tooltip[1]
self.label_validation_rhs.setVisible(not validationValue)
self.label_validation_rhs.setToolTip(tooltip)
def connectADS(self, selector_name):
"""Explicitly connects the workspace selector observers to the ADS."""
if selector_name == "lhs" and not self.lhs_ws.isConnected():
self.lhs_ws.connectObservers()
elif selector_name == "rhs" and not self.rhs_ws.isConnected():
self.rhs_ws.connectObservers()
elif selector_name == "output" and not self.output_ws.isConnected():
self.output_ws.connectObservers()
def disconnectADS(self, selector_name=""):
"""Disconnects connected workspace selectors from signals coming from the ADS."""
if selector_name == "lhs":
self.lhs_ws.disconnectObservers()
elif selector_name == "rhs":
self.rhs_ws.disconnectObservers()
elif selector_name == "output":
self.output_ws.disconnectObservers()
else:
self.lhs_ws.disconnectObservers()
self.rhs_ws.disconnectObservers()
self.output_ws.disconnectObservers()
def hideEvent(self, event):
"""Handles hide event of the calculator widget."""
self.disconnectADS()
super(WorkspaceCalculatorView, self).hideEvent(event)
def closeEvent(self, event):
"""Handles close event of the calculator widget."""
self.deleteLater()
super(WorkspaceCalculatorView, self).closeEvent(event)
class ScaleValidator(QtGui.QValidator):
"""Validator for QLineEdit input used for scaling of LHS and RHS workspaces."""
def __init__(self, parent=None):
QtGui.QValidator.__init__(self, parent=parent)
def validate(self, input, pos):
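        # Accept anything that parses as a float; additionally allow intermediate
        # scientific-notation input such as "1e" or "1e-" so the user can keep
        # typing, and treat an exact zero as intermediate rather than valid.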
try:
float(input)
except ValueError:
try:
last_char = input[-1].lower()
penultimate_char = input[-2].lower()
if last_char == "e":
try:
int(penultimate_char)
return QtGui.QValidator.Acceptable, input, pos
except ValueError:
pass
elif penultimate_char == "e" and last_char == "-":
return QtGui.QValidator.Acceptable, input, pos
except IndexError:
pass
return QtGui.QValidator.Invalid, input, pos
if float(input) == 0:
return QtGui.QValidator.Intermediate, input, pos
return QtGui.QValidator.Acceptable, input, pos
def METHOD_NAME(self, input):
return "1.0"
| null |
5,331 |
import asyncio
import pytest
import zigpy.device
import zigpy.types as t
import zigpy.zdo as zdo
import zigpy.zdo.types as zdo_types
from .async_mock import AsyncMock, MagicMock, patch, sentinel
def test_commands():
for cmdid, cmdspec in zdo.types.CLUSTERS.items():
assert 0 <= cmdid <= 0xFFFF
assert isinstance(cmdspec, tuple)
for paramname, paramtype in zip(cmdspec[0], cmdspec[1]):
assert isinstance(paramname, str)
assert hasattr(paramtype, "serialize")
assert hasattr(paramtype, "deserialize")
@pytest.fixture
def zdo_f(app):
ieee = t.EUI64(map(t.uint8_t, [0, 1, 2, 3, 4, 5, 6, 7]))
dev = zigpy.device.Device(app, ieee, 65535)
dev.request = AsyncMock()
app.devices[dev.ieee] = dev
return zdo.ZDO(dev)
def test_deserialize(zdo_f):
hdr, args = zdo_f.deserialize(2, b"\x01\x02\x03xx")
assert hdr.tsn == 1
assert hdr.is_reply is False
assert args == [0x0302]
def test_deserialize_unknown(zdo_f):
with pytest.raises(ValueError):
hdr, args = zdo_f.deserialize(0x0100, b"\x01")
async def test_request(zdo_f):
await zdo_f.request(2, 65535)
app = zdo_f._device._application
assert zdo_f.device.request.call_count == 1
assert app.get_sequence.call_count == 1
async def test_bind(zdo_f):
cluster = MagicMock()
cluster.endpoint.endpoint_id = 1
cluster.cluster_id = 1026
await zdo_f.bind(cluster)
assert zdo_f.device.request.call_count == 1
assert zdo_f.device.request.call_args[0][1] == 0x0021
async def METHOD_NAME(zdo_f):
cluster = MagicMock()
cluster.endpoint.endpoint_id = 1
cluster.cluster_id = 1026
await zdo_f.unbind(cluster)
assert zdo_f.device.request.call_count == 1
assert zdo_f.device.request.call_args[0][1] == 0x0022
@pytest.mark.parametrize(
"remove_children, rejoin, flags",
(
(False, False, 0),
(False, True, 0x80),
(True, False, 0x40),
(True, True, 0xC0),
),
)
async def test_leave(zdo_f, remove_children, rejoin, flags):
"""Test ZDO leave request options."""
with patch.object(zdo_f, "request", AsyncMock()) as req_mock:
await zdo_f.leave(remove_children, rejoin)
assert req_mock.await_count == 1
assert req_mock.await_args[0][0] == 0x0034
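        # EUI64's string form lists the bytes most-significant first, so the fixture
        # address [0, 1, ..., 7] renders as "07:06:05:04:03:02:01:00"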
assert req_mock.await_args[0][1] == t.EUI64.convert("07:06:05:04:03:02:01:00")
assert req_mock.await_args[0][2] == flags
async def test_permit(zdo_f):
await zdo_f.permit()
assert zdo_f.device.request.call_count == 1
assert zdo_f.device.request.call_args[0][1] == 0x0036
async def test_broadcast(app):
await zigpy.zdo.broadcast(app, 0x0036, 0, 0, 60, 0)
assert app.send_packet.call_count == 1
packet = app.send_packet.mock_calls[0].args[0]
assert packet.dst.addr_mode == t.AddrMode.Broadcast
assert packet.cluster_id == 0x0036
def _handle_match_desc(zdo_f, profile):
zdo_f.reply = AsyncMock()
hdr = MagicMock()
hdr.command_id = zdo_types.ZDOCmd.Match_Desc_req
zdo_f.handle_message(5, 0x0006, hdr, [None, profile, [], []])
assert zdo_f.reply.call_count == 1
async def test_handle_match_desc_zha(zdo_f):
_handle_match_desc(zdo_f, 260)
await asyncio.wait(asyncio.all_tasks(), return_when=asyncio.FIRST_COMPLETED)
assert zdo_f.reply.await_count == 1
assert zdo_f.reply.call_args[0][3]
async def test_handle_match_desc_generic(zdo_f):
_handle_match_desc(zdo_f, 0)
await asyncio.wait(asyncio.all_tasks(), return_when=asyncio.FIRST_COMPLETED)
assert zdo_f.reply.await_count == 1
assert not zdo_f.reply.call_args[0][3]
async def test_handle_nwk_addr(zdo_f):
ieee = zdo_f._device.application.state.node_info.ieee
zdo_f.reply = MagicMock()
hdr = MagicMock()
hdr.command_id = zdo_types.ZDOCmd.NWK_addr_req
zdo_f.handle_message(5, 0x0000, hdr, [ieee, 0x00])
assert zdo_f.reply.call_count == 1
async def test_handle_ieee_addr(zdo_f):
nwk = zdo_f._device.application.state.node_info.nwk
zdo_f.reply = MagicMock()
hdr = MagicMock()
hdr.command_id = zdo_types.ZDOCmd.IEEE_addr_req
zdo_f.handle_message(5, 0x0001, hdr, [nwk, 0x00])
assert zdo_f.reply.call_count == 1
def test_handle_announce(zdo_f):
dev = zdo_f._device
listener = MagicMock()
zdo_f.add_listener(listener)
dev._application.devices.pop(dev.ieee)
hdr = MagicMock()
hdr.command_id = zdo_types.ZDOCmd.Device_annce
zdo_f.handle_message(
5, 0x0013, hdr, [dev.nwk, dev.ieee, 0], dst_addressing=sentinel.dst_addr
)
assert listener.device_announce.call_count == 1
assert listener.device_announce.call_args[0][0] is dev
assert listener.zdo_device_annce.call_count == 1
assert listener.zdo_device_annce.call_args[0][0] is dev
assert listener.zdo_device_annce.call_args[0][1] is sentinel.dst_addr
assert listener.zdo_device_annce.call_args[0][2] is hdr
assert listener.zdo_device_annce.call_args[0][3] == [dev.nwk, dev.ieee, 0]
def test_handle_permit_join(zdo_f):
listener = MagicMock()
zdo_f.add_listener(listener)
hdr = MagicMock()
hdr.command_id = zdo_types.ZDOCmd.Mgmt_Permit_Joining_req
zdo_f.handle_message(5, 0x0036, hdr, [100, 1])
assert listener.permit_duration.call_count == 1
def test_handle_unsupported(zdo_f):
listener = MagicMock()
zdo_f.add_listener(listener)
hdr = MagicMock()
hdr.command_id = zdo_types.ZDOCmd(0xFFFF)
assert hdr.command_id not in list(zdo_types.ZDOCmd)
zdo_f.request = MagicMock()
zdo_f.reply = MagicMock()
zdo_f.handle_message(5, 0xFFFF, hdr, [])
assert listener.zdo_undefined_0xffff.call_count == 1
assert zdo_f.request.call_count == 0
assert zdo_f.reply.call_count == 0
def test_device_accessor(zdo_f):
assert zdo_f.device.nwk == 65535
async def test_reply(zdo_f):
zdo_f.device.request = AsyncMock()
await zdo_f.reply(0x0005)
assert zdo_f.device.request.call_count == 1
def test_get_attr_error(zdo_f):
with pytest.raises(AttributeError):
zdo_f.no_such_attribute()
async def test_reply_tsn_override(zdo_f, monkeypatch):
clusters = MagicMock()
clusters.__getitem__.return_value = (
sentinel.param_names,
sentinel.scheam,
)
monkeypatch.setattr(zdo_types, "CLUSTERS", clusters)
mock_ser = MagicMock()
mock_ser.return_value = b"\xaa\x55"
monkeypatch.setattr(t, "serialize", mock_ser)
await zdo_f.reply(sentinel.cmd, sentinel.arg1, sentinel.arg2)
seq = zdo_f.device.request.call_args[0][4]
data = zdo_f.device.request.call_args[0][5]
assert seq == 123
assert data[0] == 123
assert data[1:3] == b"\xaa\x55"
# override tsn
tsn = 0x23
await zdo_f.reply(sentinel.cmd, sentinel.arg1, sentinel.arg2, tsn=tsn)
seq = zdo_f.device.request.call_args[0][4]
data = zdo_f.device.request.call_args[0][5]
assert seq == tsn
assert data[0] == tsn
assert data[1:3] == b"\xaa\x55"
| null |
5,332 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import mantidqtinterfaces.Muon.GUI.Common.utilities.load_utils as utils
import unittest
from mantid import simpleapi
from mantid.kernel import ConfigService
from mantid.api import AnalysisDataService, ITableWorkspace
from unittest import mock
def create_simple_workspace(data_x, data_y, run_number=0):
alg = simpleapi.AlgorithmManager.create("CreateWorkspace")
alg.initialize()
alg.setAlwaysStoreInADS(False)
alg.setLogging(False)
alg.setProperty("dataX", data_x)
alg.setProperty("dataY", data_y)
alg.setProperty("OutputWorkspace", "__notUsed")
alg.execute()
ws = alg.getProperty("OutputWorkspace").value
ws.getRun().addProperty("run_number", run_number, "NonDim", True)
return ws
class MuonFileUtilsTest(unittest.TestCase):
def METHOD_NAME(self):
simple_workspace = create_simple_workspace(data_x=[1, 2, 3, 4], data_y=[10, 20, 30, 40], run_number=74044)
workspace_list = [simple_workspace] * 5
run_number = utils.get_run_from_multi_period_data(workspace_list)
self.assertEqual(run_number, 74044)
def test_get_run_from_multi_period_data_raises_a_value_error_if_not_all_run_numbers_same(self):
simple_workspace = create_simple_workspace(data_x=[1, 2, 3, 4], data_y=[10, 20, 30, 40], run_number=74044)
simple_workspace_1 = create_simple_workspace(data_x=[1, 2, 3, 4], data_y=[10, 20, 30, 40], run_number=74045)
workspace_list = [simple_workspace] * 4 + [simple_workspace_1]
self.assertRaises(ValueError, utils.get_run_from_multi_period_data, workspace_list)
def test_default_instrument_returns_default_instrument_if_muon_instrument(self):
ConfigService["default.instrument"] = "MUSR"
instrument = utils.get_default_instrument()
self.assertEqual(instrument, "MUSR")
def test_default_instrument_returns_MUSR_if_default_instrument_is_not_muon_instrument(self):
ConfigService["default.instrument"] = "LOQ"
instrument = utils.get_default_instrument()
self.assertEqual(instrument, "MUSR")
def test_that_load_dead_time_from_filename_places_table_in_ADS(self):
ConfigService.Instance().setString("default.facility", "ISIS")
filename = "MUSR00022725.nsx"
name = utils.load_dead_time_from_filename(filename)
dead_time_table = AnalysisDataService.retrieve("MUSR00022725.nsx_deadtime_table")
self.assertEqual(name, "MUSR00022725.nsx_deadtime_table")
self.assertTrue(isinstance(dead_time_table, ITableWorkspace))
ConfigService.Instance().setString("default.facility", " ")
def test_load_workspace_from_filename_for_existing_file(self):
ConfigService.Instance().setString("default.facility", "ISIS")
filename = "MUSR00022725.nsx"
load_result, run, filename, _ = utils.load_workspace_from_filename(filename)
self.assertEqual(load_result["DeadTimeTable"], None)
self.assertEqual(load_result["FirstGoodData"], 0.106)
self.assertEqual(load_result["MainFieldDirection"], "Transverse")
self.assertAlmostEqual(load_result["TimeZero"], 0.55000, 5)
self.assertEqual(run, 22725)
ConfigService.Instance().setString("default.facility", " ")
@mock.patch("mantidqtinterfaces.Muon.GUI.Common.utilities.load_utils.get_filename_from_alg")
def test_filename_not_caps(self, mock_get_name):
alg = mock.Mock()
mock_path = "C:/users/test/data/"
wrong_path = "C:/users/t/data/"
instrument_names = ["hifi", "Hifi", "hIfI", "hifI"]
run_number = "125846.nxs"
for name in instrument_names:
            # this will always return all caps for the name -> use upper
mock_get_name.return_value = mock_path + name.upper() + run_number
# load will have correct path -> use wrong path for filename
filename = utils.get_correct_file_path(wrong_path + name + run_number, alg)
self.assertEqual(filename, mock_path + name + run_number)
def test_create_load_alg_for_nxs_files(self):
filename = "EMU00019489.nxs"
inputs = {}
alg, psi_data = utils.create_load_algorithm(filename, inputs)
self.assertFalse(psi_data)
def test_create_load_alg_for_nxs_v2_files(self):
filename = "EMU00102347.nxs_v2"
inputs = {}
alg, psi_data = utils.create_load_algorithm(filename, inputs)
self.assertFalse(psi_data)
def test_create_load_alg_for_bin_files(self):
filename = "deltat_tdc_dolly_1529.bin"
inputs = {}
alg, psi_data = utils.create_load_algorithm(filename, inputs)
self.assertTrue(psi_data)
@mock.patch("mantidqtinterfaces.Muon.GUI.Common.utilities.load_utils.CloneWorkspace")
def test_combine_loaded_runs_for_psi_data(self, clone_mock):
workspace = mock.MagicMock()
workspace.workspace.name = "name"
model = mock.MagicMock()
model._data_context.num_periods = mock.Mock(return_value=1)
# Workspace is missing DeadTimeTable and DetectorGroupingTable which should be handled without raising
model._loaded_data_store.get_data = mock.Mock(
return_value={
"workspace": {
"OutputWorkspace": workspace,
"MainFieldDirection": 0,
"TimeZero": 0,
"FirstGoodData": 0,
"DataDeadTimeTable": "dtt",
}
}
)
run_list = [1529]
utils.combine_loaded_runs(model, run_list)
if __name__ == "__main__":
unittest.main(buffer=False, verbosity=2)
| null |
5,333 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import tempfile
import os
import unittest
import warnings
from isis_powder.routines import yaml_parser
class ISISPowderYamlParserTest(unittest.TestCase):
def setUp(self):
self.temp_file_paths = []
def tearDown(self):
for path in self.temp_file_paths:
try:
os.remove(path)
except OSError:
warnings.warn("Failed to remove unit test temp file at the following path:\n" + str(path))
self.temp_file_paths = []
def get_temp_file_handle(self):
        # Set the mode manually so we don't need to convert to binary in Python 3
file_handle = tempfile.NamedTemporaryFile(delete=False, mode="w+")
self.temp_file_paths.append(file_handle.name)
return file_handle
def test_dictionary_parses_correctly(self):
expected_value = "bar"
second_value = "foo"
# Write in two ranges to check it determines the correct one
yaml_handle = self.get_temp_file_handle()
yaml_handle.write("100-200:\n")
yaml_handle.write(" test_item: '" + expected_value + "'\n")
yaml_handle.write("201-:\n")
yaml_handle.write(" test_item: '" + second_value + "'\n")
# Close handle so the test can access it
yaml_handle.close()
# Check a random value in the mid point
returned_dict = yaml_parser.get_run_dictionary(run_number_string="150", file_path=yaml_handle.name)
self.assertEqual(returned_dict["test_item"], expected_value)
# Check lower bound is respected
returned_dict = yaml_parser.get_run_dictionary(run_number_string="100", file_path=yaml_handle.name)
self.assertEqual(returned_dict["test_item"], expected_value, "Lower bound not respected")
# Check upper bound is respected
returned_dict = yaml_parser.get_run_dictionary(run_number_string="200", file_path=yaml_handle.name)
self.assertEqual(returned_dict["test_item"], expected_value, "Upper bound not respected")
# Check we can handle a range
returned_dict = yaml_parser.get_run_dictionary(run_number_string="120-130", file_path=yaml_handle.name)
self.assertEqual(returned_dict["test_item"], expected_value, "Range returned incorrect value")
# Check that the second dictionary works with unbounded ranges
returned_dict = yaml_parser.get_run_dictionary(run_number_string="205", file_path=yaml_handle.name)
self.assertEqual(returned_dict["test_item"], second_value)
def test_file_not_found_gives_sane_err(self):
# Create a file then delete it so we know it cannot exist at that path
file_handle = tempfile.NamedTemporaryFile(delete=False)
file_path = file_handle.name
file_handle.close()
os.remove(file_path)
if os.path.exists(file_path):
self.fail("File exists after deleting cannot continue this test")
# Check the error message is there
with self.assertRaisesRegex(ValueError, "Config file not found at path"):
yaml_parser.get_run_dictionary(run_number_string="1", file_path=file_path)
def METHOD_NAME(self):
# Check a valid unbounded range is detected
result = yaml_parser.is_run_range_key_unbounded("10-")
self.assertTrue(result, "Unbounded range not detected")
# Check a bounded range isn't incorrectly detected
result = yaml_parser.is_run_range_key_unbounded("22")
self.assertFalse(result, "Single run incorrectly detected")
# Check a range of runs isn't detected incorrectly
result = yaml_parser.is_run_range_key_unbounded("33-44")
self.assertFalse(result, "Bounded range incorrectly detected")
# What about if it ends in a comma syntax (this will throw elsewhere in the script anyway)
result = yaml_parser.is_run_range_key_unbounded("55-66,")
self.assertFalse(result, "Invalid ending character detected as an unbounded")
def test_blank_file_gives_sane_err(self):
file_handle = self.get_temp_file_handle()
# Write nothing and close
file_path = file_handle.name
file_handle.close()
with self.assertRaisesRegex(ValueError, "YAML files appears to be empty at"):
yaml_parser.get_run_dictionary(run_number_string=1, file_path=file_path)
def test_run_number_not_found_gives_sane_err(self):
expected_val = "yamlParserTest"
file_handle = self.get_temp_file_handle()
file_handle.write("10-20:\n")
file_handle.write(" test_key: '" + expected_val + "'\n")
file_handle.write("21-:\n")
file_handle.write(" test_key: '" + expected_val + "'\n")
file_path = file_handle.name
file_handle.close()
# Test a value in the middle of 1-10
with self.assertRaisesRegex(ValueError, "Run number 5 not recognised in cycle mapping file"):
yaml_parser.get_run_dictionary(run_number_string="5", file_path=file_path)
# Check on edge of invalid numbers
with self.assertRaisesRegex(ValueError, "Run number 9 not recognised in cycle mapping file"):
yaml_parser.get_run_dictionary(run_number_string=9, file_path=file_path)
# What about a range of numbers
with self.assertRaisesRegex(ValueError, "Run number 2 not recognised in cycle mapping file"):
yaml_parser.get_run_dictionary(run_number_string="2-8", file_path=file_path)
# Check valid number still works
returned_dict = yaml_parser.get_run_dictionary(run_number_string="10", file_path=file_path)
self.assertEqual(returned_dict["test_key"], expected_val)
def test_yaml_sanity_check_picks_up_two_unbounded(self):
# Check we can detect two unbounded ranges
file_handle = self.get_temp_file_handle()
file_handle.write("10-:\n")
file_handle.write("20-:\n")
file_path = file_handle.name
file_handle.close()
with self.assertRaisesRegex(ValueError, "Seen multiple unbounded keys in mapping file"):
yaml_parser.get_run_dictionary(run_number_string="11", file_path=file_path)
def test_yaml_sanity_detects_val_larger_than_unbound(self):
# If we have a value that is larger than the unbounded range can we detect this
file_handle = self.get_temp_file_handle()
file_handle.write("30-:\n")
file_handle.write("35:\n")
file_path = file_handle.name
file_handle.close()
with self.assertRaisesRegex(ValueError, "Found a run range in calibration mapping overlaps an unbounded run " "range"):
yaml_parser.get_run_dictionary(run_number_string="32", file_path=file_path)
if __name__ == "__main__":
unittest.main()
| null |
5,334 |
"""Tests for the Job object of the CBC SDK"""
import pytest
import logging
import io
import os
from tempfile import mkstemp
from cbc_sdk.platform import Job
from cbc_sdk.errors import ServerError
from cbc_sdk.rest_api import CBCloudAPI
from tests.unit.fixtures.CBCSDKMock import CBCSDKMock
from tests.unit.fixtures.platform.mock_jobs import (FIND_ALL_JOBS_RESP, JOB_DETAILS_1, JOB_DETAILS_2, PROGRESS_1,
PROGRESS_2, AWAIT_COMPLETION_PROGRESS)
logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s', level=logging.DEBUG, filename='log.txt')
@pytest.fixture(scope="function")
def cb():
"""Create CBCloudAPI singleton"""
return CBCloudAPI(url="https://example.com",
org_key="test",
token="abcd/1234",
ssl_verify=False)
@pytest.fixture(scope="function")
def cbcsdk_mock(monkeypatch, cb):
"""Mocks CBC SDK for unit tests"""
return CBCSDKMock(monkeypatch, cb)
def new_tempfile():
"""Create a temporary file and return the name of it."""
rc = mkstemp()
os.close(rc[0])
return rc[1]
def file_contents(filename):
"""Return a string containing the contents of the file."""
with io.open(filename, "r", encoding="utf-8") as f:
return f.read()
# ==================================== UNIT TESTS BELOW ====================================
def test_get_jobs(cbcsdk_mock):
"""Tests getting the list of all jobs."""
cbcsdk_mock.mock_request('GET', '/jobs/v1/orgs/test/jobs', FIND_ALL_JOBS_RESP)
api = cbcsdk_mock.api
query = api.select(Job)
assert query._count() == 2
list_jobs = list(query)
assert len(list_jobs) == 2
assert list_jobs[0].id == 12345
assert list_jobs[0].status == 'COMPLETED'
assert list_jobs[0].progress['num_total'] == 18
assert list_jobs[0].progress['num_completed'] == 18
assert list_jobs[1].id == 23456
assert list_jobs[1].status == 'CREATED'
assert list_jobs[1].progress['num_total'] == 34
assert list_jobs[1].progress['num_completed'] == 16
def test_get_jobs_async(cbcsdk_mock):
"""Tests getting the list of all jobs in an asynchronous fashion."""
cbcsdk_mock.mock_request('GET', '/jobs/v1/orgs/test/jobs', FIND_ALL_JOBS_RESP)
api = cbcsdk_mock.api
future = api.select(Job).execute_async()
list_jobs = future.result()
assert len(list_jobs) == 2
assert list_jobs[0].id == 12345
assert list_jobs[0].status == 'COMPLETED'
assert list_jobs[0].progress['num_total'] == 18
assert list_jobs[0].progress['num_completed'] == 18
assert list_jobs[1].id == 23456
assert list_jobs[1].status == 'CREATED'
assert list_jobs[1].progress['num_total'] == 34
assert list_jobs[1].progress['num_completed'] == 16
@pytest.mark.parametrize("jobid, total, completed, msg, load_return, progress_return", [
(12345, 18, 18, None, JOB_DETAILS_1, PROGRESS_1),
(23456, 34, 16, 'Foo', JOB_DETAILS_2, PROGRESS_2)
])
def METHOD_NAME(cbcsdk_mock, jobid, total, completed, msg, load_return, progress_return):
"""Tests loading a job by ID and getting its progress indicators."""
cbcsdk_mock.mock_request('GET', f'/jobs/v1/orgs/test/jobs/{jobid}', load_return)
cbcsdk_mock.mock_request('GET', f'/jobs/v1/orgs/test/jobs/{jobid}/progress', progress_return)
api = cbcsdk_mock.api
job = api.select(Job, jobid)
my_total, my_completed, my_message = job.get_progress()
assert my_total == total
assert my_completed == completed
assert my_message == msg
def test_job_await_completion(cbcsdk_mock):
"""Test the functionality of await_completion()."""
first_time = True
pr_index = 0
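    # The mocked progress endpoint fails once with a ServerError to exercise the retry
    # path, then replays the canned AWAIT_COMPLETION_PROGRESS responses in order.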
def on_progress(url, query_params, default):
nonlocal first_time, pr_index
if first_time:
first_time = False
raise ServerError(400, "Not yet")
assert pr_index < len(AWAIT_COMPLETION_PROGRESS), "Too many progress calls made"
return_value = AWAIT_COMPLETION_PROGRESS[pr_index]
pr_index += 1
return return_value
cbcsdk_mock.mock_request('GET', '/jobs/v1/orgs/test/jobs/12345', JOB_DETAILS_1)
cbcsdk_mock.mock_request('GET', '/jobs/v1/orgs/test/jobs/12345/progress', on_progress)
api = cbcsdk_mock.api
job = api.select(Job, 12345)
future = job.await_completion()
result = future.result()
assert result is job
assert first_time is False
assert pr_index == len(AWAIT_COMPLETION_PROGRESS)
def test_job_await_completion_error(cbcsdk_mock):
"""Test that await_completion() throws a ServerError if it gets too many ServerErrors internally."""
def on_progress(url, query_params, default):
raise ServerError(400, "Ain't happening")
cbcsdk_mock.mock_request('GET', '/jobs/v1/orgs/test/jobs/12345', JOB_DETAILS_1)
cbcsdk_mock.mock_request('GET', '/jobs/v1/orgs/test/jobs/12345/progress', on_progress)
api = cbcsdk_mock.api
job = api.select(Job, 12345)
future = job.await_completion()
with pytest.raises(ServerError):
future.result()
def test_job_output_export_string(cbcsdk_mock):
"""Tests exporting the results of a job as a string."""
cbcsdk_mock.mock_request('GET', '/jobs/v1/orgs/test/jobs/12345', JOB_DETAILS_1)
cbcsdk_mock.mock_request('STREAM:GET', '/jobs/v1/orgs/test/jobs/12345/download',
CBCSDKMock.StubResponse("ThisIsFine", 200, "ThisIsFine", False))
api = cbcsdk_mock.api
job = api.select(Job, 12345)
output = job.get_output_as_string()
assert output == "ThisIsFine"
def test_job_output_export_file(cbcsdk_mock):
"""Tests exporting the results of a job as a file."""
cbcsdk_mock.mock_request('GET', '/jobs/v1/orgs/test/jobs/12345', JOB_DETAILS_1)
cbcsdk_mock.mock_request('STREAM:GET', '/jobs/v1/orgs/test/jobs/12345/download',
CBCSDKMock.StubResponse("ThisIsFine", 200, "ThisIsFine", False))
api = cbcsdk_mock.api
job = api.select(Job, 12345)
tempfile = new_tempfile()
try:
job.get_output_as_file(tempfile)
assert file_contents(tempfile) == "ThisIsFine"
finally:
os.remove(tempfile)
def test_job_output_export_lines(cbcsdk_mock):
"""Tests exporting the results of a query as a list of lines."""
cbcsdk_mock.mock_request('GET', '/jobs/v1/orgs/test/jobs/12345', JOB_DETAILS_1)
data = "AAA\r\nBBB\r\nCCC"
cbcsdk_mock.mock_request('ITERATE:GET', '/jobs/v1/orgs/test/jobs/12345/download',
CBCSDKMock.StubResponse(data, 200, data, False))
api = cbcsdk_mock.api
job = api.select(Job, 12345)
output = list(job.get_output_as_lines())
assert output == ["AAA", "BBB", "CCC"]
| null |
5,335 |
# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
from typing import Any as Fixture
from cylc.flow import CYLC_LOG
from cylc.flow.scheduler import Scheduler
from cylc.flow.task_state import TASK_STATUS_RUNNING
async def test_run_job_cmd_no_hosts_error(
flow,
scheduler,
start,
mock_glbl_cfg,
log_filter,
):
"""It should catch NoHostsError.
    NoHostsErrors should be caught and handled rather than raised because
    they will cause problems (i.e. trigger shutdown) if they make it to the
    Scheduler.
    NoHostsErrors can occur in the poll & kill logic; this test ensures that
    these methods catch the NoHostsError and handle the event as a regular
    SSH failure by pushing the issue down the 255 callback.
See https://github.com/cylc/cylc-flow/pull/5195
"""
# define a platform
mock_glbl_cfg(
'cylc.flow.platforms.glbl_cfg',
'''
[platforms]
[[no-host-platform]]
''',
)
# define a workflow with a task which runs on that platform
id_ = flow({
'scheduling': {
'graph': {
'R1': 'foo'
}
},
'runtime': {
'foo': {
'platform': 'no-host-platform'
}
}
})
# start the workflow
schd = scheduler(id_)
async with start(schd) as log:
# set logging to debug level
log.set_level(logging.DEBUG, CYLC_LOG)
# tell Cylc the task is running on that platform
schd.pool.get_tasks()[0].state_reset(TASK_STATUS_RUNNING)
schd.pool.get_tasks()[0].platform = {
'name': 'no-host-platform',
'hosts': ['no-host-platform'],
}
# tell Cylc that that platform is not contactable
# (i.e. all hosts are in bad_hosts)
        # (this causes the NoHostsError to be raised)
schd.task_job_mgr.bad_hosts.add('no-host-platform')
# polling the task should not result in an error...
schd.task_job_mgr.poll_task_jobs(
schd.workflow,
schd.pool.get_tasks()
)
# ...but the failure should be logged
assert log_filter(
log,
contains='No available hosts for no-host-platform',
)
log.clear()
# killing the task should not result in an error...
schd.task_job_mgr.kill_task_jobs(
schd.workflow,
schd.pool.get_tasks()
)
# ...but the failure should be logged
assert log_filter(
log,
contains='No available hosts for no-host-platform',
)
async def METHOD_NAME(
one_conf: Fixture, flow: Fixture, scheduler: Fixture, run: Fixture,
db_select: Fixture, caplog: Fixture
) -> None:
"""TaskJobMg._run_job_cmd handles failure to get platform."""
id_: str = flow(one_conf)
schd: 'Scheduler' = scheduler(id_, paused_start=True)
# Run
async with run(schd):
from types import SimpleNamespace
schd.task_job_mgr._run_job_cmd(
schd.task_job_mgr.JOBS_POLL,
'foo',
[SimpleNamespace(platform={'name': 'culdee fell summit'})],
None
)
warning = caplog.records[-1]
assert warning.levelname == 'ERROR'
assert 'Unable to run command jobs-poll' in warning.msg
| null |
5,336 |
#!/usr/bin/env python
"""Unit tests for union_dict.
"""
from unittest import TestCase, main
from cogent3.util.union_dict import UnionDict
class UnionDictTests(TestCase):
"""Tests of individual functions in union_dict"""
def test_attr(self):
"""test the "." read/write functionality"""
d = UnionDict({"a": 1, "b": 2, "c": 3, "d": {"e": 5, "f": 6}})
self.assertEqual(d.a, 1)
self.assertEqual(d.b, 2)
self.assertEqual(d.d.e, 5)
d.c = 0
d.d.f = 0
self.assertEqual(d.c, 0)
self.assertEqual(d.d.f, 0)
def test_construction(self):
"""should handle deeply nested dict"""
data = {"width": 600.0, "xaxis": {"title": {"text": "Alignment Position"}}}
d = UnionDict(data)
self.assertEqual(d.xaxis.title.text, "Alignment Position")
def test_construct_from_empty(self):
"""successfully define from an empty"""
data = {"width": 600.0, "xaxis": {"title": {"text": "Alignment Position"}}}
# empty object
d = UnionDict()
self.assertTrue(len(d) == 0)
# using update
d.update(data)
self.assertEqual(d.xaxis.title.text, "Alignment Position")
def test_construct_from_kwargs(self):
"""successfully define from an kwargs"""
data = {"width": 600.0, "xaxis": {"title": {"text": "Alignment Position"}}}
# empty object
d = UnionDict(**data)
self.assertEqual(d.xaxis.title.text, "Alignment Position")
def test_union(self):
"""correctly adjust a prob vector so all values > minval"""
d = UnionDict({"a": 1, "b": 2, "c": 3, "d": {"e": 5, "f": 6}})
e = UnionDict({"b": 0, "d": {"f": 0, "g": 7}})
d |= e
self.assertEqual(d.a, 1)
self.assertEqual(d.b, 0)
self.assertEqual(d.d.e, 5)
self.assertEqual(d.d.f, 0)
self.assertEqual(d.d.g, 7)
def test_or(self):
"""should not modify original"""
d = UnionDict({"a": 1, "b": 2, "c": 3, "d": {"e": 5, "f": 6}})
e = UnionDict({"b": 0, "d": {"f": 0, "g": 7}})
f = d | e
self.assertEqual(f.a, 1)
self.assertEqual(f.b, 0)
self.assertEqual(f.d.e, 5)
self.assertEqual(f.d.f, 0)
self.assertEqual(f.d.g, 7)
self.assertTrue(f.d is not e.d)
def METHOD_NAME(self):
"""replacing union or of a value with a dict should be dict"""
d = UnionDict({"A": {"B": "Blah"}})
e = UnionDict({"A": "Blah"})
f = UnionDict(d.copy())
f |= e
self.assertNotEqual(d, f)
e |= d
self.assertEqual(d, e)
def test_union_with_empty_sub_dict(self):
"""unioning with a dict that has an empty sub-dict"""
d = UnionDict({"title": {}})
e = UnionDict({"title": {"text": "Alignment Position"}})
f = UnionDict(e.copy())
e |= d
self.assertEqual(e, f)
def test_sub_dicts_are_union(self):
"""checks if UnionDict is propogated to children"""
d = UnionDict({"a": 1, "b": 2, "c": 3, "d": {"e": 5, "f": 6}})
d.e = {"g": 7}
d.e.g = {"h": 8}
self.assertTrue(isinstance(d, UnionDict))
self.assertTrue(isinstance(d.d, UnionDict))
self.assertTrue(isinstance(d.e, UnionDict))
self.assertTrue(isinstance(d.e.g, UnionDict))
def test_get_subattr(self):
"""_getsubattr_ returns nested values via key"""
d = UnionDict({"a": 1, "b": 2, "c": 3, "d": {"e": 5, "f": 6}})
self.assertEqual(d._getsubattr_([], "a"), 1)
self.assertEqual(d._getsubattr_([], "d"), UnionDict({"e": 5, "f": 6}))
self.assertEqual(d._getsubattr_(["d"], "e"), 5)
def test_setitem(self):
"""should work via property or key"""
d = UnionDict()
d.a = 23
d.b = dict(c=42)
self.assertEqual(d.a, 23)
self.assertEqual(d["a"], 23)
self.assertEqual(d.b, dict(c=42))
self.assertEqual(d.b.c, 42)
self.assertEqual(d["b"], dict(c=42))
self.assertIsInstance(d.b, UnionDict)
if __name__ == "__main__":
main()
| null |
5,337 |
"""
This example shows how to use multinode_objectives.
It replicates the results from getting_started/pendulum.py
"""
import platform
from casadi import MX, sum1, sum2
from bioptim import (
OptimalControlProgram,
DynamicsFcn,
Dynamics,
BoundsList,
InitialGuessList,
OdeSolver,
OdeSolverBase,
Solver,
BiorbdModel,
PenaltyController,
MultinodeObjectiveList,
CostType,
)
def multinode_min_controls(controllers: list[PenaltyController]) -> MX:
"""
This function mimics the ObjectiveFcn.MINIMIZE_CONTROLS with a multinode objective.
    Note that it is better to use ObjectiveFcn.MINIMIZE_CONTROLS; this is just a toy example.
"""
dt = controllers[0].tf / controllers[0].ns
out = 0
for i, ctrl in enumerate(controllers):
out += sum1(ctrl.controls["tau"].cx_start ** 2) * dt
return out
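# For comparison, a rough sketch of the recommended alternative (illustrative only and not used
# below; it assumes the standard bioptim ObjectiveList/ObjectiveFcn API):
#   objective_functions = ObjectiveList()
#   objective_functions.add(ObjectiveFcn.Lagrange.MINIMIZE_CONTROL, key="tau", weight=10)
# The multinode variant above is kept purely as a teaching example.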
def METHOD_NAME(
biorbd_model_path: str,
final_time: float,
n_shooting: int,
ode_solver: OdeSolverBase = OdeSolver.RK4(),
use_sx: bool = True,
n_threads: int = 1,
assume_phase_dynamics: bool = False,
expand_dynamics: bool = True,
) -> OptimalControlProgram:
"""
The initialization of an ocp
Parameters
----------
biorbd_model_path: str
The path to the biorbd model
final_time: float
The time in second required to perform the task
n_shooting: int
        The number of shooting points to define in the direct multiple shooting program
ode_solver: OdeSolverBase = OdeSolver.RK4()
Which type of OdeSolver to use
use_sx: bool
If the SX variable should be used instead of MX (can be extensive on RAM)
n_threads: int
Number of thread to use
assume_phase_dynamics: bool
If the dynamics equation within a phase is unique or changes at each node. True is much faster, but lacks the
capability to have changing dynamics within a phase. A good example of when False should be used is when
different external forces are applied at each node
expand_dynamics: bool
If the dynamics function should be expanded. Please note, this will solve the problem faster, but will slow down
the declaration of the OCP, so it is a trade-off. Also depending on the solver, it may or may not work
(for instance IRK is not compatible with expanded dynamics)
Returns
-------
The OptimalControlProgram ready to be solved
"""
bio_model = BiorbdModel(biorbd_model_path)
# Add objective functions
multinode_objectives = MultinodeObjectiveList()
multinode_objectives.add(
multinode_min_controls,
nodes_phase=[0 for _ in range(n_shooting)],
nodes=[i for i in range(n_shooting)],
weight=10,
quadratic=False,
expand=False,
)
# Dynamics
dynamics = Dynamics(DynamicsFcn.TORQUE_DRIVEN, expand=expand_dynamics)
# Path constraint
x_bounds = BoundsList()
x_bounds["q"] = bio_model.bounds_from_ranges("q")
x_bounds["q"][:, [0, -1]] = 0
x_bounds["q"][1, -1] = 3.14
x_bounds["qdot"] = bio_model.bounds_from_ranges("qdot")
x_bounds["qdot"][:, [0, -1]] = 0
# Define control path constraint
n_tau = bio_model.nb_tau
tau_min, tau_max = -100, 100
u_bounds = BoundsList()
u_bounds["tau"] = [tau_min] * n_tau, [tau_max] * n_tau
u_bounds["tau"][1, :] = 0 # Prevent the model from actively rotate
return OptimalControlProgram(
bio_model,
dynamics,
n_shooting,
final_time,
x_bounds=x_bounds,
u_bounds=u_bounds,
multinode_objectives=multinode_objectives,
ode_solver=ode_solver,
use_sx=use_sx,
n_threads=n_threads, # This has to be set to 1 by definition.
assume_phase_dynamics=assume_phase_dynamics, # This has to be set to False by definition.
)
def main():
"""
    If pendulum is run as a script, it will perform the optimization and animate it
"""
# --- Prepare the ocp --- #
n_shooting = 30
ocp = METHOD_NAME(biorbd_model_path="models/pendulum.bioMod", final_time=1, n_shooting=n_shooting)
ocp.add_plot_penalty(CostType.ALL)
# --- Solve the ocp --- #
sol = ocp.solve(Solver.IPOPT(show_online_optim=platform.system() == "Linux"))
# --- Show the results in a bioviz animation --- #
sol.animate(n_frames=100)
# sol.graphs()
# sol.print_cost()
if __name__ == "__main__":
main()
| null |
5,338 |
#!/usr/bin/env python
"""
Plotting Examples
Suggested Usage: python -i pyglet_plotting.py
"""
from sympy import symbols, sin, cos, pi, sqrt
from sympy.plotting.pygletplot import PygletPlot
from time import sleep, perf_counter
def main():
x, y, z = symbols('x,y,z')
# toggle axes visibility with F5, colors with F6
axes_options = 'visible=false; colored=true; label_ticks=true; label_axes=true; overlay=true; stride=0.5'
# axes_options = 'colored=false; overlay=false; stride=(1.0, 0.5, 0.5)'
p = PygletPlot(
width=600,
height=500,
ortho=False,
invert_mouse_zoom=False,
axes=axes_options,
antialiasing=True)
examples = []
def example_wrapper(f):
examples.append(f)
return f
@example_wrapper
def mirrored_saddles():
p[5] = x**2 - y**2, [20], [20]
p[6] = y**2 - x**2, [20], [20]
@example_wrapper
def mirrored_saddles_saveimage():
p[5] = x**2 - y**2, [20], [20]
p[6] = y**2 - x**2, [20], [20]
p.wait_for_calculations()
# although the calculation is complete,
# we still need to wait for it to be
# rendered, so we'll sleep to be sure.
sleep(1)
p.saveimage("plot_example.png")
@example_wrapper
def mirrored_ellipsoids():
p[2] = x**2 + y**2, [40], [40], 'color=zfade'
p[3] = -x**2 - y**2, [40], [40], 'color=zfade'
@example_wrapper
def saddle_colored_by_derivative():
f = x**2 - y**2
p[1] = f, 'style=solid'
p[1].color = abs(f.diff(x)), abs(f.diff(x) + f.diff(y)), abs(f.diff(y))
@example_wrapper
def ding_dong_surface():
f = sqrt(1.0 - y)*y
        p[1] = f, [x, 0, 2*pi, 40], [y, -1, 4, 100], 'mode=cylindrical; style=solid; color=zfade4'
@example_wrapper
def polar_circle():
p[7] = 1, 'mode=polar'
@example_wrapper
def polar_flower():
p[8] = 1.5*sin(4*x), [160], 'mode=polar'
        p[8].color = z, x, y, (0.5, 0.5, 0.5), (0.8, 0.8, 0.8), (x, y, None, z)  # z is used for t
@example_wrapper
def simple_cylinder():
p[9] = 1, 'mode=cylindrical'
@example_wrapper
def cylindrical_hyperbola():
# (note that polar is an alias for cylindrical)
p[10] = 1/y, 'mode=polar', [x], [y, -2, 2, 20]
@example_wrapper
def extruded_hyperbolas():
p[11] = 1/x, [x, -10, 10, 100], [1], 'style=solid'
p[12] = -1/x, [x, -10, 10, 100], [1], 'style=solid'
@example_wrapper
def torus():
a, b = 1, 0.5 # radius, thickness
        p[13] = (a + b*cos(x))*cos(y), (a + b*cos(x))*sin(y), b*sin(x), [x, 0, pi*2, 40], [y, 0, pi*2, 40]
@example_wrapper
def warped_torus():
a, b = 2, 1 # radius, thickness
        p[13] = (a + b*cos(x))*cos(y), (a + b*cos(x))*sin(y), b*sin(x) + 0.5*sin(4*y), [x, 0, pi*2, 40], [y, 0, pi*2, 40]
@example_wrapper
def parametric_spiral():
p[14] = cos(y), sin(y), y / 10.0, [y, -4*pi, 4*pi, 100]
p[14].color = x, (0.1, 0.9), y, (0.1, 0.9), z, (0.1, 0.9)
@example_wrapper
def multistep_gradient():
p[1] = 1, 'mode=spherical', 'style=both'
# p[1] = exp(-x**2-y**2+(x*y)/4), [-1.7,1.7,100], [-1.7,1.7,100], 'style=solid'
# p[1] = 5*x*y*exp(-x**2-y**2), [-2,2,100], [-2,2,100]
gradient = [0.0, (0.3, 0.3, 1.0),
0.30, (0.3, 1.0, 0.3),
0.55, (0.95, 1.0, 0.2),
0.65, (1.0, 0.95, 0.2),
0.85, (1.0, 0.7, 0.2),
1.0, (1.0, 0.3, 0.2)]
p[1].color = z, [None, None, z], gradient
# p[1].color = 'zfade'
# p[1].color = 'zfade3'
@example_wrapper
def lambda_vs_sympy_evaluation():
start = perf_counter()
p[4] = x**2 + y**2, [100], [100], 'style=solid'
p.wait_for_calculations()
print("lambda-based calculation took %s seconds." % (perf_counter() - start))
start = perf_counter()
p[4] = x**2 + y**2, [100], [100], 'style=solid; use_sympy_eval'
p.wait_for_calculations()
print(
"sympy substitution-based calculation took %s seconds." %
(perf_counter() - start))
@example_wrapper
def gradient_vectors():
def gradient_vectors_inner(f, i):
from sympy import lambdify
from sympy.plotting.plot_interval import PlotInterval
from pyglet.gl import glBegin, glColor3f
from pyglet.gl import glVertex3f, glEnd, GL_LINES
def draw_gradient_vectors(f, iu, iv):
"""
Create a function which draws vectors
representing the gradient of f.
"""
dx, dy, dz = f.diff(x), f.diff(y), 0
FF = lambdify([x, y], [x, y, f])
FG = lambdify([x, y], [dx, dy, dz])
iu.v_steps /= 5
iv.v_steps /= 5
Gvl = [[[FF(u, v), FG(u, v)]
for v in iv.frange()]
for u in iu.frange()]
def draw_arrow(p1, p2):
"""
Draw a single vector.
"""
glColor3f(0.4, 0.4, 0.9)
glVertex3f(*p1)
glColor3f(0.9, 0.4, 0.4)
glVertex3f(*p2)
def draw():
"""
Iterate through the calculated
vectors and draw them.
"""
glBegin(GL_LINES)
for u in Gvl:
for v in u:
point = [[v[0][0], v[0][1], v[0][2]],
[v[0][0] + v[1][0], v[0][1] + v[1][1], v[0][2] + v[1][2]]]
draw_arrow(point[0], point[1])
glEnd()
return draw
p[i] = f, [-0.5, 0.5, 25], [-0.5, 0.5, 25], 'style=solid'
iu = PlotInterval(p[i].intervals[0])
iv = PlotInterval(p[i].intervals[1])
p[i].postdraw.append(draw_gradient_vectors(f, iu, iv))
gradient_vectors_inner(x**2 + y**2, 1)
gradient_vectors_inner(-x**2 - y**2, 2)
def help_str():
s = ("\nPlot p has been created. Useful commands: \n"
" help(p), p[1] = x**2, print(p), p.clear() \n\n"
"Available examples (see source in plotting.py):\n\n")
for i in range(len(examples)):
s += "(%i) %s\n" % (i, examples[i].__name__)
s += "\n"
s += "e.g. >>> example(2)\n"
s += " >>> ding_dong_surface()\n"
return s
def METHOD_NAME(i):
if callable(i):
p.clear()
i()
elif i >= 0 and i < len(examples):
p.clear()
examples[i]()
else:
print("Not a valid example.\n")
print(p)
METHOD_NAME(0) # 0 - 15 are defined above
print(help_str())
if __name__ == "__main__":
main()
| null |
5,339 |
# Copyright 2021-2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" test graph fallback """
import pytest
import numpy as np
import mindspore.nn as nn
import mindspore.common.dtype as mstype
from mindspore import Tensor, jit, context
context.set_context(mode=context.GRAPH_MODE)
class ControlNet(nn.Cell):
def inner_function_1(self, a, b):
return a + b
def METHOD_NAME(self, a, b):
return a - b
def construct(self, x):
a = Tensor(np.array(4), mstype.int32)
b = Tensor(np.array(5), mstype.int32)
if a + b > x:
return self.inner_function_1(a, b)
return self.METHOD_NAME(a, b)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_fallback_control_sink_tensor():
"""
    Feature: Fallback feature: support defining Tensor in Class construct.
    Description: Fallback feature: support defining Tensor in Class construct.
    Expectation: Fallback feature: support defining Tensor in Class construct.
"""
x = Tensor(np.array(1), mstype.int32)
net = ControlNet()
output = net(x)
output_expect = Tensor(9, mstype.int32)
assert output == output_expect
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_tensor_list():
"""
Feature: Fallback feature
    Description: Support basic methods of Tensor list.
Expectation: No exception.
"""
@jit
def np_tensor_list():
a = Tensor(np.array(4), mstype.int32)
b = Tensor(np.array(5), mstype.int32)
c = Tensor(np.array(6), mstype.int32)
tensor_list = [a, b]
for tensor in tensor_list:
print(tensor)
tensor_list.append(tensor_list[-1] + c)
return tensor_list
tensor_list = np_tensor_list()
print("tensor_list:", tensor_list)
assert len(tensor_list) == 3
@jit
def np_fallback_func_tensor_index(x):
array_x = tuple([2, 3, 4, 5])
np_x = np.array(array_x).astype(np.float32)
me_x = Tensor(np_x)
me_x = me_x + me_x
return me_x[x]
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_fallback_func_tensor_index():
"""
Feature: Fallback feature: support Tensor index.
Description: Fallback feature: support Tensor index.
Expectation: Fallback feature: support Tensor index.
"""
x = Tensor(1, mstype.int32)
output = np_fallback_func_tensor_index(x)
output_expect = Tensor(6, mstype.float32)
assert output == output_expect
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_fallback_tensor_compare_with_variable():
"""
Feature: Fallback feature
Description: Test ms.Tensor() in graph mode.
Expectation: No exception.
"""
@jit
def foo(x):
while x > Tensor([0]):
x = x - abs(Tensor([-1]))
return x
res = foo(Tensor([6]))
assert res == 0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_tensor_add():
"""
Feature: Fallback feature
Description: support Tensor add.
Expectation: No exception.
"""
@jit
def np_tensor_add():
a = Tensor(np.array(4))
b = Tensor(np.array(5))
tensor_list = [a, b]
for tensor in tensor_list:
print(tensor)
x = 6
np_x = np.array(x)
c = Tensor(np_x)
d = tensor_list[-1] + c
tensor_list.append(d)
return tensor_list
tensor_list = np_tensor_add()
print("tensor_list:", tensor_list)
assert tensor_list[-1] == 11
| null |
5,340 |
from time import perf_counter as clock
import numpy as np
import random
DSN = "dbname=test port = 5435"
# in order to always generate the same random sequence
random.seed(19)
def flatten(l):
"""Flattens list of tuples l."""
return [x[0] for x in l]
def fill_arrays(start, stop):
    col_i = np.arange(start, stop, dtype=np.int32)
if userandom:
col_j = np.random.uniform(0, nrows, size=[stop - start])
else:
        col_j = np.array(col_i, dtype=np.float64)
return col_i, col_j
# Generator to ensure pytables benchmark compatibility
def int_generator(nrows):
step = 1000 * 100
j = 0
for i in range(nrows):
if i >= step * j:
stop = (j + 1) * step
if stop > nrows: # Seems unnecessary
stop = nrows
col_i, col_j = fill_arrays(i, stop)
j += 1
k = 0
yield (col_i[k], col_j[k])
k += 1
def int_generator_slow(nrows):
for i in range(nrows):
if userandom:
yield (i, float(random.randint(0, nrows)))
else:
yield (i, float(i))
class Stream32:
"Object simulating a file for reading"
def __init__(self):
self.n = None
self.read_it = self.read_iter()
    # This doesn't work! It has to be converted into a normal one!
def METHOD_NAME(self, n=None):
for tup in int_generator(nrows):
sout = "%s\t%s\n" % tup
if n is not None and len(sout) > n:
for i in range(0, len(sout), n):
yield sout[i:i + n]
else:
yield sout
def read_iter(self):
sout = ""
n = self.n
for tup in int_generator(nrows):
sout += "%s\t%s\n" % tup
if n is not None and len(sout) > n:
for i in range(n, len(sout), n):
rout = sout[:n]
sout = sout[n:]
yield rout
yield sout
def read(self, n=None):
self.n = n
try:
str = next(self.read_it)
except StopIteration:
str = ""
return str
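# Note: psycopg2's cursor.copy_from() (used in create_db below) repeatedly calls
# Stream32.read(n) and loads the returned tab-separated "<i>\t<j>\n" text into the target
# table; that is why read_iter() buffers the generated rows and yields them in chunks of
# roughly n bytes.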
def open_db(filename, remove=0):
if not filename:
con = sqlite.connect(DSN)
else:
con = sqlite.connect(filename)
cur = con.cursor()
return con, cur
def create_db(filename, nrows):
con, cur = open_db(filename, remove=1)
try:
cur.execute("create table ints(i integer, j double precision)")
except:
con.rollback()
cur.execute("DROP TABLE ints")
cur.execute("create table ints(i integer, j double precision)")
con.commit()
con.set_isolation_level(2)
t1 = clock()
st = Stream32()
cur.copy_from(st, "ints")
# In case of postgres, the speeds of generator and loop are similar
#cur.executemany("insert into ints values (%s,%s)", int_generator(nrows))
# for i in xrange(nrows):
# cur.execute("insert into ints values (%s,%s)", (i, float(i)))
con.commit()
ctime = clock() - t1
if verbose:
print(f"insert time: {ctime:.5f}")
print(f"Krows/s: {nrows / 1000 / ctime:.5f}")
close_db(con, cur)
def index_db(filename):
con, cur = open_db(filename)
t1 = clock()
cur.execute("create index ij on ints(j)")
con.commit()
itime = clock() - t1
if verbose:
print(f"index time: {itime:.5f}")
print(f"Krows/s: {nrows / itime:.5f}")
# Close the DB
close_db(con, cur)
def query_db(filename, rng):
con, cur = open_db(filename)
t1 = clock()
ntimes = 10
for i in range(ntimes):
# between clause does not seem to take advantage of indexes
# cur.execute("select j from ints where j between %s and %s" % \
cur.execute("select i from ints where j >= %s and j <= %s" %
# cur.execute("select i from ints where i >= %s and i <=
# %s" %
(rng[0] + i, rng[1] + i))
results = cur.fetchall()
con.commit()
qtime = (clock() - t1) / ntimes
if verbose:
print(f"query time: {qtime:.5f}")
print(f"Mrows/s: {nrows / 1000 / qtime:.5f}")
results = sorted(flatten(results))
print(results)
close_db(con, cur)
def close_db(con, cur):
cur.close()
con.close()
if __name__ == "__main__":
import sys
import getopt
try:
import psyco
psyco_imported = 1
except:
psyco_imported = 0
usage = """usage: %s [-v] [-p] [-m] [-i] [-q] [-c] [-R range] [-n nrows] file
-v verbose
-p use "psyco" if available
-m use random values to fill the table
-q do query
-c create the database
-i index the table
-2 use sqlite2 (default is use sqlite3)
-R select a range in a field in the form "start,stop" (def "0,10")
-n sets the number of rows (in krows) in each table
\n""" % sys.argv[0]
try:
opts, pargs = getopt.getopt(sys.argv[1:], 'vpmiqc2R:n:')
except:
sys.stderr.write(usage)
sys.exit(0)
# default options
verbose = 0
usepsyco = 0
userandom = 0
docreate = 0
createindex = 0
doquery = 0
sqlite_version = "3"
rng = [0, 10]
nrows = 1
# Get the options
for option in opts:
if option[0] == '-v':
verbose = 1
elif option[0] == '-p':
usepsyco = 1
elif option[0] == '-m':
userandom = 1
elif option[0] == '-i':
createindex = 1
elif option[0] == '-q':
doquery = 1
elif option[0] == '-c':
docreate = 1
elif option[0] == "-2":
sqlite_version = "2"
elif option[0] == '-R':
rng = [int(i) for i in option[1].split(",")]
elif option[0] == '-n':
nrows = int(option[1])
    # Catch the database file passed as the last argument
filename = pargs[0]
# if sqlite_version == "2":
# import sqlite
# else:
# from pysqlite2 import dbapi2 as sqlite
import psycopg2 as sqlite
if verbose:
# print "pysqlite version:", sqlite.version
if userandom:
print("using random values")
if docreate:
if verbose:
print("writing %s krows" % nrows)
if psyco_imported and usepsyco:
psyco.bind(create_db)
nrows *= 1000
create_db(filename, nrows)
if createindex:
index_db(filename)
if doquery:
query_db(filename, rng)
| null |
5,341 |
import contextlib
import pytest
import pytorch_pfn_extras as ppe
import torch
@pytest.mark.skipif(
not torch.cuda.is_available(), reason="Moving across devices requires CUDA"
)
class TestPytorchRuntime:
@pytest.mark.parametrize("device", ["cpu", "cuda"])
@pytest.mark.parametrize(
"batch",
[{"x": torch.zeros(1)}, [torch.zeros(1)], torch.zeros(1), object()],
)
def test_convert_batch(self, device, batch):
rt = ppe.runtime.PyTorchRuntime(device, {})
cbatch = rt.convert_batch(batch)
if isinstance(cbatch, dict):
for _, v in cbatch.items():
assert v.device.type == device
elif isinstance(cbatch, (list, tuple)):
for v in cbatch:
assert v.device.type == device
elif isinstance(cbatch, torch.Tensor):
assert cbatch.device.type == device
else:
assert cbatch is batch
@pytest.mark.parametrize("device", ["cpu", "cuda"])
def test_move_module(self, device):
rt = ppe.runtime.PyTorchRuntime(device, {})
module = torch.nn.Linear(1, 1)
module = rt.move_module(module)
assert module.weight.device.type == device
@pytest.mark.parametrize("device", ["cpu", "cuda"])
def test_move_tensor(self, device):
rt = ppe.runtime.PyTorchRuntime(device, {})
tensor = torch.zeros(10)
tensor = rt.move_tensor(tensor)
assert tensor.device.type == device
class DummyRuntime(ppe.runtime.BaseRuntime):
def move_module(self, module):
return module
def move_tensor(self, tensor):
return tensor
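# Note (illustrative): ppe.to(module, device=..., runtime_class=DummyRuntime) tags a submodule
# with this runtime; named_runtime_modules() then reports such submodules, which is what the
# split-runtime tests below rely on.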
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.layer1 = torch.nn.Linear(10, 10)
self.layer2 = torch.nn.Linear(10, 10)
class SplitModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.layer1 = torch.nn.Linear(10, 10)
self.layer2 = torch.nn.Linear(10, 10)
ppe.to(self.layer2, device="dummy", runtime_class=DummyRuntime)
def METHOD_NAME():
module = MyModule()
# This is a top module, so it won't show child ones
for _ in ppe.runtime._runtime.named_runtime_modules(module):
pytest.fail("Never reach")
def test_split_runtime_container():
module = SplitModule()
for name, mod in ppe.runtime._runtime.named_runtime_modules(module):
assert name == "layer2"
assert mod is module.layer2
def test_split_runtime_container_recursive():
class MultiLevelSplitModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.layer1 = SplitModule()
self.layer2 = SplitModule()
module = MultiLevelSplitModule()
expected = [
("layer2", module.layer1.layer2),
("layer2", module.layer2.layer2),
]
for expected, (name, mod) in zip(
expected, ppe.runtime._runtime.named_runtime_modules(module)
):
assert name == expected[0]
assert mod is expected[1]
for _ in zip(
expected,
ppe.runtime._runtime.named_runtime_modules(module, recursive=False),
):
pytest.fail("Never reach")
def test_module_change_forward():
class Module1(torch.nn.Module):
def forward(self, input):
raise RuntimeError("The module forward should never be executed")
class Module2:
def __init__(self):
self.value = 5
def forward(self, input):
return torch.tensor(self.value)
class ForwardIntercepterRuntime(ppe.runtime.BaseRuntime):
def initialize_module(self, module, loader_or_batch):
self.new_module = Module2()
module.forward = self.new_module.forward
# TODO(ecastill): also reroute state_dict ?
def move_module(self, module):
self.initialize_module(module, None)
return module
module = Module1()
with pytest.raises(RuntimeError):
module(None)
ppe.to(module, device="dummy", runtime_class=ForwardIntercepterRuntime)
assert int(module(None)) == 5
def test_map():
class Module(torch.nn.Module):
def output(self, x):
return {"y": x * 2, "z": x + 1}
module = torch.nn.Sequential(Module())
data = [{"x": torch.ones(1)}, {"x": torch.ones(2)}]
ppe.to(module, device="cpu")
out = list(ppe.map(module[0].output, data))
assert len(out) == 2
assert set(out[0].keys()) == set(["y", "z"])
assert torch.allclose(out[0]["y"], torch.ones(1) * 2)
assert torch.allclose(out[0]["z"], torch.ones(1) + 1)
out = list(ppe.map(module[0].output, data, out_keys=set(["y"])))
assert set(out[0].keys()) == set(["y"])
def test_tracer():
called = 0
class TracerRuntime(ppe.runtime.BaseRuntime):
@classmethod
@contextlib.contextmanager
def trace(cls, event_name, arg):
nonlocal called
called = 1
yield
called = 2
assert called == 0
with ppe.runtime.BaseRuntime.trace("dummy", None):
assert called == 0
assert called == 0
with TracerRuntime.trace("dummy", None):
assert called == 1
assert called == 2
| null |
5,342 |
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from tests.st.control.cases_register import case_register
import mindspore
from mindspore import context, nn, ops, Tensor, CSRTensor, Parameter, jit, mutable
from mindspore.ops import functional as F
class Net(nn.Cell):
def __init__(self):
super().__init__()
self.print = ops.Print()
self.param_a = Parameter(Tensor(5, mindspore.int32), name='a')
self.param_b = Parameter(Tensor(4, mindspore.int32), name='b')
def construct(self, x):
out = 0
for _ in range(2):
out += self.func1(x)
return out
def func1(self, x):
out = x
i = 0
while i < 1:
out += self.func2(x)
i = i + 1
self.print(out)
return out
def func2(self, x):
if x > 10:
return self.param_a
return self.param_b
@case_register.level0
@case_register.target_gpu
def METHOD_NAME():
"""
Feature: Runtime.
    Description: Duplicate side effects depend on stack actors.
Expectation: No exception.
"""
context.set_context(mode=context.GRAPH_MODE)
x = Tensor(np.array([1]), mindspore.int32)
net = Net()
out = net(x)
result = 10
assert out == result
@jit
def switch_op(x, y):
z1 = y + 1
z2 = Tensor(5, mindspore.int32)
return F.switch(x, z1, z2)
@case_register.level0
@case_register.target_gpu
def test_switch_op():
"""
Feature: Runtime.
Description: Test switch op.
Expectation: No exception.
"""
context.set_context(mode=context.GRAPH_MODE)
x = Tensor(False, mindspore.bool_)
y = Tensor(1, mindspore.int32)
out = switch_op(x, y)
assert out == 5
@jit
def switch_single_op(x, y, z):
return F.switch(x, y, z)
@case_register.level0
@case_register.target_gpu
def test_switch_single_op():
"""
Feature: Runtime.
Description: Test switch single op.
Expectation: No exception.
"""
context.set_context(mode=context.GRAPH_MODE)
x = Tensor(False, mindspore.bool_)
y = Tensor(1, mindspore.int32)
z = Tensor(2, mindspore.int32)
out = switch_single_op(x, y, z)
assert out == 2
class TupleNet(nn.Cell):
def construct(self, x, y, z):
while ops.less(x, y):
z = ops.make_tuple(ops.add(F.tuple_getitem(z, 0), 3), ops.add(F.tuple_getitem(z, 1), 2))
x = ops.add(x, 1)
return z
@case_register.level0
@case_register.target_gpu
def test_tuple_parameter():
"""
Feature: Runtime.
Description: input a tuple parameter for root graph.
Expectation: No exception.
"""
context.set_context(mode=context.GRAPH_MODE)
x = Tensor(np.array([2]), mindspore.int32)
y = Tensor(np.array([4]), mindspore.int32)
z1 = Tensor(np.array([8]), mindspore.int32)
z2 = Tensor(np.array([4]), mindspore.int32)
z = mutable((z1, z2))
net = TupleNet()
out = net(x, y, z)
assert out == (14, 8)
class CSRNet(nn.Cell):
def construct(self, x, y, z):
while x < y:
z = CSRTensor(z.indptr, z.indices, z.values + x, z.shape)
x = x + 1
return z
@case_register.level0
@case_register.target_gpu
def test_csr_parameter():
"""
Feature: Runtime.
    Description: input a CSRTensor parameter for root graph.
Expectation: No exception.
"""
context.set_context(mode=context.GRAPH_MODE)
x = Tensor(2, mindspore.float32)
y = Tensor(4, mindspore.float32)
indptr = Tensor([0, 1, 2], mindspore.int32)
indices = Tensor([0, 1], mindspore.int32)
values = Tensor([1, 2], mindspore.float32)
shape = (2, 4)
z = CSRTensor(indptr, indices, values, shape)
net = CSRNet()
out = net(x, y, z)
assert np.all(out.values.asnumpy() == [6., 7.])
| null |
5,343 |
from .api.client import APIClient
from .constants import (DEFAULT_TIMEOUT_SECONDS, DEFAULT_MAX_POOL_SIZE)
from .models.configs import ConfigCollection
from .models.containers import ContainerCollection
from .models.images import ImageCollection
from .models.networks import NetworkCollection
from .models.METHOD_NAME import NodeCollection
from .models.plugins import PluginCollection
from .models.secrets import SecretCollection
from .models.services import ServiceCollection
from .models.swarm import Swarm
from .models.volumes import VolumeCollection
from .utils import kwargs_from_env
class DockerClient:
"""
A client for communicating with a Docker server.
Example:
>>> import docker
>>> client = docker.DockerClient(base_url='unix://var/run/docker.sock')
Args:
base_url (str): URL to the Docker server. For example,
``unix:///var/run/docker.sock`` or ``tcp://127.0.0.1:1234``.
version (str): The version of the API to use. Set to ``auto`` to
automatically detect the server's version. Default: ``1.35``
timeout (int): Default timeout for API calls, in seconds.
tls (bool or :py:class:`~docker.tls.TLSConfig`): Enable TLS. Pass
``True`` to enable it with default options, or pass a
:py:class:`~docker.tls.TLSConfig` object to use custom
configuration.
user_agent (str): Set a custom user agent for requests to the server.
credstore_env (dict): Override environment variables when calling the
credential store process.
use_ssh_client (bool): If set to `True`, an ssh connection is made
via shelling out to the ssh client. Ensure the ssh client is
installed and configured on the host.
max_pool_size (int): The maximum number of connections
to save in the pool.
"""
def __init__(self, *args, **kwargs):
self.api = APIClient(*args, **kwargs)
@classmethod
def from_env(cls, **kwargs):
"""
Return a client configured from environment variables.
The environment variables used are the same as those used by the
Docker command-line client. They are:
.. envvar:: DOCKER_HOST
The URL to the Docker host.
.. envvar:: DOCKER_TLS_VERIFY
Verify the host against a CA certificate.
.. envvar:: DOCKER_CERT_PATH
A path to a directory containing TLS certificates to use when
connecting to the Docker host.
Args:
version (str): The version of the API to use. Set to ``auto`` to
automatically detect the server's version. Default: ``auto``
timeout (int): Default timeout for API calls, in seconds.
max_pool_size (int): The maximum number of connections
to save in the pool.
ssl_version (int): A valid `SSL version`_.
assert_hostname (bool): Verify the hostname of the server.
environment (dict): The environment to read environment variables
from. Default: the value of ``os.environ``
credstore_env (dict): Override environment variables when calling
the credential store process.
use_ssh_client (bool): If set to `True`, an ssh connection is
made via shelling out to the ssh client. Ensure the ssh
client is installed and configured on the host.
Example:
>>> import docker
>>> client = docker.from_env()
.. _`SSL version`:
https://docs.python.org/3.5/library/ssl.html#ssl.PROTOCOL_TLSv1
"""
timeout = kwargs.pop('timeout', DEFAULT_TIMEOUT_SECONDS)
max_pool_size = kwargs.pop('max_pool_size', DEFAULT_MAX_POOL_SIZE)
version = kwargs.pop('version', None)
use_ssh_client = kwargs.pop('use_ssh_client', False)
return cls(
timeout=timeout,
max_pool_size=max_pool_size,
version=version,
use_ssh_client=use_ssh_client,
**kwargs_from_env(**kwargs)
)
# Resources
@property
def configs(self):
"""
An object for managing configs on the server. See the
:doc:`configs documentation <configs>` for full details.
"""
return ConfigCollection(client=self)
@property
def containers(self):
"""
An object for managing containers on the server. See the
:doc:`containers documentation <containers>` for full details.
"""
return ContainerCollection(client=self)
@property
def images(self):
"""
An object for managing images on the server. See the
:doc:`images documentation <images>` for full details.
"""
return ImageCollection(client=self)
@property
def networks(self):
"""
An object for managing networks on the server. See the
:doc:`networks documentation <networks>` for full details.
"""
return NetworkCollection(client=self)
@property
def METHOD_NAME(self):
"""
An object for managing nodes on the server. See the
:doc:`nodes documentation <nodes>` for full details.
"""
return NodeCollection(client=self)
@property
def plugins(self):
"""
An object for managing plugins on the server. See the
:doc:`plugins documentation <plugins>` for full details.
"""
return PluginCollection(client=self)
@property
def secrets(self):
"""
An object for managing secrets on the server. See the
:doc:`secrets documentation <secrets>` for full details.
"""
return SecretCollection(client=self)
@property
def services(self):
"""
An object for managing services on the server. See the
:doc:`services documentation <services>` for full details.
"""
return ServiceCollection(client=self)
@property
def swarm(self):
"""
An object for managing a swarm on the server. See the
:doc:`swarm documentation <swarm>` for full details.
"""
return Swarm(client=self)
@property
def volumes(self):
"""
An object for managing volumes on the server. See the
:doc:`volumes documentation <volumes>` for full details.
"""
return VolumeCollection(client=self)
# Top-level methods
def events(self, *args, **kwargs):
return self.api.events(*args, **kwargs)
events.__doc__ = APIClient.events.__doc__
def df(self):
return self.api.df()
df.__doc__ = APIClient.df.__doc__
def info(self, *args, **kwargs):
return self.api.info(*args, **kwargs)
info.__doc__ = APIClient.info.__doc__
def login(self, *args, **kwargs):
return self.api.login(*args, **kwargs)
login.__doc__ = APIClient.login.__doc__
def ping(self, *args, **kwargs):
return self.api.ping(*args, **kwargs)
ping.__doc__ = APIClient.ping.__doc__
def version(self, *args, **kwargs):
return self.api.version(*args, **kwargs)
version.__doc__ = APIClient.version.__doc__
def close(self):
return self.api.close()
close.__doc__ = APIClient.close.__doc__
def __getattr__(self, name):
s = [f"'DockerClient' object has no attribute '{name}'"]
        # If a user calls a method on APIClient, they get a hint that the
        # method has moved to the low-level APIClient object.
if hasattr(APIClient, name):
s.append("In Docker SDK for Python 2.0, this method is now on the "
"object APIClient. See the low-level API section of the "
"documentation for more details.")
raise AttributeError(' '.join(s))
from_env = DockerClient.from_env
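# Illustrative usage sketch (not executed here; assumes a reachable Docker daemon):
#   import docker
#   client = docker.from_env()
#   client.ping()
#   for container in client.containers.list():
#       print(container.name)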
| null |
5,344 |
from unittest import TestCase
from contracting.db.driver import CacheDriver, Driver
class TestCacheDriver(TestCase):
def METHOD_NAME(self):
self.d = Driver()
self.d.flush()
self.c = CacheDriver(self.d)
def test_get_adds_to_read(self):
self.c.get('thing')
self.assertTrue('thing' in self.c.pending_reads)
def test_set_adds_to_cache_and_pending_writes(self):
self.c.set('thing', 1234)
self.assertEqual(self.c.pending_writes['thing'], 1234)
def test_object_in_cache_returns_from_cache(self):
self.d.set('thing', 8999)
self.c.get('thing')
self.assertEqual(self.c.get('thing'), 8999)
def test_commit_puts_all_objects_in_pending_writes_to_db(self):
self.c.set('thing1', 1234)
self.c.set('thing2', 1235)
self.c.set('thing3', 1236)
self.c.set('thing4', 1237)
self.c.set('thing5', 1238)
self.assertIsNone(self.d.get('thing1'))
self.assertIsNone(self.d.get('thing2'))
self.assertIsNone(self.d.get('thing3'))
self.assertIsNone(self.d.get('thing4'))
self.assertIsNone(self.d.get('thing5'))
self.c.commit()
self.assertEqual(self.d.get('thing1'), 1234)
self.assertEqual(self.d.get('thing2'), 1235)
self.assertEqual(self.d.get('thing3'), 1236)
self.assertEqual(self.d.get('thing4'), 1237)
self.assertEqual(self.d.get('thing5'), 1238)
def test_clear_pending_state_resets_all_variables(self):
self.c.set('thing1', 1234)
self.c.set('thing2', 1235)
self.c.get('something')
self.assertTrue(len(self.c.pending_reads) > 0)
self.assertTrue(len(self.c.pending_writes) > 0)
self.c.rollback()
self.assertFalse(len(self.c.pending_reads) > 0)
self.assertFalse(len(self.c.pending_writes) > 0)
def test_soft_apply_adds_changes_to_pending_deltas(self):
self.c.driver.set('thing1', 9999)
self.c.set('thing1', 8888)
self.c.soft_apply('0')
expected_deltas = {
'0': {
'writes': {'thing1': (9999, 8888)},
'reads': {'thing1': 9999}
}
}
self.assertDictEqual(self.c.pending_deltas, expected_deltas)
def test_soft_apply_applies_the_changes_to_the_driver_but_not_hard_driver(self):
self.c.set('thing1', 9999)
self.c.commit()
self.c.set('thing1', 8888)
self.c.soft_apply('0')
res = self.c.get('thing1')
self.assertEqual(res, 8888)
self.assertEqual(self.c.driver.get('thing1'), 9999)
def test_hard_apply_applies_hcl_if_exists(self):
self.c.set('thing1', 9999)
self.c.commit()
self.c.set('thing1', 8888)
self.c.soft_apply('0')
self.c.hard_apply('0')
res = self.c.get('thing1')
self.assertEqual(res, 8888)
self.assertEqual(self.c.driver.get('thing1'), 8888)
def test_rollback_applies_hcl_if_exists(self):
self.c.set('thing1', 9999)
self.c.commit()
self.c.set('thing1', 8888)
self.c.soft_apply('0')
self.c.rollback('0')
res = self.c.get('thing1')
self.assertEqual(res, 9999)
self.assertEqual(self.c.driver.get('thing1'), 9999)
def test_rollback_twice_returns(self):
self.c.set('thing1', 9999)
self.c.commit()
self.c.set('thing1', 8888)
self.c.soft_apply('0')
self.c.set('thing1', 7777)
self.c.soft_apply('1')
self.c.set('thing1', 6666)
self.c.soft_apply('2')
self.c.rollback('1')
res = self.c.get('thing1')
self.assertEqual(res, 8888)
self.assertEqual(self.c.driver.get('thing1'), 9999)
def test_rollback_removes_hlcs(self):
self.c.set('thing1', 9999)
self.c.commit()
self.c.set('thing1', 8888)
self.c.soft_apply('0')
self.c.set('thing1', 7777)
self.c.soft_apply('1')
self.c.set('thing1', 6666)
self.c.soft_apply('2')
self.c.rollback('1')
self.assertIsNone(self.c.pending_deltas.get('2'))
self.assertIsNone(self.c.pending_deltas.get('1'))
def test_hard_apply_only_applies_changes_up_to_delta(self):
self.c.set('thing1', 9999)
self.c.commit()
self.c.set('thing1', 8888)
self.c.soft_apply('0')
self.c.set('thing1', 7777)
self.c.soft_apply('1')
self.c.set('thing1', 6666)
self.c.soft_apply('2')
self.c.set('thing1', 5555)
self.c.soft_apply('3')
self.c.hard_apply('1')
res = self.c.get('thing1')
self.assertEqual(res, 5555)
self.assertEqual(self.c.driver.get('thing1'), 7777)
def test_hard_apply_removes_hcls(self):
self.c.set('thing1', 9999)
self.c.commit()
self.c.set('thing1', 8888)
self.c.soft_apply('0')
self.c.set('thing1', 7777)
self.c.soft_apply('1')
self.c.set('thing1', 6666)
self.c.soft_apply('2')
self.c.hard_apply('0')
hlcs = {'1':
{'writes': {'thing1': (8888, 7777)}, 'reads': {'thing1': 8888}},
'2':
{'writes': {'thing1': (7777, 6666)}, 'reads': {'thing1': 7777}}
}
self.assertDictEqual(self.c.pending_deltas, hlcs)
def test_rollback_returns_to_initial_state(self):
self.c.set('thing1', 9999)
self.c.commit()
self.c.set('thing1', 8888)
self.c.soft_apply('0')
self.assertEqual(self.c.get('thing1'), 8888)
self.c.set('thing1', 7777)
self.c.soft_apply('1')
self.assertEqual(self.c.get('thing1'), 7777)
self.c.set('thing1', 6666)
self.c.soft_apply('2')
self.assertEqual(self.c.get('thing1'), 6666)
self.c.rollback()
self.assertEqual(self.c.get('thing1'), 9999)
self.assertEqual(self.c.driver.get('thing1'), 9999)
    def test_rollback_all_removes_hlcs(self):
self.c.set('thing1', 9999)
self.c.commit()
self.c.set('thing1', 8888)
self.c.soft_apply('0')
self.assertEqual(self.c.get('thing1'), 8888)
self.c.set('thing1', 7777)
self.c.soft_apply('1')
self.assertEqual(self.c.get('thing1'), 7777)
self.c.set('thing1', 6666)
self.c.soft_apply('2')
self.assertEqual(self.c.get('thing1'), 6666)
self.c.rollback()
self.assertDictEqual(self.c.pending_deltas, {})
def test_find_returns_none(self):
x = self.c.find('none')
self.assertIsNone(x)
def test_find_returns_driver(self):
self.c.driver.set('none', 123)
x = self.c.find('none')
self.assertEqual(x, 123)
def test_find_returns_cache(self):
self.c.driver.set('none', 123)
self.c.cache['none'] = 999
x = self.c.find('none')
self.assertEqual(x, 999)
def test_find_returns_pending_writes(self):
self.c.driver.set('none', 123)
self.c.cache['none'] = 999
self.c.pending_writes['none'] = 5555
x = self.c.find('none')
self.assertEqual(x, 5555)
| null |
5,345 |
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.dsl.components.base.annotations."""
from typing import Dict
import apache_beam as beam
import tensorflow as tf
from tfx.dsl.component.experimental import annotations
from tfx.types import artifact
from tfx.types import standard_artifacts
from tfx.types import value_artifact
class AnnotationsTest(tf.test.TestCase):
def testArtifactGenericAnnotation(self):
# Error: type hint whose parameter is not an Artifact subclass.
with self.assertRaisesRegex(ValueError,
'expects .* a concrete subclass of'):
_ = annotations._ArtifactGeneric[int] # pytype: disable=unsupported-operands
# Error: type hint with abstract Artifact subclass.
with self.assertRaisesRegex(ValueError,
'expects .* a concrete subclass of'):
_ = annotations._ArtifactGeneric[artifact.Artifact]
# Error: type hint with abstract Artifact subclass.
with self.assertRaisesRegex(ValueError,
'expects .* a concrete subclass of'):
_ = annotations._ArtifactGeneric[value_artifact.ValueArtifact]
# OK.
_ = annotations._ArtifactGeneric[standard_artifacts.Examples]
def METHOD_NAME(self):
_ = annotations.InputArtifact[standard_artifacts.Examples]
_ = annotations.OutputArtifact[standard_artifacts.Examples]
def testPrimitiveTypeGenericAnnotation(self):
# Error: type hint whose parameter is not a primitive type
# pytype: disable=unsupported-operands
with self.assertRaisesRegex(
ValueError, 'T to be `int`, `float`, `str`, `bool`'):
_ = annotations._PrimitiveTypeGeneric[artifact.Artifact]
with self.assertRaisesRegex(
ValueError, 'T to be `int`, `float`, `str`, `bool`'):
_ = annotations._PrimitiveTypeGeneric[object]
with self.assertRaisesRegex(
ValueError, 'T to be `int`, `float`, `str`, `bool`'):
_ = annotations._PrimitiveTypeGeneric[123]
with self.assertRaisesRegex(
ValueError, 'T to be `int`, `float`, `str`, `bool`'):
_ = annotations._PrimitiveTypeGeneric['string']
with self.assertRaisesRegex(
ValueError, 'T to be `int`, `float`, `str`, `bool`'):
_ = annotations._PrimitiveTypeGeneric[Dict[int, int]]
with self.assertRaisesRegex(
ValueError, 'T to be `int`, `float`, `str`, `bool`'):
_ = annotations._PrimitiveTypeGeneric[bytes]
# pytype: enable=unsupported-operands
# OK.
_ = annotations._PrimitiveTypeGeneric[int]
_ = annotations._PrimitiveTypeGeneric[float]
_ = annotations._PrimitiveTypeGeneric[str]
_ = annotations._PrimitiveTypeGeneric[bool]
_ = annotations._PrimitiveTypeGeneric[Dict[str, float]]
_ = annotations._PrimitiveTypeGeneric[bool]
def testPipelineTypeGenericAnnotation(self):
# Error: type hint whose parameter is not a primitive type
with self.assertRaisesRegex(
ValueError, 'T to be `beam.Pipeline`'):
_ = annotations._PipelineTypeGeneric[artifact.Artifact]
with self.assertRaisesRegex(
ValueError, 'T to be `beam.Pipeline`'):
_ = annotations._PipelineTypeGeneric[object]
# pytype: disable=unsupported-operands
with self.assertRaisesRegex(
ValueError, 'T to be `beam.Pipeline`'):
_ = annotations._PipelineTypeGeneric[123]
with self.assertRaisesRegex(
ValueError, 'T to be `beam.Pipeline`'):
_ = annotations._PipelineTypeGeneric['string']
# pytype: enable=unsupported-operands
# OK.
_ = annotations._PipelineTypeGeneric[beam.Pipeline]
def testParameterUsage(self):
_ = annotations.Parameter[int]
_ = annotations.Parameter[float]
_ = annotations.Parameter[str]
_ = annotations.Parameter[bool]
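# For context only (hypothetical usage, not exercised by these tests): these annotations are
# typically consumed in Python-function components, e.g.
#   def MyComponent(examples: annotations.InputArtifact[standard_artifacts.Examples],
#                   num_steps: annotations.Parameter[int]) -> None: ...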
if __name__ == '__main__':
tf.test.main()
| null |
5,346 |
"""
Copyright (c) 2016, Maarten Everts
All rights reserved.
This source code has been ported from https://github.com/privacybydesign/gabi
The authors of this file are not -in any way- affiliated with the original authors or organizations.
"""
from cryptography.hazmat.primitives.asymmetric.rsa import _modinv # type:ignore
from pyasn1.codec.ber.encoder import encode
from pyasn1.type import univ
from ...primitives.attestation import sha256_as_int
from ...primitives.value import FP2Value
class Record(univ.SequenceOf):
componentType = univ.Integer()
def METHOD_NAME(values):
return encode(values, asn1Spec=Record())
def hashCommit(values, issig):
tmp = [True] if issig else []
tmp = tmp + [len(values)] + values
r = METHOD_NAME(tmp)
if issig:
indx = r.find(b'\x02\x01\x01')
r = r[:indx] + b'\x01\x01\xFF' + r[indx + 3:]
return sha256_as_int(r)
def createChallenge(context, nonce, contributions, issig):
return hashCommit([context] + contributions + [nonce], issig)
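# Illustration of the construction above: a challenge is the SHA-256 digest (interpreted as an
# integer) of the ASN.1 DER encoding of [length, *values], with a leading boolean (patched in
# the DER output) when issig is True. In particular, for hypothetical values A and Z:
#   createChallenge(context, nonce, [A, Z], False) == hashCommit([context, A, Z, nonce], False)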
class ProofU:
def __init__(self, U, C, VPrimeResponse, SResponse):
self.U = U
self.C = C
self.VPrimeResponse = VPrimeResponse
self.SResponse = SResponse
def MergeProofP(self, proofP, pk):
self.U = (self.U * proofP.P) % pk.N
self.SResponse = self.SResponse + proofP.SResponse
def Verify(self, pk, context, nonce):
return self.VerifyWithChallenge(pk, createChallenge(context, nonce, self.ChallengeContribution(pk), False))
def correctResponseSizes(self, pk):
maximum = (1 << (pk.Params.LvPrimeCommit + 1)) - 1
minimum = -maximum
return self.VPrimeResponse >= minimum and self.VPrimeResponse <= maximum
def VerifyWithChallenge(self, pk, reconstructedChallenge):
return self.correctResponseSizes(pk) and self.C == reconstructedChallenge
def reconstructUcommit(self, pk):
Uc = FP2Value(pk.N, self.U).intpow(-self.C)
Sv = FP2Value(pk.N, pk.S).intpow(self.VPrimeResponse)
R0s = FP2Value(pk.N, pk.R[0]).intpow(self.SResponse)
return (Uc * Sv * R0s).a
def SecretKeyResponse(self):
return self.SResponse
def Challenge(self):
return self.C
def ChallengeContribution(self, pk):
return [self.U, self.reconstructUcommit(pk)]
class ProofS:
def __init__(self, C, EResponse):
self.C = C
self.EResponse = EResponse
def Verify(self, pk, signature, context, nonce):
exponent = self.EResponse * signature.E + self.C
ACommit = FP2Value(pk.N, signature.A).intpow(exponent).a
Q = FP2Value(pk.N, signature.A).intpow(signature.E).a
cPrime = hashCommit([context, Q, signature.A, nonce, ACommit], False)
return self.C == cPrime
class ProofD:
def __init__(self, C, A, EResponse, VResponse, AResponses, ADisclosed):
self.C = C
self.A = A
self.EResponse = EResponse
self.VResponse = VResponse
self.AResponses = AResponses
self.ADisclosed = ADisclosed
def MergeProofP(self, proofP, pk):
self.AResponses[0] += proofP.SResponse
def correctResponseSizes(self, pk):
maximum = (1 << (pk.Params.LmCommit + 1)) - 1
minimum = -maximum
for aResponse in self.AResponses:
if aResponse < minimum or aResponse > maximum:
return False
maximum = (1 << (pk.Params.LeCommit + 1)) - 1
minimum = -maximum
if self.EResponse < minimum or self.EResponse > maximum:
return False
return True
def reconstructZ(self, pk):
numerator = 1 << (pk.Params.Le - 1)
numerator = FP2Value(pk.N, self.A).intpow(numerator).a
for i, exp in self.ADisclosed.items():
if exp.bit_length() > pk.Params.Lm:
exp = sha256_as_int(str(exp))
numerator *= FP2Value(pk.N, pk.R[i]).intpow(exp).a
numerator = numerator % pk.N
known = pk.Z * _modinv(numerator, pk.N)
knownC = FP2Value(pk.N, known).intpow(-self.C).a
Ae = FP2Value(pk.N, self.A).intpow(self.EResponse).a
Sv = FP2Value(pk.N, pk.S).intpow(self.VResponse).a
Rs = 1
for i, response in self.AResponses.items():
Rs *= FP2Value(pk.N, pk.R[i]).intpow(response).a
Z = (knownC * Ae * Rs * Sv) % pk.N
return Z
def Verify(self, pk, context, nonce1, issig):
return self.VerifyWithChallenge(pk, createChallenge(context, nonce1, self.ChallengeContribution(pk), issig))
def VerifyWithChallenge(self, pk, reconstructedChallenge):
return self.correctResponseSizes(pk) and self.C == reconstructedChallenge
def ChallengeContribution(self, pk):
return [self.A, self.reconstructZ(pk)]
def SecretKeyResponse(self):
return self.AResponses[0]
def Challenge(self):
return self.C
def Copy(self):
ADisclosed = {}
for k, v in self.ADisclosed.items():
ADisclosed[k] = v
return ProofD(self.C, self.A, self.EResponse, self.VResponse, self.AResponses, ADisclosed)
class ProofP:
def __init__(self, P, C, SResponse):
self.P = P
self.C = C
self.SResponse = SResponse
class ProofPCommitment:
def __init__(self, P, Pcommit):
self.P = P
self.Pcommit = Pcommit
| null |
5,347 |
import copy
import os
import pytest
from flask_migrate import upgrade
from sqlalchemy_utils import drop_database
from tests.test_utils import (
EagerScheduler,
EnvironmentBuild,
InteractiveRun,
InteractiveSession,
Job,
Pipeline,
Project,
)
import app.core.sessions
from _orchest.internals.test_utils import AbortableAsyncResultMock, CeleryMock, gen_uuid
from app import create_app
from app.apis import (
namespace_environment_builds,
namespace_environment_images,
namespace_jobs,
namespace_runs,
)
from app.connections import db
from config import CONFIG_CLASS
@pytest.fixture()
def METHOD_NAME(monkeypatch):
"""Mock celery and access added and revoked tasks."""
METHOD_NAME = CeleryMock()
for module in [namespace_environment_builds, namespace_runs, namespace_jobs]:
monkeypatch.setattr(module, "make_celery", lambda *args, **kwargs: METHOD_NAME)
return METHOD_NAME
@pytest.fixture()
def abortable_async_res(monkeypatch):
"""Mock an AbortableAsyncResult and access abort()."""
aresult = AbortableAsyncResultMock("uuid")
for module in [namespace_environment_builds, namespace_runs, namespace_jobs]:
monkeypatch.setattr(
module, "AbortableAsyncResult", lambda *args, **kwargs: aresult
)
return aresult
@pytest.fixture(autouse=True)
def monkeypatch_image_utils(monkeypatch):
monkeypatch.setattr(app.utils, "remove_if_dangling", lambda *args, **kwargs: None)
monkeypatch.setattr(
namespace_environment_images,
"docker_images_list_safe",
lambda *args, **kwargs: [],
)
monkeypatch.setattr(
namespace_environment_images,
"docker_images_rm_safe",
lambda *args, **kwargs: None,
)
monkeypatch.setattr(
namespace_jobs, "get_env_uuids_missing_image", lambda *args, **kwargs: []
)
@pytest.fixture(scope="module")
def test_app():
"""Setup a flask application with a working db.
Expects a postgres database service to be running. A new database
will be created with a random name. The database is dropped at the
end of scope of the fixture.
"""
config = copy.deepcopy(CONFIG_CLASS)
# Setup the DB URI.
db_host = os.environ.get("ORCHEST_TEST_DATABASE_HOST", "localhost")
db_port = os.environ.get("ORCHEST_TEST_DATABASE_PORT", "5432")
# Postgres does not accept "-" as part of a name.
db_name = gen_uuid(use_underscores=True)
db_name = "test_db"
SQLALCHEMY_DATABASE_URI = f"postgresql://postgres@{db_host}:{db_port}/{db_name}"
config.SQLALCHEMY_DATABASE_URI = SQLALCHEMY_DATABASE_URI
config.TESTING = True
# Migrate DB
app = create_app(config, to_migrate_db=True)
with app.app_context():
upgrade()
app = create_app(config, use_db=True, be_scheduler=False)
scheduler = EagerScheduler(
job_defaults={
# Same settings as the "real" scheduler.
"misfire_grace_time": 2 ** 31,
"coalesce": False,
"max_instances": 2 ** 31,
}
)
app.config["SCHEDULER"] = scheduler
scheduler.start()
yield app
drop_database(app.config["SQLALCHEMY_DATABASE_URI"])
@pytest.fixture()
def client(test_app):
"""Setup a flask test client.
Will delete all data in the db at the end of the scope of the
fixture.
"""
with test_app.test_client() as client:
yield client
# Remove all data, so that every test has access to a clean slate.
with test_app.app_context():
tables = db.engine.table_names()
tables = [t for t in tables if t != "alembic_version"]
tables = ",".join(tables)
# RESTART IDENTITY is to reset sequence generators.
cmd = f"TRUNCATE {tables} RESTART IDENTITY;"
db.engine.execute(cmd)
db.session.commit()
@pytest.fixture()
def project(client):
"""Provides a project backed by an entry in the db."""
return Project(client, gen_uuid())
@pytest.fixture()
def pipeline(client, project):
"""Provides a pipeline backed by an entry in the db."""
return Pipeline(client, project, gen_uuid())
@pytest.fixture()
def monkeypatch_interactive_session(monkeypatch):
monkeypatch.setattr(
app.core.sessions.InteractiveSession, "launch", lambda *args, **kwargs: None
)
monkeypatch.setattr(
app.core.sessions.InteractiveSession,
"get_containers_IP",
lambda *args, **kwargs: app.core.sessions.IP("ip1", "ip2"),
)
@pytest.fixture()
def interactive_session(client, pipeline, monkeypatch_interactive_session, monkeypatch):
"""Provides an interactive session backed by an entry in the db."""
return InteractiveSession(client, pipeline)
@pytest.fixture()
def interactive_run(client, pipeline, METHOD_NAME, monkeypatch):
"""Provides an interactive run backed by an entry in the db."""
monkeypatch.setattr(
namespace_runs, "lock_environment_images_for_run", lambda *args, **kwargs: {}
)
return InteractiveRun(client, pipeline)
@pytest.fixture()
def job(client, pipeline):
"""Provides a job backed by an entry in the db."""
return Job(client, pipeline)
@pytest.fixture()
def environment_build(client, METHOD_NAME, project):
"""Provides an env build backed by an entry in the db."""
return EnvironmentBuild(client, project)
@pytest.fixture(autouse=True)
def monkeypatch_lock_environment_images(monkeypatch):
monkeypatch.setattr(
namespace_runs, "lock_environment_images_for_run", lambda *args, **kwargs: {}
)
monkeypatch.setattr(
namespace_jobs, "lock_environment_images_for_job", lambda *args, **kwargs: {}
)
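# Usage sketch (editorial illustration, not an original fixture): in a test module,
# requesting the `pipeline` fixture transitively builds `test_app`, `client` and
# `project`, so a test only declares what it needs. The assertions below merely check
# how these helpers are wired together.
def test_fixture_wiring_sketch(project, pipeline):
    assert pipeline.project is project
    assert pipeline.uuid != project.uuid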
| null |
5,348 |
import pytest
import numpy as np
from mindspore import context, Tensor
from mindspore.nn import Cell
import mindspore.ops as ops
from mindspore.ops import kernel
#########################
# test cases for serial #
#########################
@kernel
def add_serial_1(a, b):
out = output_tensor(a.shape, a.dtype)
row = a.shape[0]
col = a.shape[1]
for i in serial(row):
for j in range(col):
out[i, j] = a[i, j] + b[0, j]
return out
@kernel
def add_serial_2(a, b):
out = output_tensor(a.shape, a.dtype)
row = a.shape[0]
col = a.shape[1]
for i in range(row):
for j in serial(col):
out[i, j] = a[i, j] + b[0, j]
return out
###########################
# test cases for parallel #
###########################
@kernel
def add_parallel_1(a, b):
out = output_tensor(a.shape, a.dtype)
row = a.shape[0]
col = a.shape[1]
for i in range(row):
for j in parallel(col):
out[i, j] = a[i, j] + b[0, j]
return out
@kernel
def add_parallel_2(a, b):
out = output_tensor(a.shape, a.dtype)
row = a.shape[0]
col = a.shape[1]
for i in parallel(row):
for j in range(col):
out[i, j] = a[i, j] + b[0, j]
return out
@kernel
def add_parallel_3(a, b):
l0 = b.shape[1]
l1 = a.shape[0]
l2 = a.shape[1]
out = output_tensor((l0, l1, l2), a.dtype)
for i in range(l0):
for j in parallel(l1):
for k in range(l2):
out[i, j, k] = a[j, k] + b[j, i]
return out
############################
# test cases for vectorize #
############################
@kernel
def add_vectorize_1(a, b):
out = output_tensor(a.shape, a.dtype)
row = a.shape[0]
col = a.shape[1]
for j in vectorize(col):
for i in range(row):
out[i, j] = a[i, j] + b[0, j]
return out
@kernel
def add_vectorize_2(a, b):
out = output_tensor(a.shape, a.dtype)
row = a.shape[0]
col = a.shape[1]
for i in range(row):
for j in vectorize(col):
out[i, j] = a[i, j] + b[0, j]
return out
@kernel
def add_vectorize_3(a, b):
l0 = b.shape[1]
l1 = a.shape[0]
l2 = a.shape[1]
out = output_tensor((l0, l1, l2), a.dtype)
for i in vectorize(l0):
for j in range(l1):
for k in vectorize(l2):
out[i, j, k] = a[j, k] + b[j, i]
return out
#########################
# test cases for reduce #
#########################
@kernel
def add_reduce_1(a):
out = output_tensor((a.shape[0],), a.dtype)
row = a.shape[0]
col = a.shape[1]
for i in range(row):
out[i] = 0.0
for k in reduce(col):
out[i] = out[i] + a[i, k]
return out
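# For reference only (an editorial note, not used by the tests): add_reduce_1 computes
# a row-wise sum, i.e. out[i] == sum(a[i, :]). A plain-numpy restatement of that intent:
def add_reduce_1_numpy_reference(a):
    return a.sum(axis=1)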
class TestMsHybridDSLSingle(Cell):
"""Net for single input"""
def __init__(self, func, func_type):
super(TestMsHybridDSLSingle, self).__init__()
self.program = ops.Custom(func, func_type=func_type)
def construct(self, x):
return self.program(x)
class TestMsHybridDSLBin(Cell):
"""Net for binary inputs"""
def __init__(self, func, func_type):
super(TestMsHybridDSLBin, self).__init__()
self.program = ops.Custom(func, func_type=func_type)
def construct(self, x, y):
return self.program(x, y)
def METHOD_NAME(dtype, num, kernel_fn):
"""
    Test a Custom op whose function is written in the Hybrid DSL, with a single input.
"""
support_list = {"float16": np.float16, "float32": np.float32}
input1 = np.ones((num, num * 2)).astype(support_list.get(dtype))
test = TestMsHybridDSLSingle(kernel_fn, "hybrid")
output = test(Tensor(input1))
expect = kernel_fn(input1)
compare_res = np.allclose(expect, output.asnumpy(), 0.001, 0.001)
if not compare_res:
raise ValueError("Precision error, compare result: {}".format(compare_res))
def ms_kernel_bin_inputs_test(dtype, kernel_fn):
"""
    Test a Custom op whose function is written in the Hybrid DSL, with two inputs.
"""
support_list = {"float16": np.float16, "float32": np.float32}
input1 = np.ones((1024, 32)).astype(support_list.get(dtype))
input2 = np.ones((1024, 64)).astype(support_list.get(dtype))
test = TestMsHybridDSLBin(kernel_fn, "hybrid")
output = test(Tensor(input1), Tensor(input2))
expect = kernel_fn(input1, input2)
compare_res = np.allclose(expect, output.asnumpy(), 0.001, 0.001)
if not compare_res:
raise ValueError("Precision error, compare result: {}".format(compare_res))
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_ms_kernel_ascend_scheduling_intrin():
"""
Feature: test case for Custom op with new scheduling intrin
Description: ascend test case, Python DSL with kernel decorator in GRAPH_MODE.
Expectation: the result match with numpy result
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
ms_kernel_bin_inputs_test(dtype="float32", kernel_fn=add_serial_1)
ms_kernel_bin_inputs_test(dtype="float32", kernel_fn=add_serial_2)
ms_kernel_bin_inputs_test(dtype="float32", kernel_fn=add_vectorize_1)
ms_kernel_bin_inputs_test(dtype="float32", kernel_fn=add_vectorize_2)
ms_kernel_bin_inputs_test(dtype="float32", kernel_fn=add_vectorize_3)
ms_kernel_bin_inputs_test(dtype="float32", kernel_fn=add_parallel_1)
ms_kernel_bin_inputs_test(dtype="float32", kernel_fn=add_parallel_2)
ms_kernel_bin_inputs_test(dtype="float32", kernel_fn=add_parallel_3)
METHOD_NAME(dtype="float32", num=1024, kernel_fn=add_reduce_1)
| null |
5,349 |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import pytest
import numpy as np
import mindspore as ms
import mindspore.nn as nn
from mindspore.common.api import _cell_graph_executor
from mindspore.ops import operations as P
from mindspore.ops import composite as C
from mindspore import Tensor, context
from mindspore.nn import TrainOneStepCell, Adam
from tests.ut.python.ops.test_math_ops import VirtualLoss
def setup_function():
context.set_auto_parallel_context(dataset_strategy="full_batch")
grad_all = C.GradOperation(get_all=True)
@pytest.fixture(name="test_context")
def _test_context():
context.set_context(mode=context.GRAPH_MODE)
yield
context.reset_auto_parallel_context()
class GradWrap(nn.Cell):
def __init__(self, network):
super(GradWrap, self).__init__()
self.network = network
def construct(self, x, y, z):
return grad_all(self.network)(x, y, z)
class NetWithLoss(nn.Cell):
def __init__(self, network):
super(NetWithLoss, self).__init__()
self.loss = VirtualLoss()
self.network = network
def construct(self, x, y, z):
predict = self.network(x, y, z)
return self.loss(predict)
class Net(nn.Cell):
def __init__(self, shape, field_size=10, slice_mode=nn.EmbeddingLookup.BATCH_SLICE, target="Device",
operator='SUM'):
super().__init__()
self.embedding = nn.MultiFieldEmbeddingLookup(vocab_size=32, embedding_size=64, target=target,
field_size=field_size, slice_mode=slice_mode, operator=operator)
self.reshape = P.Reshape()
self.batch_size = shape[0]
def construct(self, x, y, z):
out = self.embedding(x, y, z)
out = self.reshape(out, (self.batch_size, -1))
return out
def compile_net(net, shape):
x = Tensor(np.ones(shape), dtype=ms.int32)
y = Tensor(np.ones(shape), dtype=ms.float32)
z = Tensor(np.ones(shape), dtype=ms.int32)
optimizer = Adam(net.trainable_params(), learning_rate=0.1)
train_net = TrainOneStepCell(net, optimizer)
train_net.set_train()
_cell_graph_executor.compile(train_net, x, y, z)
context.reset_auto_parallel_context()
def test_embeddinglookup_batch_parallel_sum(test_context):
context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
shape = [64, 64]
net = NetWithLoss(Net(shape, field_size=10, target='DEVICE'))
compile_net(net, shape)
def test_embeddinglookup_row_parallel_sum(test_context):
context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
shape = [64, 64]
net = NetWithLoss(Net(shape, field_size=9, slice_mode=nn.EmbeddingLookup.TABLE_ROW_SLICE, target='DEVICE'))
compile_net(net, shape)
def test_embeddinglookup_column_parallel_sum(test_context):
context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
shape = [64, 64]
net = NetWithLoss(Net(shape, field_size=10, slice_mode=nn.EmbeddingLookup.TABLE_COLUMN_SLICE, target='DEVICE'))
compile_net(net, shape)
def test_embeddinglookup_batch_parallel_mean(test_context):
context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
shape = [64, 64]
net = NetWithLoss(Net(shape, field_size=1, target='DEVICE', operator='MEAN'))
compile_net(net, shape)
def test_embeddinglookup_column_parallel_mean(test_context):
context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
shape = [64, 64]
net = NetWithLoss(Net(shape, target='DEVICE', slice_mode=nn.EmbeddingLookup.TABLE_COLUMN_SLICE, operator='MEAN'))
compile_net(net, shape)
def test_embeddinglookup_row_parallel_mean(test_context):
context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
shape = [64, 64]
net = NetWithLoss(Net(shape, target='DEVICE', slice_mode=nn.EmbeddingLookup.TABLE_ROW_SLICE, operator='MEAN'))
compile_net(net, shape)
def test_embeddinglookup_batch_parallel_max(test_context):
context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
shape = [64, 64]
net = NetWithLoss(Net(shape, target='DEVICE', operator='MAX'))
compile_net(net, shape)
def test_embeddinglookup_column_parallel_max(test_context):
context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
shape = [64, 64]
net = NetWithLoss(Net(shape, target='DEVICE', slice_mode=nn.EmbeddingLookup.TABLE_COLUMN_SLICE, operator='MAX'))
compile_net(net, shape)
def METHOD_NAME(test_context):
context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
shape = [64, 64]
net = NetWithLoss(Net(shape, target='DEVICE', slice_mode=nn.EmbeddingLookup.TABLE_ROW_SLICE, operator='MAX'))
compile_net(net, shape)
| null |
5,350 |
import unittest
from tinygrad.shape.shapetracker import ShapeTracker, View
from tinygrad.shape.symbolic import Variable
from tinygrad.tensor import Tensor
class TestSymbolic(unittest.TestCase):
def test_symbolic_st(self):
x = Variable("x", 1, 100)
st = ShapeTracker((x, 3))
assert st.shape == (x, 3)
assert st.real_strides() == (3, 1)
def test_expr_idxs(self):
x = Variable("x", 1, 100)
st = ShapeTracker((x, 3))
idxs = [Variable("x", 0, 100), Variable("y", 0, 100)]
e1, e2 = st.expr_idxs(idxs)
assert e1.render() == "((x*3)+y)"
assert e2.render() == "1"
st.permute((1, 0))
e1, e2 = st.expr_idxs(idxs)
assert e1.render() == "((y*3)+x)"
assert e2.render() == "1"
def test_cat_strides(self):
i = Variable("i", 1, 5)
j = Variable("j", 1, 5)
k = Variable("k", 1, 5)
t = Tensor.rand(3, 4).reshape(i, 4).cat(Tensor.rand(3, 4).reshape(j, 4), dim=0).cat(Tensor.rand(3, 4).reshape(k, 4), dim=0)
st = t.lazydata.st
assert st.shape == (i+j+k, 4)
assert st.real_strides() == (4, 1)
t = Tensor.rand(3, 4).reshape(3, i).cat(Tensor.rand(3, 4).reshape(3, j), dim=1).cat(Tensor.rand(3, 4).reshape(3, k), dim=1)
st = t.lazydata.st
assert st.shape == (3, i+j+k)
assert st.real_strides() == (i+j+k, 1)
t = Tensor.rand(i, 3).reshape(i, 3).cat(Tensor.rand(3, 3).reshape(i, 3), dim=0).cat(Tensor.rand(3, 3), dim=0)
st = t.lazydata.st
assert st.shape == (2*i+3, 3)
assert st.real_strides() == (3, 1)
class TestSymbolicReshape(unittest.TestCase):
def test_reshape_into_symbols_simple(self):
vi = Variable("i", 1, 5)
for i in range(1, 6):
t = Tensor.rand(i, 4).reshape(vi, 4)
assert t.shape == (vi, 4)
assert t.lazydata.var_vals[vi] == i
t = Tensor.rand(i, 6).reshape(vi, 2, 3)
assert t.shape == (vi, 2, 3)
assert t.lazydata.var_vals[vi] == i
def test_reshape_symbols_reshape_ints(self):
vi = Variable("i", 1, 5)
for i in range(1, 6):
t = Tensor.rand(i, 4).reshape(vi, 4)
assert t.shape == (vi, 4)
assert t.lazydata.var_vals == {vi: i}
t = t.reshape(i, 4)
assert t.shape == (i, 4)
assert t.lazydata.var_vals == {vi: i}
def test_reshape_reuse_var_same_value_ok(self):
vi = Variable("i", 1, 5)
for i in range(1, 6):
a = Tensor.rand(i, 4).reshape(vi, 4)
b = Tensor.rand(i, 3).reshape(vi, 3)
assert a.lazydata.var_vals[vi] == i
assert b.lazydata.var_vals[vi] == i
def test_reshape_reuse_var_different_value_ok(self):
vi = Variable("i", 1, 10)
for i in range(1, 6):
a = Tensor.rand(i, 4).reshape(vi, 2)
b = Tensor.rand(i, 3).reshape(vi, 3)
            # a and b bind different values to vi: (i, 4) -> (vi, 2) gives vi == 2*i, while (i, 3) -> (vi, 3) gives vi == i
assert a.lazydata.var_vals[vi] == 2 * i
assert b.lazydata.var_vals[vi] == i
def test_reshape_into_symbols_bad_shape(self):
vi = Variable("i", 1, 10)
vj = Variable("j", 1, 10)
with self.assertRaises(AssertionError):
t = Tensor.rand(3, 4).reshape(vi, vj) # reshape into two variables
with self.assertRaises(AssertionError):
t = Tensor.rand(4, 4).reshape(vi, vi) # reshape into same variable in 2 dimensions
with self.assertRaises(AssertionError):
t = Tensor.rand(4, 6).reshape(vi, 6).reshape(vi, 4) # conflicted implied variable values
with self.assertRaises(AssertionError):
t = Tensor.rand(4, 6).reshape(vi, 6).reshape(1, 77) # reshape to a different size new shape through symbolic shape
with self.assertRaises(AssertionError):
t = Tensor.rand(100, 4).reshape(Variable("too_small", 1, 10), 4)
with self.assertRaises(AssertionError):
t = Tensor.rand(3, 4).reshape(Variable("too_big", 100, 200), 4)
with self.assertRaises(AssertionError):
t = Tensor.rand(3, 4).reshape(3, (vi+1)) # reshape into non-Variable Node
def METHOD_NAME(self):
vi = Variable("i", 1, 5)
vj = Variable("j", 1, 5)
for i in range(1, 6):
for j in range(1, 6):
t1 = Tensor.rand(i, 5).reshape(vi, 5)
t2 = Tensor.rand(5, j).reshape(5, vj)
t = t1@t2
assert t.shape == (vi, vj)
t = t.reshape(1, vi*vj)
assert t.shape == (1, vi*vj)
t = t.reshape(vj, vi)
assert t.shape == (vj, vi)
class TestSymbolicExpand(unittest.TestCase):
def test_expand_into_symbols(self):
vi = Variable("i", 1, 5)
vj = Variable("j", 1, 5)
a = Tensor([[1], [2], [3]]).expand((3, vi))
assert a.shape == (3, vi)
assert a.lazydata.var_vals == {}
a = a.reshape(3, vi, 1).expand((3, vi, vj))
assert a.shape == (3, vi, vj)
assert a.lazydata.var_vals == {}
def test_plus_expands_constant(self):
vi = Variable("i", 1, 5)
for i in range(1, 6):
a = Tensor.rand(3, i).reshape(3, vi)
a = a + 1
assert a.shape == (3, vi)
class TestSymbolicShrink(unittest.TestCase):
def test_shrink_symbols(self):
vi = Variable("i", 1, 5)
t = Tensor.rand(3, 5).shrink(((0, 2), (vi, vi+1)))
assert t.shape == (2, 1)
class TestSymbolicShapeExpr(unittest.TestCase):
def test_symbolic_expr_idxs(self):
# taken from symbolic shape llama
i = Variable("i", 1, 120)
gidx0 = Variable("gidx0", 0, i)
lidx1 = Variable("lidx1", 0, 7)
idx = (gidx0, lidx1, Variable.num(1))
shape = (i+1, 8, 4)
strides = (1, (i*4)+4, i+1)
view = View.create(shape, strides)
st = ShapeTracker(shape, [view])
idx, valid = st.expr_idxs(idx)
assert idx.render() == "((lidx1*((i*4)+4))+1+gidx0+i)"
class TestShapeTrackerVarVals(unittest.TestCase):
def test_reshape_reshape_updates_var_vals(self):
vi = Variable("i", 1, 5)
vj = Variable("j", 1, 5)
t = Tensor.rand(3, 4).reshape(3, vi).reshape(4, vj)
assert t.lazydata.var_vals == {vi: 4, vj: 3}
def test_lazy_check_var_vals(self):
vi = Variable("i", 1, 5)
a = Tensor.rand(3, 4).reshape(3, vi)
b = Tensor.rand(5, 6).reshape(vi, 6)
assert a.lazydata.var_vals == {vi: 4}
assert b.lazydata.var_vals == {vi: 5}
c = a@b
# shapetracker works with symbolic shape and doesn't check the underlying variable values
assert c.shape == (3, 6)
assert c.lazydata.var_vals == {vi: 4}
if __name__ == '__main__':
    unittest.main()
| null |
5,351 |
# mypy: ignore-errors
from pytorch_pfn_extras.dataset.tabular import tabular_dataset
class _Concat(tabular_dataset.TabularDataset):
def __init__(self, *datasets):
for dataset in datasets[1:]:
if not dataset.METHOD_NAME == datasets[0].METHOD_NAME:
raise ValueError("All datasets must have the same keys")
self._datasets = datasets
def __len__(self):
return sum(len(dataset) for dataset in self._datasets)
@property
def METHOD_NAME(self):
return self._datasets[0].METHOD_NAME
@property
def mode(self):
return self._datasets[0].mode
def get_examples(self, indices, key_indices):
if key_indices is None:
n_cols = len(self.METHOD_NAME)
else:
n_cols = len(key_indices)
if indices is None:
examples = [
dataset.get_examples(None, key_indices)
for dataset in self._datasets
]
return tuple(
[
data
for sub_examples in examples
for data in sub_examples[col_index]
]
for col_index in range(n_cols)
)
        elif isinstance(indices, slice):
            start, stop, step = indices.indices(len(self))
            examples = []
            offset = 0
            for dataset in self._datasets:
                # Translate the global slice into this sub-dataset's local
                # index space.
                sub_start = start - offset
                sub_stop = stop - offset
                if step > 0:
                    # Clamp the local window to the dataset while keeping the
                    # stride phase: a negative start is advanced to the first
                    # index that the global stride actually hits here.
                    if sub_start < 0:
                        sub_start %= step
                    sub_stop = min(sub_stop, len(dataset))
                else:
                    # Negative step: pull the start back into range while
                    # preserving the stride phase, and stop no earlier than -1.
                    if sub_start >= len(dataset):
                        sub_start = (
                            len(dataset) + (sub_start - len(dataset)) % step
                        )
                    sub_stop = max(sub_stop, -1)
                # Only query sub-datasets the slice actually touches; a negative
                # bound would be re-interpreted relative to the end, so replace
                # it with None (i.e. "up to the boundary").
                if len(range(sub_start, sub_stop, step)) > 0:
                    if sub_start < 0 and step > 0:
                        sub_start = None
                    if sub_stop < 0 and step < 0:
                        sub_stop = None
examples.append(
dataset.get_examples(
slice(sub_start, sub_stop, step), key_indices
)
)
offset += len(dataset)
if len(examples) == 0:
return tuple([] for _ in range(n_cols))
elif len(examples) == 1:
return examples[0]
else:
if step < 0:
examples.reverse()
return tuple(
[
data
for sub_examples in examples
for data in sub_examples[col_index]
]
for col_index in range(n_cols)
)
else:
examples = {}
example_indices = [None] * len(indices)
offset = 0
for dataset_index, dataset in enumerate(self._datasets):
sub_indices = []
for p, index in enumerate(indices):
if index < offset or offset + len(dataset) <= index:
continue
sub_indices.append(index - offset)
example_indices[p] = (dataset_index, len(sub_indices) - 1)
if len(sub_indices) > 0:
examples[dataset_index] = dataset.get_examples(
sub_indices, key_indices
)
offset += len(dataset)
if len(examples) == 0:
return tuple([] for _ in range(n_cols))
elif len(examples) == 1:
return list(examples.values())[0]
else:
return tuple(
[
examples[dataset_index][col_index][p]
for dataset_index, p in example_indices
]
for col_index in range(n_cols)
)
def convert(self, data):
return self._datasets[0].convert(data)
| null |
5,352 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
from mantid.api import * # PythonAlgorithm, registerAlgorithm, WorkspaceProperty
from mantid.simpleapi import *
from mantid.kernel import *
# pylint: disable=no-init, too-many-arguments
class ApplyNegMuCorrection(PythonAlgorithm):
    # Combining workspaces and normalising the correction.
# _OILE and _OIHE are unused parameters for now to make PyLint happy
def combine(self, dataDir, runno, A2000, B2000, A3000, B3000, _OILE, _OIHE, spec):
if spec < 10:
specNo = "0" + str(spec)
else:
specNo = str(spec)
print(dataDir + "ral0" + runno + ".rooth30" + specNo + ".dat")
# loading data
rooth30_filename = dataDir + r"\ral0" + runno + ".rooth30" + specNo + ".dat"
rooth20_filename = dataDir + r"\ral0" + runno + ".rooth20" + specNo + ".dat"
try:
ws3000 = Load(Filename=rooth30_filename, OutputWorkspace="ws3000")
except RuntimeError:
raise RuntimeError("could not find file: " + rooth30_filename)
try:
ws2000 = Load(Filename=rooth20_filename, OutputWorkspace="ws2000")
except RuntimeError:
raise RuntimeError("could not find file: " + rooth20_filename)
# Correcting for Gain and offset of the detectors
ws2000_corr = CreateWorkspace(A2000 * ws2000.readX(0)[:] + B2000, ws2000.readY(0)[:])
ws3000_corr = CreateWorkspace(A3000 * ws3000.readX(0)[:] + B3000, ws3000.readY(0)[:])
# Summing total counts for normalisation
ws2000_total = 0
ws3000_total = 0
for count in range(0, 8000):
ws2000_total = ws2000_corr.readY(0)[count] + ws2000_total
ws3000_total = ws3000_corr.readY(0)[count] + ws3000_total
print(ws2000_total)
print(ws3000_total)
# normalising
ws2000_corr = ws2000_corr / ws2000_total
ws3000_corr = ws3000_corr / ws3000_total
        # rebinning to add detectors together
data = [100, ws2000.readX(0)[2] - ws2000.readX(0)[1], 8000]
ws2000_corr_rebin = Rebin(ws2000_corr, data)
ws3000_corr_rebin = Rebin(ws3000_corr, data)
ws_ral = Plus(ws2000_corr_rebin, ws3000_corr_rebin)
suf = "_" + str(spec) + "_" + runno
RenameWorkspaces(ws_ral, Suffix=suf)
RenameWorkspaces(ws2000_corr, Suffix=suf)
RenameWorkspaces(ws3000_corr, Suffix=suf)
DeleteWorkspace(ws2000)
DeleteWorkspace(ws3000)
DeleteWorkspace(ws2000_corr_rebin)
DeleteWorkspace(ws3000_corr_rebin)
def PyInit(self):
self.declareProperty(
FileProperty(name="DataDirectory", defaultValue=r"", action=FileAction.OptionalDirectory), doc="Data directory"
)
self.declareProperty(name="FirstRunNumber", defaultValue=1718, doc="First Run Number")
self.declareProperty(name="LastRunNumber", defaultValue=1719, doc="Last Run Number")
self.declareProperty(name="GainRIKENHighE", defaultValue=1.077, doc="Gain RIKEN High E")
self.declareProperty(name="OffsetRIKENHighE", defaultValue=-1, doc="OffSet RIKEN High E")
self.declareProperty(name="GainISISHighE", defaultValue=1.278, doc="Gain ISIS High E")
self.declareProperty(name="OffsetISISHighE", defaultValue=-12, doc="OffSet ISIS High E")
self.declareProperty(name="GainISISLowE", defaultValue=1.2, doc="Gain ISIS Low E")
self.declareProperty(name="OffsetISISLowE", defaultValue=0.0, doc="OffSet ISIS Low E")
def METHOD_NAME(self):
return "CorrectionFunctions\\SpecialCorrections;Muon"
def PyExec(self):
spec = 1
dataDir = self.getProperty("DataDirectory").value
first = self.getProperty("FirstRunNumber").value
last = self.getProperty("LastRunNumber").value + 1
GRHE = self.getProperty("GainRIKENHighE").value
ORHE = self.getProperty("OffsetRIKENHighE").value
GIHE = self.getProperty("GainISISHighE").value
OIHE = self.getProperty("OffsetISISHighE").value
GILE = self.getProperty("GainISISLowE").value
OILE = self.getProperty("OffsetISISLowE").value
for run in range(first, last):
for spec in range(0, 3):
runno = str(run)
self.combine(dataDir, runno, GRHE, ORHE, GIHE, OIHE, GILE, OILE, spec)
self.combine(dataDir, runno, GRHE, ORHE, GIHE, OIHE, GILE, OILE, 10)
AlgorithmFactory.subscribe(ApplyNegMuCorrection)
| null |
5,353 |
import json
import time
from _orchest.internals.test_utils import gen_uuid
def mocked_abortable_async_result(abort):
class MockAbortableAsyncResult:
def __init__(self, task_uuid) -> None:
pass
def is_aborted(self):
return abort
return MockAbortableAsyncResult
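# Behaviour sketch (editorial illustration): every instance produced by the returned
# class reports the abort state that was fixed at factory time.
def _abortable_async_result_demo():
    aborted_cls = mocked_abortable_async_result(abort=True)
    return aborted_cls("some-task-uuid").is_aborted()  # -> True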
def mocked_docker_client(_NOT_TO_BE_LOGGED, build_events):
class MockDockerClient:
def __init__(self):
            # A way to mock properties that are accessed like:
# docker_client.images.get(build_context["base_image"])
self.images = self
self.api = self
@staticmethod
def from_env():
return MockDockerClient()
# Will be used as docker_client.images.get(...).
def get(self, *args, **kwargs):
pass
# Will be used as docker_client.api.build(...).
def build(self, path, tag, *args, **kwargs):
# The env build process should only log events/data between
# the flags.
events = (
[_NOT_TO_BE_LOGGED]
+ ["_ORCHEST_RESERVED_FLAG_"]
+ build_events
+ ["_ORCHEST_RESERVED_FLAG_"]
+ [_NOT_TO_BE_LOGGED]
)
data = []
for event in events:
if event is None:
event = {"error": "error"}
else:
event = {"stream": event + "\n"}
data.append(json.dumps(event))
# This way tasks can be aborted, otherwise it might be done
# building an image before the parent process has the chance
# to check if it has been aborted.
time.sleep(0.5)
return iter(data)
return MockDockerClient
def mocked_socketio_class(socketio_data):
class MockSocketIOClient:
def __init__(self, *args, **kwargs) -> None:
self.on_connect = None
def connect(self, *args, **kwargs):
socketio_data["has_connected"] = True
self.on_connect()
def sleep(self, *args, **kwargs):
time.sleep(args[0])
def disconnect(self, *args, **kwargs):
socketio_data["has_disconnected"] = True
def emit(self, name, data, *args, **kwargs):
if "output" in data:
socketio_data["output_logs"].append(data["output"])
# disconnect is passed as a callback
if "callback" in kwargs:
kwargs["callback"]()
def on(self, event, *args, **kwargs):
if event == "connect":
def set_handler(handler):
self.on_connect = handler
return handler
return set_handler
return MockSocketIOClient
class MockRequestReponse:
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
pass
def json(self):
pass
def create_env_build_request(project_uuid, n):
request = {"environment_build_requests": []}
for _ in range(n):
request["environment_build_requests"].append(
{
"project_uuid": project_uuid,
"project_path": "project_path",
"environment_uuid": gen_uuid(),
}
)
return request
def create_pipeline_run_spec(project_uuid, METHOD_NAME, n_steps=1):
steps = {}
for i in range(n_steps):
step = {
"incoming_connections": [],
"name": f"step-{i}",
"uuid": f"uuid-{i}",
"file_path": "",
"environment": "my-env",
}
steps[f"uuid-{i}"] = step
return {
"pipeline_definition": {
"name": "pipeline-name",
"project_uuid": project_uuid,
"uuid": METHOD_NAME,
"settings": {},
"parameters": {},
"steps": steps,
},
"uuids": [],
"project_uuid": project_uuid,
"run_type": "full",
"run_config": {},
}
def create_job_spec(
project_uuid,
METHOD_NAME,
cron_schedule=None,
scheduled_start=None,
parameters=[{}],
pipeline_run_spec=None,
max_retained_pipeline_runs=-1,
):
if pipeline_run_spec is None:
pipeline_run_spec = create_pipeline_run_spec(project_uuid, METHOD_NAME)
job_spec = {
"uuid": gen_uuid(),
"name": "job-name",
"project_uuid": project_uuid,
"pipeline_uuid": METHOD_NAME,
"pipeline_name": "pipeline-name",
"cron_schedule": cron_schedule,
"parameters": parameters,
"pipeline_definition": pipeline_run_spec["pipeline_definition"],
"pipeline_run_spec": pipeline_run_spec,
"scheduled_start": scheduled_start,
"strategy_json": {},
"max_retained_pipeline_runs": max_retained_pipeline_runs,
}
return job_spec
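# Usage sketch (editorial illustration, not used by the suite): a recurring job spec
# for an existing project/pipeline pair; the cron expression is an arbitrary example.
def example_recurring_job_spec(project, pipeline):
    return create_job_spec(project.uuid, pipeline.uuid, cron_schedule="* * * * *")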
class Project:
def __init__(self, client, uuid, env_variables=None):
self.uuid = uuid
project = {"uuid": self.uuid, "env_variables": env_variables}
if env_variables is None:
project["env_variables"] = {}
client.post("/api/projects/", json=project)
class Pipeline:
def __init__(self, client, proj, uuid, env_variables=None):
self.project = proj
self.uuid = uuid
pipeline = {
"project_uuid": proj.uuid,
"uuid": self.uuid,
"env_variables": env_variables,
}
if env_variables is None:
pipeline["env_variables"] = {}
client.post("/api/pipelines/", json=pipeline)
class InteractiveSession:
def __init__(self, client, pipeline):
self.project = pipeline.project
self.pipeline = pipeline
session_request_spec = {
"project_uuid": pipeline.project.uuid,
"pipeline_uuid": pipeline.uuid,
"pipeline_path": "pip_path",
"project_dir": "project_dir",
"userdir": "userdir_pvc",
}
client.post("/api/sessions/", json=session_request_spec)
@property
def project_uuid(self):
return self.project.uuid
@property
def METHOD_NAME(self):
return self.pipeline.uuid
class InteractiveRun:
def __init__(self, client, pipeline):
self.project = pipeline.project
self.pipeline = pipeline
spec = create_pipeline_run_spec(self.project.uuid, self.pipeline.uuid)
self.uuid = client.post("/api/runs/", json=spec).get_json()["uuid"]
class Job:
def __init__(self, client, pipeline):
self.project = pipeline.project
self.pipeline = pipeline
job_spec = create_job_spec(self.project.uuid, self.pipeline.uuid)
self.uuid = client.post("/api/jobs/", json=job_spec).get_json()["uuid"]
class EnvironmentBuild:
def __init__(self, client, project):
self.project = project
req = create_env_build_request(project.uuid, 1)
self.environment_uuid = req["environment_build_requests"][0]["environment_uuid"]
data = client.post("/api/environment-builds/", json=req).get_json()
self.uuid = data["environment_builds"][0]["uuid"]
class EagerScheduler:
def __init__(self, *args, **kwargs):
pass
def start(self):
pass
def add_job(self, func, args=None, kwargs=None, *myargs, **mykwargs):
args = () if args is None else args
kwargs = {} if kwargs is None else kwargs
func(*args, **kwargs)
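# Behaviour sketch (editorial illustration): add_job runs the callable synchronously,
# which is what lets the API tests avoid waiting on a real scheduler.
def _eager_scheduler_demo():
    ran = []
    EagerScheduler().add_job(ran.append, args=("done",))
    return ran  # -> ["done"]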
| null |
5,354 |
import copy
import logging
import pytest
import kopf
from kopf._cogs.structs.bodies import Body, RawBody, RawEvent, RawMeta
from kopf._cogs.structs.ephemera import Memo
from kopf._cogs.structs.patches import Patch
from kopf._core.actions.execution import cause_var
from kopf._core.actions.invocation import context
from kopf._core.engines.indexing import OperatorIndexers
from kopf._core.intents.causes import ChangingCause, Reason, WatchingCause
OWNER_API_VERSION = 'owner-api-version'
OWNER_NAMESPACE = 'owner-namespace'
OWNER_KIND = 'OwnerKind'
OWNER_NAME = 'owner-name'
OWNER_UID = 'owner-uid'
OWNER_LABELS = {'label-1': 'value-1', 'label-2': 'value-2'}
OWNER = RawBody(
apiVersion=OWNER_API_VERSION,
kind=OWNER_KIND,
metadata=RawMeta(
namespace=OWNER_NAMESPACE,
name=OWNER_NAME,
uid=OWNER_UID,
labels=OWNER_LABELS,
),
)
@pytest.fixture(params=['state-changing-cause', 'event-watching-cause'])
def owner(request, resource):
body = Body(copy.deepcopy(OWNER))
if request.param == 'state-changing-cause':
cause = ChangingCause(
logger=logging.getLogger('kopf.test.fake.logger'),
indices=OperatorIndexers().indices,
resource=resource,
patch=Patch(),
memo=Memo(),
body=body,
initial=False,
reason=Reason.NOOP,
)
with context([(cause_var, cause)]):
yield body
elif request.param == 'event-watching-cause':
cause = WatchingCause(
logger=logging.getLogger('kopf.test.fake.logger'),
indices=OperatorIndexers().indices,
resource=resource,
patch=Patch(),
memo=Memo(),
body=body,
type='irrelevant',
event=RawEvent(type='irrelevant', object=OWNER),
)
with context([(cause_var, cause)]):
yield body
else:
raise RuntimeError(f"Wrong param for `owner` fixture: {request.param!r}")
def test_when_unset_for_owner_references_appending():
with pytest.raises(LookupError) as e:
kopf.append_owner_reference([])
assert 'Owner must be set explicitly' in str(e.value)
def test_when_unset_for_owner_references_removal():
with pytest.raises(LookupError) as e:
kopf.remove_owner_reference([])
assert 'Owner must be set explicitly' in str(e.value)
def test_when_unset_for_name_harmonization():
with pytest.raises(LookupError) as e:
kopf.harmonize_naming([])
assert 'Owner must be set explicitly' in str(e.value)
def test_when_unset_for_namespace_adjustment():
with pytest.raises(LookupError) as e:
kopf.adjust_namespace([])
assert 'Owner must be set explicitly' in str(e.value)
def test_when_unset_for_labelling():
with pytest.raises(LookupError) as e:
kopf.label([])
assert 'Owner must be set explicitly' in str(e.value)
def test_when_unset_for_adopting():
with pytest.raises(LookupError) as e:
kopf.adopt([])
assert 'Owner must be set explicitly' in str(e.value)
def test_when_empty_for_name_harmonization(owner):
owner._replace_with({})
with pytest.raises(LookupError) as e:
kopf.harmonize_naming([])
assert 'Name must be set explicitly' in str(e.value)
def test_when_empty_for_namespace_adjustment(owner):
# An absent namespace means a cluster-scoped resource -- a valid case.
obj = {}
owner._replace_with({})
kopf.adjust_namespace(obj)
assert obj['metadata']['namespace'] is None
def test_when_empty_for_adopting(owner):
owner._replace_with({})
with pytest.raises(LookupError):
kopf.adopt([])
# any error message: the order of functions is not specific.
def test_when_set_for_name_harmonization(owner):
obj = {}
kopf.harmonize_naming(obj)
assert obj['metadata']['generateName'].startswith(OWNER_NAME)
def test_when_set_for_namespace_adjustment(owner):
obj = {}
kopf.adjust_namespace(obj)
assert obj['metadata']['namespace'] == OWNER_NAMESPACE
def METHOD_NAME(owner):
obj = {}
kopf.append_owner_reference(obj)
assert obj['metadata']['ownerReferences']
assert obj['metadata']['ownerReferences'][0]['uid'] == OWNER_UID
def test_when_set_for_owner_references_removal(owner):
obj = {}
kopf.append_owner_reference(obj) # assumed to work, tested above
kopf.remove_owner_reference(obj) # this one is being tested here
assert not obj['metadata']['ownerReferences']
def test_when_set_for_labelling(owner):
obj = {}
kopf.label(obj)
assert obj['metadata']['labels'] == {'label-1': 'value-1', 'label-2': 'value-2'}
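# Sketch (editorial, not an original test): the "Owner must be set explicitly" errors
# above imply these helpers also accept an explicit owner, bypassing the cause context
# that the `owner` fixture sets up. Kept as an uncollected helper because the keyword
# argument is an assumption about the kopf API, not something exercised by this suite.
def _explicit_owner_sketch():
    obj = {}
    kopf.append_owner_reference(obj, owner=Body(copy.deepcopy(OWNER)))
    assert obj['metadata']['ownerReferences'][0]['uid'] == OWNER_UID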
| null |
5,355 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2021 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import unittest
from unittest import mock
from mantidqtinterfaces.Muon.GUI.Common.contexts.corrections_context import CorrectionsContext
from mantidqtinterfaces.Muon.GUI.Common.corrections_tab_widget.background_corrections_model import BackgroundCorrectionData
from mantidqtinterfaces.Muon.GUI.Common.muon_load_data import MuonLoadData
class CorrectionsContextTest(unittest.TestCase):
def setUp(self):
self.muon_data = MuonLoadData()
self.corrections_context = CorrectionsContext(self.muon_data)
def test_that_the_context_has_been_instantiated_with_the_expected_context_data(self):
self.assertEqual(self.corrections_context.current_run_string, None)
self.assertEqual(self.corrections_context.dead_time_source, "FromFile")
self.assertEqual(self.corrections_context.dead_time_table_name_from_ads, None)
self.assertEqual(self.corrections_context.background_corrections_mode, "None")
self.assertEqual(self.corrections_context.selected_function, "Flat Background + Exp Decay")
self.assertEqual(self.corrections_context.selected_group, "All")
self.assertEqual(self.corrections_context.show_all_runs, False)
def test_that_the_current_run_string_can_be_set_as_expected(self):
run_string = "62260"
self.corrections_context.current_run_string = run_string
self.assertEqual(self.corrections_context.current_run_string, run_string)
def test_that_the_dead_time_source_can_be_set_as_expected(self):
source = "FromADS"
self.corrections_context.dead_time_source = source
self.assertEqual(self.corrections_context.dead_time_source, source)
def test_that_the_dead_time_table_name_from_ads_can_be_set_as_expected(self):
table_name = "MUSR62260 dead time table"
self.corrections_context.dead_time_table_name_from_ads = table_name
self.assertEqual(self.corrections_context.dead_time_table_name_from_ads, table_name)
def test_that_the_dead_time_table_name_from_file_can_be_set_as_expected(self):
table_name = "MUSR62260 dead time table"
self.corrections_context.dead_time_table_name_from_file = table_name
self.assertEqual(self.corrections_context.dead_time_table_name_from_file, table_name)
def test_that_current_dead_time_table_name_returns_the_expected_table_when_the_source_is_from_file(self):
table_from_file = "MUSR62260 dead time table"
table_from_ads = "MUSR62265 dead time table"
self.corrections_context.get_default_dead_time_table_name_for_run = mock.Mock(return_value=table_from_file)
self.corrections_context.dead_time_source = "FromFile"
self.corrections_context.dead_time_table_name_from_ads = table_from_ads
self.assertEqual(self.corrections_context.current_dead_time_table_name_for_run("MUSR", [62260]), table_from_file)
def METHOD_NAME(self):
table_from_ads = "MUSR62265 dead time table"
self.corrections_context.dead_time_source = "FromADS"
self.corrections_context.dead_time_table_name_from_ads = table_from_ads
self.assertEqual(self.corrections_context.current_dead_time_table_name_for_run("MUSR", [62265]), table_from_ads)
def test_that_current_dead_time_table_name_returns_none_when_the_source_is_from_none(self):
table_from_ads = "MUSR62265 dead time table"
self.corrections_context.dead_time_source = None
self.corrections_context.dead_time_table_name_from_ads = table_from_ads
self.assertEqual(self.corrections_context.current_dead_time_table_name_for_run("MUSR", [62265]), None)
def test_that_the_background_corrections_mode_can_be_set_as_expected(self):
background_corrections_mode = "Auto"
self.corrections_context.background_corrections_mode = background_corrections_mode
self.assertEqual(self.corrections_context.background_corrections_mode, background_corrections_mode)
def test_that_the_selected_function_can_be_set_as_expected(self):
selected_function = "Flat Background + Exp Decay"
self.corrections_context.selected_function = selected_function
self.assertEqual(self.corrections_context.selected_function, selected_function)
def test_that_the_selected_group_can_be_set_as_expected(self):
selected_group = "fwd"
self.corrections_context.selected_group = selected_group
self.assertEqual(self.corrections_context.selected_group, selected_group)
def test_that_show_all_runs_can_be_set_as_expected(self):
show_all_runs = True
self.corrections_context.show_all_runs = show_all_runs
self.assertEqual(self.corrections_context.show_all_runs, show_all_runs)
def test_that_the_background_correction_data_can_be_set_as_expected(self):
run_group = tuple(["84447", "fwd"])
start_x, end_x = 15.0, 30.0
self.corrections_context.background_correction_data[run_group] = BackgroundCorrectionData(True, 5, start_x, end_x)
self.assertTrue(run_group in self.corrections_context.background_correction_data)
self.assertEqual(self.corrections_context.background_correction_data[run_group].use_raw, True)
self.assertEqual(self.corrections_context.background_correction_data[run_group].rebin_fixed_step, 5)
self.assertEqual(self.corrections_context.background_correction_data[run_group].start_x, start_x)
self.assertEqual(self.corrections_context.background_correction_data[run_group].end_x, end_x)
if __name__ == "__main__":
unittest.main(buffer=False, verbosity=2)
| null |
5,356 |
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.nn as nn
from mindspore import Tensor
from mindspore import context
from mindspore.ops.operations import _grad_ops as G
from mindspore.ops.functional import vmap
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
class CdistGradTEST(nn.Cell):
def __init__(self, p):
super(CdistGradTEST, self).__init__()
self.cdist_grad = G.CdistGrad(p)
def construct(self, grad, x1, x2, dist):
return self.cdist_grad(grad, x1, x2, dist)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_CdistGradP0_float32():
"""
Feature: Cdist cpu kernel
    Description: test the cdist gradient with p = 3.0.
    Expectation: the output is the same as the numpy result
"""
cdist_grad = CdistGradTEST(3.)
grad = Tensor(np.array([[[1.0, 1.0], [2.0, 2.0]]]).astype(np.float32))
x1 = Tensor(np.array([[[1.0, 1.0], [2.0, 2.0]]]).astype(np.float32))
x2 = Tensor(np.array([[[3.0, 3.0], [3.0, 3.0]]]).astype(np.float32))
dist = Tensor(np.array([[[3.0, 3.0], [3.0, 3.0]]]).astype(np.float32))
output = cdist_grad(grad, x1, x2, dist)
expect = np.array(
[[[-0.8888889, -0.8888889], [-0.44444445, -0.44444445]]]).astype(np.float32)
print(output)
np.testing.assert_allclose(output.asnumpy(), expect, rtol=1e-3)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def METHOD_NAME():
"""
Feature: cdist vmap.
    Description: test the correctness of the cdist vmap feature.
Expectation: Success.
"""
def cal_cdist_grad(grad, x1, x2, dist):
return G.CdistGrad(3.0)(grad, x1, x2, dist)
grad = Tensor(np.array([[[1.0, 1.0], [2.0, 2.0]], [[1.0, 1.0], [2.0, 2.0]]]).astype(np.float32))
x1 = Tensor(np.array([[[1.0, 1.0], [2.0, 2.0]], [[1.0, 1.0], [2.0, 2.0]]]).astype(np.float32))
x2 = Tensor(np.array([[[3.0, 3.0], [3.0, 3.0]], [[3.0, 3.0], [3.0, 3.0]]]).astype(np.float32))
dist = Tensor(np.array([[[3.0, 3.0], [3.0, 3.0]], [[3.0, 3.0], [3.0, 3.0]]]).astype(np.float32))
expect = np.array(
[[[-0.8888889, -0.8888889], [-0.44444445, -0.44444445]],
[[-0.8888889, -0.8888889], [-0.44444445, -0.44444445]]]).astype(np.float32)
vmap_ceil = vmap(cal_cdist_grad, in_axes=(0), out_axes=0)
output = vmap_ceil(grad, x1, x2, dist)
np.testing.assert_allclose(output.asnumpy(), expect, rtol=1e-3)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_vmap2():
"""
Feature: cdist vmap.
Description: test the rightness of cdist vmap feature.
Expectation: Success.
"""
def cal_cdist_grad(grad, x1, x2, dist):
return G.CdistGrad(3.0)(grad, x1, x2, dist)
grad = Tensor(np.array([[[[1.0, 1.0], [2.0, 2.0]], [[1.0, 1.0], [2.0, 2.0]]],
[[[1.0, 1.0], [2.0, 2.0]], [[1.0, 1.0], [2.0, 2.0]]]]).astype(np.float32))
x1 = Tensor(np.array([[[[1.0, 1.0], [2.0, 2.0]], [[1.0, 1.0], [2.0, 2.0]]],
[[[1.0, 1.0], [2.0, 2.0]], [[1.0, 1.0], [2.0, 2.0]]]]).astype(np.float32))
x2 = Tensor(np.array([[[[3.0, 3.0], [3.0, 3.0]], [[3.0, 3.0], [3.0, 3.0]]],
[[[3.0, 3.0], [3.0, 3.0]], [[3.0, 3.0], [3.0, 3.0]]]]).astype(np.float32))
dist = Tensor(np.array([[[[3.0, 3.0], [3.0, 3.0]], [[3.0, 3.0], [3.0, 3.0]]],
[[[3.0, 3.0], [3.0, 3.0]], [[3.0, 3.0], [3.0, 3.0]]]]).astype(np.float32))
expect = np.array(
[[[[-0.8888889, -0.8888889], [-0.44444445, -0.44444445]],
[[-0.8888889, -0.8888889], [-0.44444445, -0.44444445]]],
[[[-0.8888889, -0.8888889], [-0.44444445, -0.44444445]],
[[-0.8888889, -0.8888889], [-0.44444445, -0.44444445]]]]).astype(np.float32)
vmap_ceil = vmap(vmap(cal_cdist_grad, in_axes=(
0), out_axes=0), in_axes=(0), out_axes=0)
output = vmap_ceil(grad, x1, x2, dist)
np.testing.assert_allclose(output.asnumpy(), expect, rtol=1e-3)
| null |
5,357 |
# Copyright (c) 2022 Tulir Asokan
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import annotations
from mautrix.api import Method, Path
from mautrix.errors import MatrixResponseError
from mautrix.types import (
JSON,
EventID,
PresenceEventContent,
PresenceState,
RoomID,
SerializerError,
UserID,
)
from ..base import BaseClientAPI
class MiscModuleMethods(BaseClientAPI):
"""
Miscellaneous subsections in the `Modules section`_ of the API spec.
Currently included subsections:
* 13.4 `Typing Notifications`_
* 13.5 `Receipts`_
* 13.6 `Fully Read Markers`_
* 13.7 `Presence`_
.. _Modules section: https://matrix.org/docs/spec/client_server/r0.4.0.html#modules
.. _Typing Notifications: https://matrix.org/docs/spec/client_server/r0.4.0.html#id95
.. _Receipts: https://matrix.org/docs/spec/client_server/r0.4.0.html#id99
.. _Fully Read Markers: https://matrix.org/docs/spec/client_server/r0.4.0.html#fully-read-markers
.. _Presence: https://matrix.org/docs/spec/client_server/r0.4.0.html#id107
"""
# region 13.4 Typing Notifications
async def set_typing(self, room_id: RoomID, timeout: int = 0) -> None:
"""
This tells the server that the user is typing for the next N milliseconds where N is the
value specified in the timeout key. If the timeout is equal to or less than zero, it tells
the server that the user has stopped typing.
See also: `API reference <https://matrix.org/docs/spec/client_server/r0.4.0.html#put-matrix-client-r0-rooms-roomid-typing-userid>`__
Args:
room_id: The ID of the room in which the user is typing.
timeout: The length of time in milliseconds to mark this user as typing.
"""
if timeout > 0:
content = {"typing": True, "timeout": timeout}
else:
content = {"typing": False}
await self.api.request(Method.PUT, Path.v3.rooms[room_id].typing[self.mxid], content)
# endregion
# region 13.5 Receipts
async def send_receipt(
self,
room_id: RoomID,
event_id: EventID,
receipt_type: str = "m.read",
) -> None:
"""
Update the marker for the given receipt type to the event ID specified.
See also: `API reference <https://matrix.org/docs/spec/client_server/r0.4.0.html#post-matrix-client-r0-rooms-roomid-receipt-receipttype-eventid>`__
Args:
room_id: The ID of the room which to send the receipt to.
event_id: The last event ID to acknowledge.
receipt_type: The type of receipt to send. Currently only ``m.read`` is supported.
"""
await self.api.request(Method.POST, Path.v3.rooms[room_id].receipt[receipt_type][event_id])
# endregion
# region 13.6 Fully read markers
async def METHOD_NAME(
self,
room_id: RoomID,
fully_read: EventID,
read_receipt: EventID | None = None,
extra_content: dict[str, JSON] | None = None,
) -> None:
"""
Set the position of the read marker for the given room, and optionally send a new read
receipt.
See also: `API reference <https://matrix.org/docs/spec/client_server/r0.4.0.html#post-matrix-client-r0-rooms-roomid-read-markers>`__
Args:
room_id: The ID of the room which to set the read marker in.
fully_read: The last event up to which the user has either read all events or is not
interested in reading the events.
read_receipt: The new position for the user's normal read receipt, i.e. the last event
the user has seen.
extra_content: Additional fields to include in the ``/read_markers`` request.
"""
content = {
"m.fully_read": fully_read,
}
if read_receipt:
content["m.read"] = read_receipt
if extra_content:
content.update(extra_content)
await self.api.request(Method.POST, Path.v3.rooms[room_id].read_markers, content)
# endregion
# region 13.7 Presence
async def set_presence(
self, presence: PresenceState = PresenceState.ONLINE, status: str | None = None
) -> None:
"""
Set the current user's presence state. When setting the status, the activity time is updated
to reflect that activity; the client does not need to specify
:attr:`Presence.last_active_ago`.
See also: `API reference <https://matrix.org/docs/spec/client_server/r0.4.0.html#post-matrix-client-r0-presence-list-userid>`__
Args:
presence: The new presence state to set.
status: The status message to attach to this state.
"""
content = {
"presence": presence.value,
}
if status:
content["status_msg"] = status
await self.api.request(Method.PUT, Path.v3.presence[self.mxid].status, content)
async def get_presence(self, user_id: UserID) -> PresenceEventContent:
"""
Get the presence info of a user.
See also: `API reference <https://matrix.org/docs/spec/client_server/r0.4.0.html#get-matrix-client-r0-presence-list-userid>`__
Args:
user_id: The ID of the user whose presence info to get.
Returns:
The presence info of the given user.
"""
content = await self.api.request(Method.GET, Path.v3.presence[user_id].status)
try:
return PresenceEventContent.deserialize(content)
except SerializerError:
raise MatrixResponseError("Invalid presence in response")
# endregion
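# Usage sketch (editorial illustration, not part of the module): these methods are
# normally reached through a full client that mixes in this class; the IDs are
# placeholders supplied by the caller.
async def _misc_module_usage_example(
    client: MiscModuleMethods, room_id: RoomID, event_id: EventID
) -> None:
    await client.set_typing(room_id, timeout=5000)  # typing for the next 5 seconds
    await client.set_typing(room_id)                # explicitly stop typing
    await client.send_receipt(room_id, event_id)    # acknowledge up to event_id
    await client.set_presence(PresenceState.ONLINE, "around")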
| null |
5,358 |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" test distribute predict """
import numpy as np
import pytest
import mindspore.nn as nn
from mindspore import Tensor, Model
from mindspore.ops import operations as P
from mindspore import context
from mindspore.parallel._utils import _infer_rank_list
class Net(nn.Cell):
"""Net definition"""
def __init__(self):
super(Net, self).__init__()
self.fc1 = nn.Dense(128, 768, activation='relu')
self.fc2 = nn.Dense(128, 768, activation='relu')
self.fc3 = nn.Dense(128, 768, activation='relu')
self.fc4 = nn.Dense(768, 768, activation='relu')
self.relu4 = nn.ReLU()
self.relu5 = nn.ReLU()
self.transpose = P.Transpose()
self.matmul1 = P.MatMul()
self.matmul2 = P.MatMul()
def construct(self, x):
q = self.fc1(x)
k = self.fc2(x)
v = self.fc3(x)
k = self.transpose(k, (1, 0))
c = self.relu4(self.matmul1(q, k))
s = self.relu5(self.matmul2(c, v))
s = self.fc4(s)
return s
def test_distribute_predict():
context.set_context(mode=context.GRAPH_MODE)
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, full_batch=True,
enable_parallel_optimizer=True)
inputs = Tensor(np.ones([32, 128]).astype(np.float32))
net = Net()
model = Model(net)
predict_map = model.infer_predict_layout(inputs)
output = model.predict(inputs)
context.reset_auto_parallel_context()
return predict_map, output
def test_edge_case():
context.set_context(mode=context.GRAPH_MODE)
inputs = Tensor(np.ones([32, 48]).astype(np.float32))
net = Net()
model = Model(net)
with pytest.raises(RuntimeError):
model.infer_predict_layout(inputs)
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
with pytest.raises(ValueError):
model.infer_predict_layout(inputs)
# standalone predict
def METHOD_NAME():
train_map = {'weight': [[4, 8], [-1, 0]]}
predict_map = None
rank_list = _infer_rank_list(train_map, predict_map)["weight"]
assert list(rank_list[0]) == [0, 1, 2, 3, 4, 5, 6, 7]
assert rank_list[1] is False
# similar layout: gpt3 prediction mode
def test_infer_rank_list2():
train_map = {'weight': [[4, 8], [-1, 0]]}
predict_map = {'weight': [[8], [-1, 0]]}
rank_list = _infer_rank_list(train_map, predict_map)
expect_map = {'weight': ([0], True)}
assert rank_list == expect_map
# same layout
def test_infer_rank_list3():
train_map = {'weight': [[4, 8], [-1, 0]]}
predict_map = {'weight': [[4, 8], [-1, 0]]}
rank_list = _infer_rank_list(train_map, predict_map)
expect_map = {'weight': ([0], True)}
assert rank_list == expect_map
# totally different layout
def test_infer_rank_list4():
train_map = {'weight': [[4, 8], [-1, 0]]}
predict_map = {'weight': [[2, 2], [1, 0]]}
rank_list = _infer_rank_list(train_map, predict_map)["weight"]
assert list(rank_list[0]) == [0, 1, 2, 3, 4, 5, 6, 7]
assert rank_list[1] is False
# full shape ckpt
def test_infer_rank_list5():
train_map = {'weight': [[8], [-1, -1]]}
predict_map = {'weight': [[2, 2], [1, 0]]}
rank_list = _infer_rank_list(train_map, predict_map)
expect_map = {'weight': ([0], False)}
assert rank_list == expect_map
| null |
5,359 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2019 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
from mantid.api import AnalysisDataService, WorkspaceGroup
from mantid.simpleapi import CompareWorkspaces, IndirectQuickRun, LoadNexus
import systemtesting
import unittest
def exists_in_ads(workspace_name):
return AnalysisDataService.doesExist(workspace_name)
def get_ads_workspace(workspace_name):
return AnalysisDataService.retrieve(workspace_name) if exists_in_ads(workspace_name) else None
class IndirectQuickRunTest(unittest.TestCase):
def setUp(self):
self._run_numbers = "92762-92766"
self._instrument = "OSIRIS"
self._analyser = "graphite"
self._reflection = "002"
self._spectra_range = "963,980"
self._elastic_range = "-0.02,0.02"
self._inelastic_range = "0.4,0.5"
self._total_range = "-0.5,0.5"
def tearDown(self):
AnalysisDataService.clear()
def test_that_IndirectQuickRun_produces_output_workspaces_with_the_correct_names(self):
self._execute_IndirectQuickRun()
self.assertTrue(exists_in_ads("osiris92762_to_osiris92766_scan_eisf"))
self.assertTrue(exists_in_ads("osiris92762_to_osiris92766_scan_q"))
def METHOD_NAME(self):
self._execute_IndirectQuickRun()
scan_group = get_ads_workspace("osiris92762_to_osiris92766_scan_q")
self.assertTrue(isinstance(scan_group, WorkspaceGroup))
self.assertEqual(scan_group.getNumberOfEntries(), 12)
def test_that_IndirectQuickRun_produces_the_correct_eisf_workspace(self):
self._execute_IndirectQuickRun()
self._assert_equal_to_reference_file("osiris92762_to_osiris92766_scan_eisf")
def test_that_IndirectQuickRun_produces_the_correct_workspaces_when_doing_an_MSDFit(self):
self._execute_IndirectQuickRun(msd_fit=True)
self.assertTrue(exists_in_ads("osiris92762_to_osiris92766_scan_msd"))
self.assertTrue(exists_in_ads("osiris92762_to_osiris92766_scan_msd_Parameters"))
self.assertTrue(exists_in_ads("osiris92762_to_osiris92766_scan_msd_fit"))
msd_fit_group = get_ads_workspace("osiris92762_to_osiris92766_scan_msd_fit")
self.assertTrue(isinstance(msd_fit_group, WorkspaceGroup))
self.assertEqual(msd_fit_group.getNumberOfEntries(), 5)
self._assert_equal_to_reference_file("osiris92762_to_osiris92766_scan_msd")
def test_that_IndirectQuickRun_produces_the_correct_workspaces_when_doing_a_WidthFit(self):
self._execute_IndirectQuickRun(width_fit=True)
self.assertTrue(exists_in_ads("osiris92762_to_osiris92766_scan_red_Width1"))
self.assertTrue(exists_in_ads("osiris92762_to_osiris92766_scan_red_Diffusion"))
self.assertTrue(exists_in_ads("osiris92762_to_osiris92766_scan_red_Width_Fit"))
self._assert_equal_to_reference_file("osiris92762_to_osiris92766_scan_red_Width1", "osiris92762_to_osiris92766_scan_red_Width")
self._assert_equal_to_reference_file("osiris92762_to_osiris92766_scan_red_Diffusion")
width_fit_group = get_ads_workspace("osiris92762_to_osiris92766_scan_red_Width_Fit")
self.assertTrue(isinstance(width_fit_group, WorkspaceGroup))
self.assertEqual(width_fit_group.getNumberOfEntries(), 12)
def _execute_IndirectQuickRun(self, msd_fit=False, width_fit=False):
IndirectQuickRun(
InputFiles=self._run_numbers,
Instrument=self._instrument,
Analyser=self._analyser,
Reflection=self._reflection,
SpectraRange=self._spectra_range,
ElasticRange=self._elastic_range,
InelasticRange=self._inelastic_range,
TotalRange=self._total_range,
MSDFit=msd_fit,
WidthFit=width_fit,
)
def _assert_equal_to_reference_file(self, output_name, reference_name=None):
reference_name = output_name if reference_name is None else reference_name
expected_workspace = LoadNexus(Filename="IndirectQuickRun_" + reference_name + ".nxs")
self.assertTrue(
CompareWorkspaces(
Workspace1=get_ads_workspace(output_name), Workspace2=expected_workspace, Tolerance=5.0, ToleranceRelErr=True
)[0]
)
class IndirectQuickRunTestRunner(systemtesting.MantidSystemTest):
def __init__(self):
systemtesting.MantidSystemTest.__init__(self)
self._success = False
def runTest(self):
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(IndirectQuickRunTest, "test"))
runner = unittest.TextTestRunner()
res = runner.run(suite)
if res.wasSuccessful():
self._success = True
def requiredMemoryMB(self):
return 2000
def validate(self):
return self._success
| null |
5,360 |
from .api.client import APIClient
from .constants import (DEFAULT_TIMEOUT_SECONDS, DEFAULT_MAX_POOL_SIZE)
from .models.METHOD_NAME import ConfigCollection
from .models.containers import ContainerCollection
from .models.images import ImageCollection
from .models.networks import NetworkCollection
from .models.nodes import NodeCollection
from .models.plugins import PluginCollection
from .models.secrets import SecretCollection
from .models.services import ServiceCollection
from .models.swarm import Swarm
from .models.volumes import VolumeCollection
from .utils import kwargs_from_env
class DockerClient:
"""
A client for communicating with a Docker server.
Example:
>>> import docker
>>> client = docker.DockerClient(base_url='unix://var/run/docker.sock')
Args:
base_url (str): URL to the Docker server. For example,
``unix:///var/run/docker.sock`` or ``tcp://127.0.0.1:1234``.
version (str): The version of the API to use. Set to ``auto`` to
automatically detect the server's version. Default: ``1.35``
timeout (int): Default timeout for API calls, in seconds.
tls (bool or :py:class:`~docker.tls.TLSConfig`): Enable TLS. Pass
``True`` to enable it with default options, or pass a
:py:class:`~docker.tls.TLSConfig` object to use custom
configuration.
user_agent (str): Set a custom user agent for requests to the server.
credstore_env (dict): Override environment variables when calling the
credential store process.
use_ssh_client (bool): If set to `True`, an ssh connection is made
via shelling out to the ssh client. Ensure the ssh client is
installed and configured on the host.
max_pool_size (int): The maximum number of connections
to save in the pool.
"""
def __init__(self, *args, **kwargs):
self.api = APIClient(*args, **kwargs)
@classmethod
def from_env(cls, **kwargs):
"""
Return a client configured from environment variables.
The environment variables used are the same as those used by the
Docker command-line client. They are:
.. envvar:: DOCKER_HOST
The URL to the Docker host.
.. envvar:: DOCKER_TLS_VERIFY
Verify the host against a CA certificate.
.. envvar:: DOCKER_CERT_PATH
A path to a directory containing TLS certificates to use when
connecting to the Docker host.
Args:
version (str): The version of the API to use. Set to ``auto`` to
automatically detect the server's version. Default: ``auto``
timeout (int): Default timeout for API calls, in seconds.
max_pool_size (int): The maximum number of connections
to save in the pool.
ssl_version (int): A valid `SSL version`_.
assert_hostname (bool): Verify the hostname of the server.
environment (dict): The environment to read environment variables
from. Default: the value of ``os.environ``
credstore_env (dict): Override environment variables when calling
the credential store process.
use_ssh_client (bool): If set to `True`, an ssh connection is
made via shelling out to the ssh client. Ensure the ssh
client is installed and configured on the host.
Example:
>>> import docker
>>> client = docker.from_env()
.. _`SSL version`:
https://docs.python.org/3.5/library/ssl.html#ssl.PROTOCOL_TLSv1
"""
timeout = kwargs.pop('timeout', DEFAULT_TIMEOUT_SECONDS)
max_pool_size = kwargs.pop('max_pool_size', DEFAULT_MAX_POOL_SIZE)
version = kwargs.pop('version', None)
use_ssh_client = kwargs.pop('use_ssh_client', False)
return cls(
timeout=timeout,
max_pool_size=max_pool_size,
version=version,
use_ssh_client=use_ssh_client,
**kwargs_from_env(**kwargs)
)
# Resources
@property
def METHOD_NAME(self):
"""
An object for managing configs on the server. See the
:doc:`configs documentation <configs>` for full details.
"""
return ConfigCollection(client=self)
@property
def containers(self):
"""
An object for managing containers on the server. See the
:doc:`containers documentation <containers>` for full details.
"""
return ContainerCollection(client=self)
@property
def images(self):
"""
An object for managing images on the server. See the
:doc:`images documentation <images>` for full details.
"""
return ImageCollection(client=self)
@property
def networks(self):
"""
An object for managing networks on the server. See the
:doc:`networks documentation <networks>` for full details.
"""
return NetworkCollection(client=self)
@property
def nodes(self):
"""
An object for managing nodes on the server. See the
:doc:`nodes documentation <nodes>` for full details.
"""
return NodeCollection(client=self)
@property
def plugins(self):
"""
An object for managing plugins on the server. See the
:doc:`plugins documentation <plugins>` for full details.
"""
return PluginCollection(client=self)
@property
def secrets(self):
"""
An object for managing secrets on the server. See the
:doc:`secrets documentation <secrets>` for full details.
"""
return SecretCollection(client=self)
@property
def services(self):
"""
An object for managing services on the server. See the
:doc:`services documentation <services>` for full details.
"""
return ServiceCollection(client=self)
@property
def swarm(self):
"""
An object for managing a swarm on the server. See the
:doc:`swarm documentation <swarm>` for full details.
"""
return Swarm(client=self)
@property
def volumes(self):
"""
An object for managing volumes on the server. See the
:doc:`volumes documentation <volumes>` for full details.
"""
return VolumeCollection(client=self)
# Top-level methods
def events(self, *args, **kwargs):
return self.api.events(*args, **kwargs)
events.__doc__ = APIClient.events.__doc__
def df(self):
return self.api.df()
df.__doc__ = APIClient.df.__doc__
def info(self, *args, **kwargs):
return self.api.info(*args, **kwargs)
info.__doc__ = APIClient.info.__doc__
def login(self, *args, **kwargs):
return self.api.login(*args, **kwargs)
login.__doc__ = APIClient.login.__doc__
def ping(self, *args, **kwargs):
return self.api.ping(*args, **kwargs)
ping.__doc__ = APIClient.ping.__doc__
def version(self, *args, **kwargs):
return self.api.version(*args, **kwargs)
version.__doc__ = APIClient.version.__doc__
def close(self):
return self.api.close()
close.__doc__ = APIClient.close.__doc__
def __getattr__(self, name):
s = [f"'DockerClient' object has no attribute '{name}'"]
        # If a user calls a method on APIClient, point them to the low-level API.
if hasattr(APIClient, name):
s.append("In Docker SDK for Python 2.0, this method is now on the "
"object APIClient. See the low-level API section of the "
"documentation for more details.")
raise AttributeError(' '.join(s))
from_env = DockerClient.from_env
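# Illustrative usage (not part of the original module): a minimal sketch of the
# conventional entry point exposed above. It assumes a reachable Docker daemon
# configured through the standard DOCKER_* environment variables.
if __name__ == "__main__":
    client = from_env()
    try:
        # Query the daemon once to show the client is wired up correctly.
        print(client.version())
    finally:
        client.close()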
| null |
5,361 |
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2023, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
"""Implement the Base class for Driver and Connection"""
from abc import ABCMeta, abstractmethod
from .registry import DriverRegistry
class BaseDriver(metaclass=DriverRegistry):
"""
class BaseDriver():
This is a base class for different server types.
Inherit this class to implement different type of database driver
implementation.
(For PostgreSQL/EDB Postgres Advanced Server, we will be using psycopg)
Abstract Properties:
-------- ----------
* Version (string):
Current version string for the database server
* libpq_version (string):
Current version string for the used libpq library
Abstract Methods:
-------- -------
* get_connection(*args, **kwargs)
- It should return a Connection class object, which may/may not be
connected to the database server.
* release_connection(*args, **kwargs)
- Implement the connection release logic
* gc()
- Implement this function to release the connections assigned in the
session, which has not been pinged from more than the idle timeout
configuration.
"""
@property
@abstractmethod
def version(cls):
pass
@property
@abstractmethod
def libpq_version(cls):
pass
@abstractmethod
def get_connection(self, *args, **kwargs):
pass
@abstractmethod
def release_connection(self, *args, **kwargs):
pass
@abstractmethod
def gc_timeout(self):
pass
class BaseConnection(metaclass=ABCMeta):
"""
class BaseConnection()
    It is a base class for a database connection. A different connection
    driver must implement this to expose the abstract methods for that server.
    The general idea is to create a wrapper around the actual driver
    implementation. Connections will be instantiated by the driver factory
    and should not be instantiated directly.
Abstract Methods:
-------- -------
* connect(**kwargs)
- Define this method to connect the server using that particular driver
implementation.
* execute_scalar(query, params, formatted_exception_msg)
- Implement this method to execute the given query and returns single
datum result.
* execute_async(query, params, formatted_exception_msg)
- Implement this method to execute the given query asynchronously and
returns result.
* execute_void(query, params, formatted_exception_msg)
- Implement this method to execute the given query with no result.
* execute_2darray(query, params, formatted_exception_msg)
- Implement this method to execute the given query and returns the result
as a 2 dimensional array.
* execute_dict(query, params, formatted_exception_msg)
- Implement this method to execute the given query and returns the result
as an array of dict (column name -> value) format.
* def async_fetchmany_2darray(records=-1, formatted_exception_msg=False):
- Implement this method to retrieve result of asynchronous connection and
polling with no_result flag set to True.
This returns the result as a 2 dimensional array.
If records is -1 then fetchmany will behave as fetchall.
* connected()
- Implement this method to get the status of the connection. It should
return True for connected, otherwise False
* reset()
- Implement this method to reconnect the database server (if possible)
* transaction_status()
- Implement this method to get the transaction status for this
connection. Range of return values different for each driver type.
* ping()
      - Implement this method to ping the server. At times a connection may
        have been lost without the connection driver knowing about it; pinging
        can help figure out the actual reason for a query failure.
* _release()
- Implement this method to release the connection object. This should not
be directly called using the connection object itself.
NOTE: Please use BaseDriver.release_connection(...) for releasing the
connection object for better memory management, and connection pool
management.
* _wait(conn)
- Implement this method to wait for asynchronous connection to finish the
execution, hence - it must be a blocking call.
* _wait_timeout(conn, time)
- Implement this method to wait for asynchronous connection with timeout.
This must be a non blocking call.
* poll(formatted_exception_msg, no_result)
- Implement this method to poll the data of query running on asynchronous
connection.
* cancel_transaction(conn_id, did=None)
- Implement this method to cancel the running transaction.
* messages()
- Implement this method to return the list of the messages/notices from
the database server.
* rows_affected()
- Implement this method to get the rows affected by the last command
executed on the server.
"""
ASYNC_OK = 1
ASYNC_READ_TIMEOUT = 2
ASYNC_WRITE_TIMEOUT = 3
ASYNC_NOT_CONNECTED = 4
ASYNC_EXECUTION_ABORTED = 5
ASYNC_TIMEOUT = 0.2
ASYNC_WAIT_TIMEOUT = 2
ASYNC_NOTICE_MAXLENGTH = 100000
@abstractmethod
def connect(self, **kwargs):
pass
@abstractmethod
def execute_scalar(self, query, params=None,
formatted_exception_msg=False):
pass
@abstractmethod
def execute_async(self, query, params=None,
formatted_exception_msg=True):
pass
@abstractmethod
def execute_void(self, query, params=None,
formatted_exception_msg=False):
pass
@abstractmethod
def execute_2darray(self, query, params=None,
formatted_exception_msg=False):
pass
@abstractmethod
def execute_dict(self, query, params=None,
formatted_exception_msg=False):
pass
@abstractmethod
def async_fetchmany_2darray(self, records=-1,
formatted_exception_msg=False):
pass
@abstractmethod
def connected(self):
pass
@abstractmethod
def reset(self):
pass
@abstractmethod
def METHOD_NAME(self):
pass
@abstractmethod
def ping(self):
pass
@abstractmethod
def _release(self):
pass
@abstractmethod
def _wait(self, conn):
pass
@abstractmethod
def _wait_timeout(self, conn, time):
pass
@abstractmethod
def poll(self, formatted_exception_msg=True, no_result=False):
pass
@abstractmethod
def status_message(self):
pass
@abstractmethod
def rows_affected(self):
pass
@abstractmethod
def cancel_transaction(self, conn_id, did=None):
pass
| null |
5,362 |
# This file is part of cloud-init. See LICENSE file for license information.
import os
import shutil
import tempfile
from copy import copy
from unittest import mock
import yaml
from cloudinit import url_helper
from cloudinit.sources import DataSourceMAAS
from tests.unittests.helpers import CiTestCase, populate_dir
class TestMAASDataSource(CiTestCase):
def setUp(self):
super(TestMAASDataSource, self).setUp()
        # Make a temp directory for tests to use.
self.tmp = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.tmp)
def test_seed_dir_valid(self):
"""Verify a valid seeddir is read as such."""
userdata = b"valid01-userdata"
data = {
"meta-data/instance-id": "i-valid01",
"meta-data/local-hostname": "valid01-hostname",
"user-data": userdata,
"public-keys": "ssh-rsa AAAAB3Nz...aC1yc2E= keyname",
}
my_d = os.path.join(self.tmp, "valid")
populate_dir(my_d, data)
ud, md, vd = DataSourceMAAS.read_maas_seed_dir(my_d)
self.assertEqual(userdata, ud)
for key in ("instance-id", "local-hostname"):
self.assertEqual(data["meta-data/" + key], md[key])
# verify that 'userdata' is not returned as part of the metadata
self.assertFalse(("user-data" in md))
self.assertIsNone(vd)
def test_seed_dir_valid_extra(self):
"""Verify extra files do not affect seed_dir validity."""
userdata = b"valid-extra-userdata"
data = {
"meta-data/instance-id": "i-valid-extra",
"meta-data/local-hostname": "valid-extra-hostname",
"user-data": userdata,
"foo": "bar",
}
my_d = os.path.join(self.tmp, "valid_extra")
populate_dir(my_d, data)
ud, md, _vd = DataSourceMAAS.read_maas_seed_dir(my_d)
self.assertEqual(userdata, ud)
for key in ("instance-id", "local-hostname"):
self.assertEqual(data["meta-data/" + key], md[key])
# additional files should not just appear as keys in metadata atm
self.assertFalse(("foo" in md))
def test_seed_dir_invalid(self):
"""Verify that invalid seed_dir raises MAASSeedDirMalformed."""
valid = {
"instance-id": "i-instanceid",
"local-hostname": "test-hostname",
"user-data": "",
}
my_based = os.path.join(self.tmp, "valid_extra")
# missing 'userdata' file
my_d = "%s-01" % my_based
invalid_data = copy(valid)
del invalid_data["local-hostname"]
populate_dir(my_d, invalid_data)
self.assertRaises(
DataSourceMAAS.MAASSeedDirMalformed,
DataSourceMAAS.read_maas_seed_dir,
my_d,
)
# missing 'instance-id'
my_d = "%s-02" % my_based
invalid_data = copy(valid)
del invalid_data["instance-id"]
populate_dir(my_d, invalid_data)
self.assertRaises(
DataSourceMAAS.MAASSeedDirMalformed,
DataSourceMAAS.read_maas_seed_dir,
my_d,
)
def test_seed_dir_none(self):
"""Verify that empty seed_dir raises MAASSeedDirNone."""
my_d = os.path.join(self.tmp, "valid_empty")
self.assertRaises(
DataSourceMAAS.MAASSeedDirNone,
DataSourceMAAS.read_maas_seed_dir,
my_d,
)
def METHOD_NAME(self):
"""Verify that missing seed_dir raises MAASSeedDirNone."""
self.assertRaises(
DataSourceMAAS.MAASSeedDirNone,
DataSourceMAAS.read_maas_seed_dir,
os.path.join(self.tmp, "nonexistantdirectory"),
)
def mock_read_maas_seed_url(self, data, seed, version="19991231"):
"""mock up readurl to appear as a web server at seed has provided data.
return what read_maas_seed_url returns."""
def my_readurl(*args, **kwargs):
if len(args):
url = args[0]
else:
url = kwargs["url"]
prefix = "%s/%s/" % (seed, version)
if not url.startswith(prefix):
raise ValueError("unexpected call %s" % url)
short = url[len(prefix) :]
if short not in data:
raise url_helper.UrlError("not found", code=404, url=url)
return url_helper.StringResponse(data[short])
# Now do the actual call of the code under test.
with mock.patch("cloudinit.url_helper.readurl") as mock_readurl:
mock_readurl.side_effect = my_readurl
return DataSourceMAAS.read_maas_seed_url(seed, version=version)
def test_seed_url_valid(self):
"""Verify that valid seed_url is read as such."""
valid = {
"meta-data/instance-id": "i-instanceid",
"meta-data/local-hostname": "test-hostname",
"meta-data/public-keys": "test-hostname",
"meta-data/vendor-data": b"my-vendordata",
"user-data": b"foodata",
}
my_seed = "http://example.com/xmeta"
my_ver = "1999-99-99"
ud, md, vd = self.mock_read_maas_seed_url(valid, my_seed, my_ver)
self.assertEqual(valid["meta-data/instance-id"], md["instance-id"])
self.assertEqual(
valid["meta-data/local-hostname"], md["local-hostname"]
)
self.assertEqual(valid["meta-data/public-keys"], md["public-keys"])
self.assertEqual(valid["user-data"], ud)
# vendor-data is yaml, which decodes a string
self.assertEqual(valid["meta-data/vendor-data"].decode(), vd)
def test_seed_url_vendor_data_dict(self):
expected_vd = {"key1": "value1"}
valid = {
"meta-data/instance-id": "i-instanceid",
"meta-data/local-hostname": "test-hostname",
"meta-data/vendor-data": yaml.safe_dump(expected_vd).encode(),
}
_ud, md, vd = self.mock_read_maas_seed_url(
valid, "http://example.com/foo"
)
self.assertEqual(valid["meta-data/instance-id"], md["instance-id"])
self.assertEqual(expected_vd, vd)
@mock.patch("cloudinit.sources.DataSourceMAAS.url_helper.OauthUrlHelper")
class TestGetOauthHelper(CiTestCase):
base_cfg = {
"consumer_key": "FAKE_CONSUMER_KEY",
"token_key": "FAKE_TOKEN_KEY",
"token_secret": "FAKE_TOKEN_SECRET",
"consumer_secret": None,
}
def test_all_required(self, m_helper):
"""Valid config as expected."""
DataSourceMAAS.get_oauth_helper(self.base_cfg.copy())
m_helper.assert_has_calls([mock.call(**self.base_cfg)])
def test_other_fields_not_passed_through(self, m_helper):
"""Only relevant fields are passed through."""
mycfg = self.base_cfg.copy()
mycfg["unrelated_field"] = "unrelated"
DataSourceMAAS.get_oauth_helper(mycfg)
m_helper.assert_has_calls([mock.call(**self.base_cfg)])
class TestGetIdHash(CiTestCase):
v1_cfg = {
"consumer_key": "CKEY",
"token_key": "TKEY",
"token_secret": "TSEC",
}
v1_id = (
"v1:403ee5f19c956507f1d0e50814119c405902137ea4f8838bde167c5da8110392"
)
def test_v1_expected(self):
"""Test v1 id generated as expected working behavior from config."""
result = DataSourceMAAS.get_id_from_ds_cfg(self.v1_cfg.copy())
self.assertEqual(self.v1_id, result)
def test_v1_extra_fields_are_ignored(self):
"""Test v1 id ignores unused entries in config."""
cfg = self.v1_cfg.copy()
cfg["consumer_secret"] = "BOO"
cfg["unrelated"] = "HI MOM"
result = DataSourceMAAS.get_id_from_ds_cfg(cfg)
self.assertEqual(self.v1_id, result)
# vi: ts=4 expandtab
| null |
5,363 |
# Copyright 2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""test join failed in control flow"""
import pytest
import numpy as np
import mindspore as ms
from mindspore import jit, nn, Tensor, context
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_if_branch_have_two_return():
"""
Feature: Test join failed in if with two return.
Description: Abstract type AbstractTensor cannot join with AbstractScalar.
Expectation: No exception.
"""
# pylint: disable=no-else-return
@jit
def foo(x, y):
if x < y:
return Tensor([1, 2, 3])
else:
return 0
x = Tensor(2, ms.float32)
y = Tensor(6, ms.float32)
with pytest.raises(TypeError) as ex:
foo(x, y)
assert "Cannot join the return values of different branches" in str(ex.value)
assert "return Tensor([1, 2, 3])" in str(ex.value)
assert "return 0" in str(ex.value)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def METHOD_NAME():
"""
Feature: Test join failed in if with one return.
Description: Abstract type AbstractTensor cannot join with AbstractScalar.
Expectation: No exception.
"""
@jit
def foo(x, y):
if x < y:
a = Tensor([1, 2, 3])
else:
print(x)
return 0
return a
x = Tensor(2, ms.float32)
y = Tensor(6, ms.float32)
with pytest.raises(TypeError) as ex:
foo(x, y)
assert "Cannot join the return values of different branches" in str(ex.value)
assert "return 0" in str(ex.value)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_if_branch_has_no_return():
"""
Feature: Test join failed in if with one return.
Description: Abstract type AbstractTensor cannot join with AbstractScalar.
Expectation: No exception.
"""
@jit
def foo(x, y):
if x < y:
a = Tensor([1, 2, 3])
else:
a = 0
return a
x = Tensor(2, ms.float32)
y = Tensor(6, ms.float32)
with pytest.raises(TypeError) as ex:
foo(x, y)
assert "Cannot join the return values of different branches" in str(ex.value)
assert "a = Tensor([1, 2, 3])" in str(ex.value)
assert "a = 0" in str(ex.value)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_while_body_has_return():
"""
Feature: Test join failed in while.
Description: Abstract type AbstractTensor cannot join with AbstractScalar.
Expectation: No exception.
"""
@jit
def foo(x):
while x < 10:
return Tensor([1, 2, 3])
return 0
x = Tensor([1], ms.float32)
with pytest.raises(TypeError) as ex:
foo(x)
assert "Cannot join the return values of different branches" in str(ex.value)
assert "return Tensor([1, 2, 3])" in str(ex.value)
assert "return 0" in str(ex.value)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_switch_layer_join_failed():
"""
Feature: Test join failed in switch layer.
Description: Abstract type AbstractTuple cannot join with AbstractTensor.
Expectation: No exception.
"""
class JoinFailedCell1(nn.Cell):
def construct(self, x):
return x, Tensor(10)
class JoinFailedCell2(nn.Cell):
def construct(self, x):
return x**2
class SwitchLayerNet(nn.Cell):
def __init__(self):
super(SwitchLayerNet, self).__init__()
self.relu = nn.ReLU()
self.softmax = nn.Softmax()
self.join_failed1 = JoinFailedCell1()
self.join_failed2 = JoinFailedCell2()
self.layers = (self.relu, self.softmax, self.join_failed1, self.join_failed2)
def construct(self, x, index):
x = self.layers[index](x)
return x
context.set_context(mode=context.GRAPH_MODE)
net = SwitchLayerNet()
data = Tensor(np.ones((1, 1, 224, 224)), ms.float32)
idx = Tensor(1, ms.int32)
with pytest.raises(TypeError) as ex:
net(data, idx)
assert "Cannot join the return values of different branches" in str(ex.value)
assert "return self.relu(x)" in str(ex.value)
assert "return self.softmax(x)" in str(ex.value)
assert "return x, Tensor(10)" in str(ex.value)
assert "return x**2" in str(ex.value)
| null |
5,364 |
#!/usr/bin/env python3
from pathlib import Path
from ctypes import *
import json
import collections
import numpy as np
import faulthandler
import struct
faulthandler.enable()
basedir = Path(__file__).resolve().parent
libane = None
aneregs = None
def init_libane():
global libane, aneregs
libane = cdll.LoadLibrary((basedir / "libane.dylib").as_posix())
libane.ANE_Compile.argtypes = [c_char_p, c_int]
libane.ANE_Compile.restype = c_void_p
libane.ANE_TensorCreate.restype = c_void_p
libane.ANE_TensorData.argtypes = [c_void_p]
libane.ANE_TensorData.restype = POINTER(c_uint16)
libane.ANE_Run.argtypes = [c_void_p]*4
libane.ANE_Run.restype = c_int
#libane.ANE_RegDebug.restype = c_char_p
with open(basedir / "aneregs.json") as f:
aneregs = json.load(f)
ANE_Struct = [
# aneTD.Header
("u32", 0x1C, "NextCommandOffset"),
# KernelDMASrc @ section @ 0x2C len 0xF4
# reloc 0x2c-0x34?? = weights
# u32[16] 0x34-0x74 = 0x80 | 1 if used
# u32[16] 0x74-0xB4 = <channel data offset>
# u32[16] 0xB4-0xF4 = <channel data length>
# Common @ section @ 0x128 len 0x3C (conv)
("u16", 0x128, "InputWidth"),
("u16", 0x12A, "InputHeight"),
("u16", 0x12C, "InputDepth"),
("u32", 0x130, "InputOutputType"), # (OutputType * 0x10) | InputType
# UInt8 = 0, Int8 = 1, Float16 = 2
("u32", 0x134, "InputChannels"),
("u32", 0x138, "OutputChannels"),
("u16", 0x13C, "OutputWidth"),
("u16", 0x13E, "OutputHeight"),
("u16", 0x140, "OutputDepth"),
("u16", 0x144, "KernelSize"), # 0xa000 | (KernelHeight * 0x20) | KernelWidth
("u16", 0x146, "Padding"), # 0x5000 | (PadTop * 0x40) | (PadLeft * 2)
("u16", 0x14C, "BatchSize"),
# TileDMASrc @ section @ 0x16C len 0x6C (input)
# reloc 0x16c-0x174 = image
("u32", 0x178, "InputRowStride"),
("u32", 0x17C, "InputPlaneStride"),
("u32", 0x180, "InputDepthStride"),
("u32", 0x184, "InputBatchStride"),
("u8", 0x1A7, "InputInterleave"),
# L2 @ section @ 0x1E0 len 0x44
# [0x1ec, 0x1f0, 0x1f4, 0x1f8, 0x214] = number of engines
# [0x1f0, 0x1f4, 0x1f8, 0x214] = engines for inconv?
# [0x21c, 0x220, 0x224] = engines for outconv?
# NE @ section @ 0x22c len 0xC (scaling)
("u16", 0x230, "BiasScalar"),
("u16", 0x232, "ScaleScalar"),
# section @ 0x240 len 0x10
("u16", 0x246, "NeuronType"), # 0x10 = copy, 0x11 = ReLU, 0x12 = custom
("u32", 0x250, "PostScale"),
# TileDMADst @ section @ 0x258 len 0x18
# HandleTileDmaDstConfig
# 0x258 -- *(uint *)(this + 0x334) = *(uint *)(this + 0x334) & 0xfffffc3f | 0xc0;
# (GetCacheHintRegisterValue & 0xf) << 6;
("u32", 0x25C, "OutputOffset"), # offset into output buffer to write at?
# 0x260 -- *(uint *)(this + 0x33c) = *(uint *)(this + 0x33c) & 0x3f | (int)uVar10 << 6;
("u32", 0x260, "OutputRowStride"),
("u32", 0x264, "OutputPlaneStride"),
("u32", 0x268, "OutputDepthStride"),
("u32", 0x26C, "OutputBatchStride"),
# 0x270 -- *(uint *)(this + 0x34c) = *(uint *)(this + 0x34c) & 0xf0ffffff | 0x1000000;
# uVar6 = *(uint *)(this + 0x34c) & 0xffffcfcc | 0x2031;
# (ZinTensorDescriptorDmaInterleave & 0xf) << 0x18;
("u8", 0x273, "OutputInterleave"), # i also have this at 0x211?
]
ANE_Struct_Dict = {}
for typ, num, nam in ANE_Struct:
styp = {"u32": "I", "u16": "H", "u8": "B"}[typ]
ANE_Struct_Dict[nam] = (styp, num)
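# Illustrative helper (not part of the original file): a minimal sketch showing how
# the (struct-format, offset) pairs collected in ANE_Struct_Dict can be used to read
# a named field back out of a descriptor blob. The 0x4000 base mirrors the default
# used by ANE.fill/ANE.filln below; the helper name itself is hypothetical.
def read_ane_field(dat, name, base=0x4000):
  styp, off = ANE_Struct_Dict[name]
  # Native byte order is assumed here, matching the packing done by ANE.fill.
  return struct.unpack_from(styp, dat, base + off)[0]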
class ANETensor:
def __init__(self, *shape):
self.shape = shape
self.dtype = np.float16
self.sz = int(np.prod(shape))
assert(self.sz <= 0x4000)
self.tt = libane.ANE_TensorCreate(self.sz, 1)
assert(self.tt is not None)
def data(self):
data = libane.ANE_TensorData(self.tt)
assert(data is not None)
#print(hex(addressof(data.contents)))
buf = np.ctypeslib.as_array(data, shape=(self.sz,))
ret = np.frombuffer(buf, dtype=self.dtype)
#print(ret.data)
return ret
class ANE:
def __init__(self):
init_libane()
libane.ANE_Open()
def compile(self, dat):
ret = libane.ANE_Compile(create_string_buffer(dat), len(dat))
assert(ret is not None)
return ret
def METHOD_NAME(self, prog, tin, tout, tweights=None):
libane.ANE_Run(prog, tin.tt, tout.tt, tweights.tt if tweights is not None else 0)
def tensor(self, shape):
return ANETensor(shape)
def unpack(self, dat):
dat = struct.unpack("Q"*(len(dat)//8), dat)
ret = {}
for k,v in aneregs:
by,bi,sz = v
bi += (by%8)*8
by //= 8
rv = (dat[by] >> bi) & ((1 << sz)-1)
ret[k] = rv
return ret
def pack(self, pk, dat):
dat = list(struct.unpack("Q"*(len(dat)//8), dat))
for k,v in aneregs:
by,bi,sz = v
bi += (by%8)*8
by //= 8
dat[by] &= ~(((1 << sz)-1) << bi)
dat[by] |= pk[k] << bi
dat = struct.pack("Q"*len(dat), *dat)
return dat
def debug(self, dat, mems=0):
add = [0x30, 0x1d4, 0x220, 0x29c, 0x2f0, 0x30c, 0x32c]
lens = [244, 60, 108, 68, 12, 16, 24]
ptr = 0x2b
ddat = dat[0:0x28]
for a, pm in zip(add, lens):
#assert pm == dat[ptr]
ddat += b"\x00" * (a-len(ddat))
ddat += dat[ptr+1:ptr+1+pm+4]
ptr += pm+8
ddat += b"\x00" * 0x100
ret = collections.OrderedDict()
for ln in libane.ANE_RegDebug(0, create_string_buffer(ddat), mems).decode('utf-8').strip().split("\n"):
lnn = ln.split(" = ")
if len(lnn) == 2:
ret[lnn[0]] = int(lnn[1])
return ret
def filln(self, dat, nvdict, base=0x4000):
for n,v in nvdict.items():
styp, num = ANE_Struct_Dict[n]
dat = self.fill(dat, [num], styp, v)
return dat
def fill(self, dat, addrs, type, val, base=0x4000):
x = struct.pack(type, val)
for a in addrs:
dat[base+a:base+a+len(x)] = x
return dat
if __name__ == "__main__":
ane = ANE()
tin = ANETensor(16)
tout = ANETensor(16)
tind = tin.data()
toutd = tout.data()
tind[0:4] = [-1,1,-2,2]
print("** before **")
print(tind)
print(toutd)
dat = open("../ops/relu.hwx", "rb").read()
md = dat[0x4000:0x4300]
dd = ane.unpack(md)
mdf = ane.pack(dd, md)
assert(md == mdf)
comp = ane.compile(dat)
ret = ane.METHOD_NAME(comp, tin, tout)
print("** after **")
print(tind)
print(toutd)
| null |
5,365 |
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2023, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
import re
import flask
from flask import current_app, request, session, testing
from werkzeug.datastructures import Headers
from werkzeug.test import EnvironBuilder
from flask_wtf.csrf import generate_csrf
import config
class RequestShim():
"""
A fake request that proxies cookie-related methods to a Flask test client.
"""
def __init__(self, client):
self.client = client
def set_cookie(self, key, value='', *args, **kwargs):
"Set the cookie on the Flask test client."
server_name = current_app.config["SERVER_NAME"] or "localhost"
return self.client.set_cookie(
server_name, key=key, value=value, *args, **kwargs
)
def delete_cookie(self, key, *args, **kwargs):
"Delete the cookie on the Flask test client."
server_name = current_app.config["SERVER_NAME"] or "localhost"
return self.client.delete_cookie(
server_name, key=key, *args, **kwargs
)
class TestClient(testing.FlaskClient):
def __init__(self, *args, **kwargs):
self.csrf_token = None
self.app = None
super().__init__(*args, **kwargs)
def setApp(self, _app):
self.app = _app
def open(self, *args, **kwargs):
if len(args) > 0 and isinstance(args[0], (EnvironBuilder, dict)):
return super().open(*args, **kwargs)
data = kwargs.get('data', {})
if self.csrf_token is not None and not (
'email' in data and
'password' in data and
'csrf_token' in data
):
api_key_headers = Headers({})
api_key_headers[
getattr(config, 'WTF_CSRF_HEADERS', ['X-CSRFToken'])[0]
] = self.csrf_token
headers = kwargs.pop('headers', Headers())
headers.extend(api_key_headers)
kwargs['headers'] = headers
return super().open(*args, **kwargs)
def METHOD_NAME(self, res):
m = re.search(
b'<input id="csrf_token" name="csrf_token" type="hidden"'
b' value="([^"]*)">', res.data
)
if m is None:
            # When logging in through Kerberos, we won't find the CSRF token
return None
return m.group(1).decode("utf-8")
def generate_csrf_token(self, *args, **kwargs):
# First, we'll wrap our request shim around the test client, so
# that it will work correctly when Flask asks it to set a cookie.
request = RequestShim(self)
# Next, we need to look up any cookies that might already exist on
# this test client, such as the secure cookie that
# powers `flask.session`,
# and make a test request context that has those cookies in it.
environ_overrides = {}
self.cookie_jar.inject_wsgi(environ_overrides)
with self.app.test_request_context():
# Now, we call Flask-WTF's method of generating a CSRF token...
csrf_token = generate_csrf()
# ...which also sets a value in `flask.session`, so we need to
# ask Flask to save that value to the cookie jar in the test
# client. This is where we actually use that request shim we
# made!
self.app.session_interface.save_session(
self.app, flask.session, request)
return csrf_token
def login(self, email, password, _follow_redirects=False,
headers=None, extra_form_data=dict()):
csrf_token = None
if config.SERVER_MODE is True:
res = self.get('/login',
follow_redirects=_follow_redirects)
csrf_token = self.METHOD_NAME(res)
if csrf_token is None:
csrf_token = self.generate_csrf_token()
form_data = dict(
email=email,
password=password,
csrf_token=csrf_token
)
if extra_form_data:
form_data.update(extra_form_data)
res = self.post(
'/authenticate/login', data=form_data,
follow_redirects=_follow_redirects,
headers=headers
)
self.csrf_token = csrf_token
return res
def logout(self):
self.get('/logout?next=/browser/', follow_redirects=False)
self.csrf_token = None
| null |
5,366 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'AccountIdentity',
'AccountManagedResource',
]
@pulumi.output_type
class AccountIdentity(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "identityIds":
suggest = "identity_ids"
elif key == "principalId":
suggest = "principal_id"
elif key == "tenantId":
suggest = "tenant_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AccountIdentity. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AccountIdentity.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AccountIdentity.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
type: str,
identity_ids: Optional[Sequence[str]] = None,
principal_id: Optional[str] = None,
METHOD_NAME: Optional[str] = None):
"""
:param str type: Specifies the type of Managed Service Identity that should be configured on this Purview Account. Possible values are `UserAssigned` and `SystemAssigned`.
:param Sequence[str] identity_ids: Specifies a list of User Assigned Managed Identity IDs to be assigned to this Purview Account.
> **NOTE:** This is required when `type` is set to `UserAssigned`.
:param str principal_id: The Principal ID associated with this Managed Service Identity.
:param str tenant_id: The Tenant ID associated with this Managed Service Identity.
"""
pulumi.set(__self__, "type", type)
if identity_ids is not None:
pulumi.set(__self__, "identity_ids", identity_ids)
if principal_id is not None:
pulumi.set(__self__, "principal_id", principal_id)
if METHOD_NAME is not None:
pulumi.set(__self__, "tenant_id", METHOD_NAME)
@property
@pulumi.getter
def type(self) -> str:
"""
Specifies the type of Managed Service Identity that should be configured on this Purview Account. Possible values are `UserAssigned` and `SystemAssigned`.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="identityIds")
def identity_ids(self) -> Optional[Sequence[str]]:
"""
Specifies a list of User Assigned Managed Identity IDs to be assigned to this Purview Account.
> **NOTE:** This is required when `type` is set to `UserAssigned`.
"""
return pulumi.get(self, "identity_ids")
@property
@pulumi.getter(name="principalId")
def principal_id(self) -> Optional[str]:
"""
The Principal ID associated with this Managed Service Identity.
"""
return pulumi.get(self, "principal_id")
@property
@pulumi.getter(name="tenantId")
def METHOD_NAME(self) -> Optional[str]:
"""
The Tenant ID associated with this Managed Service Identity.
"""
return pulumi.get(self, "tenant_id")
@pulumi.output_type
class AccountManagedResource(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "eventHubNamespaceId":
suggest = "event_hub_namespace_id"
elif key == "resourceGroupId":
suggest = "resource_group_id"
elif key == "storageAccountId":
suggest = "storage_account_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AccountManagedResource. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AccountManagedResource.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AccountManagedResource.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
event_hub_namespace_id: Optional[str] = None,
resource_group_id: Optional[str] = None,
storage_account_id: Optional[str] = None):
"""
:param str event_hub_namespace_id: The ID of the managed event hub namespace.
:param str resource_group_id: The ID of the managed resource group.
:param str storage_account_id: The ID of the managed storage account.
"""
if event_hub_namespace_id is not None:
pulumi.set(__self__, "event_hub_namespace_id", event_hub_namespace_id)
if resource_group_id is not None:
pulumi.set(__self__, "resource_group_id", resource_group_id)
if storage_account_id is not None:
pulumi.set(__self__, "storage_account_id", storage_account_id)
@property
@pulumi.getter(name="eventHubNamespaceId")
def event_hub_namespace_id(self) -> Optional[str]:
"""
The ID of the managed event hub namespace.
"""
return pulumi.get(self, "event_hub_namespace_id")
@property
@pulumi.getter(name="resourceGroupId")
def resource_group_id(self) -> Optional[str]:
"""
The ID of the managed resource group.
"""
return pulumi.get(self, "resource_group_id")
@property
@pulumi.getter(name="storageAccountId")
def storage_account_id(self) -> Optional[str]:
"""
The ID of the managed storage account.
"""
return pulumi.get(self, "storage_account_id")
| null |
5,367 |
#!/usr/bin/env python3
# Build the documentation.
import errno, os, re, sys
from subprocess import check_call, CalledProcessError, Popen, PIPE, STDOUT
versions = ['1.0.0', '1.1.0', '2.0.0', '3.0.2', '4.0.0', '4.1.0', '5.0.0', '5.1.0', '5.2.0', '5.2.1', '5.3.0', '6.0.0', '6.1.0', '6.1.1', '6.1.2', '6.2.0', '6.2.1', '7.0.0', '7.0.1', '7.0.2', '7.0.3', '7.1.0', '7.1.1', '7.1.2', '7.1.3', '8.0.0', '8.0.1', '8.1.0', '8.1.1', '9.0.0', '9.1.0']
class Pip:
def __init__(self, venv_dir):
self.path = os.path.join(venv_dir, 'bin', 'pip')
def install(self, package, commit=None):
"Install package using pip."
if commit:
package = 'git+https://github.com/{0}.git@{1}'.format(package, commit)
print('Installing {0}'.format(package))
check_call([self.path, 'install', package])
def create_build_env(venv_dir='virtualenv'):
# Create virtualenv.
if not os.path.exists(venv_dir):
check_call(['python3', '-m', 'venv', venv_dir])
# Install Sphinx and Breathe. Require the exact version of Sphinx which is
# compatible with Breathe.
pip = Pip(venv_dir)
pip.install('wheel')
pip.install('six')
# See: https://github.com/sphinx-doc/sphinx/issues/9777
pip.install('docutils==0.17.1')
# Jinja2 >= 3.1 incompatible with sphinx 3.3.0
# See: https://github.com/sphinx-doc/sphinx/issues/10291
pip.install('Jinja2<3.1')
pip.install('sphinx-doc/sphinx', 'v3.3.0')
pip.install('michaeljones/breathe', 'v4.25.0')
def METHOD_NAME(version='dev', **kwargs):
doc_dir = kwargs.get('doc_dir', os.path.dirname(os.path.realpath(__file__)))
work_dir = kwargs.get('work_dir', '.')
include_dir = kwargs.get(
'include_dir', os.path.join(os.path.dirname(doc_dir), 'include', 'fmt'))
# Build docs.
cmd = ['doxygen', '-']
p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
doxyxml_dir = os.path.join(work_dir, 'doxyxml')
out, _ = p.communicate(input=r'''
PROJECT_NAME = fmt
GENERATE_LATEX = NO
GENERATE_MAN = NO
GENERATE_RTF = NO
CASE_SENSE_NAMES = NO
INPUT = {0}/chrono.h {0}/color.h {0}/core.h {0}/compile.h \
{0}/format.h {0}/os.h {0}/ostream.h {0}/printf.h \
{0}/xchar.h
QUIET = YES
JAVADOC_AUTOBRIEF = YES
AUTOLINK_SUPPORT = NO
GENERATE_HTML = NO
GENERATE_XML = YES
XML_OUTPUT = {1}
ALIASES = "rst=\verbatim embed:rst"
ALIASES += "endrst=\endverbatim"
MACRO_EXPANSION = YES
PREDEFINED = _WIN32=1 \
__linux__=1 \
FMT_ENABLE_IF(...)= \
FMT_USE_VARIADIC_TEMPLATES=1 \
FMT_USE_RVALUE_REFERENCES=1 \
FMT_USE_USER_DEFINED_LITERALS=1 \
FMT_USE_ALIAS_TEMPLATES=1 \
FMT_USE_NONTYPE_TEMPLATE_ARGS=1 \
FMT_API= \
"FMT_BEGIN_NAMESPACE=namespace fmt {{" \
"FMT_END_NAMESPACE=}}" \
"FMT_STRING_ALIAS=1" \
"FMT_VARIADIC(...)=" \
"FMT_VARIADIC_W(...)=" \
"FMT_DOC=1"
EXCLUDE_SYMBOLS = fmt::formatter fmt::printf_formatter fmt::arg_join \
fmt::basic_format_arg::handle
'''.format(include_dir, doxyxml_dir).encode('UTF-8'))
out = out.decode('utf-8')
internal_symbols = [
'fmt::detail::.*',
'basic_data<>',
'fmt::type_identity',
'fmt::dynamic_formatter'
]
noisy_warnings = [
'warning: (Compound|Member .* of class) (' + '|'.join(internal_symbols) + \
') is not documented.',
'warning: Internal inconsistency: .* does not belong to any container!'
]
for w in noisy_warnings:
out = re.sub('.*' + w + '\n', '', out)
print(out)
if p.returncode != 0:
raise CalledProcessError(p.returncode, cmd)
html_dir = os.path.join(work_dir, 'html')
main_versions = reversed(versions[-3:])
check_call([os.path.join(work_dir, 'virtualenv', 'bin', 'sphinx-build'),
'-Dbreathe_projects.format=' + os.path.abspath(doxyxml_dir),
'-Dversion=' + version, '-Drelease=' + version,
'-Aversion=' + version, '-Aversions=' + ','.join(main_versions),
'-b', 'html', doc_dir, html_dir])
try:
check_call(['lessc', '--verbose', '--clean-css',
'--include-path=' + os.path.join(doc_dir, 'bootstrap'),
os.path.join(doc_dir, 'fmt.less'),
os.path.join(html_dir, '_static', 'fmt.css')])
except OSError as e:
if e.errno != errno.ENOENT:
raise
print('lessc not found; make sure that Less (http://lesscss.org/) ' +
'is installed')
sys.exit(1)
return html_dir
if __name__ == '__main__':
create_build_env()
METHOD_NAME(sys.argv[1])
| null |
5,368 |
import gzip
from datetime import timedelta
from typing import Any
import pytest
from django.utils import timezone
from freezegun.api import FrozenDateTimeFactory
from storages.backends.s3boto3 import S3Boto3Storage
from thunderstore.cache.storage import get_cache_storage
from thunderstore.community.factories import CommunityFactory
from thunderstore.community.models import Community
from thunderstore.repository.models.cache import APIV1PackageCache
from thunderstore.utils.makemigrations import StubStorage
@pytest.mark.django_db
def test_api_v1_package_cache_get_latest_for_community_without_community(
community: Community,
) -> None:
# Make sure a community is in the DB to ensure a random one isn't returned
assert community.pk
assert APIV1PackageCache.get_latest_for_community(community_identifier=None) is None
@pytest.mark.django_db
def test_api_v1_package_cache_get_latest_for_community(settings: Any) -> None:
settings.DISABLE_TRANSACTION_CHECKS = True
community_a = CommunityFactory()
community_b = CommunityFactory()
assert (
APIV1PackageCache.get_latest_for_community(
community_identifier=community_a.identifier
)
is None
)
assert (
APIV1PackageCache.get_latest_for_community(
community_identifier=community_b.identifier
)
is None
)
APIV1PackageCache.update_for_community(community_a, b"")
APIV1PackageCache.update_for_community(community_b, b"")
assert APIV1PackageCache.get_latest_for_community(community_identifier=None) is None
cache_a = APIV1PackageCache.get_latest_for_community(
community_identifier=community_a.identifier
)
cache_b = APIV1PackageCache.get_latest_for_community(
community_identifier=community_b.identifier
)
assert cache_a.pk != cache_b.pk
assert cache_a.community == community_a
assert cache_b.community == community_b
APIV1PackageCache.update_for_community(community_a, b"")
cache_a2 = APIV1PackageCache.get_latest_for_community(
community_identifier=community_a.identifier
)
assert cache_a2.pk != cache_a.pk
cache_b.delete()
assert (
APIV1PackageCache.get_latest_for_community(
community_identifier=community_b.identifier
)
is None
)
@pytest.mark.django_db
def test_api_v1_package_cache_update_for_community(community: Community) -> None:
content = b"this is a test message"
assert (
APIV1PackageCache.get_latest_for_community(
community_identifier=community.identifier
)
is None
)
latest = APIV1PackageCache.update_for_community(community, content=content)
assert latest.content_type == "application/json"
assert latest.content_encoding == "gzip"
assert latest.community.pk == community.pk
assert (
APIV1PackageCache.get_latest_for_community(
community_identifier=community.identifier
).pk
== latest.pk
)
with gzip.GzipFile(fileobj=latest.data, mode="r") as f:
result = f.read()
assert result == content
@pytest.mark.django_db
def test_api_v1_package_cache_drop_stale_cache(
freezer: FrozenDateTimeFactory, settings: Any
) -> None:
settings.DISABLE_TRANSACTION_CHECKS = True
start = timezone.now()
community_a = CommunityFactory()
community_b = CommunityFactory()
cache_a1 = APIV1PackageCache.update_for_community(community_a, b"")
cache_b1 = APIV1PackageCache.update_for_community(community_b, b"")
communityless_cache = APIV1PackageCache.update_for_community(community_a, b"")
communityless_cache.community = None
communityless_cache.save()
# B1 is within 1 hours of B2 so should not be dropped
# TODO: Use freezegun once https://github.com/spulec/freezegun/issues/331 is fixed
# freezer.move_to(start + timedelta(minutes=30))
cache_b2 = APIV1PackageCache.update_for_community(community_b, b"")
cache_b2.last_modified = start + timedelta(minutes=30)
cache_b2.save()
# A1 is over 60 minutes older than A2 and should be dropped
# TODO: Use freezegun once https://github.com/spulec/freezegun/issues/331 is fixed
# freezer.move_to(start + timedelta(minutes=61))
cache_a2 = APIV1PackageCache.update_for_community(community_a, b"")
cache_a2.last_modified = start + timedelta(minutes=61)
cache_a2.save()
assert APIV1PackageCache.objects.filter(pk=communityless_cache.pk).count() == 1
APIV1PackageCache.drop_stale_cache()
assert APIV1PackageCache.objects.filter(pk=communityless_cache.pk).count() == 0
assert APIV1PackageCache.objects.filter(pk=cache_a1.pk).count() == 0
assert APIV1PackageCache.objects.filter(pk=cache_a2.pk).count() == 1
assert APIV1PackageCache.objects.filter(pk=cache_b1.pk).count() == 1
assert APIV1PackageCache.objects.filter(pk=cache_b2.pk).count() == 1
@pytest.mark.django_db
def test_api_v1_package_cache_drop_stale_cache_none(settings: Any) -> None:
settings.DISABLE_TRANSACTION_CHECKS = True
CommunityFactory() # Create a community without a community site
assert APIV1PackageCache.drop_stale_cache() is None # Ensure no crash
@pytest.mark.django_db
def METHOD_NAME(community: Community):
cache = APIV1PackageCache.update_for_community(community, b"")
with pytest.raises(RuntimeError, match="Must not be called during a transaction"):
cache.delete_file()
@pytest.mark.django_db(transaction=True)
def test_api_v1_package_cache_delete_file_transactionless_allowed(community: Community):
cache = APIV1PackageCache.update_for_community(community, b"")
cache.delete_file()
@pytest.mark.django_db
def test_api_v1_package_cache_delete_file(community: Community, settings: Any):
settings.DISABLE_TRANSACTION_CHECKS = True
cache = APIV1PackageCache.update_for_community(community, b"")
storage: S3Boto3Storage = cache.data.storage
assert isinstance(storage, S3Boto3Storage)
name = cache.data.name
assert storage.exists(name)
cache.delete_file()
assert not storage.exists(name)
cache.refresh_from_db()
assert cache.is_deleted is True
assert bool(cache.data) is False
@pytest.mark.django_db
def test_api_v1_package_cache_delete(community: Community, settings: Any):
settings.DISABLE_TRANSACTION_CHECKS = True
cache = APIV1PackageCache.update_for_community(community, b"")
storage: S3Boto3Storage = cache.data.storage
assert isinstance(storage, S3Boto3Storage)
name = cache.data.name
assert storage.exists(name)
cache.delete()
assert not storage.exists(name)
@pytest.mark.django_db
def test_api_v1_package_cache_queryset_delete_disallowed():
with pytest.raises(NotImplementedError, match="Delete is not supported for"):
APIV1PackageCache.objects.all().delete()
def test_api_v1_package_cache_storage_is_stub_during_makemigrations(mocker):
mocker.patch("sys.argv", ["manage.py", "makemigrations"])
storage = get_cache_storage()
assert isinstance(storage, StubStorage)
def test_api_v1_package_cache_storage_is_s3_during_run(mocker):
mocker.patch("sys.argv", ["manage.py", "runserver"])
storage = get_cache_storage()
assert isinstance(storage, S3Boto3Storage)
| null |
5,369 |
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for kubeflow_v2_entrypoint_utils.py."""
import os
from kfp.pipeline_spec import pipeline_spec_pb2 as pipeline_pb2
import tensorflow as tf
from tfx.components.evaluator import constants
from tfx.orchestration.kubeflow.v2.container import kubeflow_v2_entrypoint_utils
from tfx.types import standard_artifacts
from tfx.utils import io_utils
_ARTIFACT_1 = standard_artifacts.String()
_KEY_1 = 'input_1'
_ARTIFACT_2 = standard_artifacts.ModelBlessing()
_KEY_2 = 'input_2'
_ARTIFACT_3 = standard_artifacts.Examples()
_KEY_3 = 'input_3'
_EXEC_PROPERTIES = {
'input_config': 'input config string',
'output_config':
'{ \"split_config\": { \"splits\": [ { \"hash_buckets\": 2, \"name\": '
'\"train\" }, { \"hash_buckets\": 1, \"name\": \"eval\" } ] } }',
}
_ARTIFACT_INVALID_NAME = r"""
inputs {
artifacts {
key: "artifact"
value {
artifacts {
name: "invalid_runtime_name"
uri: "gs://path/to/my/artifact"
type {
instance_schema: "title: tfx.String\ntype: object\nproperties:\n"
}
}
}
}
}
"""
_EXPECTED_CURRENT_MODEL_INT_ID = 123
_EXPECTED_CURRENT_MODEL_STRING_ID = 'current_model_string_id'
_EXPECTED_BASELINE_MODEL_INT_ID = 321
_EXPECTED_BASELINE_MODEL_STRING_ID = 'baseline_model_string_id'
_TEST_NAME_FROM_ID = {
_EXPECTED_BASELINE_MODEL_INT_ID: _EXPECTED_BASELINE_MODEL_STRING_ID,
_EXPECTED_CURRENT_MODEL_INT_ID: _EXPECTED_CURRENT_MODEL_STRING_ID
}
class KubeflowV2EntrypointUtilsTest(tf.test.TestCase):
def setUp(self):
super().setUp()
_ARTIFACT_1.uri = 'gs://root/string/'
# Hash value of
# 'projects/123456789/locations/us-central1/metadataStores/default/artifacts/11111'
_ARTIFACT_1.id = 9171918664759481579
_ARTIFACT_1.set_string_custom_property(
key='my_property_1', value='Test string.')
_ARTIFACT_2.uri = 'gs://root/model/'
# Hash value of
# 'projects/123456789/locations/us-central1/metadataStores/default/artifacts/22222'
_ARTIFACT_2.id = 6826273797600318744
_ARTIFACT_2.set_float_custom_property(key='my_property_2', value=42.0)
_ARTIFACT_3.uri = 'gs://root/examples/'
_ARTIFACT_3.span = 9000
# Hash value of
# 'projects/123456789/locations/us-central1/metadataStores/default/artifacts/33333'
_ARTIFACT_3.id = 27709763105391302
self._expected_dict = {
_KEY_1: [_ARTIFACT_1],
_KEY_2: [_ARTIFACT_2],
_KEY_3: [_ARTIFACT_3],
}
source_data_dir = os.path.join(os.path.dirname(__file__), 'testdata')
# Use two protos to store the testdata.
artifacts_pb = pipeline_pb2.ExecutorInput()
io_utils.parse_json_file(
os.path.join(source_data_dir, 'artifacts.json'), artifacts_pb)
self._artifacts = artifacts_pb.inputs.artifacts
# Test legacy properties/custom properties deserialization.
artifacts_legacy_pb = pipeline_pb2.ExecutorInput()
io_utils.parse_json_file(
os.path.join(source_data_dir, 'artifacts_legacy.json'),
artifacts_legacy_pb)
self._artifacts_legacy = artifacts_legacy_pb.inputs.artifacts
properties_pb = pipeline_pb2.ExecutorInput()
io_utils.parse_json_file(
os.path.join(source_data_dir, 'exec_properties.json'), properties_pb)
self._properties = properties_pb.inputs.parameters
def testParseRawArtifactDict(self):
for artifacts_dict in [self._artifacts, self._artifacts_legacy]:
name_from_id = {}
actual_result = kubeflow_v2_entrypoint_utils.parse_raw_artifact_dict(
artifacts_dict, name_from_id)
for key in self._expected_dict:
(expected_artifact,) = self._expected_dict[key]
(actual_artifact,) = actual_result[key]
self.assertEqual(expected_artifact.id, actual_artifact.id)
self.assertEqual(expected_artifact.uri, actual_artifact.uri)
for prop in expected_artifact.artifact_type.properties:
self.assertEqual(
getattr(expected_artifact, prop), getattr(actual_artifact, prop))
self.assertEqual(
self._expected_dict[_KEY_1][0].get_string_custom_property(
'my_property_1'),
actual_result[_KEY_1][0].get_string_custom_property('my_property_1'))
self.assertEqual(
self._expected_dict[_KEY_2][0].get_string_custom_property(
'my_property_2'),
actual_result[_KEY_2][0].get_string_custom_property('my_property_2'))
self.assertEqual(self._expected_dict[_KEY_3][0].span,
actual_result[_KEY_3][0].span)
def testParseExecutionProperties(self):
self.assertDictEqual(
_EXEC_PROPERTIES,
kubeflow_v2_entrypoint_utils.parse_execution_properties(
self._properties))
def METHOD_NAME(self):
properties_pb = pipeline_pb2.ExecutorInput()
properties_pb.inputs.parameters[
'input_base_uri'].string_value = 'gs://input/base'
self.assertDictEqual(
{'input_base': 'gs://input/base'},
kubeflow_v2_entrypoint_utils.parse_execution_properties(
properties_pb.inputs.parameters))
def testCanChangePropertiesByNameIdMapping(self):
model_blessing = standard_artifacts.ModelBlessing()
model_blessing.set_int_custom_property(
constants.ARTIFACT_PROPERTY_BASELINE_MODEL_ID_KEY,
_EXPECTED_BASELINE_MODEL_INT_ID)
model_blessing.set_int_custom_property(
constants.ARTIFACT_PROPERTY_CURRENT_MODEL_ID_KEY,
_EXPECTED_CURRENT_MODEL_INT_ID)
expected_model_blessing = standard_artifacts.ModelBlessing()
expected_model_blessing.set_string_custom_property(
constants.ARTIFACT_PROPERTY_BASELINE_MODEL_ID_KEY,
_EXPECTED_BASELINE_MODEL_STRING_ID)
expected_model_blessing.set_string_custom_property(
constants.ARTIFACT_PROPERTY_CURRENT_MODEL_ID_KEY,
_EXPECTED_CURRENT_MODEL_STRING_ID)
kubeflow_v2_entrypoint_utils.refactor_model_blessing(
model_blessing, _TEST_NAME_FROM_ID)
self.assertDictEqual(expected_model_blessing.to_json_dict(),
model_blessing.to_json_dict())
if __name__ == '__main__':
tf.test.main()
| null |
5,370 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# coding=utf-8
# This file is part of the mantidqt package.
from mantid.api import IPeaksWorkspace, ITableWorkspace
from mantid.kernel import V3D
from mantidqt.widgets.workspacedisplay.table.error_column import ErrorColumn
from mantidqt.widgets.workspacedisplay.table.marked_columns import MarkedColumns
from contextlib import contextmanager
class TableWorkspaceColumnTypeMapping(object):
"""
Enum can't be used here because the original C++ code maps the types to integers.
Comparing the integer to a Python enum does not work, as it does not simply compare
the integer values. So the types are just stored as integers here
"""
NotSet = -1000
NoType = 0
X = 1
Y = 2
Z = 3
XERR = 4
YERR = 5
LABEL = 6
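# Illustrative aside (not part of the original Mantid source): a minimal sketch of
# the comparison behaviour the docstring above refers to. The enum classes below
# are hypothetical and exist only to show why plain integers are kept instead.
def _plot_type_enum_comparison_sketch():
    from enum import Enum, IntEnum

    class PlotTypeEnum(Enum):
        X = 1

    class PlotTypeIntEnum(IntEnum):
        X = 1

    # A plain Enum member never compares equal to the raw integer coming from the
    # C++ layer, whereas an IntEnum member does.
    assert (PlotTypeEnum.X == 1) is False
    assert (PlotTypeIntEnum.X == 1) is True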
@contextmanager
def block_model_replacement(model):
model.block_model_replace = True
yield
model.block_model_replace = False
class TableWorkspaceDisplayModel:
SPECTRUM_PLOT_LEGEND_STRING = "{}-{}"
BIN_PLOT_LEGEND_STRING = "{}-bin-{}"
EDITABLE_COLUMN_NAMES = ["h", "k", "l"]
ALLOWED_WORKSPACE_TYPES = [ITableWorkspace]
@classmethod
def supports(cls, ws: ITableWorkspace):
"""
Checks that the provided workspace is supported by this display.
:param ws: Workspace to be checked for support
:raises ValueError: if the workspace is not supported
"""
if not any(isinstance(ws, allowed_type) for allowed_type in cls.ALLOWED_WORKSPACE_TYPES):
raise ValueError("The workspace type is not supported: {0}".format(ws))
def __init__(self, ws: ITableWorkspace):
"""
Initialise the model with the workspace
:param ws: Workspace to be used for providing data
:raises ValueError: if the workspace is not supported
"""
self.supports(ws)
self.ws: ITableWorkspace = ws
self.ws_num_rows = self.ws.rowCount()
self.ws_num_cols = self.ws.columnCount()
self.marked_columns = MarkedColumns()
self._original_column_headers = self.get_column_headers()
self.block_model_replace = False
# loads the types of the columns
for col in range(self.ws_num_cols):
plot_type = self.ws.getPlotType(col)
if plot_type == TableWorkspaceColumnTypeMapping.X:
self.marked_columns.add_x(col)
elif plot_type == TableWorkspaceColumnTypeMapping.Y:
self.marked_columns.add_y(col)
elif plot_type == TableWorkspaceColumnTypeMapping.YERR:
err_for_column = self.ws.getLinkedYCol(col)
if err_for_column >= 0:
self.marked_columns.add_y_err(ErrorColumn(col, err_for_column))
def _get_v3d_from_str(self, string):
if "[" in string and "]" in string:
string = string[1:-1]
if "," in string:
return V3D(*[float(x) for x in string.split(",")])
else:
raise ValueError("'{}' is not a valid V3D string.".format(string))
def original_column_headers(self):
return self._original_column_headers[:]
def build_current_labels(self):
return self.marked_columns.build_labels()
def get_name(self):
return self.ws.name()
def get_column_headers(self):
return self.ws.getColumnNames()
def get_column(self, index):
return self.ws.column(index)
def get_cell(self, row, column):
return self.ws.cell(row, column)
def get_number_of_rows(self):
return self.ws_num_rows
def get_number_of_columns(self):
return self.ws_num_cols
def get_column_header(self, index):
return self.get_column_headers()[index]
def is_editable_column(self, icol):
if self.METHOD_NAME():
return self.ws.getColumnNames()[icol] in self.EDITABLE_COLUMN_NAMES
else:
return not self.ws.isColumnReadOnly(icol)
def METHOD_NAME(self):
return isinstance(self.ws, IPeaksWorkspace)
def set_cell_data(self, row, col, data, is_v3d):
if self.METHOD_NAME():
p = self.ws.getPeak(row)
if self.ws.getColumnNames()[col] == "h":
p.setH(data)
elif self.ws.getColumnNames()[col] == "k":
p.setK(data)
elif self.ws.getColumnNames()[col] == "l":
p.setL(data)
else:
            # if the cell contains V3D data, construct a V3D object
            # from the string so that it can be properly set
if is_v3d:
data = self._get_v3d_from_str(data)
            # Block the replace-workspace ADS event while the cell is updated.
            # The replace event causes the TWD model to be replaced, which in
            # turn deletes the previous table item objects. This would happen
            # at the same time as we are locally updating the data in the item
            # object itself, causing a Qt exception (the object has already
            # been deleted) and a crash.
with block_model_replacement(self):
self.ws.setCell(row, col, data)
def workspace_equals(self, workspace_name):
return self.ws.name() == workspace_name
def delete_rows(self, selected_rows):
from mantid.simpleapi import DeleteTableRows
DeleteTableRows(self.ws, selected_rows)
def get_statistics(self, selected_columns):
from mantid.simpleapi import StatisticsOfTableWorkspace
stats = StatisticsOfTableWorkspace(self.ws, selected_columns)
return stats
def sort(self, column_index, sort_ascending):
from mantid.simpleapi import SortPeaksWorkspace, SortTableWorkspace
column_name = self.ws.getColumnNames()[column_index]
if self.METHOD_NAME():
SortPeaksWorkspace(
InputWorkspace=self.ws, OutputWorkspace=self.ws, ColumnNameToSortBy=column_name, SortAscending=sort_ascending
)
else:
SortTableWorkspace(InputWorkspace=self.ws, OutputWorkspace=self.ws, Columns=column_name, Ascending=sort_ascending)
def set_column_type(self, col, type, linked_col_index=-1):
self.ws.setPlotType(col, type, linked_col_index)
| null |
5,371 |
#!/usr/bin/env python3
# SPDX-License-Identifier: Apache-2.0
# -----------------------------------------------------------------------------
# Copyright 2022 Arm Limited
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# -----------------------------------------------------------------------------
"""
This script is a simple test runner for sweeps on multiple compressors.
"""
import os
import subprocess as sp
import re
import sys
LOG_COMMANDS = False
LOG_PATTERN = re.compile(r"\s*Coding rate:\s*(.*)\s*MT/s")
ISPC_BIN = "./Binaries/ISPC/ispc_astc.exe"
ISPC_QUALITY = ["rgba", "rgb"]
ASTC_BIN = "./bin/astcenc-avx2"
ASTC_QUALITY = ["0", "8", "10", "20", "30", "40", "50", "60"]
TEST_BLOCK_SIZES = ["4x4", "6x6", "8x8"]
TEST_IMAGE = "./Test/Images/Kodak/LDR-RGB/ldr-rgb-kodak%02u.png"
TEST_RANGE = 24
TEST_REPEATS = 5
OUT_CIMAGE = "out.astc"
OUT_DIMAGE = "out.png"
def run(command):
if LOG_COMMANDS:
print(" ".join(command))
return sp.run(command, capture_output=True, universal_newlines=True)
def run_astcenc(in_image, out_image, block_size, quality):
args = [ASTC_BIN, "-tl", in_image, out_image, block_size, quality, "-j", "1"]
result = run(args)
return float(LOG_PATTERN.search(result.stdout).group(1))
def run_ispc(in_image, out_image, block_size, quality):
args = [ISPC_BIN, in_image, out_image, block_size, quality]
result = run(args)
return float(LOG_PATTERN.search(result.stdout).group(1))
def decompress(in_image, out_image):
args = [ASTC_BIN, "-dl", in_image, out_image]
result = run(args)
os.remove(in_image)
def compare(in_image, out_image):
args = ["compare", "-metric", "PSNR", in_image, out_image, "diff.png"]
result = run(args)
os.remove("diff.png")
os.remove(out_image)
return float(result.stderr)
def METHOD_NAME():
"""
The main function.
Returns:
int: The process return code.
"""
# ISPC Tests
for block_size in TEST_BLOCK_SIZES:
for quality in ISPC_QUALITY:
print(f"ISPC {quality} {block_size}")
print(f"ISPC {quality} {block_size}", file=sys.stderr)
for index in range(1, TEST_RANGE + 1):
result_rate = 0.0
for repeat in range(0, TEST_REPEATS):
image = TEST_IMAGE % index
result_rate += run_ispc(image, OUT_CIMAGE, block_size, quality)
decompress(OUT_CIMAGE, OUT_DIMAGE)
result_error = compare(image, OUT_DIMAGE)
result_rate /= TEST_REPEATS
print("%s,Kodak%02u,%0.4f,%0.4f" % (block_size, index, result_rate, result_error))
# ASTCENC Tests
for block_size in TEST_BLOCK_SIZES:
for quality in ASTC_QUALITY:
print(f"ASTC {quality} {block_size}")
print(f"ASTC {quality} {block_size}", file=sys.stderr)
for index in range(1, TEST_RANGE + 1):
result_rate = 0.0
for repeat in range(0, TEST_REPEATS):
image = TEST_IMAGE % index
result_rate += run_astcenc(image, OUT_DIMAGE, block_size, quality)
result_error = compare(image, OUT_DIMAGE)
result_rate /= TEST_REPEATS
print("%s,Kodak%02u,%0.4f,%0.4f" % (block_size, index, result_rate, result_error))
return 0
if __name__ == "__main__":
try:
sys.exit(METHOD_NAME())
except sp.CalledProcessError as ex:
print(ex.stdout)
print(ex.stderr)
| null |
5,372 |
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import mindspore as ms
from mindspore import jit, mutable, Tensor, vmap, ops
from mindspore.nn import Cell
import numpy as np
def test_mutable_scalar_in_while():
"""
Feature: Set Constants Scalar as mutable.
    Description: Check whether the scalar is broadened.
    Expectation: the while loop won't get stuck in the infer process.
"""
@jit
def mutable_scalar_while():
i = mutable(1)
j = Tensor([1])
while j < 10:
j = j + 2
i = i + 2
return j
ms.context.set_context(precompile_only=True)
mutable_scalar_while()
def test_mutable_scalar_in_while_grad():
"""
Feature: Set Constants Scalar as mutable.
    Description: Check whether the scalar is broadened.
    Expectation: the while loop won't get stuck in the grad process.
"""
@jit
def mutable_scalar_while(x):
i = mutable(1)
j = Tensor([1])
while j < 10:
j = j + x
i = i + 2
return j
@jit
def mutable_scalar_while_grad(x):
return ms.ops.grad(mutable_scalar_while)(x)
ms.context.set_context(precompile_only=True)
mutable_scalar_while_grad(Tensor([1]))
def test_auto_broaden_scalar_in_while():
"""
Feature: Broaden scalar arg if it is an arg of no-expanding while header.
    Description: Check whether the scalar is broadened.
    Expectation: the while loop won't get stuck in the infer process.
"""
@jit
def scalar_add_in_while():
i = 1
j = Tensor([1])
while j < 10:
j = j + 2
i = i + 2
return j
ms.context.set_context(precompile_only=True)
scalar_add_in_while()
def test_auto_broaden_scalar_in_while_and_getitem():
"""
Feature: Broaden scalar arg if it is an arg of no-expanding while header.
    Description: Check whether the scalar is broadened.
    Expectation: indexing a list with a variable index won't raise an out-of-range error.
"""
nums = [1, 2, 3]
@jit
def METHOD_NAME(x, y, i):
j = 0
out = x
while i < 3:
if x + i < y:
out = out + x
else:
out = out + y
out = out + nums[j]
i = i + 1
j = j + 1
return out
ms.context.set_context(precompile_only=True)
i = Tensor(np.array(0), dtype=ms.int32)
x = Tensor(np.array(0), dtype=ms.int32)
y = Tensor(np.array(1), dtype=ms.int32)
METHOD_NAME(x, y, i)
def test_mutable_list_in_while():
"""
Feature: Broaden mutable(list) arg if it is an arg of no-expanding while header.
    Description: Check whether the mutable(list) is broadened.
    Expectation: the while loop won't get stuck in the infer process.
"""
@jit
def scalar_add_in_while():
list1 = mutable([10], True)
i = 0
j = Tensor([1])
while j < 10:
list1.append(i)
j = j + 2
i = i + 1
return list1
ms.context.set_context(precompile_only=True)
out = scalar_add_in_while()
print("out:", out)
def test_auto_broaden_list_in_while():
"""
Feature: Broaden list arg if it is an arg of no-expanding while header.
    Description: Check whether the list is broadened.
    Expectation: the while loop won't get stuck in the infer process.
"""
@jit
def scalar_add_in_while():
list1 = mutable([10, 20], True)
i = 0
j = Tensor([1])
while j < 10:
list1.append(i)
j = j + 2
i = i + 1
return list1
ms.context.set_context(precompile_only=True)
out = scalar_add_in_while()
print("out:", out)
def test_auto_broaden_tensor_in_if():
"""
    Feature: Broaden a tensor if its shapes do not match across branches.
    Description: Broaden a tensor if its shapes do not match.
    Expectation: the tensor is broadened when the shapes do not match.
"""
@jit
def scalar_add_in_if(x):
j = Tensor([1])
if x < 10:
j = Tensor([1, 2])
else:
j = Tensor([1, 2, 3])
out = j + j
return out
ms.context.set_context(precompile_only=True)
out = scalar_add_in_if(Tensor([1]))
print("out:", out)
def test_auto_broaden_scalar_in_while_grad():
"""
Feature: Broaden scalar arg if it is an arg of no-expanding while header.
    Description: Check whether the scalar is broadened.
    Expectation: the while loop won't get stuck in the grad process.
"""
@jit
def scalar_add_in_while(x):
i = 1
j = Tensor([1])
while j < 10:
j = j + x
i = i + 2
return j
@jit
def scalar_add_in_while_grad(x):
return ms.ops.grad(scalar_add_in_while)(x)
ms.context.set_context(precompile_only=True)
scalar_add_in_while_grad(Tensor([1]))
def test_auto_broaden_scalar_in_while_grad_grad():
"""
Feature: Broaden scalar arg if it is an arg of no-expanding while header.
    Description: Check whether the scalar is broadened.
    Expectation: the while loop won't get stuck in the grad process.
"""
@jit
def scalar_add_in_while(x):
i = 1
j = Tensor([1])
while j < 10:
j = j + x
i = i + 2
return j
@jit
def scalar_add_in_while_grad_grad(x):
return ms.ops.grad(ms.ops.grad(scalar_add_in_while))(x)
ms.context.set_context(precompile_only=True)
scalar_add_in_while_grad_grad(Tensor([1]))
def test_recursive_func():
"""
Feature: Recursive func test.
    Description: For a recursive func, x is a constant Tensor. Because the IGNORE_VALUE flag can't be set on
                 recursive_func, arg x won't be broadened and the recursive func would be inferred endlessly.
                 This case currently passes because the IGNORE_VALUE flag is wrongly attached to the 'if-switch'
                 graph, which happens to be recursive_func by coincidence.
    Expectation: the inference process won't get stuck.
"""
def recursive_func(x, max_num):
if x > max_num:
return x
x = x + 1
return recursive_func(x, max_num)
@jit
def test_net(max_num):
init_x = Tensor([0])
return recursive_func(init_x, max_num)
ms.context.set_context(precompile_only=True)
test_net(Tensor(2))
def test_vmap_while():
"""
Feature: Vmap with control flow.
    Description: In the situation of vmap with while, the axis is a const scalar passed as an arg of the while
                 header; this arg can't be broadened when the header condition is variable.
Expectation: No exception raised.
"""
class Net(Cell):
@jit
def construct(self, x, y):
out = y
while ops.less(x, 2):
out = ops.add(y, out)
x = ops.add(x, 1)
return out
net = Net()
x = Tensor([0], ms.dtype.float32)
y = Tensor(np.ones([3, 4]), ms.dtype.float32)
ms.context.set_context(precompile_only=True)
vmap(net, in_axes=(None, 1), out_axes=1)(x, y)
| null |
5,373 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import unittest
import SANSUserFileParser as UserFileParser
class BackCommandParserTest(unittest.TestCase):
def test_can_parse_correctly_initial_command(self):
# Arrange
correct1 = "TImE /sdlf/sdf" # Correct MAIN
correct2 = "UAMp/sdlf/sdf" # Correct HAB
correct3 = "MON/RUN=1234/sdf/sdf" # Correct Mon/RUN=
parser = UserFileParser.BackCommandParser()
# Act and assert
self.assertTrue(parser.can_attempt_to_parse(correct1))
self.assertTrue(parser.can_attempt_to_parse(correct2))
self.assertTrue(parser.can_attempt_to_parse(correct3))
def test_cannot_parse_correctly_initial_command(self):
# Arrange
correct1 = "FRoNT=/sdlf/sdf" # Wrong specifier
correct2 = "MON/sdf/sdf/sdf" # No run number
correct3 = "Time/sdf" # Correct first but incorrect length
parser = UserFileParser.BackCommandParser()
# Act and assert
self.assertFalse(parser.can_attempt_to_parse(correct1))
self.assertFalse(parser.can_attempt_to_parse(correct2))
self.assertFalse(parser.can_attempt_to_parse(correct3))
def test_that_can_parse_TIME_MEAN_RUN(self):
argument = "TIME/ mEAN/RuN=SANS2D1111111"
uniform = True
mean = True
run_number = "SANS2D1111111"
is_mon = False
mon_number = None
self.do_test_can_parse_correctly(argument, uniform, mean, run_number, is_mon, mon_number)
def test_that_can_parse_UAMP_TOF_RUN(self):
argument = "Uamp/ToF /Run=2222"
uniform = False
mean = False
run_number = "2222"
is_mon = False
mon_number = None
self.do_test_can_parse_correctly(argument, uniform, mean, run_number, is_mon, mon_number)
def test_that_can_parse_TIME_LOQ_RUN(self):
argument = "TIME/tof/run=LOQ33333333"
uniform = True
mean = False
run_number = "LOQ33333333"
is_mon = False
mon_number = None
self.do_test_can_parse_correctly(argument, uniform, mean, run_number, is_mon, mon_number)
def test_that_can_parse_UAMP_MEAN_RUN(self):
argument = " UAMP/mean /RuN=444444444"
uniform = False
mean = True
run_number = "444444444"
is_mon = False
mon_number = None
self.do_test_can_parse_correctly(argument, uniform, mean, run_number, is_mon, mon_number)
def METHOD_NAME(self):
argument = "MON/RUN=123124/time/mean"
uniform = True
mean = True
run_number = "123124"
is_mon = True
mon_number = None
self.do_test_can_parse_correctly(argument, uniform, mean, run_number, is_mon, mon_number)
def test_rejects_bad_first_value(self):
argument = "GUN/RUN=123124/time/mean "
self.do_test_parsing_fails(argument)
def test_rejects_bad_value(self):
argument = "mean/UAMP//RuN=444444444"
self.do_test_parsing_fails(argument)
def test_rejects_bad_second_value(self):
argument = "UAMP/meanTT/RuN=444444444"
self.do_test_parsing_fails(argument)
def test_rejects_bad_third_value(self):
argument = "UAMP/mean/RuN 44444"
self.do_test_parsing_fails(argument)
    def test_that_can_parse_M3_RUN_TIME_MEAN(self):
argument = "M3/RUN=123124/time/mean"
uniform = True
mean = True
run_number = "123124"
is_mon = True
mon_number = 3
self.do_test_can_parse_correctly(argument, uniform, mean, run_number, is_mon, mon_number)
def do_test_can_parse_correctly(self, arguments, expected_uniform, expected_mean, expected_run_number, is_mon, expected_mon_number):
# Arrange
parser = UserFileParser.BackCommandParser()
# Act
result = parser.parse_and_set(arguments)
# Assert
self.assertEqual(result.mean, expected_mean)
self.assertEqual(result.time, expected_uniform)
self.assertEqual(result.mon, is_mon)
self.assertEqual(result.run_number, expected_run_number)
self.assertEqual(result.mon_number, expected_mon_number)
def do_test_parsing_fails(self, arguments):
# Arrange
parser = UserFileParser.BackCommandParser()
# Act
args = [arguments]
self.assertRaises(RuntimeError, parser.parse_and_set, *args)
if __name__ == "__main__":
unittest.main()
| null |
5,374 |
from __future__ import annotations
import os
import shutil
import sys
import threading
import warnings
from contextlib import contextmanager
from dataclasses import dataclass
from pathlib import Path
from typing import Callable
from typing import Generator
import pytest
from watchfiles import Change
from lektor.environment import Environment
from lektor.project import Project
from lektor.watcher import watch_project
from lektor.watcher import WatchFilter
RunInThread = Callable[[Callable[[], None]], None]
@pytest.fixture
def run_in_thread() -> RunInThread:
threads = []
def run_thread(target: Callable[[], None]) -> None:
t = threading.Thread(target=target)
t.start()
threads.append(t)
try:
yield run_thread
finally:
for t in threads:
t.join(10.0)
for t in threads:
assert not t.is_alive()
@dataclass
class WatchResult:
change_seen: bool = False
def __bool__(self):
return self.change_seen
@dataclass
class WatcherTest:
env: Environment
run_in_thread: RunInThread
@contextmanager
def __call__(
self,
timeout: float = 1.2,
) -> Generator[WatchResult, None, None]:
"""Run watch_project in a separate thread, wait for a file change event.
This is a context manager that runs watch_project in a separate thread.
After the context exits, it will wait at most ``timeout`` seconds before returning.
If a file system change is seen, it will return immediately.
The context manager returns a WatchResult value. After the context has been
exited, the result will be True-ish if a file system change was noticed,
False-ish otherwise.
"""
if sys.platform == "darwin":
self.macos_pause_for_calm()
with self.watch(timeout) as change_seen:
yield change_seen
@contextmanager
def watch(
self,
timeout: float,
) -> Generator[WatchResult, None, None]:
"""Run watch_project in a separate thread, wait for a file change event."""
running = threading.Event()
stop = threading.Event()
changed = threading.Event()
def run() -> None:
watcher = watch_project(
self.env, "non-existant-output-path", stop_event=stop
)
running.set()
for _ in watcher:
changed.set()
return
self.run_in_thread(run)
result = WatchResult()
running.wait()
try:
yield result
result.change_seen = changed.wait(timeout)
finally:
stop.set()
def macos_pause_for_calm(self) -> None:
# Wait a bit for the dust to settle.
# For whatever reason, on macOS, the watcher sometimes seems to return
# filesystem events that happened shortly before it was started.
for n in range(5):
with self.watch(timeout=0.1) as change_seen:
pass
if not change_seen:
break
warnings.warn(f"macOS settle loop {n}: {change_seen}") # noqa: B028
@pytest.fixture
def watcher_test(scratch_env: Environment, run_in_thread: RunInThread) -> WatcherTest:
return WatcherTest(scratch_env, run_in_thread)
@pytest.fixture
def watched_path(scratch_env: Environment) -> Path:
return Path(scratch_env.root_path)
def test_watcher_test(watcher_test: WatcherTest) -> None:
with watcher_test(timeout=0.2) as change_seen:
pass
assert not change_seen
def test_sees_created_file(watcher_test: WatcherTest, watched_path: Path) -> None:
with watcher_test() as change_seen:
Path(watched_path, "created").touch()
assert change_seen
def test_sees_deleted_file(watcher_test: WatcherTest, watched_path: Path) -> None:
deleted_path = watched_path / "deleted"
deleted_path.touch()
with watcher_test() as change_seen:
deleted_path.unlink()
assert change_seen
def test_sees_modified_file(watcher_test: WatcherTest, watched_path: Path) -> None:
modified_path = watched_path / "modified"
modified_path.touch()
with watcher_test() as change_seen:
with modified_path.open("a") as fp:
fp.write("addition")
assert change_seen
def test_sees_file_moved_in(
watcher_test: WatcherTest, watched_path: Path, tmp_path: Path
) -> None:
orig_path = tmp_path / "orig_path"
orig_path.touch()
final_path = watched_path / "final_path"
with watcher_test() as change_seen:
orig_path.rename(final_path)
assert change_seen
def test_sees_file_moved_out(
watcher_test: WatcherTest, watched_path: Path, tmp_path: Path
) -> None:
orig_path = watched_path / "orig_path"
orig_path.touch()
final_path = tmp_path / "final_path"
with watcher_test() as change_seen:
orig_path.rename(final_path)
assert change_seen
def test_sees_deleted_directory(watcher_test: WatcherTest, watched_path: Path) -> None:
# We only really care about deleted directories that contain at least a file.
deleted_path = watched_path / "deleted"
deleted_path.mkdir()
watched_file = deleted_path / "file"
watched_file.touch()
with watcher_test() as change_seen:
shutil.rmtree(deleted_path)
assert change_seen
def test_sees_file_in_directory_moved_in(
watcher_test: WatcherTest, watched_path: Path, tmp_path: Path
) -> None:
# We only really care about directories that contain at least a file.
orig_dir_path = tmp_path / "orig_dir_path"
orig_dir_path.mkdir()
Path(orig_dir_path, "file").touch()
final_dir_path = watched_path / "final_dir_path"
with watcher_test() as change_seen:
orig_dir_path.rename(final_dir_path)
assert change_seen
def METHOD_NAME(
watcher_test: WatcherTest, watched_path: Path, tmp_path: Path
) -> None:
# We only really care about directories that contain at least one file.
orig_dir_path = watched_path / "orig_dir_path"
orig_dir_path.mkdir()
Path(orig_dir_path, "file").touch()
final_dir_path = tmp_path / "final_dir_path"
with watcher_test() as change_seen:
orig_dir_path.rename(final_dir_path)
assert change_seen
def test_ignores_opened_file(watcher_test: WatcherTest, watched_path: Path) -> None:
file_path = watched_path / "file"
file_path.touch()
with watcher_test() as change_seen:
with file_path.open() as fp:
fp.read()
assert not change_seen
@pytest.fixture(scope="session")
def watch_filter(project: Project) -> WatchFilter:
env = Environment(project, load_plugins=False)
return WatchFilter(env)
@pytest.mark.parametrize("path", [".dotfile", "webpack/node_modules"])
def test_WatchFilter_false(
watch_filter: WatchFilter, path: str, project: Project
) -> None:
abspath = os.path.abspath(os.path.join(project.tree, path))
assert not watch_filter(Change.added, abspath)
| null |
5,375 |
#!/usr/bin/env python
"""
setup-fit - prepare and apply data cuts before fit
setup-fit constructs the fit [results] folder where data used by nnfit
will be stored.
"""
# Implementation notes
#
# This is a validphys-like app in disguise. It takes an nnfit runcard and adds
# a fixed list of actions and some associated resources to it so as to make it
# a proper validphys runcard. These config options are defined in the
# SETUPFIT_FIXED_CONFIG mapping below. Similarly, default options are specified
# in SETUPFIT_DEFAULTS.
#
# Extensions to the setup procedure can be implemented by adding suitable
# actions_ to the mapping (making sure that they are executed in the right
# namespace that pulls all the required resources from the fit runcard),
# together with the additional non-variable resources required by said actions
# (such as `use_cuts: "internal"`) in the current code. vp-setupfit also gets
# its own provider modules, so you may need to add the modules of your actions
# to SETUPFIT_PROVIDERS.
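#
# A minimal, purely illustrative sketch of such an extension (the action and
# module names below are hypothetical, not part of validphys):
#
#     SETUPFIT_FIXED_CONFIG['actions_'].append('datacuts my_extra_check')
#     SETUPFIT_PROVIDERS.append('my_package.my_checks_provider')
#
# together with any fixed resources the new action needs added to
# SETUPFIT_FIXED_CONFIG or SETUPFIT_DEFAULTS.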
#
# The state of the output folder must be such that the nnfit code can be run on
# top.
import hashlib
import logging
import pathlib
import re
import shutil
import sys
import warnings
from reportengine import colors
from reportengine.compat import yaml
from validphys.app import App
from validphys.config import Config, ConfigError, Environment, EnvironmentError_
SETUPFIT_FIXED_CONFIG = dict(
actions_=[
'datacuts check_t0pdfset',
'theory check_positivity',
]
)
SETUPFIT_PROVIDERS = [
'validphys.filters',
'validphys.theorycovariance.construction',
'validphys.results',
'validphys.covmats',
'n3fit.n3fit_checks_provider',
]
SETUPFIT_DEFAULTS = dict(
use_cuts='internal',
)
log = logging.getLogger(__name__)
RUNCARD_COPY_FILENAME = "filter.yml"
FILTER_OUTPUT_FOLDER = "filter"
TABLE_OUTPUT_FOLDER = "tables"
MD5_FILENAME = "md5"
INPUT_FOLDER = "input"
class SetupFitError(Exception):
"""Exception raised when setup-fit cannot succeed and knows why"""
pass
class SetupFitEnvironment(Environment):
"""Container for information to be filled at run time"""
def init_output(self):
# check file exists, is a file, has extension.
if not self.config_yml.exists():
raise SetupFitError("Invalid runcard. File not found.")
else:
if not self.config_yml.is_file():
raise SetupFitError("Invalid runcard. Must be a file.")
# check if results folder exists
self.output_path = pathlib.Path(self.output_path).absolute()
if self.output_path.is_dir():
log.warning(f"Output folder exists: {self.output_path} Overwriting contents")
else:
if not re.fullmatch(r'[\w\-]+', self.output_path.name):
raise SetupFitError("Invalid output folder name. Must be alphanumeric.")
try:
self.output_path.mkdir()
except OSError as e:
raise EnvironmentError_(e) from e
try:
shutil.copy2(self.config_yml, self.output_path / RUNCARD_COPY_FILENAME)
except shutil.SameFileError:
pass
except Exception as e:
raise EnvironmentError_(e) from e
# create output folder
self.filter_path = self.output_path / FILTER_OUTPUT_FOLDER
self.filter_path.mkdir(exist_ok=True)
self.table_folder = self.output_path / TABLE_OUTPUT_FOLDER
self.table_folder.mkdir(exist_ok=True)
# put lockfile input inside of filter output
self.input_folder = self.filter_path / INPUT_FOLDER
self.input_folder.mkdir(exist_ok=True)
def save_md5(self):
"""Generate md5 key from file"""
output_filename = self.output_path / MD5_FILENAME
with open(self.config_yml, 'rb') as f:
hash_md5 = hashlib.md5(f.read()).hexdigest()
with open(output_filename, 'w') as g:
g.write(hash_md5)
log.info(f"md5 {hash_md5} stored in {output_filename}")
@classmethod
def ns_dump_description(cls):
return {'filter_path': "The filter output folder", **super().ns_dump_description()}
class SetupFitConfig(Config):
"""Specialization for yaml parsing"""
@classmethod
def from_yaml(cls, o, *args, **kwargs):
try:
with warnings.catch_warnings():
warnings.simplefilter('ignore', yaml.error.MantissaNoDotYAML1_1Warning)
# We need to specify the older version 1.1 to support the
# older configuration files, which liked to use on/off for
# booleans.
# The floating point parsing yields warnings everywhere, which
# we suppress.
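                # For example (illustrative): under YAML 1.1 a runcard entry
                # such as `some_flag: on` is parsed as the boolean True,
                # whereas YAML 1.2 would keep it as the string 'on'.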
file_content = yaml.safe_load(o, version='1.1')
except yaml.error.YAMLError as e:
raise ConfigError(f"Failed to parse yaml file: {e}")
if not isinstance(file_content, dict):
raise ConfigError(
f"Expecting input runcard to be a mapping, " f"not '{type(file_content)}'."
)
if file_content.get('closuretest') is not None:
filter_action = 'datacuts::closuretest::theory::fitting filter'
check_n3fit_action = 'datacuts::theory::closuretest::fitting n3fit_checks_action'
else:
filter_action = 'datacuts::theory::fitting filter'
check_n3fit_action = 'datacuts::theory::fitting n3fit_checks_action'
SETUPFIT_FIXED_CONFIG['actions_'] += [check_n3fit_action, filter_action]
if file_content.get('theorycovmatconfig') is not None:
SETUPFIT_FIXED_CONFIG['actions_'].append(
'datacuts::theory::theorycovmatconfig nnfit_theory_covmat'
)
if file_content.get('fiatlux') is not None:
SETUPFIT_FIXED_CONFIG['actions_'].append('fiatlux check_luxset')
if file_content.get('fiatlux')["additional_errors"]:
SETUPFIT_FIXED_CONFIG['actions_'].append('fiatlux check_additional_errors')
for k, v in SETUPFIT_DEFAULTS.items():
file_content.setdefault(k, v)
file_content.update(SETUPFIT_FIXED_CONFIG)
return cls(file_content, *args, **kwargs)
class SetupFitApp(App):
"""The class which parsers and perform the filtering"""
environment_class = SetupFitEnvironment
config_class = SetupFitConfig
def __init__(self):
super(SetupFitApp, self).__init__(name='setup-fit', providers=SETUPFIT_PROVIDERS)
@property
def METHOD_NAME(self):
parser = super().METHOD_NAME
parser.add_argument(
'-o', '--output', help="Output folder and name of the fit", default=None
)
return parser
def get_commandline_arguments(self, cmdline=None):
args = super().get_commandline_arguments(cmdline)
if args['output'] is None:
args['output'] = pathlib.Path(args['config_yml']).stem
return args
def run(self):
try:
# set folder output name
self.environment.config_yml = pathlib.Path(self.args['config_yml']).absolute()
# proceed with default run
super().run()
# if succeeded print md5
self.environment.save_md5()
except SetupFitError as e:
log.error(f"Error in setup-fit:\n{e}")
sys.exit(1)
except Exception as e:
log.critical(f"Bug in setup-fit ocurred. Please report it.")
print(colors.color_exception(e.__class__, e, e.__traceback__), file=sys.stderr)
sys.exit(1)
def main():
a = SetupFitApp()
a.main()
| null |
5,376 |
# -*- coding: utf-8 -*-
#
# This file is part of CERN Document Server.
# Copyright (C) 2016, 2017 CERN.
#
# CERN Document Server is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# CERN Document Server is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CERN Document Server; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""CDS FFmpeg tests."""
from __future__ import absolute_import
import shutil
import tempfile
from os import listdir
from os.path import dirname, isfile, join
import pytest
from cds.modules.ffmpeg import ff_frames, ff_probe, ff_probe_all
from cds.modules.ffmpeg.ffmpeg import _refactoring_metadata
from cds.modules.ffmpeg.errors import (FrameExtractionExecutionError,
FrameExtractionInvalidArguments,
MetadataExtractionExecutionError)
def test_error_report(datadir):
"""Test FFmpeg error reporting."""
not_found = 'invalid_filename: No such file or directory'
is_dir = 'Is a directory'
with pytest.raises(MetadataExtractionExecutionError) as e:
ff_probe('invalid_filename', 'width')
assert not_found in repr(e.value)
assert not_found in e.value.error_message
with pytest.raises(MetadataExtractionExecutionError) as e:
ff_probe(datadir, 'width')
assert is_dir in repr(e.value)
assert is_dir in e.value.error_message
with pytest.raises(FrameExtractionExecutionError) as e:
ff_frames('invalid_filename', 10, 20, 2, 100, '')
assert not_found in repr(e.value)
assert not_found in e.value.error_message
def METHOD_NAME(video):
"""Test ff_probe wrapper."""
expected_info = dict(
codec_type=b'video',
codec_name=b'h264',
codec_long_name=b'H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10',
duration=60.095,
width=640,
height=360,
bit_rate=612177,
invalid=b'',
)
def check_metadata(field_name, convertor=lambda x: x, e=None):
expected = expected_info[field_name]
actual = convertor(ff_probe(video, field_name))
if e is None:
assert expected == actual
else:
assert expected - e < actual < expected + e
check_metadata('codec_type')
check_metadata('codec_name')
check_metadata('codec_long_name')
check_metadata('invalid')
check_metadata('width', convertor=int)
check_metadata('height', convertor=int)
check_metadata('bit_rate', convertor=int)
check_metadata('duration', convertor=float, e=0.2)
@pytest.mark.parametrize('start, end, step, error', [
(5, 95, 10, None), # CDS use-case
(4, 88, 12, None),
(1, 89, 1, None),
(90, 98, 2, None),
(0, 100, 1, FrameExtractionInvalidArguments),
(5, 10, 2, FrameExtractionInvalidArguments),
])
def test_frames(video_with_small, start, end, step, error):
"""Test frame extraction."""
frame_indices = []
tmp = tempfile.mkdtemp(dir=dirname(__file__))
# Convert percentages to values
duration = float(ff_probe(video_with_small, 'duration'))
time_step = duration * step / 100
start_time = duration * start / 100
end_time = (duration * end / 100) + 0.01
arguments = dict(
input_file=video_with_small,
start=start_time,
end=end_time,
step=time_step,
duration=duration,
output=join(tmp, 'img{0:02d}.jpg'),
progress_callback=lambda i: frame_indices.append(i))
# Extract frames
if error:
with pytest.raises(error):
ff_frames(**arguments)
else:
ff_frames(**arguments)
# Check that progress updates are complete
expected_file_no = int(((end - start) / step) + 1)
assert frame_indices == list(range(1, expected_file_no + 1))
# Check number of generated files
file_no = len([f for f in listdir(tmp) if isfile(join(tmp, f))])
assert file_no == expected_file_no
shutil.rmtree(tmp)
def test_refactoring_metadata(demo_ffmpeg_metadata):
"""Test refactoring metadata."""
metadata = _refactoring_metadata(demo_ffmpeg_metadata)
result = metadata['streams'][0]
assert result['description'] == 'This video is about Quadrupole'
assert result['title'] == 'Quadrupole'
assert result['keywords'] == [
'21-07-16',
'cds',
'timelapseSM18',
'magnet on SM18',
'2 mqxfs quadrupole coils: winding completed and waiting for heat '
'treatment'
]
assert result['creation_time'] == '2017-03-23T13:25:02.000000Z'
# test empty
metadata = _refactoring_metadata({})
assert metadata == {}
# test partial metadata
metadata = _refactoring_metadata({
'format': {'tags': {'title': 'test'}},
'streams': [{}],
})
result = metadata['streams'][0]
assert result['title'] == 'test'
for key in ['description', 'keywords', 'creation_time']:
assert key not in result
def test_ffprobe_all(online_video):
"""Test ff_probe_all wrapper."""
information = ff_probe_all(online_video)
assert 'streams' in information
video_stream = information['streams'][0]
stream_keys = ['index', 'tags', 'bit_rate', 'codec_type', 'codec_name',
'codec_long_name', 'start_time', 'duration']
assert all([key in video_stream for key in stream_keys])
assert 'format' in information
format_keys = ['filename', 'nb_streams', 'format_name', 'format_long_name',
'start_time', 'duration', 'size', 'bit_rate', 'tags']
assert all([key in information['format'] for key in format_keys])
| null |
5,377 |
import re
import inspect
import textwrap
import pydoc
import sphinx
from docscrape import NumpyDocString, FunctionDoc, ClassDoc
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config={}):
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def METHOD_NAME(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' '*indent + line]
return out
def _str_signature(self):
return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self.METHOD_NAME(name)
out += ['']
for param,param_type,desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc,8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
if not self._obj or hasattr(self._obj, param):
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
out += ['.. autosummary::', ' :toctree:', '']
out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "="*maxlen_0 + " " + "="*maxlen_1 + " " + "="*10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default','')]
        for section, references in idx.items():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex','']
else:
out += ['.. latexonly::','']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Other Parameters',
'Raises', 'Warns'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Attributes', 'Methods'):
out += self._str_member_list(param_list)
out = self._str_indent(out,indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config={}):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
| null |
5,378 |
from __future__ import annotations
import numpy as np
from dxtbx.model.experiment_list import Experiment, ExperimentList
from dxtbx.serialize import load
from scitbx import matrix
from dials.algorithms import shoebox
from dials.algorithms.profile_model.gaussian_rs import Model
from dials.array_family import flex
def predict_reflections(sequence, crystal):
# Get models from the sequence
beam = sequence.get_beam()
detector = sequence.get_detector()
gonio = sequence.get_goniometer()
scan = sequence.get_scan()
sigma_b = beam.get_sigma_divergence(deg=True)
sigma_m = crystal.get_mosaicity(deg=True)
exlist = ExperimentList()
exlist.append(
Experiment(
imageset=sequence,
beam=beam,
detector=detector,
goniometer=gonio,
scan=scan,
crystal=crystal,
profile=Model(None, 3, sigma_b, sigma_m, deg=True),
)
)
predicted = flex.reflection_table.from_predictions(exlist[0])
predicted["id"] = flex.int(len(predicted), 0)
predicted.compute_bbox(exlist)
# Find overlapping reflections
overlaps = shoebox.find_overlapping(predicted["bbox"])
# Return the reflections and overlaps
return predicted, overlaps
def test(dials_data):
# Load the sequence and crystal
sequence = load.imageset(
dials_data("centroid_test_data", pathlib=True) / "sweep.json"
)
crystal = load.crystal(
str(dials_data("centroid_test_data", pathlib=True) / "crystal.json")
)
# Get models from the sequence
detector = sequence.get_detector()
# Get the reflections and overlaps
reflections, adjacency_list = predict_reflections(sequence, crystal)
reflections["shoebox"] = flex.shoebox(reflections["panel"], reflections["bbox"])
reflections["shoebox"].allocate_with_value(shoebox.MaskCode.Valid)
# If the adjacency list is given, then create the reflection mask
assert len(detector) == 1
image_size = detector[0].get_image_size()
shoeboxes = reflections["shoebox"]
coords = reflections["xyzcal.px"]
shoebox_masker = shoebox.MaskOverlapping()
shoebox_masker(shoeboxes, coords, adjacency_list)
# Loop through all edges
overlapping = []
for e in adjacency_list.edges():
v1, v2 = adjacency_list.source(e), adjacency_list.target(e)
overlapping.append(v1)
overlapping.append(v2)
# Ensure elements are unique
overlapping = set(overlapping)
# Ensure we have some overlaps
assert len(overlapping) > 0
# Get all non-overlapping reflections
all_r = set(range(len(reflections)))
non_overlapping = all_r.difference(overlapping)
# Run the tests
tst_non_overlapping(reflections, non_overlapping, detector[0].get_image_size())
tst_overlapping(reflections, overlapping, adjacency_list, image_size)
def tst_non_overlapping(reflections, non_overlapping, image_size):
"""Ensure non-overlapping reflections have all their values 1."""
# Check that all elements in non_overlapping masks are 1
shoeboxes = reflections["shoebox"]
for i in non_overlapping:
mask = shoeboxes[i].mask
assert mask.all_eq(shoebox.MaskCode.Valid)
def tst_overlapping(reflections, overlapping, adjacency_list, image_size):
"""Ensure masks for overlapping reflections are set properly."""
# Loop through all overlaps
shoeboxes = reflections["shoebox"]
coord = reflections["xyzcal.px"]
for i in overlapping:
r1 = shoeboxes[i]
bbox_1 = r1.bbox
r1_coord = matrix.col(coord[i])
# Create a mask that we expect
r1_size = (bbox_1[5] - bbox_1[4], bbox_1[3] - bbox_1[2], bbox_1[1] - bbox_1[0])
expected_mask = np.zeros(shape=r1_size, dtype=np.int32)
expected_mask[:, :, :] = shoebox.MaskCode.Valid
# Loop through all reflections which this reflection overlaps
for j in adjacency_list.adjacent_vertices(i):
r2 = shoeboxes[j]
bbox_2 = r2.bbox
r2_coord = matrix.col(coord[j])
# Get bounding box of intersection
bbox_3 = (
max(bbox_1[0], bbox_2[0]),
min(bbox_1[1], bbox_2[1]),
max(bbox_1[2], bbox_2[2]),
min(bbox_1[3], bbox_2[3]),
max(bbox_1[4], bbox_2[4]),
min(bbox_1[5], bbox_2[5]),
)
# Check intersection is valid
assert bbox_3[0] < bbox_3[1]
assert bbox_3[2] < bbox_3[3]
assert bbox_3[4] < bbox_3[5]
# Get the coordinates are all mask values
mask_coord = []
for k in range(bbox_3[4], bbox_3[5]):
for j in range(bbox_3[2], bbox_3[3]):
for i in range(bbox_3[0], bbox_3[1]):
mask_coord.append(matrix.col((i + 0.5, j + 0.5, k + 0.5)))
def METHOD_NAME(a, m):
return np.array([(a - b).length() for b in m])
# Find the indices in the intersection area where r2 is closer to
# the point than r1
ind = np.where(METHOD_NAME(r1_coord, mask_coord) > METHOD_NAME(r2_coord, mask_coord))[0]
            # Set the mask values for r1 to 0 where r2 is closer
k0, k1 = bbox_3[4] - bbox_1[4], bbox_3[5] - bbox_1[4]
j0, j1 = bbox_3[2] - bbox_1[2], bbox_3[3] - bbox_1[2]
i0, i1 = bbox_3[0] - bbox_1[0], bbox_3[1] - bbox_1[0]
intersect_mask = expected_mask[k0:k1, j0:j1, i0:i1]
intersect_mask_1d = intersect_mask.reshape(-1)
intersect_mask_1d[ind] = 0
intersect_mask[:, :] = intersect_mask_1d.reshape(intersect_mask.shape)
expected_mask[k0:k1, j0:j1, i0:i1] = intersect_mask
# Check the masks are the same
calculated_mask = r1.mask.as_numpy_array()
assert np.all(calculated_mask == expected_mask)
| null |
5,379 |
from copy import deepcopy
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from django.test.utils import isolate_apps
from djangocms_text_ckeditor.cms_plugins import TextPlugin
from cms.api import add_plugin
from cms.models.placeholdermodel import Placeholder
from cms.models.pluginmodel import CMSPlugin
from cms.test_utils.project.extensionapp.models import MyPageExtension
from cms.test_utils.project.pluginapp.plugins.manytomany_rel.models import ArticlePluginModel
from cms.utils.check import FileOutputWrapper, FileSectionWrapper, check
class TestOutput(FileOutputWrapper):
def __init__(self):
super().__init__(None, None)
self.section_wrapper = TestSectionOutput
def write(self, message):
pass
def write_stderr(self, message):
pass
class TestSectionOutput(FileSectionWrapper):
def write(self, message):
pass
def write_stderr(self, message):
pass
class CheckAssertMixin:
def assertCheck(self, successful, **assertions):
"""
        Asserts whether the checks succeed or fail.
        ``assertions`` is a mapping of expected counts to check (e.g. ``successes=5``).
"""
output = TestOutput()
check(output)
self.assertEqual(output.successful, successful)
for key, value in assertions.items():
self.assertEqual(getattr(output, key), value, f"{value} {key} expected, got {getattr(output, key)}")
class CheckTests(CheckAssertMixin, TestCase):
def test_test_confs(self):
self.assertCheck(True, errors=0, warnings=0)
def test_no_sekizai(self):
apps = list(settings.INSTALLED_APPS)
apps.remove('sekizai')
with self.settings(INSTALLED_APPS=apps):
self.assertCheck(False, errors=1)
def test_no_cms_settings_context_processor(self):
override = {'TEMPLATES': deepcopy(settings.TEMPLATES)}
override['TEMPLATES'][0]['OPTIONS']['context_processors'] = ['sekizai.context_processors.sekizai']
with self.settings(**override):
self.assertCheck(False, errors=1)
def test_no_sekizai_template_context_processor(self):
override = {'TEMPLATES': deepcopy(settings.TEMPLATES)}
override['TEMPLATES'][0]['OPTIONS']['context_processors'] = ['cms.context_processors.cms_settings']
with self.settings(**override):
self.assertCheck(False, errors=2)
def test_old_style_i18n_settings(self):
with self.settings(CMS_LANGUAGES=[('en', 'English')]):
self.assertRaises(ImproperlyConfigured, self.assertCheck, True, warnings=1, errors=0)
def METHOD_NAME(self):
MIDDLEWARE = [
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.http.ConditionalGetMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'cms.middleware.page.CurrentPageMiddleware',
'cms.middleware.toolbar.ToolbarMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware',
]
self.assertCheck(True, warnings=0, errors=0)
with self.settings(MIDDLEWARE=MIDDLEWARE):
self.assertCheck(False, warnings=0, errors=2)
def test_copy_relations_fk_check(self):
"""
this is ugly, feel free to come up with a better test
"""
self.assertCheck(True, warnings=0, errors=0)
copy_rel = ArticlePluginModel.copy_relations
del ArticlePluginModel.copy_relations
self.assertCheck(True, warnings=2, errors=0)
ArticlePluginModel.copy_relations = copy_rel
def test_copy_relations_on_page_extension(self):
"""
Agreed. It is ugly, but it works.
"""
self.assertCheck(True, warnings=0, errors=0)
copy_rel = MyPageExtension.copy_relations
del MyPageExtension.copy_relations
self.assertCheck(True, warnings=1, errors=0)
MyPageExtension.copy_relations = copy_rel
def test_non_numeric_site_id(self):
self.assertCheck(True, warnings=0, errors=0)
with self.settings(SITE_ID='broken'):
self.assertCheck(False, warnings=0, errors=1)
@isolate_apps("test_app")
def test_placeholder_field(self):
from django.contrib import admin
from django.db import models
from cms.models.fields import PlaceholderField
class ModelTest(models.Model):
field_a = PlaceholderField(slotname="test")
admin.site.register(ModelTest)
self.assertCheck(False, warnings=0, errors=1)
admin.site.unregister(ModelTest)
class CheckWithDatabaseTests(CheckAssertMixin, TestCase):
def test_check_plugin_instances(self):
self.assertCheck(True, warnings=0, errors=0 )
placeholder = Placeholder.objects.create(slot="test")
add_plugin(placeholder, TextPlugin, "en", body="en body")
add_plugin(placeholder, TextPlugin, "en", body="en body")
add_plugin(placeholder, "LinkPlugin", "en",
name="A Link", external_link="https://www.django-cms.org")
# create a CMSPlugin with an unsaved instance
instanceless_plugin = CMSPlugin(language="en", plugin_type="TextPlugin")
instanceless_plugin.save()
self.assertCheck(False, warnings=0, errors=2)
# create a bogus CMSPlugin to simulate one which used to exist but
# is no longer installed
bogus_plugin = CMSPlugin(language="en", plugin_type="BogusPlugin")
bogus_plugin.save()
self.assertCheck(False, warnings=0, errors=3)
| null |
5,380 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantid workbench.
# std imports
# 3rd party imports
import mantid.api
from qtpy.QtCore import Qt, QTimer, Signal
from qtpy.QtWidgets import QHBoxLayout, QSplitter, QWidget
# local imports
from mantidqt.widgets.sliceviewer.views.dataview import SliceViewerDataView
from mantidqt.widgets.sliceviewer.peaksviewer.workspaceselection import (
PeaksWorkspaceSelectorModel,
PeaksWorkspaceSelectorPresenter,
PeaksWorkspaceSelectorView,
)
from mantidqt.widgets.sliceviewer.peaksviewer.view import PeaksViewerCollectionView
from mantidqt.widgets.sliceviewer.peaksviewer.representation.painter import MplPainter
# Constants
from mantidqt.widgets.observers.observing_view import ObservingView
class SliceViewerView(QWidget, ObservingView):
"""Combines the data view for the slice viewer with the optional peaks viewer."""
close_signal = Signal()
rename_signal = Signal(str)
def __init__(self, presenter, dims_info, can_normalise, parent=None, window_flags=Qt.Window, conf=None):
super().__init__(parent)
self.presenter = presenter
self.setWindowFlags(window_flags)
self.setAttribute(Qt.WA_DeleteOnClose, True)
self._splitter = QSplitter(self)
self._data_view = SliceViewerDataView(presenter, dims_info, can_normalise, self, conf)
self._splitter.addWidget(self._data_view)
self._splitter.setCollapsible(0, False)
self._splitter.splitterMoved.connect(self._data_view.on_resize)
# peaks viewer off by default
self._peaks_view = None
# config the splitter appearance
splitterStyleStr = """QSplitter::handle{
border: 1px solid gray;
min-height: 10px;
max-height: 20px;
}"""
self._splitter.setStyleSheet(splitterStyleStr)
self._splitter.setHandleWidth(1)
layout = QHBoxLayout(self)
layout.setContentsMargins(0, 0, 0, 0)
layout.addWidget(self._splitter)
self.setLayout(layout)
self.refresh_queued = False
# connect up additional peaks signals
self.METHOD_NAME.mpl_toolbar.peaksOverlayClicked.connect(self.peaks_overlay_clicked)
self.METHOD_NAME.mpl_toolbar.nonAlignedCutsClicked.connect(self.non_axis_aligned_cuts_clicked)
self.close_signal.connect(self._run_close)
self.rename_signal.connect(self._on_rename)
@property
def METHOD_NAME(self):
return self._data_view
@property
def dimensions(self):
return self._data_view.dimensions
@property
def peaks_view(self) -> PeaksViewerCollectionView:
"""Lazily instantiates PeaksViewer and returns it"""
if self._peaks_view is None:
self._peaks_view = PeaksViewerCollectionView(MplPainter(self.METHOD_NAME), self.presenter)
self.add_widget_to_splitter(self._peaks_view)
return self._peaks_view
def add_widget_to_splitter(self, widget):
self._splitter.addWidget(widget)
widget_index = self._splitter.indexOf(widget)
self._splitter.setCollapsible(widget_index, False)
self._data_view.on_resize()
def peaks_overlay_clicked(self):
"""Peaks overlay button has been toggled"""
self.presenter.overlay_peaks_workspaces()
def non_axis_aligned_cuts_clicked(self, state):
self.presenter.non_axis_aligned_cut(state)
def query_peaks_to_overlay(self, current_overlayed_names):
"""Display a dialog to the user to ask which peaks to overlay
:param current_overlayed_names: A list of names that are currently overlayed
:returns: A list of workspace names to overlay on the display
"""
model = PeaksWorkspaceSelectorModel(mantid.api.AnalysisDataService.Instance(), checked_names=current_overlayed_names)
view = PeaksWorkspaceSelectorView(self)
presenter = PeaksWorkspaceSelectorPresenter(view, model)
return presenter.select_peaks_workspaces()
def set_peaks_viewer_visible(self, on):
"""
Set the visibility of the PeaksViewer.
:param on: If True make the view visible, else make it invisible
:return: The PeaksViewerCollectionView
"""
self.peaks_view.set_visible(on)
def delayed_refresh(self):
"""Post an event to the event loop that causes the view to
update on the next cycle"""
if not self.refresh_queued:
self.refresh_queued = True
QTimer.singleShot(0, self.presenter.refresh_view)
def close(self):
self.presenter.notify_close()
super().close()
def _run_close(self):
# handles the signal emitted from ObservingView.emit_close
self.close()
def _on_rename(self, new_title: str):
self.setWindowTitle(new_title)
| null |
5,381 |
from sympy.core.numbers import Float
from sympy.core.symbol import Dummy
from sympy.utilities.lambdify import lambdify
import math
def is_valid(x):
"""Check if a floating point number is valid"""
if x is None:
return False
if isinstance(x, complex):
return False
return not math.isinf(x) and not math.isnan(x)
def rescale(y, W, H, mi, ma):
"""Rescale the given array `y` to fit into the integer values
between `0` and `H-1` for the values between ``mi`` and ``ma``.
"""
y_new = []
norm = ma - mi
offset = (ma + mi) / 2
for x in range(W):
if is_valid(y[x]):
normalized = (y[x] - offset) / norm
if not is_valid(normalized):
y_new.append(None)
else:
rescaled = Float((normalized*H + H/2) * (H-1)/H).round()
rescaled = int(rescaled)
y_new.append(rescaled)
else:
y_new.append(None)
return y_new
def METHOD_NAME(start, stop, num):
return [start + (stop - start) * x / (num-1) for x in range(num)]
def textplot_str(expr, a, b, W=55, H=21):
"""Generator for the lines of the plot"""
free = expr.free_symbols
if len(free) > 1:
raise ValueError(
"The expression must have a single variable. (Got {})"
.format(free))
x = free.pop() if free else Dummy()
f = lambdify([x], expr)
a = float(a)
b = float(b)
# Calculate function values
x = METHOD_NAME(a, b, W)
y = []
for val in x:
try:
y.append(f(val))
# Not sure what exceptions to catch here or why...
except (ValueError, TypeError, ZeroDivisionError):
y.append(None)
# Normalize height to screen space
y_valid = list(filter(is_valid, y))
if y_valid:
ma = max(y_valid)
mi = min(y_valid)
if ma == mi:
if ma:
mi, ma = sorted([0, 2*ma])
else:
mi, ma = -1, 1
else:
mi, ma = -1, 1
y_range = ma - mi
precision = math.floor(math.log(y_range, 10)) - 1
precision *= -1
mi = round(mi, precision)
ma = round(ma, precision)
y = rescale(y, W, H, mi, ma)
y_bins = METHOD_NAME(mi, ma, H)
# Draw plot
margin = 7
for h in range(H - 1, -1, -1):
s = [' '] * W
for i in range(W):
if y[i] == h:
if (i == 0 or y[i - 1] == h - 1) and (i == W - 1 or y[i + 1] == h + 1):
s[i] = '/'
elif (i == 0 or y[i - 1] == h + 1) and (i == W - 1 or y[i + 1] == h - 1):
s[i] = '\\'
else:
s[i] = '.'
if h == 0:
for i in range(W):
s[i] = '_'
# Print y values
if h in (0, H//2, H - 1):
prefix = ("%g" % y_bins[h]).rjust(margin)[:margin]
else:
prefix = " "*margin
s = "".join(s)
if h == H//2:
s = s.replace(" ", "-")
yield prefix + " |" + s
# Print x values
bottom = " " * (margin + 2)
bottom += ("%g" % x[0]).ljust(W//2)
if W % 2 == 1:
bottom += ("%g" % x[W//2]).ljust(W//2)
else:
bottom += ("%g" % x[W//2]).ljust(W//2-1)
bottom += "%g" % x[-1]
yield bottom
def textplot(expr, a, b, W=55, H=21):
r"""
Print a crude ASCII art plot of the SymPy expression 'expr' (which
should contain a single symbol, e.g. x or something else) over the
interval [a, b].
Examples
========
>>> from sympy import Symbol, sin
>>> from sympy.plotting import textplot
>>> t = Symbol('t')
>>> textplot(sin(t)*t, 0, 15)
14 | ...
| .
| .
| .
| .
| ...
| / . .
| /
| / .
| . . .
1.5 |----.......--------------------------------------------
|.... \ . .
| \ / .
| .. / .
| \ / .
| ....
| .
| . .
|
| . .
-11 |_______________________________________________________
0 7.5 15
"""
for line in textplot_str(expr, a, b, W, H):
print(line)
| null |
5,382 |
import asyncio
import os
from functools import partial
from typing import Callable, Coroutine, List, Union
from git.objects.commit import Commit
from opal_common.logger import logger
OnNewPolicyCallback = Callable[[Commit, Commit], Coroutine]
OnPolicyFailureCallback = Callable[[Exception], Coroutine]
class BasePolicySource:
"""Base class to support git and api policy source.
Args:
remote_source_url(str): the base address to request the policy from
local_clone_path(str): path for the local git to manage policies
polling_interval(int): how many seconds need to wait between polling
"""
def __init__(
self,
remote_source_url: str,
local_clone_path: str,
polling_interval: int = 0,
):
        self._on_failure_callbacks: List[OnPolicyFailureCallback] = []
        self._on_new_policy_callbacks: List[OnNewPolicyCallback] = []
self._polling_interval = polling_interval
self._polling_task = None
self.remote_source_url = remote_source_url
self.local_clone_path = os.path.expanduser(local_clone_path)
def add_on_new_policy_callback(self, callback: OnNewPolicyCallback):
"""Register a callback that will be called when new policy are detected
on the monitored repo (after a pull)."""
self._on_new_policy_callbacks.append(callback)
def add_on_failure_callback(self, callback: OnPolicyFailureCallback):
"""Register a callback that will be called when failure occurred."""
self._on_failure_callbacks.append(callback)
async def get_initial_policy_state_from_remote(self):
"""init remote data to local repo."""
raise NotImplementedError()
async def check_for_changes(self):
"""trigger check for policy change."""
raise NotImplementedError()
async def run(self):
"""potentially starts the polling task."""
await self.get_initial_policy_state_from_remote()
if self._polling_interval > 0:
logger.info(
"Launching polling task, interval: {interval} seconds",
interval=self._polling_interval,
)
self._start_polling_task(self.check_for_changes)
else:
logger.info("Polling task is off")
async def stop(self):
return await self._stop_polling_task()
def _start_polling_task(self, polling_task):
if self._polling_task is None and self._polling_interval > 0:
self._polling_task = asyncio.create_task(self._do_polling(polling_task))
async def _do_polling(self, polling_task):
"""optional task to periodically check the remote for changes (git pull
and compare hash)."""
while True:
try:
await polling_task()
except Exception as ex:
logger.error(
"Error occurred during polling task {task}: {err}",
task=polling_task.__name__,
err=ex,
)
await asyncio.sleep(self._polling_interval)
async def _stop_polling_task(self):
if self._polling_task is not None:
self._polling_task.cancel()
try:
await self._polling_task
except asyncio.CancelledError:
pass
async def _on_new_policy(self, old: Commit, new: Commit):
"""triggers callbacks registered with on_new_policy()."""
await self._run_callbacks(self._on_new_policy_callbacks, old, new)
async def _on_failed(self, exc: Exception):
"""will be triggered if a failure occurred.
triggers callbacks registered with on_git_failed().
"""
await self._run_callbacks(self._on_failure_callbacks, exc)
async def _run_callbacks(self, handlers, *args, **kwargs):
"""triggers a list of callbacks."""
await asyncio.gather(*(callback(*args, **kwargs) for callback in handlers))
async def METHOD_NAME(self, exc: Exception):
"""will be triggered if a git failure occurred (i.e: repo does not
exist, can't clone, etc).
triggers callbacks registered with on_git_failed().
"""
await self._run_callbacks(self._on_failure_callbacks, exc)
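# Illustrative sketch only (not part of OPAL): a minimal concrete source that
# shows which hooks a subclass is expected to implement.  Real sources would
# clone a git repo or fetch an API bundle instead of the no-ops below.
class _ExampleInMemoryPolicySource(BasePolicySource):
    async def get_initial_policy_state_from_remote(self):
        # a real implementation clones the repo / downloads the policy bundle
        self._current_state = None
    async def check_for_changes(self):
        # a real implementation pulls, compares commits and, on a difference,
        # calls: await self._on_new_policy(old_commit, new_commit)
        pass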
| null |
5,383 |
import errno
import os
import sys
import textwrap
import unittest
import subprocess
from test import support
from test.support import os_helper
from test.support.script_helper import assert_python_ok
class TestTool(unittest.TestCase):
data = """
[["blorpie"],[ "whoops" ] , [
],\t"d-shtaeou",\r"d-nthiouh",
"i-vhbjkhnth", {"nifty":87}, {"morefield" :\tfalse,"field"
:"yes"} ]
"""
expect_without_sort_keys = textwrap.dedent("""\
[
[
"blorpie"
],
[
"whoops"
],
[],
"d-shtaeou",
"d-nthiouh",
"i-vhbjkhnth",
{
"nifty": 87
},
{
"field": "yes",
"morefield": false
}
]
""")
expect = textwrap.dedent("""\
[
[
"blorpie"
],
[
"whoops"
],
[],
"d-shtaeou",
"d-nthiouh",
"i-vhbjkhnth",
{
"nifty": 87
},
{
"morefield": false,
"field": "yes"
}
]
""")
jsonlines_raw = textwrap.dedent("""\
{"ingredients":["frog", "water", "chocolate", "glucose"]}
{"ingredients":["chocolate","steel bolts"]}
""")
jsonlines_expect = textwrap.dedent("""\
{
"ingredients": [
"frog",
"water",
"chocolate",
"glucose"
]
}
{
"ingredients": [
"chocolate",
"steel bolts"
]
}
""")
def test_stdin_stdout(self):
args = sys.executable, '-m', 'json.tool'
process = subprocess.run(args, input=self.data, capture_output=True, text=True, check=True)
self.assertEqual(process.stdout, self.expect)
self.assertEqual(process.stderr, '')
def METHOD_NAME(self, data=None):
infile = os_helper.TESTFN
with open(infile, "w", encoding="utf-8") as fp:
self.addCleanup(os.remove, infile)
fp.write(data or self.data)
return infile
def test_infile_stdout(self):
infile = self.METHOD_NAME()
rc, out, err = assert_python_ok('-m', 'json.tool', infile)
self.assertEqual(rc, 0)
self.assertEqual(out.splitlines(), self.expect.encode().splitlines())
self.assertEqual(err, b'')
def test_non_ascii_infile(self):
data = '{"msg": "\u3053\u3093\u306b\u3061\u306f"}'
expect = textwrap.dedent('''\
{
"msg": "\\u3053\\u3093\\u306b\\u3061\\u306f"
}
''').encode()
infile = self.METHOD_NAME(data)
rc, out, err = assert_python_ok('-m', 'json.tool', infile)
self.assertEqual(rc, 0)
self.assertEqual(out.splitlines(), expect.splitlines())
self.assertEqual(err, b'')
def test_infile_outfile(self):
infile = self.METHOD_NAME()
outfile = os_helper.TESTFN + '.out'
rc, out, err = assert_python_ok('-m', 'json.tool', infile, outfile)
self.addCleanup(os.remove, outfile)
with open(outfile, "r", encoding="utf-8") as fp:
self.assertEqual(fp.read(), self.expect)
self.assertEqual(rc, 0)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
def test_writing_in_place(self):
infile = self.METHOD_NAME()
rc, out, err = assert_python_ok('-m', 'json.tool', infile, infile)
with open(infile, "r", encoding="utf-8") as fp:
self.assertEqual(fp.read(), self.expect)
self.assertEqual(rc, 0)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
def test_jsonlines(self):
args = sys.executable, '-m', 'json.tool', '--json-lines'
process = subprocess.run(args, input=self.jsonlines_raw, capture_output=True, text=True, check=True)
self.assertEqual(process.stdout, self.jsonlines_expect)
self.assertEqual(process.stderr, '')
def test_help_flag(self):
rc, out, err = assert_python_ok('-m', 'json.tool', '-h')
self.assertEqual(rc, 0)
self.assertTrue(out.startswith(b'usage: '))
self.assertEqual(err, b'')
def test_sort_keys_flag(self):
infile = self.METHOD_NAME()
rc, out, err = assert_python_ok('-m', 'json.tool', '--sort-keys', infile)
self.assertEqual(rc, 0)
self.assertEqual(out.splitlines(),
self.expect_without_sort_keys.encode().splitlines())
self.assertEqual(err, b'')
def test_indent(self):
input_ = '[1, 2]'
expect = textwrap.dedent('''\
[
1,
2
]
''')
args = sys.executable, '-m', 'json.tool', '--indent', '2'
process = subprocess.run(args, input=input_, capture_output=True, text=True, check=True)
self.assertEqual(process.stdout, expect)
self.assertEqual(process.stderr, '')
def test_no_indent(self):
input_ = '[1,\n2]'
expect = '[1, 2]\n'
args = sys.executable, '-m', 'json.tool', '--no-indent'
process = subprocess.run(args, input=input_, capture_output=True, text=True, check=True)
self.assertEqual(process.stdout, expect)
self.assertEqual(process.stderr, '')
def test_tab(self):
input_ = '[1, 2]'
expect = '[\n\t1,\n\t2\n]\n'
args = sys.executable, '-m', 'json.tool', '--tab'
process = subprocess.run(args, input=input_, capture_output=True, text=True, check=True)
self.assertEqual(process.stdout, expect)
self.assertEqual(process.stderr, '')
def test_compact(self):
input_ = '[ 1 ,\n 2]'
expect = '[1,2]\n'
args = sys.executable, '-m', 'json.tool', '--compact'
process = subprocess.run(args, input=input_, capture_output=True, text=True, check=True)
self.assertEqual(process.stdout, expect)
self.assertEqual(process.stderr, '')
def test_no_ensure_ascii_flag(self):
infile = self.METHOD_NAME('{"key":"💩"}')
outfile = os_helper.TESTFN + '.out'
self.addCleanup(os.remove, outfile)
assert_python_ok('-m', 'json.tool', '--no-ensure-ascii', infile, outfile)
with open(outfile, "rb") as f:
lines = f.read().splitlines()
# asserting utf-8 encoded output file
expected = [b'{', b' "key": "\xf0\x9f\x92\xa9"', b"}"]
self.assertEqual(lines, expected)
def test_ensure_ascii_default(self):
infile = self.METHOD_NAME('{"key":"💩"}')
outfile = os_helper.TESTFN + '.out'
self.addCleanup(os.remove, outfile)
assert_python_ok('-m', 'json.tool', infile, outfile)
with open(outfile, "rb") as f:
lines = f.read().splitlines()
# asserting an ascii encoded output file
expected = [b'{', rb' "key": "\ud83d\udca9"', b"}"]
self.assertEqual(lines, expected)
    @unittest.skipIf(sys.platform == "win32", "The test fails with ValueError on Windows")
def test_broken_pipe_error(self):
cmd = [sys.executable, '-m', 'json.tool']
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE)
# bpo-39828: Closing before json.tool attempts to write into stdout.
proc.stdout.close()
proc.communicate(b'"{}"')
self.assertEqual(proc.returncode, errno.EPIPE)
| null |
5,384 |
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import base64
import json
import re
import sys
import time
import urllib2
import urlparse
def read_task_file(args):
with open(args.file, 'r') as f:
contents = f.read()
# We don't use the parsed data, but we want to throw early if it's invalid
try:
json.loads(contents)
except Exception, e:
sys.stderr.write('Invalid JSON in task file "{0}": {1}\n'.format(args.file, repr(e)))
sys.exit(1)
return contents
def add_basic_auth_header(args, req):
if (args.user is not None):
basic_auth_encoded = base64.b64encode('%s:%s' % (args.user, args.password))
req.add_header("Authorization", "Basic %s" % basic_auth_encoded)
# Keep trying until timeout_at, maybe die then
def post_task(args, task_json, timeout_at):
try:
url = args.url.rstrip("/") + "/druid/indexer/v1/task"
req = urllib2.Request(url, task_json, {'Content-Type' : 'application/json'})
add_basic_auth_header(args, req)
timeleft = timeout_at - time.time()
response_timeout = min(max(timeleft, 5), 10)
response = urllib2.urlopen(req, None, response_timeout)
return response.read().rstrip()
except urllib2.URLError as e:
if isinstance(e, urllib2.HTTPError) and e.code >= 400 and e.code <= 500:
# 4xx (problem with the request) or 500 (something wrong on the server)
raise_friendly_error(e)
elif time.time() >= timeout_at:
            # No further retries
raise_friendly_error(e)
elif isinstance(e, urllib2.HTTPError) and e.code in [301, 302, 303, 305, 307] and \
e.info().getheader("Location") is not None:
# Set the new location in args.url so it can be used by await_task_completion and re-issue the request
location = urlparse.urlparse(e.info().getheader("Location"))
args.url = "{0}://{1}".format(location.scheme, location.netloc)
sys.stderr.write("Redirect response received, setting url to [{0}]\n".format(args.url))
return post_task(args, task_json, timeout_at)
else:
# If at first you don't succeed, try, try again!
sleep_time = 5
if not args.quiet:
extra = ''
if hasattr(e, 'read'):
extra = e.read().rstrip()
sys.stderr.write("Waiting up to {0}s for indexing service [{1}] to become available. [Got: {2} {3}]".format(max(sleep_time, int(timeout_at - time.time())), args.url, str(e), extra).rstrip())
sys.stderr.write("\n")
time.sleep(sleep_time)
return post_task(args, task_json, timeout_at)
# Keep trying until timeout_at, maybe die then
def METHOD_NAME(args, task_id, timeout_at):
while True:
url = args.url.rstrip("/") + "/druid/indexer/v1/task/{0}/status".format(task_id)
req = urllib2.Request(url)
add_basic_auth_header(args, req)
timeleft = timeout_at - time.time()
response_timeout = min(max(timeleft, 5), 10)
response = urllib2.urlopen(req, None, response_timeout)
response_obj = json.loads(response.read())
response_status_code = response_obj["status"]["statusCode"]
if response_status_code in ['SUCCESS', 'FAILED']:
return response_status_code
else:
if time.time() < timeout_at:
if not args.quiet:
sys.stderr.write("Task {0} still running...\n".format(task_id))
timeleft = timeout_at - time.time()
time.sleep(min(5, timeleft))
else:
raise Exception("Task {0} did not finish in time!".format(task_id))
def raise_friendly_error(e):
if isinstance(e, urllib2.HTTPError):
text = e.read().strip()
reresult = re.search(r'<pre>(.*?)</pre>', text, re.DOTALL)
if reresult:
text = reresult.group(1).strip()
raise Exception("HTTP Error {0}: {1}, check overlord log for more details.\n{2}".format(e.code, e.reason, text))
raise e
def await_load_completion(args, datasource, timeout_at):
while True:
url = args.coordinator_url.rstrip("/") + "/druid/coordinator/v1/loadstatus"
req = urllib2.Request(url)
add_basic_auth_header(args, req)
timeleft = timeout_at - time.time()
response_timeout = min(max(timeleft, 5), 10)
response = urllib2.urlopen(req, None, response_timeout)
response_obj = json.loads(response.read())
load_status = response_obj.get(datasource, 0.0)
if load_status >= 100.0:
sys.stderr.write("{0} loading complete! You may now query your data\n".format(datasource))
return
else:
if time.time() < timeout_at:
if not args.quiet:
sys.stderr.write("{0} is {1}% finished loading...\n".format(datasource, load_status))
timeleft = timeout_at - time.time()
time.sleep(min(5, timeleft))
else:
raise Exception("{0} was not loaded in time!".format(datasource))
def main():
parser = argparse.ArgumentParser(description='Post Druid indexing tasks.')
parser.add_argument('--url', '-u', metavar='url', type=str, default='http://localhost:8090/', help='Druid Overlord url')
parser.add_argument('--coordinator-url', type=str, default='http://localhost:8081/', help='Druid Coordinator url')
parser.add_argument('--file', '-f', type=str, required=True, help='Query JSON file')
parser.add_argument('--submit-timeout', type=int, default=120, help='Timeout (in seconds) for submitting tasks')
parser.add_argument('--complete-timeout', type=int, default=14400, help='Timeout (in seconds) for completing tasks')
parser.add_argument('--load-timeout', type=int, default=14400, help='Timeout (in seconds) for waiting for tasks to load')
parser.add_argument('--quiet', '-q', action='store_true', help='Suppress retryable errors')
parser.add_argument('--user', type=str, default=None, help='Basic auth username')
parser.add_argument('--password', type=str, default=None, help='Basic auth password')
args = parser.parse_args()
submit_timeout_at = time.time() + args.submit_timeout
complete_timeout_at = time.time() + args.complete_timeout
task_contents = read_task_file(args)
task_json = json.loads(task_contents)
if task_json['type'] == "compact":
datasource = task_json['dataSource']
else:
datasource = task_json["spec"]["dataSchema"]["dataSource"]
sys.stderr.write("Beginning indexing data for {0}\n".format(datasource))
task_id = json.loads(post_task(args, task_contents, submit_timeout_at))["task"]
sys.stderr.write('\033[1m' + "Task started: " + '\033[0m' + "{0}\n".format(task_id))
sys.stderr.write('\033[1m' + "Task log: " + '\033[0m' + "{0}/druid/indexer/v1/task/{1}/log\n".format(args.url.rstrip("/"),task_id))
sys.stderr.write('\033[1m' + "Task status: " + '\033[0m' + "{0}/druid/indexer/v1/task/{1}/status\n".format(args.url.rstrip("/"),task_id))
task_status = METHOD_NAME(args, task_id, complete_timeout_at)
sys.stderr.write("Task finished with status: {0}\n".format(task_status))
if task_status != 'SUCCESS':
sys.exit(1)
sys.stderr.write("Completed indexing data for {0}. Now loading indexed data onto the cluster...\n".format(datasource))
load_timeout_at = time.time() + args.load_timeout
await_load_completion(args, datasource, load_timeout_at)
try:
main()
except KeyboardInterrupt:
sys.exit(1)
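# Example invocation (script name, host names and file name are hypothetical;
# the flags match the argparse definitions above):
#   python post-index-task.py --file my_task.json --url http://overlord:8090 \
#       --coordinator-url http://coordinator:8081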
| null |
5,385 |
"""
pyexcel.core
~~~~~~~~~~~~~~~~~~~
A list of pyexcel signature functions
:copyright: (c) 2015-2022 by Onni Software Ltd.
:license: New BSD License
"""
import re
from pyexcel_io import manager
from pyexcel import constants
from pyexcel import docstrings as docs
from pyexcel.book import Book, to_book
from pyexcel.sheet import Sheet
from pyexcel._compact import OrderedDict, append_doc, zip_longest
from pyexcel.internal import core as sources
STARTS_WITH_DEST = "^dest_(.*)"
SAVE_AS_EXCEPTION = (
"This function does not accept parameters for "
+ "pyexce.Sheet. Please use pyexcel.save_as instead."
)
@append_doc(docs.GET_SHEET)
def get_sheet(**keywords):
"""
Get an instance of :class:`Sheet` from an excel source
"""
sheet_params = {}
for field in constants.VALID_SHEET_PARAMETERS:
if field in keywords:
sheet_params[field] = keywords.pop(field)
named_content = sources.get_sheet_stream(**keywords)
sheet = Sheet(named_content.payload, named_content.name, **sheet_params)
return sheet
@append_doc(docs.GET_BOOK)
def get_book(**keywords):
"""
Get an instance of :class:`Book` from an excel source
"""
book_stream = sources.get_book_stream(**keywords)
book = Book(
book_stream.to_dict(),
filename=book_stream.filename,
path=book_stream.path,
)
return book
@append_doc(docs.IGET_BOOK)
def iget_book(**keywords):
"""
Get an instance of :class:`BookStream` from an excel source
First use case is to get all sheet names without extracting
the sheets into memory.
"""
return sources.get_book_stream(on_demand=True, **keywords)
@append_doc(docs.SAVE_AS)
def save_as(**keywords):
"""
Save a sheet from a data source to another one
"""
dest_keywords, source_keywords = _split_keywords(**keywords)
sheet_params = {}
for field in constants.VALID_SHEET_PARAMETERS:
if field in source_keywords:
sheet_params[field] = source_keywords.pop(field)
sheet_stream = sources.get_sheet_stream(**source_keywords)
output_sheet_name = sheet_stream.name
if "sheet_name" in dest_keywords:
output_sheet_name = dest_keywords["sheet_name"]
sheet = Sheet(sheet_stream.payload, output_sheet_name, **sheet_params)
return sources.save_sheet(sheet, **dest_keywords)
@append_doc(docs.ISAVE_AS)
def isave_as(**keywords):
"""
Save a sheet from a data source to another one with less memory
It is similar to :meth:`pyexcel.save_as` except that it does
    not accept parameters for :class:`pyexcel.Sheet`. And it reads
when it writes.
"""
dest_keywords, source_keywords = _split_keywords(**keywords)
for field in constants.VALID_SHEET_PARAMETERS:
if field in source_keywords:
raise Exception(SAVE_AS_EXCEPTION)
sheet = sources.get_sheet_stream(on_demand=True, **source_keywords)
if "sheet_name" in dest_keywords:
sheet.name = dest_keywords["sheet_name"]
return sources.save_sheet(sheet, **dest_keywords)
@append_doc(docs.SAVE_BOOK_AS)
def save_book_as(**keywords):
"""
Save a book from a data source to another one
"""
dest_keywords, source_keywords = _split_keywords(**keywords)
book = sources.get_book_stream(**source_keywords)
book = to_book(book)
return sources.save_book(book, **dest_keywords)
@append_doc(docs.ISAVE_BOOK_AS)
def isave_book_as(**keywords):
"""
Save a book from a data source to another one
    It is similar to :meth:`pyexcel.save_book_as` but it reads
    when it writes. This function provides some speedup but
the output data is not made uniform.
"""
dest_keywords, source_keywords = _split_keywords(**keywords)
book = sources.get_book_stream(on_demand=True, **source_keywords)
return sources.save_book(book, **dest_keywords)
@append_doc(docs.GET_ARRAY)
def get_array(**keywords):
"""
Obtain an array from an excel source
It accepts the same parameters as :meth:`~pyexcel.get_sheet`
    but returns an array instead.
"""
sheet = get_sheet(**keywords)
return sheet.to_array()
@append_doc(docs.GET_DICT)
def get_dict(name_columns_by_row=0, **keywords):
"""
Obtain a dictionary from an excel source
It accepts the same parameters as :meth:`~pyexcel.get_sheet`
    but returns a dictionary instead.
Specifically:
name_columns_by_row : specify a row to be a dictionary key.
It is default to 0 or first row.
If you would use a column index 0 instead, you should do::
get_dict(name_columns_by_row=-1, name_rows_by_column=0)
"""
sheet = get_sheet(name_columns_by_row=name_columns_by_row, **keywords)
return sheet.to_dict()
@append_doc(docs.GET_RECORDS)
def get_records(name_columns_by_row=0, **keywords):
"""
Obtain a list of records from an excel source
It accepts the same parameters as :meth:`~pyexcel.get_sheet`
    but returns a list of dictionaries (records) instead.
Specifically:
name_columns_by_row : specify a row to be a dictionary key.
It is default to 0 or first row.
If you would use a column index 0 instead, you should do::
get_records(name_columns_by_row=-1, name_rows_by_column=0)
"""
sheet = get_sheet(name_columns_by_row=name_columns_by_row, **keywords)
return list(sheet.to_records())
@append_doc(docs.IGET_ARRAY)
def METHOD_NAME(**keywords):
"""
Obtain a generator of an two dimensional array from an excel source
    It is similar to :meth:`pyexcel.get_array` but it has a smaller memory
footprint.
"""
sheet_stream = sources.get_sheet_stream(on_demand=True, **keywords)
return sheet_stream.payload
@append_doc(docs.IGET_RECORDS)
def iget_records(custom_headers=None, **keywords):
"""
Obtain a generator of a list of records from an excel source
    It is similar to :meth:`pyexcel.get_records` but it has a smaller memory
    footprint and requires the headers to be in the first row. And the
data matrix should be of equal length. It should consume less memory
and should work well with large files.
"""
sheet_stream = sources.get_sheet_stream(on_demand=True, **keywords)
headers = None
for row_index, row in enumerate(sheet_stream.payload):
if row_index == 0:
headers = row
else:
if custom_headers:
# custom order
tmp_dict = dict(
zip_longest(headers, row, fillvalue=constants.DEFAULT_NA)
)
ordered_dict = OrderedDict()
for name in custom_headers:
ordered_dict[name] = tmp_dict[name]
yield ordered_dict
else:
# default order
yield OrderedDict(
zip_longest(headers, row, fillvalue=constants.DEFAULT_NA)
)
@append_doc(docs.GET_BOOK_DICT)
def get_book_dict(**keywords):
"""
Obtain a dictionary of two dimensional arrays
It accepts the same parameters as :meth:`~pyexcel.get_book`
    but returns a dictionary instead.
"""
book = get_book(**keywords)
return book.to_dict()
def get_io_type(file_type):
"""
Return the io stream types, string or bytes
"""
io_type = manager.get_io_type(file_type)
if io_type is None:
io_type = "string"
return io_type
def _split_keywords(**keywords):
dest_keywords = {}
source_keywords = {}
for key, value in keywords.items():
result = re.match(STARTS_WITH_DEST, key)
if result:
parameter = result.group(1)
dest_keywords[parameter] = value
else:
source_keywords[key] = value
return dest_keywords, source_keywords
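# For illustration (keyword values are made up): _split_keywords(
#     file_name="in.csv", dest_file_name="out.xlsx")
# returns ({"file_name": "out.xlsx"}, {"file_name": "in.csv"}) -- the "dest_"
# prefix routes a keyword to the destination side of save_as/save_book_as,
# everything else stays with the source.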
| null |
5,386 |
import re
import logging
log = logging.getLogger("bbot.helpers.cloud.provider")
class BaseCloudProvider:
domains = []
regexes = {}
def __init__(self, parent_helper):
self.parent_helper = parent_helper
self.name = str(self.__class__.__name__).lower()
self.dummy_module = self.parent_helper._make_dummy_module(f"{self.name}_cloud", _type="scan")
self.bucket_name_regex = re.compile("^" + self.bucket_name_regex + "$", re.I)
self.signatures = {}
self.domain_regexes = []
for domain in self.domains:
self.domain_regexes.append(re.compile(r"^(?:[\w\-]+\.)*" + rf"{re.escape(domain)}$"))
for event_type, regexes in self.regexes.items():
self.signatures[event_type] = [re.compile(r, re.I) for r in regexes]
@property
def METHOD_NAME(self):
return {f"cloud-{self.name}"}
def excavate(self, event, http_body):
base_kwargs = dict(source=event, tags=self.METHOD_NAME)
# check for buckets in HTTP responses
for event_type, sigs in self.signatures.items():
found = set()
for sig in sigs:
for match in sig.findall(http_body):
kwargs = dict(base_kwargs)
kwargs["event_type"] = event_type
if not match in found:
found.add(match)
if event_type == "STORAGE_BUCKET":
self.emit_bucket(match, **kwargs)
else:
self.emit_event(**kwargs)
def speculate(self, event):
base_kwargs = dict(source=event, tags=self.METHOD_NAME)
if event.type.startswith("DNS_NAME"):
# check for DNS_NAMEs that are buckets
for event_type, sigs in self.signatures.items():
found = set()
for sig in sigs:
match = sig.match(event.data)
if match:
kwargs = dict(base_kwargs)
kwargs["event_type"] = event_type
if not event.data in found:
found.add(event.data)
if event_type == "STORAGE_BUCKET":
self.emit_bucket(match.groups(), **kwargs)
else:
self.emit_event(**kwargs)
def emit_bucket(self, match, **kwargs):
bucket_name, bucket_domain = match
kwargs["data"] = {"name": bucket_name, "url": f"https://{bucket_name}.{bucket_domain}"}
self.emit_event(**kwargs)
def emit_event(self, *args, **kwargs):
excavate_module = self.parent_helper.scan.modules.get("excavate", None)
if excavate_module:
event = self.dummy_module.make_event(*args, **kwargs)
if event:
excavate_module.emit_event(event)
def is_valid_bucket(self, bucket_name):
return self.bucket_name_regex.match(bucket_name)
def tag_event(self, event):
# tag the event if
if event.host:
# its host directly matches this cloud provider's domains
if isinstance(event.host, str) and self.domain_match(event.host):
event.tags.update(self.METHOD_NAME)
# tag as buckets, etc.
for event_type, sigs in self.signatures.items():
for sig in sigs:
if sig.match(event.host):
event.add_tag(f"cloud-{event_type}")
else:
# or it has a CNAME that matches this cloud provider's domains
for rh in event.resolved_hosts:
if not self.parent_helper.is_ip(rh) and self.domain_match(rh):
event.tags.update(self.METHOD_NAME)
def domain_match(self, s):
for r in self.domain_regexes:
if r.match(s):
return True
return False
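# Illustrative sketch only (not a shipped bbot provider): the class attributes
# below are the pieces a concrete provider is expected to define; the domain
# and regex values are invented for the example.
class _ExampleCloudProvider(BaseCloudProvider):
    domains = ["examplecloud.test"]
    # two capture groups (bucket name, bucket domain) as expected by emit_bucket
    regexes = {"STORAGE_BUCKET": [r"([\w\-]+)\.(buckets\.examplecloud\.test)"]}
    bucket_name_regex = r"[a-z0-9][a-z0-9\-\.]{1,61}[a-z0-9]"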
| null |
5,387 |
import re
import uuid
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.files.storage import get_storage_class
from django.db import models
from django.db.models import Manager, QuerySet, Sum, signals
from django.urls import reverse
from django.utils.functional import cached_property
from ipware import get_client_ip
from thunderstore.repository.consts import PACKAGE_NAME_REGEX
from thunderstore.repository.models import Package, PackageVersionDownloadEvent
from thunderstore.repository.package_formats import PackageFormats
from thunderstore.utils.decorators import run_after_commit
from thunderstore.webhooks.models import Webhook
def get_version_zip_filepath(instance, filename):
return f"repository/packages/{instance}.zip"
def get_version_png_filepath(instance, filename):
return f"repository/icons/{instance}.png"
class PackageVersionManager(models.Manager):
def active(self) -> "QuerySet[PackageVersion]": # TODO: Generic type
return self.exclude(is_active=False)
class PackageVersion(models.Model):
objects: "Manager[PackageVersion]" = PackageVersionManager()
package = models.ForeignKey(
"repository.Package",
related_name="versions",
on_delete=models.CASCADE,
)
is_active = models.BooleanField(
default=True,
)
date_created = models.DateTimeField(
auto_now_add=True,
)
downloads = models.PositiveIntegerField(default=0)
format_spec = models.TextField(
choices=PackageFormats.choices,
blank=True,
null=True,
help_text="Used to track the latest package format spec this package is compatible with",
)
name = models.CharField(
max_length=Package._meta.get_field("name").max_length,
)
# TODO: Split to three fields for each number in the version for better querying performance
version_number = models.CharField(
max_length=16,
)
website_url = models.CharField(
max_length=1024,
)
description = models.CharField(max_length=256)
dependencies = models.ManyToManyField(
"self",
related_name="dependants",
symmetrical=False,
blank=True,
)
readme = models.TextField()
changelog = models.TextField(blank=True, null=True)
# <packagename>.zip
file = models.FileField(
upload_to=get_version_zip_filepath,
storage=get_storage_class(settings.PACKAGE_FILE_STORAGE)(),
)
file_size = models.PositiveIntegerField()
# <packagename>.png
icon = models.ImageField(
upload_to=get_version_png_filepath,
)
uuid4 = models.UUIDField(default=uuid.uuid4, editable=False)
def validate(self):
if not re.match(PACKAGE_NAME_REGEX, self.name):
raise ValidationError(
"Package names can only contain a-z A-Z 0-9 _ characters"
)
def save(self, *args, **kwargs):
self.validate()
return super().save(*args, **kwargs)
class Meta:
constraints = [
models.UniqueConstraint(
fields=("package", "version_number"), name="unique_version_per_package"
),
models.CheckConstraint(
check=PackageFormats.as_query_filter(
field_name="format_spec",
allow_none=True,
),
name="valid_package_format",
),
]
# TODO: Remove in the end of TS-272
def get_absolute_url(self):
return reverse(
"old_urls:packages.version.detail",
kwargs={
"owner": self.owner.name,
"name": self.name,
"version": self.version_number,
},
)
def get_page_url(self, community_identifier: str) -> str:
return reverse(
"communities:community:packages.version.detail",
kwargs={
"owner": self.owner.name,
"name": self.name,
"version": self.version_number,
"community_identifier": community_identifier,
},
)
@cached_property
def display_name(self):
return self.name.replace("_", " ")
@cached_property
def owner(self):
return self.package.owner
@cached_property
def is_deprecated(self):
return self.package.is_deprecated
@cached_property
def full_version_name(self):
return f"{self.package.full_package_name}-{self.version_number}"
@cached_property
def reference(self):
from thunderstore.repository.package_reference import PackageReference
return PackageReference(
namespace=self.owner.name,
name=self.name,
version=self.version_number,
)
@cached_property
def _download_url(self):
return reverse(
"old_urls:packages.download",
kwargs={
"owner": self.package.owner.name,
"name": self.package.name,
"version": self.version_number,
},
)
@cached_property
def full_download_url(self) -> str:
return "%(protocol)s%(hostname)s%(path)s" % {
"protocol": settings.PROTOCOL,
"hostname": settings.PRIMARY_HOST,
"path": self._download_url,
}
@property
def install_url(self):
return "ror2mm://v1/install/%(hostname)s/%(owner)s/%(name)s/%(version)s/" % {
"hostname": settings.PRIMARY_HOST,
"owner": self.package.owner.name,
"name": self.package.name,
"version": self.version_number,
}
@staticmethod
def post_save(sender, instance, created, **kwargs):
if created:
instance.package.handle_created_version(instance)
instance.announce_release()
instance.package.handle_updated_version(instance)
@staticmethod
def post_delete(sender, instance, **kwargs):
instance.package.handle_deleted_version(instance)
@classmethod
def get_total_used_disk_space(cls):
return cls.objects.aggregate(total=Sum("file_size"))["total"] or 0
@run_after_commit
def announce_release(self):
webhooks = Webhook.get_for_package_release(self.package)
for webhook in webhooks:
webhook.post_package_version_release(self)
def METHOD_NAME(self, request):
client_ip, is_routable = get_client_ip(request)
if client_ip is None:
return
download_event, created = PackageVersionDownloadEvent.objects.get_or_create(
version=self,
source_ip=client_ip,
)
if created:
valid = True
else:
valid = download_event.count_downloads_and_return_validity()
if valid:
self._increase_download_counter()
def _increase_download_counter(self):
self.downloads += 1
self.save(update_fields=("downloads",))
def __str__(self):
return self.full_version_name
signals.post_save.connect(PackageVersion.post_save, sender=PackageVersion)
signals.post_delete.connect(PackageVersion.post_delete, sender=PackageVersion)
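# Illustrative example (owner/package names are made up, and it assumes
# Package.full_package_name renders as "<owner>-<name>"): for owner "Foo",
# package name "Bar" and version_number "1.0.0", full_version_name evaluates
# to "Foo-Bar-1.0.0" and install_url to
# "ror2mm://v1/install/<PRIMARY_HOST>/Foo/Bar/1.0.0/".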
| null |
5,388 |
from cradmin_legacy import crapp
from cradmin_legacy.viewhelpers.detail import DetailRoleView
from devilry.apps.core import models as coremodels
from devilry.apps.core.models import Assignment, Candidate, AssignmentGroup
from devilry.apps.core.models import Examiner
from devilry.apps.core.models import RelatedExaminer
from devilry.apps.core.models import RelatedStudent
from devilry.devilry_account.models import SubjectPermissionGroup
from devilry.devilry_admin.views.assignment.anonymizationmode import AssignmentAnonymizationmodeUpdateView
from devilry.devilry_admin.views.assignment.deadline_handling import AssignmentDeadlineHandlingUpdateView
from devilry.devilry_admin.views.assignment.gradingconfiguration import AssignmentGradingConfigurationUpdateView
from devilry.devilry_admin.views.assignment.long_and_shortname import AssignmentLongAndShortNameUpdateView
from devilry.devilry_admin.views.assignment.projectgroups import AssignmentProjectGroupUpdateView
from devilry.devilry_admin.views.assignment.examiner_selfassign import AssignmentExaminerSelfAssignUpdateView
from .first_deadline import AssignmentFirstDeadlineUpdateView
from .publishing_time import AssignmentPublishingTimeUpdateView, PublishNowRedirectView
class Overview(DetailRoleView):
model = coremodels.Assignment
template_name = 'devilry_admin/assignment/overview.django.html'
def get_candidates_count(self):
return coremodels.Candidate.objects\
.filter(assignment_group__parentnode=self.assignment)\
.count()
def get_distinct_examiners_count(self):
"""
Get distinct examiners otherwise the same relatedexaminer assigned to multiple groups
would be shown as multiple examiners.
"""
return Examiner.objects\
.filter(assignmentgroup__parentnode=self.assignment)\
.distinct('relatedexaminer__user').count()
def get_assignmentgroups_count(self):
return self.assignment.assignmentgroups.count()
def get_related_students_count(self):
return RelatedStudent.objects\
.filter(period=self.assignment.period, active=True)\
.distinct('user').count()
def METHOD_NAME(self):
return RelatedExaminer.objects\
.filter(period=self.assignment.period, active=True)\
.distinct('user').count()
@property
def assignment(self):
if not hasattr(self, '_assignment'):
queryset = Assignment.objects\
.filter(id=self.request.cradmin_role.id)\
.prefetch_point_to_grade_map()
self._assignment = queryset.get()
return self._assignment
def get_user_is_subjectadmin_or_higher(self):
return SubjectPermissionGroup.objects\
.get_devilryrole_for_user_on_subject(
user=self.request.user, subject=self.assignment.parentnode.parentnode) is not None
def get_assignment_groups_without_any_examiners(self):
return AssignmentGroup.objects.filter(parentnode=self.request.cradmin_role, examiners__isnull=True)
def get_context_data(self, **kwargs):
context = super(Overview, self).get_context_data(**kwargs)
context['assignmentgroups_count'] = self.get_assignmentgroups_count()
context['candidates_count'] = self.get_candidates_count()
context['examiners_count'] = self.get_distinct_examiners_count()
context['assignment'] = self.assignment
context['relatedstudents_count'] = self.get_related_students_count()
context['relatedexaminers_count'] = self.METHOD_NAME()
context['user_is_subjectadmin_or_higher'] = self.get_user_is_subjectadmin_or_higher()
context['students_without_examiners_exists'] = self.get_assignment_groups_without_any_examiners().exists()
return context
class App(crapp.App):
appurls = [
crapp.Url(r'^$',
Overview.as_view(),
name=crapp.INDEXVIEW_NAME),
crapp.Url(r'^update_assignment_short_and_long_name/(?P<pk>\d+)$',
AssignmentLongAndShortNameUpdateView.as_view(),
name="update_assignment_short_and_long_name"),
crapp.Url(r'^update_publishing_time/(?P<pk>\d+)$',
AssignmentPublishingTimeUpdateView.as_view(),
name="update_publishing_time"),
crapp.Url(r'^publish_assignment_now/(?P<pk>\d+)$',
PublishNowRedirectView.as_view(),
name="publish_assignment_now"),
crapp.Url(r'^update_first_deadline/(?P<pk>\d+)$',
AssignmentFirstDeadlineUpdateView.as_view(),
name="update_first_deadline"),
crapp.Url(r'^update_gradingconfiguration/(?P<pk>\d+)$',
AssignmentGradingConfigurationUpdateView.as_view(),
name="update_gradingconfiguration"),
crapp.Url(r'^update_projectgroup_settings/(?P<pk>\d+)$',
AssignmentProjectGroupUpdateView.as_view(),
name="update_projectgroup_settings"),
crapp.Url(r'^update_anonymizationmode/(?P<pk>\d+)$',
AssignmentAnonymizationmodeUpdateView.as_view(),
name="update_anonymizationmode"),
crapp.Url(r'^update_deadlinehandling/(?P<pk>\d+)',
AssignmentDeadlineHandlingUpdateView.as_view(),
name='update_deadline_handling'),
        crapp.Url(r'^update_examiner_selfassign_settings/(?P<pk>\d+)',
AssignmentExaminerSelfAssignUpdateView.as_view(),
name='update_examiner_selfassign_settings')
]
| null |
5,389 |
import jwt
import pytest
from django.urls import reverse
from rest_framework.test import APIClient
from thunderstore.community.models.community import Community
from thunderstore.core.models import IncomingJWTAuthConfiguration, SecretTypeChoices
from thunderstore.repository.models import DiscordUserBotPermission, Package
@pytest.mark.django_db
@pytest.mark.parametrize("old_urls", (False, True))
def test_bot_api_deprecate_mod_200(
api_client: APIClient,
admin_user,
package: Package,
community: Community,
old_urls: bool,
):
assert package.is_deprecated is False
jwt_secret = "superSecret"
auth = IncomingJWTAuthConfiguration.objects.create(
name="Test configuration",
user=admin_user,
secret=jwt_secret,
secret_type=SecretTypeChoices.HS256,
)
perms = DiscordUserBotPermission.objects.create(
label="Test",
thunderstore_user=admin_user,
discord_user_id=1234,
can_deprecate=True,
)
payload = {"package": package.full_package_name, "user": perms.discord_user_id}
encoded = jwt.encode(
payload=payload,
key=jwt_secret,
algorithm=SecretTypeChoices.HS256,
headers={"kid": str(auth.key_id)},
)
if old_urls:
url = reverse("api:v1:bot.deprecate-mod")
else:
url = reverse(
"communities:community:api:bot.deprecate-mod",
kwargs={"community_identifier": community.identifier},
)
response = api_client.post(
url,
data=encoded,
content_type="application/jwt",
)
assert response.status_code == 200
assert response.content == b'{"success":true}'
package.refresh_from_db()
assert package.is_deprecated is True
@pytest.mark.django_db
@pytest.mark.parametrize("old_urls", (False, True))
def test_bot_api_deprecate_mod_403_thunderstore_perms(
api_client: APIClient,
user,
package: Package,
community: Community,
old_urls: bool,
):
assert package.is_deprecated is False
jwt_secret = "superSecret"
auth = IncomingJWTAuthConfiguration.objects.create(
name="Test configuration",
user=user,
secret=jwt_secret,
secret_type=SecretTypeChoices.HS256,
)
perms = DiscordUserBotPermission.objects.create(
label="Test",
thunderstore_user=user,
discord_user_id=1234,
can_deprecate=True,
)
payload = {"package": package.full_package_name, "user": perms.discord_user_id}
encoded = jwt.encode(
payload=payload,
key=jwt_secret,
algorithm=SecretTypeChoices.HS256,
headers={"kid": str(auth.key_id)},
)
if old_urls:
url = reverse("api:v1:bot.deprecate-mod")
else:
url = reverse(
"communities:community:api:bot.deprecate-mod",
kwargs={"community_identifier": community.identifier},
)
response = api_client.post(
url,
data=encoded,
content_type="application/jwt",
)
assert response.status_code == 403
assert (
response.content
== b'{"detail":"You do not have permission to perform this action."}'
)
package.refresh_from_db()
assert package.is_deprecated is False
@pytest.mark.django_db
@pytest.mark.parametrize("old_urls", (False, True))
def METHOD_NAME(
api_client: APIClient,
admin_user,
package: Package,
community: Community,
old_urls: bool,
):
assert package.is_deprecated is False
jwt_secret = "superSecret"
auth = IncomingJWTAuthConfiguration.objects.create(
name="Test configuration",
user=admin_user,
secret=jwt_secret,
secret_type=SecretTypeChoices.HS256,
)
DiscordUserBotPermission.objects.create(
label="Test",
thunderstore_user=admin_user,
discord_user_id=1234,
can_deprecate=False,
)
payload = {"package": package.full_package_name, "user": 1234}
encoded = jwt.encode(
payload=payload,
key=jwt_secret,
algorithm=SecretTypeChoices.HS256,
headers={"kid": str(auth.key_id)},
)
if old_urls:
url = reverse("api:v1:bot.deprecate-mod")
else:
url = reverse(
"communities:community:api:bot.deprecate-mod",
kwargs={"community_identifier": community.identifier},
)
response = api_client.post(
url,
data=encoded,
content_type="application/jwt",
)
assert response.status_code == 403
assert response.content == b'{"detail":"Insufficient Discord user permissions"}'
package.refresh_from_db()
assert package.is_deprecated is False
@pytest.mark.django_db
@pytest.mark.parametrize("old_urls", (False, True))
def test_bot_api_deprecate_mod_404(
api_client: APIClient,
admin_user,
community: Community,
old_urls: bool,
):
jwt_secret = "superSecret"
auth = IncomingJWTAuthConfiguration.objects.create(
name="Test configuration",
user=admin_user,
secret=jwt_secret,
secret_type=SecretTypeChoices.HS256,
)
perms = DiscordUserBotPermission.objects.create(
label="Test",
thunderstore_user=admin_user,
discord_user_id=1234,
can_deprecate=True,
)
payload = {"package": "Nonexistent-Package", "user": perms.discord_user_id}
encoded = jwt.encode(
payload=payload,
key=jwt_secret,
algorithm=SecretTypeChoices.HS256,
headers={"kid": str(auth.key_id)},
)
if old_urls:
url = reverse("api:v1:bot.deprecate-mod")
else:
url = reverse(
"communities:community:api:bot.deprecate-mod",
kwargs={"community_identifier": community.identifier},
)
response = api_client.post(
url,
data=encoded,
content_type="application/jwt",
)
assert response.status_code == 404
assert response.content == b'{"detail":"Not found."}'
| null |
5,390 |
from __future__ import annotations
import pytest
from cctbx import crystal
from dials.algorithms.indexing.ssx.analysis import (
combine_results_dicts,
generate_html_report,
generate_plots,
make_cluster_plots,
make_summary_table,
)
def generate_test_results_dict(n_lattices=1):
results = {
0: [
{
"Image": "test_image_001.cbf",
"n_indexed": 0,
"n_strong": 100,
}
], # an unindexed image
1: [
{
"Image": "test_image_002.cbf",
"n_indexed": 50,
"n_strong": 200,
"RMSD_X": 0.5,
"RMSD_Y": 1.1,
"RMSD_dPsi": 1.2,
}
], # an indexed image with one lattice
2: [
{
"Image": "test_image_003.cbf",
"n_indexed": 30,
"n_strong": 50,
"RMSD_X": 0.5,
"RMSD_Y": 0.4,
"RMSD_dPsi": 0.6,
}
], # an indexed image with one lattice
}
if n_lattices > 1:
results[2].append(
{
"Image": "test_image_003.cbf",
"n_indexed": 10,
"n_strong": 50,
"RMSD_X": 0.3,
"RMSD_Y": 0.5,
"RMSD_dPsi": 0.7,
}
) # an indexed image with two lattices
return results
@pytest.mark.parametrize("n_lattices", [1, 2])
def test_make_summary_table(n_lattices):
"""Test that the summary table has the correct columns"""
results = generate_test_results_dict(n_lattices)
table = make_summary_table(results)
headerline = table.splitlines()[1]
headers = ["Image", "expt_id", "n_indexed", "RMSD X", "RMSD Y", "RMSD dPsi"]
last_lattice_line = table.splitlines()[-2]
assert all(h in headerline for h in headers)
if n_lattices > 1:
assert "lattice" in headerline
expected_last = "| test_image_003.cbf | 2 | 2 | 10/50 (20.0%) | 0.3 | 0.5 | 0.7 |"
else:
assert "lattice" not in headerline
expected_last = "| test_image_003.cbf | 1 | 30/50 (60.0%) | 0.5 | 0.4 | 0.6 |"
assert expected_last == last_lattice_line
def test_combine_multiple_results_summary_dicts():
s1 = generate_test_results_dict()
s2 = generate_test_results_dict(n_lattices=2)
combined = combine_results_dicts([s1, s2])
assert list(combined.keys()) == [0, 1, 2, 3, 4, 5]
for k, res in combined.items():
if k == 5:
assert len(res) == 2
else:
assert len(res) == 1
@pytest.mark.parametrize("n_lattices", [1, 2])
def test_generate_plots(n_lattices):
plots = generate_plots(generate_test_results_dict(n_lattices))
# First plot, n_indexed vs image, should have a scatter plot n_strong,
    # one for the first lattice and another for second lattices
assert len(plots["n_indexed"]["data"]) == n_lattices + 1
assert len(plots["n_indexed"]["data"][0]["x"]) == 3 # lattice 1 (0 or indexed)
assert plots["n_indexed"]["data"][0]["y"][0] == 0.0 # first has none indexed
assert all(plots["n_indexed"]["data"][0]["y"][i] > 0.0 for i in range(1, 3))
assert plots["n_indexed"]["data"][-1]["y"] == [100, 200, 50] # n_strong
if n_lattices == 2:
assert (
len(plots["n_indexed"]["data"][1]["x"]) == 1
) # only one has second lattice
assert "lattice 1" in plots["n_indexed"]["data"][0]["name"]
assert "lattice 2" in plots["n_indexed"]["data"][1]["name"]
percent_indexed = [0.0, 25.0, 80.0]
else:
assert "lattice 1" not in plots["n_indexed"]["data"][0]["name"]
percent_indexed = [0.0, 25.0, 60.0]
# Next plot, percentage of strong spots, should just have one trend
assert plots["percent_indexed"]["data"][0]["y"] == percent_indexed
    # Percent indexed histogram
assert sum(plots["percent_indexed_hist"]["data"][0]["y"]) == 3
# rmsd plots
assert plots["rmsds"]["data"][0]["y"] == [0.5, 0.5] # X
assert plots["rmsds"]["data"][1]["y"] == [1.1, 0.4] # Y
assert plots["rmsdz"]["data"][0]["y"] == [1.2, 0.6] # dPsi
if n_lattices == 2:
assert plots["rmsds"]["data"][2]["y"] == [0.3]
assert plots["rmsds"]["data"][3]["y"] == [0.5]
assert plots["rmsdz"]["data"][1]["y"] == [0.7] # dPsi
# rmsd distribution plots
assert sum(plots["rmsdxy_hist"]["data"][0]["y"]) == n_lattices + 1
assert len(plots["rmsdxy_hist"]["data"]) == 2
assert sum(plots["rmsdz_hist"]["data"][0]["y"]) == n_lattices + 1
def test_generate_html_report(tmp_path):
plots = generate_plots(generate_test_results_dict())
fname = "test_report_name.html"
generate_html_report(plots, tmp_path / fname)
assert tmp_path.joinpath("test_report_name.html").is_file()
def METHOD_NAME():
from dials.algorithms.clustering.unit_cell import Cluster
c1 = Cluster(
[
crystal.symmetry(
unit_cell=(10.0, 10.0, 10.0, 90, 90, 90), space_group="P1"
),
crystal.symmetry(
unit_cell=(10.1, 10.1, 10.1, 90, 90, 90), space_group="P1"
),
crystal.symmetry(
unit_cell=(10.2, 10.2, 10.2, 90, 90, 90), space_group="P1"
),
]
)
c2 = Cluster(
[
crystal.symmetry(
unit_cell=(11.0, 11.0, 11.0, 90, 90, 90), space_group="P1"
),
crystal.symmetry(
unit_cell=(11.1, 11.1, 11.1, 90, 90, 90), space_group="P1"
),
crystal.symmetry(
unit_cell=(11.2, 11.2, 11.2, 90, 90, 90), space_group="P1"
),
crystal.symmetry(
unit_cell=(11.3, 11.3, 11.3, 90, 90, 90), space_group="P1"
),
]
)
clusters = [c1, c2]
plots = make_cluster_plots(clusters)
assert "uc_scatter_0" in plots
assert "uc_scatter_1" in plots
assert "uc_hist_0" in plots
assert "uc_hist_1" in plots
print(plots)
assert len(plots["uc_hist_0"]["data"]) == 3
assert len(plots["uc_hist_0"]["data"][0]["x"]) == 3
assert len(plots["uc_hist_1"]["data"][0]["x"]) == 4
assert len(plots["uc_scatter_0"]["data"]) == 3
assert len(plots["uc_scatter_0"]["data"][0]["x"]) == 3
assert len(plots["uc_scatter_1"]["data"][0]["x"]) == 4
| null |
5,391 |
# -*- coding: utf-8 -*-
###
# (C) Copyright [2020] Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
import unittest
import mock
from hpeOneView.connection import connection
from hpeOneView.resources.networking.logical_switches import LogicalSwitches
from hpeOneView.resources.resource import ResourceClient
class LogicalSwitchesTest(unittest.TestCase):
def setUp(self):
self.host = '127.0.0.1'
self.connection = connection(self.host, 800)
self._logical_switches = LogicalSwitches(self.connection)
@mock.patch.object(ResourceClient, 'get_all')
def test_get_all_called_once(self, mock_get_all):
filter = 'name=TestName'
sort = 'name:ascending'
self._logical_switches.get_all(2, 500, filter, sort)
mock_get_all.assert_called_once_with(2, 500, filter=filter, sort=sort)
@mock.patch.object(ResourceClient, 'create')
def test_create_should_use_given_values(self, mock_create):
resource = {
"logicalSwitch": {
"name": "Test Logical Switch",
"state": "Active",
"logicalSwitchGroupUri": "/rest/logical-switch-groups/e7401307-58bd-49ad-8a1b-79f351a346b8",
"type": "set_by_user"
},
"switchCredentialConfiguration": [],
"logicalSwitchCredentials": []
}
resource_rest_call = resource.copy()
mock_create.return_value = {}
self._logical_switches.create(resource, 30)
mock_create.assert_called_once_with(resource_rest_call, timeout=30)
@mock.patch.object(ResourceClient, 'create')
def test_create_should_use_default_values(self, mock_create):
resource = {
"logicalSwitch": {
"name": "Test Logical Switch"
}
}
resource_with_default_values = {
"logicalSwitch": {
"name": "Test Logical Switch",
}
}
mock_create.return_value = {}
self._logical_switches.create(resource)
mock_create.assert_called_once_with(resource_with_default_values, timeout=-1)
@mock.patch.object(ResourceClient, 'update')
def test_update_should_use_given_values(self, mock_update):
uri = '/rest/logical-switches/c4ae6a56-a595-4b06-8c7a-405212df8b93'
resource = {
"logicalSwitch": {
"name": "Test Logical Switch",
"state": "Active",
"logicalSwitchGroupUri": "/rest/logical-switch-groups/7cc34511-0b00-4a48-82f6-1e9a662afeb8",
"type": "set_by_user",
"uri": uri
},
"switchCredentialConfiguration": [],
"logicalSwitchCredentials": []
}
resource_rest_call = resource.copy()
mock_update.return_value = {}
self._logical_switches.update(resource, 60)
mock_update.assert_called_once_with(resource_rest_call, uri=uri, timeout=60)
@mock.patch.object(ResourceClient, 'update')
def test_update_should_use_default_values(self, mock_update):
uri = '/rest/logical-switches/c4ae6a56-a595-4b06-8c7a-405212df8b93'
resource = {
"logicalSwitch": {
"name": "Test Logical Switch",
"uri": uri
},
"uri": "a_uri"
}
resource_with_default_values = {
"logicalSwitch": {
"name": "Test Logical Switch",
"uri": uri
},
"uri": "a_uri"
}
mock_update.return_value = {}
self._logical_switches.update(resource)
mock_update.assert_called_once_with(resource_with_default_values, uri=uri, timeout=-1)
@mock.patch.object(ResourceClient, 'delete')
def test_delete_called_once(self, mock_delete):
id = 'ad28cf21-8b15-4f92-bdcf-51cb2042db32'
self._logical_switches.delete(id, force=False, timeout=-1)
mock_delete.assert_called_once_with(id, force=False, timeout=-1)
@mock.patch.object(ResourceClient, 'get_by')
def test_get_by_called_once(self, mock_get_by):
self._logical_switches.get_by('name', 'Test Logical Switch')
mock_get_by.assert_called_once_with('name', 'Test Logical Switch')
@mock.patch.object(ResourceClient, 'get')
def test_get_called_once(self, mock_get):
self._logical_switches.get('3518be0e-17c1-4189-8f81-83f3724f6155')
mock_get.assert_called_once_with('3518be0e-17c1-4189-8f81-83f3724f6155')
@mock.patch.object(ResourceClient, 'get')
def test_get_by_uri_called_once(self, mock_get):
uri = '/rest/logical-switches/3518be0e-17c1-4189-8f81-83f3724f6155'
self._logical_switches.get(uri)
mock_get.assert_called_once_with(uri)
@mock.patch.object(ResourceClient, 'update_with_zero_body')
def METHOD_NAME(self, mock_update_with_zero_body):
uri = '/rest/logical-switches/ad28cf21-8b15-4f92-bdcf-51cb2042db32'
uri_rest_call = '/rest/logical-switches/ad28cf21-8b15-4f92-bdcf-51cb2042db32/refresh'
self._logical_switches.refresh(uri)
mock_update_with_zero_body.assert_called_once_with(uri_rest_call, timeout=-1)
@mock.patch.object(ResourceClient, 'update_with_zero_body')
def test_refresh_by_id(self, mock_update_with_zero_body):
mock_update_with_zero_body.return_value = {}
id = 'ad28cf21-8b15-4f92-bdcf-51cb2042db32'
uri_rest_call = '/rest/logical-switches/ad28cf21-8b15-4f92-bdcf-51cb2042db32/refresh'
self._logical_switches.refresh(id)
mock_update_with_zero_body.assert_called_once_with(uri_rest_call, timeout=-1)
@mock.patch.object(ResourceClient, 'patch')
def test_patch_should_use_user_defined_values(self, mock_patch):
mock_patch.return_value = {}
self._logical_switches.patch('/rest/logical-switches/fake', 'replace', '/scopeUris', ['/rest/scopes/fake1'], 1)
mock_patch.assert_called_once_with('/rest/logical-switches/fake', 'replace', '/scopeUris',
['/rest/scopes/fake1'], timeout=1)
| null |
5,392 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2021 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
"""
DNS xml data dump model.
"""
import xml.etree.ElementTree as etree
from collections import OrderedDict
from xml.dom import minidom
from mantidqtinterfaces.dns_powder_tof.data_structures.dns_obs_model import DNSObsModel
from mantidqtinterfaces.dns_powder_tof.helpers.file_processing import save_txt
class DNSXMLDumpModel(DNSObsModel):
def _convert_type(self, value, my_type):
# pylint: disable=too-many-return-statements
"""
Return a variable of the type described by
string :my_type from the string :value
"""
if my_type == "bool":
return value == "True"
if my_type == "int":
return int(value)
if my_type == "float":
return float(value)
if my_type == "emptylist":
return []
if my_type.endswith("list"):
return [self._convert_type(x, my_type=my_type.split("list")[0]) for x in value.strip("[]").split(",")]
if my_type == "None":
return None
return value
def _dict_element_to_xml(self, dictionary, node=None):
"""
Return an xml element for a given dictionary
"""
if node is None:
node = etree.Element("document")
for key, value in dictionary.items():
sub = etree.SubElement(node, key)
if isinstance(value, dict):
self._dict_element_to_xml(value, node=sub)
else:
sub.text = str(value)
sub.set("type", self._return_type(value))
return node
def dict_to_xml_file(self, param_dict, filename, xml_header):
"""
Write :param_dict to an xml file :filename.
Dictionary can contain bool, None, str, int, float and list of them,
or dictionaries, other types are converted to str and not converted back
if you try to read them.
"""
dictionary = OrderedDict()
dictionary["xml_header"] = xml_header
dictionary.update(param_dict)
xml_str = self._dict_to_xml_string(dictionary)
if filename:
save_txt(xml_str, filename)
def _dict_to_xml_string(self, dictionary):
"""
Returns an xml string for a given dictionary, parsed by minidom.
"""
xml_str = etree.tostring(self._dict_element_to_xml(dictionary))
xml_str = minidom.parseString(xml_str).toprettyxml(indent=" ")
return xml_str
def _return_type(self, value):
# pylint: disable=too-many-return-statements
"""
Return a string describing the type of :value.
"""
if isinstance(value, bool): # bool is subtype of int
return "bool"
if isinstance(value, int):
return "int"
if isinstance(value, float):
return "float"
if isinstance(value, list):
if value:
return "".join([self._return_type(value[0]), "list"])
return "emptylist"
if value is None:
return "None"
if isinstance(value, str):
return "str"
print(value)
return "str"
@staticmethod
def _load_file_to_xml_tree(filename):
tree = None
if filename:
try:
tree = etree.parse(filename)
except IOError:
print("Error reading file")
return None
return tree
@staticmethod
def _check_instrument_name(tree):
instrument_name = tree.find(".//instrument_name").text
return instrument_name == "DNS"
def xml_file_to_dict(self, filename):
"""
Return a dictionary from a given :filename.
Works only with structures written by dict_to_xml_file.
"""
tree = self._load_file_to_xml_tree(filename)
if tree and self._check_instrument_name(tree):
return self.METHOD_NAME(tree.getroot(), {}).get("document", {})
return None
def METHOD_NAME(self, element, dictionary):
"""
Updates and returns the given dictionary with
values of xml tree element.
"""
children = list(element)
if children:
new_dict = OrderedDict()
dictionary.update({element.tag: new_dict})
for child in children:
self.METHOD_NAME(child, new_dict)
else:
dictionary.update({element.tag: self._convert_type(element.text, element.get("type", "str"))})
return dictionary
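# Hypothetical usage sketch (not part of the original module): round-trips a
# parameter dictionary through the dump and load helpers defined above. It
# assumes DNSObsModel accepts parent=None and that save_txt writes the file
# to the current working directory.
if __name__ == "__main__":
    model = DNSXMLDumpModel(parent=None)
    params = {"sample": {"temperature": 4.2, "fields": [0.0, 1.0]}, "active": True}
    model.dict_to_xml_file(params, "last_run.xml", xml_header={"instrument_name": "DNS"})
    restored = model.xml_file_to_dict("last_run.xml")  # None unless instrument_name is "DNS"
    print(restored)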
| null |
5,393 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# pylint: disable=invalid-name
import random
import string
import os
import mantid
import time
from isis_instrument import BaseInstrument
class ReductionStep(object):
"""
Base class for reduction steps
"""
@classmethod
def delete_workspaces(cls, workspace):
"""
Delete all workspace created by this reduction step related
to the given workspace
@param workspace: workspace to delete
"""
return
@classmethod
def _create_unique_name(cls, filepath, descriptor):
"""
Generate a unique name for an internal workspace
"""
random_str = "".join(random.choice(string.ascii_lowercase + string.ascii_uppercase + string.digits) for x in range(5))
return "__" + descriptor + "_" + os.path.basename(filepath) + "_" + random_str
def METHOD_NAME(self, reducer, inputworkspace=None, outputworkspace=None):
"""
Implement the reduction step.
@param reducer: Reducer object for which the step is executed
@param inputworkspace: Name of the workspace to apply this step to
@param outputworkspace: Name of the workspace to have as an output. If this is None it will be set to inputworkspace
"""
raise NotImplementedError
def run_consistency_check(self):
"""
Run a consistency check of the settings of the ReductionStep
"""
return
class Reducer(object):
"""
Base reducer class. Instrument-specific reduction processes should be
implemented in a child of this class.
"""
## Instrument configuration object
instrument = None
## Path for data files
_data_path = "."
## Path for output files
_output_path = None
## List of reduction steps
_reduction_steps = []
## Log
log_text = ""
## Output workspaces
output_workspaces = []
def __init__(self):
self.UID = "".join(random.choice(string.ascii_lowercase + string.ascii_uppercase + string.digits) for x in range(5))
self._reduction_steps = []
def set_instrument(self, configuration):
if issubclass(configuration.__class__, BaseInstrument):
self.instrument = configuration
else:
raise RuntimeError("Reducer.set_instrument expects an %s object, found %s" % (BaseInstrument, configuration.__class__))
def set_data_path(self, path):
"""
Set the path for data files
@param path: data file path
"""
path = os.path.normcase(path)
if os.path.isdir(path):
self._data_path = path
mantid.config.appendDataSearchDir(path)
else:
raise RuntimeError("Reducer.set_data_path: provided path is not a directory (%s)" % path)
def set_output_path(self, path):
"""
Set the path for output files
@param path: output file path
"""
path = os.path.normcase(path)
if os.path.isdir(path):
self._output_path = path
else:
raise RuntimeError("Reducer.set_output_path: provided path is not a directory (%s)" % path)
def pre_process(self):
"""
Reduction steps that are meant to be executed only once per set
of data files. After this is executed, all files will go through
the list of reduction steps.
"""
pass
def post_process(self):
"""
Reduction steps to be executed after all data files have been
processed.
"""
pass
def reduce(self):
"""
Go through the list of reduction steps
"""
# should we use it?
t_0 = time.time()
instrument_name = ""
self.output_workspaces = []
# Check that an instrument was specified
if self.instrument is not None:
instrument_name = self.instrument.name()
# Log text
self.log_text = "%s reduction - %s\n" % (instrument_name, time.ctime())
# Go through the list of steps that are common to all data files
self.pre_process()
# Go through the list of files to be reduced
# for file_ws in self._data_files:
# for item in self._reduction_steps:
# try:
# result = item.execute(self, file_ws)
# if result is not None and len(str(result))>0:
# self.log_text += "%s\n" % str(result)
# except:
# self.log_text += "\n%s\n" % sys.exc_value
# raise
# any clean up, possibly removing workspaces
self.post_process()
# Determine which directory to use
output_dir = self._data_path
if self._output_path is not None:
if os.path.isdir(self._output_path):
output_dir = self._output_path
else:
output_dir = os.path.expanduser("~")
self.log_text += "Reduction completed in %g sec\n" % (time.time() - t_0)
log_path = os.path.join(output_dir, "%s_reduction.log" % instrument_name)
self.log_text += "Log saved to %s" % log_path
# Write the log to file
f = open(log_path, "a")
f.write("\n-------------------------------------------\n")
f.write(self.log_text)
f.close()
return self.log_text
class ReductionSingleton(object):
"""Singleton reduction class"""
## storage for the instance reference
__instance = None
def __init__(self):
"""Create singleton instance"""
# Check whether we already have an instance
if ReductionSingleton.__instance is None:
# Create and remember instance
ReductionSingleton.__instance = Reducer()
# Store instance reference as the only member in the handle
self.__dict__["_ReductionSingleton__instance"] = ReductionSingleton.__instance
@classmethod
def clean(cls, reducer_cls=None):
if reducer_cls is None:
ReductionSingleton.__instance = Reducer()
else:
ReductionSingleton.__instance = reducer_cls()
@classmethod
def replace(cls, red):
"""
Set the object pointed to by the singleton with
the one passed
@param red: reducer object
"""
if issubclass(red.__class__, Reducer):
ReductionSingleton.__instance = red
else:
raise RuntimeError("The object passed to ReductionSingleton.replace() must be of type Reducer")
@classmethod
def run(cls):
"""
Execute the reducer and then clean it (regardless of
whether it throws) to ensure that a partially run reducer is
not left behind
"""
try:
if ReductionSingleton.__instance is not None:
return ReductionSingleton.__instance._reduce()
finally:
ReductionSingleton.clean(ReductionSingleton.__instance.__class__)
def __getattr__(self, attr):
"""Delegate access to implementation"""
return getattr(self.__instance, attr)
def __setattr__(self, attr, value):
"""Delegate access to implementation"""
return setattr(self.__instance, attr, value)
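# Hypothetical usage sketch (not part of the original file): every
# ReductionSingleton() handle delegates attribute access to the one shared
# Reducer instance, so configuration done through one handle is visible
# through any other.
if __name__ == "__main__":
    ReductionSingleton().set_data_path(".")   # configure the shared Reducer
    other_handle = ReductionSingleton()       # a second handle, same instance
    print(other_handle._data_path)            # "." - the state is shared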
| null |
5,394 |
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import platform
import numpy as np
import pytest
import mindspore as ms
from mindspore import nn
from mindspore import ops
from mindspore import context, Tensor
context.set_context(mode=context.PYNATIVE_MODE)
class NetInner(nn.Cell):
def __init__(self):
super(NetInner, self).__init__()
self.log = ops.Log()
self.exp = ops.Exp()
self.addn = ops.AddN()
self.relu = nn.ReLU()
def construct(self, x, y):
x = self.addn((x, y))
x = self.log(x)
x = self.exp(x)
x = self.relu(x)
x = self.addn((x, y))
return x
class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
self.log = ops.Log()
self.exp = ops.Exp()
self.addn = ops.AddN()
self.relu = nn.ReLU()
self.inner = NetInner()
def construct(self, x, y):
x = self.addn((x, y))
x = self.inner(x, y)
x = self.log(x)
x = self.exp(x)
x = self.relu(x)
return x
class CmpNetInner(nn.Cell):
def __init__(self):
super(CmpNetInner, self).__init__()
self.log = ops.Log()
self.exp = ops.Exp()
self.addn = ops.AddN()
self.relu = nn.ReLU()
def construct(self, x, y):
x = self.addn((x, y))
x = self.log(x)
x = self.exp(x)
x = self.relu(x)
x = self.addn((x, y))
return x
class CmpNet(nn.Cell):
def __init__(self):
super(CmpNet, self).__init__()
self.log = ops.Log()
self.exp = ops.Exp()
self.addn = ops.AddN()
self.relu = nn.ReLU()
self.inner = CmpNetInner()
def construct(self, x, y):
x = self.addn((x, y))
x = self.inner(x, y)
x = self.log(x)
x = self.exp(x)
x = self.relu(x)
return x
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_pynative_auto_dynamic_shape_with_three_static_shape():
"""
Feature: PyNative auto dynamic shape.
Description: The static shape is automatically converted to a dynamic shape.
Expectation: The calculation result is correct.
"""
if platform.system() == 'Windows':
return
net = Net()
grad_op = ops.GradOperation(get_all=True, get_by_list=False, sens_param=True)
# run first shape
input_x = Tensor(np.random.rand(2, 3, 6, 8).astype(np.float32) * 2)
input_y = Tensor(np.random.rand(2, 3, 6, 8).astype(np.float32) * 5)
out = net(input_x, input_y)
_ = grad_op(net)(input_x, input_y, out)
# run second shape
input_x2 = Tensor(np.random.rand(2, 3, 6, 16).astype(np.float32) * 2)
input_y2 = Tensor(np.random.rand(2, 3, 6, 16).astype(np.float32) * 5)
out = net(input_x2, input_y2)
_ = grad_op(net)(input_x2, input_y2, out)
# run third shape
input_x3 = Tensor(np.random.rand(2, 3, 6, 34).astype(np.float32) * 2)
input_y3 = Tensor(np.random.rand(2, 3, 6, 34).astype(np.float32) * 5)
out = net(input_x3, input_y3)
grad = grad_op(net)(input_x3, input_y3, out)
cmp_net = CmpNet()
cmp_out = cmp_net(input_x3, input_y3)
cmp_grad = grad_op(cmp_net)(input_x3, input_y3, cmp_out)
assert np.allclose(grad[0].asnumpy(), cmp_grad[0].asnumpy(), 0.00001, 0.00001)
assert np.allclose(grad[1].asnumpy(), cmp_grad[1].asnumpy(), 0.00001, 0.00001)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def METHOD_NAME():
"""
Feature: PyNative auto dynamic shape.
Description: Mixing static shape and dynamic shape.
Expectation: The calculation result is correct.
"""
if platform.system() == 'Windows':
return
net = Net()
grad_op = ops.GradOperation(get_all=True, get_by_list=False, sens_param=True)
# run first shape
input_x = Tensor(np.random.rand(2, 3, 6, 8).astype(np.float32) * 2)
input_y = Tensor(np.random.rand(2, 3, 6, 8).astype(np.float32) * 5)
out = net(input_x, input_y)
_ = grad_op(net)(input_x, input_y, out)
# run second shape
input_x2 = Tensor(np.random.rand(2, 3, 6, 16).astype(np.float32) * 2)
input_y2 = Tensor(np.random.rand(2, 3, 6, 16).astype(np.float32) * 5)
net.set_inputs(Tensor(shape=[None, None, None, None], dtype=ms.float32),
Tensor(shape=[None, None, None, None], dtype=ms.float32))
out = net(input_x2, input_y2)
_ = grad_op(net)(input_x2, input_y2, out)
# run third shape
input_x3 = Tensor(np.random.rand(2, 3, 6, 34).astype(np.float32) * 2)
input_y3 = Tensor(np.random.rand(2, 3, 6, 34).astype(np.float32) * 5)
out = net(input_x3, input_y3)
grad = grad_op(net)(input_x3, input_y3, out)
cmp_net = CmpNet()
cmp_out = cmp_net(input_x3, input_y3)
cmp_grad = grad_op(cmp_net)(input_x3, input_y3, cmp_out)
assert np.allclose(grad[0].asnumpy(), cmp_grad[0].asnumpy(), 0.00001, 0.00001)
assert np.allclose(grad[1].asnumpy(), cmp_grad[1].asnumpy(), 0.00001, 0.00001)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_pynative_auto_dynamic_shape_mixing_static_shape_and_dynamic_shape_2():
"""
Feature: PyNative auto dynamic shape.
Description: Mixing static shape and dynamic shape.
Expectation: The calculation result is correct.
"""
if platform.system() == 'Windows':
return
net = Net()
grad_op = ops.GradOperation(get_all=True, get_by_list=False, sens_param=True)
# run first shape
input_x = Tensor(np.random.rand(2, 3, 6, 8).astype(np.float32) * 2)
input_y = Tensor(np.random.rand(2, 3, 6, 8).astype(np.float32) * 5)
net.set_inputs(Tensor(shape=[None, None, None, None], dtype=ms.float32),
Tensor(shape=[None, None, None, None], dtype=ms.float32))
out = net(input_x, input_y)
_ = grad_op(net)(input_x, input_y, out)
# run second shape
input_x2 = Tensor(np.random.rand(2, 3, 6, 16).astype(np.float32) * 2)
input_y2 = Tensor(np.random.rand(2, 3, 6, 16).astype(np.float32) * 5)
out = net(input_x2, input_y2)
_ = grad_op(net)(input_x2, input_y2, out)
# run third shape
input_x3 = Tensor(np.random.rand(2, 3, 6, 34).astype(np.float32) * 2)
input_y3 = Tensor(np.random.rand(2, 3, 6, 34).astype(np.float32) * 5)
out = net(input_x3, input_y3)
grad = grad_op(net)(input_x3, input_y3, out)
cmp_net = CmpNet()
cmp_out = cmp_net(input_x3, input_y3)
cmp_grad = grad_op(cmp_net)(input_x3, input_y3, cmp_out)
assert np.allclose(grad[0].asnumpy(), cmp_grad[0].asnumpy(), 0.00001, 0.00001)
assert np.allclose(grad[1].asnumpy(), cmp_grad[1].asnumpy(), 0.00001, 0.00001)
| null |
5,395 |
from functools import wraps
from jinja2.asyncsupport import auto_aiter
from jinja2 import filters
async def auto_to_seq(value):
seq = []
if hasattr(value, '__aiter__'):
async for item in value:
seq.append(item)
else:
for item in value:
seq.append(item)
return seq
async def async_select_or_reject(args, kwargs, modfunc, lookup_attr):
seq, func = filters.prepare_select_or_reject(
args, kwargs, modfunc, lookup_attr)
if seq:
async for item in auto_aiter(seq):
if func(item):
yield item
def dualfilter(normal_filter, async_filter):
wrap_evalctx = False
if getattr(normal_filter, 'environmentfilter', False):
is_async = lambda args: args[0].is_async
wrap_evalctx = False
else:
if not getattr(normal_filter, 'evalcontextfilter', False) and \
not getattr(normal_filter, 'contextfilter', False):
wrap_evalctx = True
is_async = lambda args: args[0].environment.is_async
@wraps(normal_filter)
def wrapper(*args, **kwargs):
b = is_async(args)
if wrap_evalctx:
args = args[1:]
if b:
return async_filter(*args, **kwargs)
return normal_filter(*args, **kwargs)
if wrap_evalctx:
wrapper.evalcontextfilter = True
wrapper.asyncfiltervariant = True
return wrapper
def asyncfiltervariant(original):
def decorator(f):
return dualfilter(original, f)
return decorator
@asyncfiltervariant(filters.do_first)
async def do_first(environment, seq):
try:
return await auto_aiter(seq).__anext__()
except StopAsyncIteration:
return environment.undefined('No first item, sequence was empty.')
@asyncfiltervariant(filters.do_groupby)
async def do_groupby(environment, value, attribute):
expr = filters.make_attrgetter(environment, attribute)
return [filters._GroupTuple(key, await auto_to_seq(values))
for key, values in filters.groupby(sorted(
await auto_to_seq(value), key=expr), expr)]
@asyncfiltervariant(filters.do_join)
async def do_join(eval_ctx, value, d=u'', attribute=None):
return filters.do_join(eval_ctx, await auto_to_seq(value), d, attribute)
@asyncfiltervariant(filters.do_list)
async def do_list(value):
return await auto_to_seq(value)
@asyncfiltervariant(filters.METHOD_NAME)
async def METHOD_NAME(*args, **kwargs):
return async_select_or_reject(args, kwargs, lambda x: not x, False)
@asyncfiltervariant(filters.do_rejectattr)
async def do_rejectattr(*args, **kwargs):
return async_select_or_reject(args, kwargs, lambda x: not x, True)
@asyncfiltervariant(filters.do_select)
async def do_select(*args, **kwargs):
return async_select_or_reject(args, kwargs, lambda x: x, False)
@asyncfiltervariant(filters.do_selectattr)
async def do_selectattr(*args, **kwargs):
return async_select_or_reject(args, kwargs, lambda x: x, True)
@asyncfiltervariant(filters.do_map)
async def do_map(*args, **kwargs):
seq, func = filters.prepare_map(args, kwargs)
if seq:
async for item in auto_aiter(seq):
yield func(item)
@asyncfiltervariant(filters.do_sum)
async def do_sum(environment, iterable, attribute=None, start=0):
rv = start
if attribute is not None:
func = filters.make_attrgetter(environment, attribute)
else:
func = lambda x: x
async for item in auto_aiter(iterable):
rv += func(item)
return rv
@asyncfiltervariant(filters.do_slice)
async def do_slice(value, slices, fill_with=None):
return filters.do_slice(await auto_to_seq(value), slices, fill_with)
ASYNC_FILTERS = {
'first': do_first,
'groupby': do_groupby,
'join': do_join,
'list': do_list,
# we intentionally do not support do_last because that would be
# ridiculous
'reject': METHOD_NAME,
'rejectattr': do_rejectattr,
'map': do_map,
'select': do_select,
'selectattr': do_selectattr,
'sum': do_sum,
'slice': do_slice,
}
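# Hypothetical usage sketch (not part of the original module, assumes Jinja2 2.x
# with async support on Python 3.7+): the dual filters above dispatch to their
# async variants when the environment is created with enable_async=True, so
# ordinary template syntax keeps working under async rendering.
if __name__ == "__main__":
    import asyncio
    from jinja2 import Environment
    env = Environment(enable_async=True)
    template = env.from_string("{{ values | select('odd') | list }}")
    print(asyncio.run(template.render_async(values=[1, 2, 3])))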
| null |
5,396 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2019 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantidqt package
#
#
from qtpy.QtWidgets import QAction, QActionGroup
from qtpy.QtGui import QFont
from mantidqt.utils.qt import import_qt
SHOW_FRAMEWORK_OUTPUT_KEY = "MessageDisplay/ShowFrameworkOutput"
SHOW_ALL_SCRIPT_OUTPUT_KEY = "MessageDisplay/ShowAllScriptOutput"
SHOW_ACTIVE_SCRIPT_OUTPUT_KEY = "MessageDisplay/ShowActiveScriptOutput"
MessageDisplay_cpp = import_qt(".._common", "mantidqt.widgets", "MessageDisplay")
class Priority:
Fatal = 2
Error = 3
Warning = 4
Notice = 5
Information = 6
Debug = 7
class MessageDisplay(MessageDisplay_cpp):
def __init__(self, font=QFont(), parent=None):
super(MessageDisplay, self).__init__(font, parent)
# We need to disconnect from the C++ base class's slot to avoid
# calling the base class's showContextMenu method as well
self.getTextEdit().customContextMenuRequested.disconnect()
self.getTextEdit().customContextMenuRequested.connect(self.showContextMenu)
self.last_executed_script = ""
def readSettings(self, qsettings):
super(MessageDisplay, self).readSettings(qsettings)
self.setShowFrameworkOutput(self.ReadSettingSafely(qsettings, SHOW_FRAMEWORK_OUTPUT_KEY, True, bool))
self.setShowActiveScriptOutput(self.ReadSettingSafely(qsettings, SHOW_ACTIVE_SCRIPT_OUTPUT_KEY, True, bool))
self.setShowAllScriptOutput(self.ReadSettingSafely(qsettings, SHOW_ALL_SCRIPT_OUTPUT_KEY, False, bool))
def ReadSettingSafely(self, qsettings, key, default, type):
"""
Reads a value from qsettings, returning the default if the value is missing or the type is wrong
:param qsettings: the qsettings object
:param key: the key to read
:param default: the default value
:param type: the type to check the returned value for
:return: The value from qsettings, or default if the value is missing or the type is wrong
"""
try:
return qsettings.value(key, default, type=type)
except (KeyError, TypeError):
return default
def writeSettings(self, qsettings):
super(MessageDisplay, self).writeSettings(qsettings)
qsettings.setValue(SHOW_FRAMEWORK_OUTPUT_KEY, self.showFrameworkOutput())
qsettings.setValue(SHOW_ALL_SCRIPT_OUTPUT_KEY, self.showAllScriptOutput())
qsettings.setValue(SHOW_ACTIVE_SCRIPT_OUTPUT_KEY, self.showActiveScriptOutput())
def generateContextMenu(self):
"""
Generate the window's context menu. This first calls the base class's
context menu generator and then extends it with the filtering options.
"""
qmenu = super(MessageDisplay, self).generateContextMenu()
filter_menu = qmenu.addMenu("&View")
framework_action = QAction("Mantid Log Output", filter_menu)
framework_action.triggered.connect(self.toggle_filter_framework_output)
framework_action.setCheckable(True)
framework_action.setChecked(self.showFrameworkOutput())
filter_menu.addAction(framework_action)
filter_menu.addSeparator()
actions_to_group = []
active_script_action = QAction("Active Tab Output", filter_menu)
active_script_action.triggered.connect(self.METHOD_NAME)
actions_to_group.append(active_script_action)
all_script_action = QAction("All Script Output", filter_menu)
all_script_action.triggered.connect(self.show_all_scripts)
actions_to_group.append(all_script_action)
hide_all_script_action = QAction("Hide All Script Output", filter_menu)
hide_all_script_action.triggered.connect(self.hide_all_scripts)
actions_to_group.append(hide_all_script_action)
action_group = QActionGroup(filter_menu)
for action in actions_to_group:
action_group.addAction(action)
filter_menu.addAction(action)
action.setCheckable(True)
if self.showAllScriptOutput():
all_script_action.setChecked(True)
elif self.showActiveScriptOutput():
active_script_action.setChecked(True)
else:
hide_all_script_action.setChecked(True)
return qmenu
def showContextMenu(self, q_position):
self.generateContextMenu().exec_(self.mapToGlobal(q_position))
def show_all_scripts(self):
if not self.showAllScriptOutput():
self.setShowAllScriptOutput(True)
self.setShowActiveScriptOutput(False)
self.filterMessages()
def hide_all_scripts(self):
if self.showActiveScriptOutput() or self.showAllScriptOutput():
self.setShowAllScriptOutput(False)
self.setShowActiveScriptOutput(False)
self.filterMessages()
def METHOD_NAME(self):
if not self.showActiveScriptOutput():
self.setShowAllScriptOutput(False)
self.setShowActiveScriptOutput(True)
self.filterMessages()
def toggle_filter_framework_output(self):
self.setShowFrameworkOutput(not self.showFrameworkOutput())
self.filterMessages()
def append_script_error(self, txt):
"""
Append the given message to the window, marking the message as
output from a Python script with "Error" priority. This function
is hooked into stderr.
"""
self.appendPython(txt, Priority.Error, self.last_executed_script)
def append_script_notice(self, txt):
"""
Append the given message to the window, marking the message as
output from a Python script with "Notice" priority. This
function is hooked into stdout.
"""
self.appendPython(txt, Priority.Notice, self.last_executed_script)
def script_executing(self, script_path):
"""Slot executed when a script is executed in the Workbench."""
self.last_executed_script = script_path
def file_name_modified(self, old_file_name, new_file_name):
self.filePathModified(old_file_name, new_file_name)
if self.activeScript() == old_file_name:
self.setActiveScript(new_file_name)
def current_tab_changed(self, script_path):
self.setActiveScript(script_path)
if self.showActiveScriptOutput():
self.filterMessages()
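# Hypothetical usage sketch (not part of the original module): it requires a Qt
# application and the compiled mantidqt MessageDisplay widget, so it is
# illustrative only. The script path is a made-up example.
if __name__ == "__main__":
    import sys
    from qtpy.QtWidgets import QApplication
    app = QApplication(sys.argv)
    display = MessageDisplay()
    display.script_executing("/tmp/example_script.py")      # mark the active script
    display.append_script_notice("Hello from a script\n")   # shows as Notice output
    display.append_script_error("Something went wrong\n")   # shows as Error output
    display.show()
    sys.exit(app.exec_())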
| null |
5,397 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
""" Defines the state of the event slices which should be reduced."""
import json
import copy
from sans.state.JsonSerializable import JsonSerializable
from sans.state.automatic_setters import automatic_setters
from sans.state.state_functions import is_pure_none_or_not_none, validation_message
from sans.common.enums import SANSFacility
# ----------------------------------------------------------------------------------------------------------------------
# State
# ----------------------------------------------------------------------------------------------------------------------
class StateSliceEvent(metaclass=JsonSerializable):
def __init__(self):
super(StateSliceEvent, self).__init__()
self.start_time = None # : List[Float]
self.end_time = None # : List[Float]
self.event_slice_str: str = ""
def validate(self):
is_invalid = dict()
if not is_pure_none_or_not_none([self.start_time, self.end_time]):
entry = validation_message(
"Missing slice times",
"Makes sure that either both or none are set.",
{"start_time": self.start_time, "end_time": self.end_time},
)
is_invalid.update(entry)
if self.start_time and self.end_time:
# The length of start_time and end_time needs to be identical
if len(self.start_time) != len(self.end_time):
entry = validation_message(
"Bad relation of start and end",
"Makes sure that the start time is smaller than the end time.",
{"start_time": self.start_time, "end_time": self.end_time},
)
is_invalid.update(entry)
# Each entry in start_time and end_time must be a float
if len(self.start_time) == len(self.end_time) and len(self.start_time) > 0:
for item in range(0, len(self.start_time)):
for element1, element2 in zip(self.start_time, self.end_time):
if not isinstance(element1, float) or not isinstance(element2, float):
entry = validation_message(
"Bad relation of start and end time entries",
"The elements need to be floats.",
{"start_time": self.start_time, "end_time": self.end_time},
)
is_invalid.update(entry)
# Check that end_time is not smaller than start_time
if not is_smaller(self.start_time, self.end_time):
entry = validation_message(
"Start time larger than end time.",
"Make sure that the start time is not smaller than the end time.",
{"start_time": self.start_time, "end_time": self.end_time},
)
is_invalid.update(entry)
if is_invalid:
raise ValueError("StateSliceEvent: The provided inputs are illegal. " "Please see: {}".format(json.dumps(is_invalid)))
def monotonically_increasing(to_check):
return all(x <= y for x, y in zip(to_check, to_check[1:]))
def is_smaller(smaller, larger):
return all(x <= y for x, y in zip(smaller, larger))
# ----------------------------------------------------------------------------------------------------------------------
# Builder
# ----------------------------------------------------------------------------------------------------------------------
class StateSliceEventBuilder(object):
@automatic_setters(StateSliceEvent)
def __init__(self):
super(StateSliceEventBuilder, self).__init__()
self.state = StateSliceEvent()
def METHOD_NAME(self):
# Make sure that the product is in a valid state, ie not incomplete
self.state.validate()
return copy.copy(self.state)
# ------------------------------------------
# Factory method for SANStateDataBuilder
# ------------------------------------------
def get_slice_event_builder(data_info):
facility = data_info.facility
if facility is SANSFacility.ISIS:
return StateSliceEventBuilder()
else:
raise NotImplementedError(
"StateSliceEventBuilder: Could not find any valid slice builder for the "
"specified StateData object {0}".format(str(data_info))
)
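# Hypothetical usage sketch (not part of the original module): the stand-in
# data_info object and the set_* calls below assume that automatic_setters
# generates one setter per StateSliceEvent field.
if __name__ == "__main__":
    class _FakeDataInfo:
        facility = SANSFacility.ISIS
    builder = get_slice_event_builder(_FakeDataInfo())
    builder.set_start_time([0.0, 10.0])   # assumed generated setter
    builder.set_end_time([10.0, 20.0])    # assumed generated setter
    state = builder.METHOD_NAME()         # validates, then returns a copy of the state
    print(state.start_time, state.end_time)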
| null |
5,398 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import unittest
from unittest import mock
from sans.gui_logic.models.RunSelectionModel import RunSelectionModel
from sans.gui_logic.models.run_file import SummableRunFile
from sans.gui_logic.models.run_finder import SummableRunFinder
from sans.gui_logic.presenter.RunSelectorPresenter import RunSelectorPresenter
from ui.sans_isis.run_selector_widget import RunSelectorWidget
from fake_signal import FakeSignal
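# Hypothetical sketch (not part of the original test file, kept commented out so
# it does not shadow the import above): a minimal FakeSignal along the lines of
# what the fake_signal helper is assumed to provide, showing how calls such as
# view.addRuns.emit() drive the presenter's connected slots in these tests.
# class FakeSignal:
#     def __init__(self):
#         self._slots = []
#     def connect(self, slot):
#         self._slots.append(slot)
#     def emit(self, *args, **kwargs):
#         for slot in self._slots:
#             slot(*args, **kwargs)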
class RunSelectorPresenterTest(unittest.TestCase):
def setUp(self):
self.view = self._make_mock_view()
self.run_selection = self._make_mock_selection()
self.run_finder = self._make_mock_finder()
self.presenter = self.METHOD_NAME(self.run_selection, self.run_finder, self.view)
def _make_mock_view(self):
mock_view = mock.create_autospec(RunSelectorWidget, spec_set=True)
mock_view.addRuns = FakeSignal()
mock_view.removeRuns = FakeSignal()
mock_view.manageDirectories = FakeSignal()
mock_view.browse = FakeSignal()
mock_view.removeAllRuns = FakeSignal()
return mock_view
def _make_mock_selection(self):
return mock.create_autospec(RunSelectionModel)
def _make_mock_finder(self):
return mock.create_autospec(SummableRunFinder)
def METHOD_NAME(self, run_selection, run_finder, view):
return RunSelectorPresenter("some_title", run_selection, run_finder, view, None)
def test_searches_for_runs_when_add_run_pressed(self):
run_query = "1"
no_runs = ("", [])
self.view.run_list.return_value = run_query
self.run_finder.find_all_from_query.return_value = no_runs
self.view.addRuns.emit()
self.run_finder.find_all_from_query.assert_called_with(run_query)
def _make_fake_run_model(self, run_name):
return SummableRunFile("/home/{}".format(run_name), run_name, is_event_mode=True)
def test_adds_search_results_to_model_when_add_run_pressed(self):
run_name = "1"
run_query = run_name
found_run = self._make_fake_run_model(run_name)
self.view.run_list.return_value = run_query
self.run_finder.find_all_from_query.return_value = ("", [found_run])
self.view.addRuns.emit()
self.run_selection.add_run.assert_called_with(found_run)
def test_handles_error_when_invalid_query(self):
run_query = "1-0"
error_message = "Invalid Query"
self.view.run_list.return_value = run_query
self.run_finder.find_all_from_query.return_value = (error_message, [])
self.view.addRuns.emit()
self.view.invalid_run_query.assert_called_with(error_message)
def test_handles_error_when_run_not_found(self):
run_query = "1-10"
self.view.run_list.return_value = run_query
self.run_finder.find_all_from_query.return_value = ("", [])
self.view.addRuns.emit()
self.view.run_not_found.assert_called()
def test_adds_multiple_search_results_to_model_when_add_run_pressed(self):
run_names = ["1", "009", "12"]
run_query = ",".join(run_names)
found_runs = [self._make_fake_run_model(run_name) for run_name in run_names]
self.view.run_list.return_value = run_query
self.run_finder.find_all_from_query.return_value = ("", found_runs)
self.view.addRuns.emit()
expected = [mock.call.add_run(run) for run in found_runs]
self.run_selection.assert_has_calls(expected)
def test_remove_runs_removes_run_from_model(self):
run_index = 0
self.view.selected_runs.return_value = [run_index]
self.view.removeRuns.emit()
self.run_selection.remove_run.assert_called_with(run_index)
def test_removes_runs_from_model_when_multi_selected(self):
run_indices = [0, 2]
self.view.selected_runs.return_value = run_indices
self.view.removeRuns.emit()
expected = [mock.call.remove_run(index) for index in run_indices]
self.run_selection.assert_has_calls(expected, any_order=True)
def test_removes_runs_in_correct_order_when_multi_selected(self):
run_indices = [2, 1, 5, 0]
self.view.selected_runs.return_value = run_indices
self.view.removeRuns.emit()
expected = [mock.call.remove_run(index) for index in [5, 2, 1, 0]]
self.run_selection.assert_has_calls(expected, any_order=False)
def test_clears_all_runs_from_model_when_clear_pressed(self):
self.view.removeAllRuns.emit()
self.run_selection.clear_all_runs.assert_called()
def test_manage_directories_launches_dialog(self):
self.view.manageDirectories.emit()
self.view.show_directories_manager.assert_called()
def test_browse_to_directory(self):
self.view.browse.emit()
self.view.show_file_picker.assert_called()
if __name__ == "__main__":
unittest.main()
| null |
5,399 |
# pylint: disable=import-outside-toplevel
from __future__ import annotations
import json
import os
from pathlib import Path
from typing import Any
import click
from lektor.i18n import get_default_lang
from lektor.i18n import is_valid_language
from lektor.project import Project
def echo_json(data):
click.echo(json.dumps(data, indent=2).rstrip())
def pruneflag(cli):
return click.option(
"--prune/--no-prune",
default=True,
help="Controls if old " 'artifacts should be pruned. "prune" is the default.',
)(cli)
def extraflag(cli):
return click.option(
"-f",
"--extra-flag",
"extra_flags",
multiple=True,
help="Defines an arbitrary flag. These can be used by plugins "
"to customize the build and deploy process. More information can be "
"found in the documentation of affected plugins.",
)(cli)
class AliasedGroup(click.Group):
# pylint: disable=inconsistent-return-statements
def get_command(self, ctx, cmd_name):
rv = click.Group.get_command(self, ctx, cmd_name)
if rv is not None:
return rv
matches = [x for x in self.list_commands(ctx) if x.startswith(cmd_name)]
if not matches:
return None
if len(matches) == 1:
return click.Group.get_command(self, ctx, matches[0])
ctx.fail("Too many matches: %s" % ", ".join(sorted(matches)))
class Context:
def __init__(self):
self._project_path = os.environ.get("LEKTOR_PROJECT") or None
self._project = None
self._env = None
self._ui_lang = None
def _get_ui_lang(self):
rv = self._ui_lang
if rv is None:
rv = self._ui_lang = get_default_lang()
return rv
def _set_ui_lang(self, value):
self._ui_lang = value
ui_lang = property(_get_ui_lang, _set_ui_lang)
del _get_ui_lang, _set_ui_lang
def set_project_path(self, value):
self._project_path = value
self._project = None
def get_project(self, silent=False):
if self._project is not None:
return self._project
if self._project_path is not None:
rv = Project.from_path(self._project_path)
else:
rv = Project.discover()
if rv is None:
if silent:
return None
if self._project_path is None:
raise click.UsageError(
"Could not automatically discover "
"project. A Lektor project must "
"exist in the working directory or "
"any of the parent directories."
)
raise click.UsageError('Could not find project "%s"' % self._project_path)
self._project = rv
return rv
def get_default_output_path(self):
rv = os.environ.get("LEKTOR_OUTPUT_PATH")
if rv is not None:
return rv
return self.get_project().get_output_path()
def get_env(self, extra_flags=None):
if self._env is not None:
return self._env
from lektor.environment import Environment
env = Environment(
self.get_project(), load_plugins=False, extra_flags=extra_flags
)
self._env = env
return env
def load_plugins(self, reinstall=False, extra_flags=None):
from .packages import load_packages
load_packages(self.get_env(extra_flags=extra_flags), reinstall=reinstall)
if not reinstall:
from .pluginsystem import initialize_plugins
initialize_plugins(self.get_env())
pass_context = click.make_pass_decorator(Context, ensure=True)
def validate_language(ctx, param, value):
if value is not None and not is_valid_language(value):
raise click.BadParameter('Unsupported language "%s".' % value)
return value
class ResolvedPath(click.Path):
"""A click paramter type for a resolved path.
We could just use ``click.Path(resolve_path=True)`` except that that
fails sometimes under Windows running python <= 3.9.
See https://github.com/pallets/click/issues/2466
"""
def __init__(self, writable=False, file_okay=True):
super().__init__(
resolve_path=True, allow_dash=False, writable=writable, file_okay=file_okay
)
def METHOD_NAME(
self, value: Any, param: click.Parameter | None, ctx: click.Context | None
) -> Any:
abspath = Path(value).absolute()
# fsdecode to ensure that the return value is a str.
# (with click<8.0.3 Path.convert will return Path if passed a Path)
return os.fsdecode(super().METHOD_NAME(abspath, param, ctx))
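# Hypothetical usage sketch (not part of the original module): using ResolvedPath
# as a click parameter type so that a command always receives an absolute,
# resolved path string. The command and option names are made up.
if __name__ == "__main__":
    @click.command()
    @click.option("--output-path", "output_path", type=ResolvedPath(file_okay=False), default=".")
    def show_output_path(output_path):
        click.echo(output_path)
    show_output_path()  # pylint: disable=no-value-for-parameter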
| null |