id (int64, 0-300k) | label (stringlengths 1-74, ⌀) | text (stringlengths 4k-8k) |
---|---|---|
2,800 | from str |
# SPDX-License-Identifier: MIT
from __future__ import annotations
import re
from typing import TYPE_CHECKING, Any, Dict, Optional, Union
from . import utils
from .asset import Asset, AssetMixin
from .errors import InvalidArgument
__all__ = ("PartialEmoji",)
if TYPE_CHECKING:
from datetime import datetime
from typing_extensions import Self
from .state import ConnectionState
from .types.emoji import DefaultReaction
from .types.message import PartialEmoji as PartialEmojiPayload
class _EmojiTag:
__slots__ = ()
id: int
def _to_partial(self) -> PartialEmoji:
raise NotImplementedError
class PartialEmoji(_EmojiTag, AssetMixin):
"""Represents a "partial" emoji.
This model will be given in two scenarios:
- "Raw" data events such as :func:`on_raw_reaction_add`
- Custom emoji that the bot cannot see from e.g. :attr:`Message.reactions`
.. container:: operations
.. describe:: x == y
Checks if two emoji are the same.
.. describe:: x != y
Checks if two emoji are not the same.
.. describe:: hash(x)
Return the emoji's hash.
.. describe:: str(x)
            Returns the emoji rendered for Discord.
Attributes
----------
name: Optional[:class:`str`]
The custom emoji name, if applicable, or the unicode codepoint
of the non-custom emoji. This can be ``None`` if the emoji
got deleted (e.g. removing a reaction with a deleted emoji).
animated: :class:`bool`
Whether the emoji is animated or not.
id: Optional[:class:`int`]
The ID of the custom emoji, if applicable.
"""
__slots__ = ("animated", "name", "id", "_state")
_CUSTOM_EMOJI_RE = re.compile(
r"<?(?P<animated>a)?:?(?P<name>[A-Za-z0-9\_]+):(?P<id>[0-9]{13,20})>?"
)
if TYPE_CHECKING:
id: Optional[int]
def __init__(self, *, name: str, animated: bool = False, id: Optional[int] = None) -> None:
self.animated = animated
self.name = name
self.id = id
self._state: Optional[ConnectionState] = None
@classmethod
def from_dict(cls, data: Union[PartialEmojiPayload, Dict[str, Any]]) -> Self:
return cls(
animated=data.get("animated", False),
id=utils.get_as_snowflake(data, "id"),
name=data.get("name") or "",
)
@classmethod
def from_default_reaction(cls, data: DefaultReaction) -> Self:
return cls(
id=utils.get_as_snowflake(data, "emoji_id"),
name=data.get("emoji_name") or "",
)
@classmethod
def METHOD_NAME(cls, value: str) -> Self:
"""Converts a Discord string representation of an emoji to a :class:`PartialEmoji`.
The formats accepted are:
- ``a:name:id``
- ``<a:name:id>``
- ``name:id``
- ``<:name:id>``
If the format does not match then it is assumed to be a unicode emoji.
.. versionadded:: 2.0
Parameters
----------
value: :class:`str`
The string representation of an emoji.
Returns
-------
:class:`PartialEmoji`
The partial emoji from this string.
"""
match = cls._CUSTOM_EMOJI_RE.match(value)
if match is not None:
groups = match.groupdict()
animated = bool(groups["animated"])
emoji_id = int(groups["id"])
name = groups["name"]
return cls(name=name, animated=animated, id=emoji_id)
return cls(name=value, id=None, animated=False)
def to_dict(self) -> Dict[str, Any]:
o: Dict[str, Any] = {"name": self.name}
if self.id:
o["id"] = self.id
if self.animated:
o["animated"] = self.animated
return o
def _to_partial(self) -> PartialEmoji:
return self
@classmethod
def with_state(
cls,
state: ConnectionState,
*,
name: str,
animated: bool = False,
id: Optional[int] = None,
) -> Self:
self = cls(name=name, animated=animated, id=id)
self._state = state
return self
def __str__(self) -> str:
if self.id is None:
return self.name
if self.animated:
return f"<a:{self.name}:{self.id}>"
return f"<:{self.name}:{self.id}>"
def __repr__(self) -> str:
return (
f"<{self.__class__.__name__} animated={self.animated} name={self.name!r} id={self.id}>"
)
def __eq__(self, other: Any) -> bool:
if self.is_unicode_emoji():
return isinstance(other, PartialEmoji) and self.name == other.name
if isinstance(other, _EmojiTag):
return self.id == other.id
return False
def __ne__(self, other: Any) -> bool:
return not self.__eq__(other)
def __hash__(self) -> int:
return hash((self.id, self.name))
def is_custom_emoji(self) -> bool:
""":class:`bool`: Checks if this is a custom non-Unicode emoji."""
return self.id is not None
def is_unicode_emoji(self) -> bool:
""":class:`bool`: Checks if this is a Unicode emoji."""
return self.id is None
def _as_reaction(self) -> str:
if self.id is None:
return self.name
return f"{self.name}:{self.id}"
@property
def created_at(self) -> Optional[datetime]:
"""Optional[:class:`datetime.datetime`]: Returns the emoji's creation time in UTC, or None if Unicode emoji.
.. versionadded:: 1.6
"""
if self.id is None:
return None
return utils.snowflake_time(self.id)
@property
def url(self) -> str:
""":class:`str`: Returns the URL of the emoji, if it is custom.
        If this isn't a custom emoji then an empty string is returned.
"""
if self.is_unicode_emoji():
return ""
fmt = "gif" if self.animated else "png"
return f"{Asset.BASE}/emojis/{self.id}.{fmt}"
async def read(self) -> bytes:
if self.is_unicode_emoji():
raise InvalidArgument("PartialEmoji is not a custom emoji")
return await super().read()
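# Illustrative usage (a hedged sketch, not part of the module; the parsing classmethod is the
# one masked as METHOD_NAME above, and the emoji name/id below are made up):
#
#     e = PartialEmoji.METHOD_NAME("<a:party_blob:123456789012345678>")
#     assert e.animated and e.id == 123456789012345678 and e.name == "party_blob"
#     str(e)        # '<a:party_blob:123456789012345678>'
#     e.to_dict()   # {'name': 'party_blob', 'id': 123456789012345678, 'animated': True}
#
#     u = PartialEmoji.METHOD_NAME("🔥")   # no custom-emoji match, so treated as unicode
#     assert u.is_unicode_emoji() and u.id is None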
|
2,801 | region |
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetStaticMemberResult',
'AwaitableGetStaticMemberResult',
'get_static_member',
'get_static_member_output',
]
@pulumi.output_type
class GetStaticMemberResult:
"""
StaticMember Item.
"""
def __init__(__self__, etag=None, id=None, name=None, provisioning_state=None, METHOD_NAME=None, resource_id=None, system_data=None, type=None):
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'region' to be a str")
pulumi.set(__self__, "region", METHOD_NAME)
if resource_id and not isinstance(resource_id, str):
raise TypeError("Expected argument 'resource_id' to be a str")
pulumi.set(__self__, "resource_id", resource_id)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def etag(self) -> str:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the scope assignment resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
Resource region.
"""
return pulumi.get(self, "region")
@property
@pulumi.getter(name="resourceId")
def resource_id(self) -> Optional[str]:
"""
Resource Id.
"""
return pulumi.get(self, "resource_id")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
The system metadata related to this resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetStaticMemberResult(GetStaticMemberResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetStaticMemberResult(
etag=self.etag,
id=self.id,
name=self.name,
provisioning_state=self.provisioning_state,
METHOD_NAME=self.METHOD_NAME,
resource_id=self.resource_id,
system_data=self.system_data,
type=self.type)
def get_static_member(network_group_name: Optional[str] = None,
network_manager_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
static_member_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetStaticMemberResult:
"""
Gets the specified static member.
:param str network_group_name: The name of the network group.
:param str network_manager_name: The name of the network manager.
:param str resource_group_name: The name of the resource group.
:param str static_member_name: The name of the static member.
"""
__args__ = dict()
__args__['networkGroupName'] = network_group_name
__args__['networkManagerName'] = network_manager_name
__args__['resourceGroupName'] = resource_group_name
__args__['staticMemberName'] = static_member_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:network/v20230501:getStaticMember', __args__, opts=opts, typ=GetStaticMemberResult).value
return AwaitableGetStaticMemberResult(
etag=pulumi.get(__ret__, 'etag'),
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
METHOD_NAME=pulumi.get(__ret__, 'region'),
resource_id=pulumi.get(__ret__, 'resource_id'),
system_data=pulumi.get(__ret__, 'system_data'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_static_member)
def get_static_member_output(network_group_name: Optional[pulumi.Input[str]] = None,
network_manager_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
static_member_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetStaticMemberResult]:
"""
Gets the specified static member.
:param str network_group_name: The name of the network group.
:param str network_manager_name: The name of the network manager.
:param str resource_group_name: The name of the resource group.
:param str static_member_name: The name of the static member.
"""
...
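# Illustrative usage (a hedged sketch; the resource names are hypothetical, and the property
# masked as METHOD_NAME above exposes the static member's region):
#
#     result = get_static_member(
#         resource_group_name="example-rg",
#         network_manager_name="example-manager",
#         network_group_name="example-group",
#         static_member_name="example-member",
#     )
#     pulumi.export("staticMemberResourceId", result.resource_id)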
|
2,802 | create bias correction command |
# Copyright (c) 2023 Intel Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List, Optional, Tuple
import numpy as np
import onnx
from nncf.common.graph import NNCFGraph
from nncf.common.graph import NNCFNode
from nncf.common.graph.transformations.commands import TargetType
from nncf.common.tensor_statistics.collectors import ReductionShape
from nncf.common.utils.backend import BackendType
from nncf.onnx.graph.model_utils import remove_fq_from_inputs
from nncf.onnx.graph.node_utils import get_bias_value
from nncf.onnx.graph.node_utils import is_any_weight_quantized
from nncf.onnx.graph.node_utils import is_node_with_bias
from nncf.onnx.graph.onnx_graph import ONNXGraph
from nncf.onnx.graph.transformations.command_creation import METHOD_NAME
from nncf.onnx.graph.transformations.commands import ONNXBiasCorrectionCommand
from nncf.onnx.graph.transformations.commands import ONNXModelExtractionCommand
from nncf.onnx.graph.transformations.commands import ONNXNullBiasInsertionCommand
from nncf.onnx.graph.transformations.commands import ONNXOutputInsertionCommand
from nncf.onnx.graph.transformations.commands import ONNXTargetPoint
from nncf.onnx.statistics.collectors import ONNXMeanStatisticCollector
from nncf.onnx.statistics.collectors import ONNXNNCFCollectorTensorProcessor
from nncf.onnx.statistics.collectors import ONNXRawStatisticCollector
from nncf.onnx.tensor import ONNXNNCFTensor
from nncf.quantization.algorithms.bias_correction.backend import ALGO_BACKENDS
from nncf.quantization.algorithms.bias_correction.backend import BiasCorrectionAlgoBackend
# pylint:disable=too-many-public-methods
@ALGO_BACKENDS.register(BackendType.ONNX)
class ONNXBiasCorrectionAlgoBackend(BiasCorrectionAlgoBackend):
@property
def tensor_processor(self) -> ONNXNNCFCollectorTensorProcessor:
return ONNXNNCFCollectorTensorProcessor()
@property
def types_to_insert_bias(self):
return []
@staticmethod
def target_point(target_type: TargetType, target_node_name: str, port_id: int) -> ONNXTargetPoint:
return ONNXTargetPoint(target_type, target_node_name, port_id)
@staticmethod
def METHOD_NAME(
node: NNCFNode, bias_value: np.ndarray, nncf_graph: NNCFGraph
) -> ONNXBiasCorrectionCommand:
return METHOD_NAME(node, bias_value)
@staticmethod
def model_extraction_command(inputs: List[str], outputs: List[str]) -> ONNXModelExtractionCommand:
return ONNXModelExtractionCommand(inputs, outputs)
@staticmethod
def create_bias_insertion_command(node: NNCFNode) -> ONNXNullBiasInsertionCommand:
return ONNXNullBiasInsertionCommand(node)
@staticmethod
def output_insertion_command(nncf_graph: NNCFGraph, target_point: ONNXTargetPoint) -> ONNXOutputInsertionCommand:
nncf_input_node_next_nodes = {}
for input_node in nncf_graph.get_input_nodes():
next_nodes = nncf_graph.get_next_nodes(input_node)
nncf_input_node_next_nodes[input_node.node_name] = [node.node_name for node in next_nodes]
return ONNXOutputInsertionCommand(target_point, nncf_input_node_next_nodes)
@staticmethod
def mean_statistic_collector(
reduction_shape: ReductionShape,
inplace: bool,
num_samples: Optional[int] = None,
window_size: Optional[int] = None,
) -> ONNXMeanStatisticCollector:
return ONNXMeanStatisticCollector(reduction_shape, num_samples, window_size)
@staticmethod
    def raw_statistic_collector(inplace: bool, num_samples: Optional[int] = None) -> ONNXRawStatisticCollector:
return ONNXRawStatisticCollector(num_samples)
@staticmethod
def process_model_output(raw_data: Dict, output_name: str) -> ONNXNNCFTensor:
return ONNXNNCFTensor(raw_data[output_name])
@staticmethod
def get_activation_port_id(node: NNCFNode, nncf_graph: NNCFGraph) -> Tuple[int, int]:
return 0
@staticmethod
def get_bias_value(node: NNCFNode, model: onnx.ModelProto, nncf_graph: NNCFGraph) -> np.ndarray:
return get_bias_value(node, model)
@staticmethod
def get_input_name(model: onnx.ModelProto, node_name: str) -> str:
onnx_graph = ONNXGraph(model)
node = onnx_graph.get_node_by_name(node_name)
return node.input[0]
@staticmethod
    def get_output_name(model: onnx.ModelProto, node_name: str, output_id: int) -> str:
onnx_graph = ONNXGraph(model)
node = onnx_graph.get_node_by_name(node_name)
return node.output[output_id]
@staticmethod
def is_quantized_weights(node: NNCFNode, nncf_graph: NNCFGraph) -> bool:
return is_any_weight_quantized(node, nncf_graph)
@staticmethod
def is_node_with_bias(node: NNCFNode, nncf_graph: NNCFGraph) -> bool:
return is_node_with_bias(node)
@staticmethod
def remove_fq_from_inputs(model: onnx.ModelProto, nncf_graph: NNCFGraph) -> onnx.ModelProto:
return remove_fq_from_inputs(model, nncf_graph)
@staticmethod
def insert_null_biases(model: onnx.ModelProto, nncf_graph: NNCFGraph) -> onnx.ModelProto:
return model
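# Illustrative call (a hedged sketch; `node` and `nncf_graph` stand in for real NNCFNode and
# NNCFGraph objects, and the factory is the staticmethod masked as METHOD_NAME above):
#
#     backend = ONNXBiasCorrectionAlgoBackend()
#     cmd = backend.METHOD_NAME(node, np.zeros(64, dtype=np.float32), nncf_graph)
#     # per the annotation, `cmd` is an ONNXBiasCorrectionCommand carrying the corrected bias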
|
2,803 | product repository |
# pylint: disable=redefined-outer-name
# pylint: disable=unused-argument
# pylint: disable=unused-variable
# pylint: disable=too-many-arguments
from typing import Any
import pytest
import sqlalchemy as sa
from aiohttp import web
from aiohttp.test_utils import TestClient
from aiopg.sa.result import RowProxy
from pytest_mock import MockerFixture
from simcore_postgres_database import utils_products
from simcore_postgres_database.models.products import (
EmailFeedback,
Forum,
IssueTracker,
Manual,
Vendor,
WebFeedback,
products,
)
from simcore_service_webserver.db.plugin import APP_DB_ENGINE_KEY
from simcore_service_webserver.products._db import ProductRepository
from simcore_service_webserver.products._middlewares import (
_get_app_default_product_name,
)
from simcore_service_webserver.products._model import Product
@pytest.fixture
def app(client: TestClient) -> web.Application:
assert client.app
return client.app
@pytest.fixture
async def product_row(app: web.Application, product_data: dict[str, Any]) -> RowProxy:
"""Injects product_data in products table and returns the associated table's database row
    Note that product_data is a SUBSET of product_row (e.g. modified datetimes, etc.)!
"""
engine = app[APP_DB_ENGINE_KEY]
assert engine
async with engine.acquire() as conn:
# writes
insert_stmt = (
products.insert().values(**product_data).returning(products.c.name)
)
name = await conn.scalar(insert_stmt)
# reads
select_stmt = sa.select(products).where(products.c.name == name)
row = await (await conn.execute(select_stmt)).fetchone()
assert row
return row
@pytest.fixture
async def METHOD_NAME(
app: web.Application, mocker: MockerFixture
) -> ProductRepository:
assert product_row
fake_request = mocker.MagicMock()
fake_request.app = app
repo = ProductRepository(request=fake_request)
return repo
@pytest.mark.parametrize(
"product_data",
[
# DATA introduced by operator e.g. in adminer
{
"name": "tis",
"display_name": "COMPLETE example",
"short_name": "dummy",
"host_regex": r"([\.-]{0,1}dummy[\.-])",
"support_email": "[email protected]",
"twilio_messaging_sid": None,
"vendor": Vendor(
name="ACME",
copyright="© ACME correcaminos",
url="https://acme.com",
license_url="http://docs.acme.app/#/license-terms",
invitation_url="http://docs.acme.app/#/how-to-request-invitation",
has_landing_page=False,
),
"issues": [
IssueTracker(
label="github",
login_url="https://github.com/ITISFoundation/osparc-simcore",
new_url="https://github.com/ITISFoundation/osparc-simcore/issues/new/choose",
),
IssueTracker(
label="fogbugz",
login_url="https://fogbugz.com/login",
new_url="https://fogbugz.com/new?project=123",
),
],
"manuals": [
Manual(label="main", url="doc.acme.com"),
Manual(label="z43", url="yet-another-manual.acme.com"),
],
"support": [
Forum(label="forum", kind="forum", url="forum.acme.com"),
EmailFeedback(label="email", kind="email", email="[email protected]"),
WebFeedback(label="web-form", kind="web", url="support.acme.com"),
],
},
# Minimal
{
"name": "s4llite",
"display_name": "MINIMAL example",
"short_name": "dummy",
"host_regex": "([\\.-]{0,1}osparc[\\.-])",
"support_email": "[email protected]",
},
],
ids=lambda d: d["display_name"],
)
async def test_product_repository_get_product(
METHOD_NAME: ProductRepository,
product_data: dict[str, Any],
product_row: RowProxy,
app: web.Application,
mocker: MockerFixture,
):
# check differences between the original product_data and the product_row in database
assert set(product_data.keys()).issubset(set(product_row.keys()))
common_keys = set(product_data.keys()).intersection(set(product_row.keys()))
assert {k: product_data[k] for k in common_keys} == {
k: product_row[k] for k in common_keys
}
# check RowProxy -> pydantic's Product
product = Product.from_orm(product_row)
print(product.json(indent=1))
# product repo
assert METHOD_NAME.engine
assert await METHOD_NAME.get_product(product.name) == product
    # tests that the default-product definitions from utils_products and the web-server products plugin are in sync
mock_request = mocker.MagicMock()
mock_request.app = app
async with METHOD_NAME.engine.acquire() as conn:
default_product = await utils_products.get_default_product_name(conn)
assert default_product == _get_app_default_product_name(mock_request)
|
2,804 | sign |
"""JWS Linked Data class."""
import json
from datetime import datetime
from typing import Union
from pyld.jsonld import JsonLdProcessor
from ....wallet.util import b64_to_bytes, bytes_to_b64, str_to_b64, b64_to_str
from ..crypto import _KeyPair as KeyPair
from ..document_loader import DocumentLoaderMethod
from ..error import LinkedDataProofException
from .linked_data_signature import LinkedDataSignature
class JwsLinkedDataSignature(LinkedDataSignature):
"""JWS Linked Data class."""
def __init__(
self,
*,
signature_type: str,
algorithm: str,
required_key_type: str,
key_pair: KeyPair,
proof: dict = None,
verification_method: str = None,
date: Union[datetime, str] = None,
):
"""Create new JwsLinkedDataSignature instance.
Must be subclassed, not initialized directly.
Args:
signature_type (str): Signature type for the proof, provided by subclass
algorithm (str): JWS alg to use, provided by subclass
required_key_type (str): Required key type in verification method.
key_pair (KeyPair): Key pair to use, provided by subclass
proof (dict, optional): A JSON-LD document with options to use for the
`proof` node (e.g. any other custom fields can be provided here
using a context different from security-v2).
verification_method (str, optional): A key id URL to the paired public key.
date (datetime, optional): Signing date to use. Defaults to now
"""
super().__init__(
signature_type=signature_type,
verification_method=verification_method,
proof=proof,
date=date,
)
self.algorithm = algorithm
self.key_pair = key_pair
self.required_key_type = required_key_type
async def METHOD_NAME(self, *, verify_data: bytes, proof: dict) -> dict:
"""Sign the data and add it to the proof.
Adds a jws to the proof that can be used for multiple
signature algorithms.
Args:
verify_data (bytes): The data to sign.
proof (dict): The proof to add the signature to
Returns:
dict: The proof object with the added signature
"""
header = {"alg": self.algorithm, "b64": False, "crit": ["b64"]}
encoded_header = self._encode_header(header)
data = self._create_jws(encoded_header=encoded_header, verify_data=verify_data)
signature = await self.key_pair.METHOD_NAME(data)
encoded_signature = bytes_to_b64(
signature, urlsafe=True, pad=False, encoding="utf-8"
)
proof["jws"] = encoded_header + ".." + encoded_signature
return proof
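    # Illustrative shape of the value set above (a sketch, segments abbreviated): because the
    # header carries "b64": False and crit: ["b64"], the result is a detached JWS of the form
    #     proof["jws"] == "<b64url(header)>" + ".." + "<b64url(signature)>"
    # i.e. the payload segment between the two dots is left empty.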
async def verify_signature(
self,
*,
verify_data: bytes,
verification_method: dict,
document: dict,
proof: dict,
document_loader: DocumentLoaderMethod,
):
"""Verify the data against the proof.
Checks for a jws on the proof.
Args:
verify_data (bytes): The data to check
verification_method (dict): The verification method to use.
document (dict): The document the verify data is derived for as extra context
proof (dict): The proof to check
document_loader (DocumentLoader): Document loader used for resolving
Returns:
bool: Whether the signature is valid for the data
"""
if not (isinstance(proof.get("jws"), str) and (".." in proof.get("jws"))):
raise LinkedDataProofException(
'The proof does not contain a valid "jws" property.'
)
encoded_header, payload, encoded_signature = proof.get("jws").split(".")
header = self._decode_header(encoded_header)
self._validate_header(header)
signature = b64_to_bytes(encoded_signature, urlsafe=True)
data = self._create_jws(encoded_header=encoded_header, verify_data=verify_data)
        # If the key pair has no public key yet, create a new key pair
# from the verification method. We don't want to overwrite data
# on the original key pair
key_pair = self.key_pair
if not key_pair.has_public_key:
key_pair = key_pair.from_verification_method(verification_method)
return await key_pair.verify(data, signature)
def _decode_header(self, encoded_header: str) -> dict:
"""Decode header."""
header = None
try:
header = json.loads(b64_to_str(encoded_header, urlsafe=True))
except Exception:
raise LinkedDataProofException("Could not parse JWS header.")
return header
def _encode_header(self, header: dict) -> str:
"""Encode header."""
return str_to_b64(json.dumps(header), urlsafe=True, pad=False)
def _create_jws(self, *, encoded_header: str, verify_data: bytes) -> bytes:
"""Compose JWS."""
return (encoded_header + ".").encode("utf-8") + verify_data
def _validate_header(self, header: dict):
"""Validate the JWS header, throws if not ok."""
if not (header and isinstance(header, dict)):
raise LinkedDataProofException("Invalid JWS header.")
if not (
header.get("alg") == self.algorithm
and header.get("b64") is False
and isinstance(header.get("crit"), list)
and len(header.get("crit")) == 1
and header.get("crit")[0] == "b64"
and len(header.keys()) == 3
):
raise LinkedDataProofException(
f"Invalid JWS header params for {self.signature_type}"
)
def _assert_verification_method(self, verification_method: dict):
"""Assert verification method. Throws if not ok."""
if not JsonLdProcessor.has_value(
verification_method, "type", self.required_key_type
):
raise LinkedDataProofException(
f"Invalid key type. The key type must be {self.required_key_type}"
)
def _get_verification_method(
self, *, proof: dict, document_loader: DocumentLoaderMethod
):
"""Get verification method.
Overwrites base get verification method to assert key type.
"""
verification_method = super()._get_verification_method(
proof=proof, document_loader=document_loader
)
self._assert_verification_method(verification_method)
return verification_method
|
2,805 | reset |
import numpy as np
import os
import sys
import ntpath
import time
from . import util, html
from subprocess import Popen, PIPE
import torch
if sys.version_info[0] == 2:
VisdomExceptionBase = Exception
else:
VisdomExceptionBase = ConnectionError
def save_images(webpage, visuals, image_path, aspect_ratio=1.0, width=256):
"""Save images to the disk.
Parameters:
        webpage (the HTML class) -- the HTML webpage class that stores these images (see html.py for more details)
visuals (OrderedDict) -- an ordered dictionary that stores (name, images (either tensor or numpy) ) pairs
image_path (str) -- the string is used to create image paths
aspect_ratio (float) -- the aspect ratio of saved images
width (int) -- the images will be resized to width x width
This function will save images stored in 'visuals' to the HTML file specified by 'webpage'.
"""
image_dir = webpage.get_image_dir()
short_path = ntpath.basename(image_path[0])
name = os.path.splitext(short_path)[0]
webpage.add_header(name)
ims, txts, links = [], [], []
for label, im_data in visuals.items():
im = util.tensor2im(im_data)
image_name = '%s_%s.png' % (name, label)
save_path = os.path.join(image_dir, image_name)
util.save_image(im, save_path, aspect_ratio=aspect_ratio)
ims.append(image_name)
txts.append(label)
links.append(image_name)
webpage.add_images(ims, txts, links, width=width)
class Visualizer():
"""This class includes several functions that can display/save images and print/save logging information.
It uses a Python library 'visdom' for display, and a Python library 'dominate' (wrapped in 'HTML') for creating HTML files with images.
"""
def __init__(self, opt):
"""Initialize the Visualizer class
Parameters:
opt -- stores all the experiment flags; needs to be a subclass of BaseOptions
Step 1: Cache the training/test options
Step 2: connect to a visdom server
        Step 3: create an HTML object for saving HTML files
Step 4: create a logging file to store training losses
"""
self.opt = opt # cache the option
self.display_id = opt.display_id
self.use_html = opt.isTrain and not opt.no_html
self.win_size = opt.display_winsize
self.name = opt.name
self.port = opt.display_port
self.saved = False
if self.use_html: # create an HTML object at <checkpoints_dir>/web/; images will be saved under <checkpoints_dir>/web/images/
self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web')
self.img_dir = os.path.join(self.web_dir, 'images')
print('create web directory %s...' % self.web_dir)
util.mkdirs([self.web_dir, self.img_dir])
# create a logging file to store training losses
self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt')
with open(self.log_name, "a") as log_file:
now = time.strftime("%c")
log_file.write('================ Training Loss (%s) ================\n' % now)
def METHOD_NAME(self):
"""Reset the self.saved status"""
self.saved = False
def create_visdom_connections(self):
"""If the program could not connect to Visdom server, this function will start a new server at port < self.port > """
cmd = sys.executable + ' -m visdom.server -p %d &>/dev/null &' % self.port
print('\n\nCould not connect to Visdom server. \n Trying to start a server....')
print('Command: %s' % cmd)
Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
def display_current_results(self, visuals, epoch, save_result):
"""Display current results on visdom; save current results to an HTML file.
Parameters:
visuals (OrderedDict) - - dictionary of images to display or save
epoch (int) - - the current epoch
save_result (bool) - - if save the current results to an HTML file
"""
if self.use_html and (save_result or not self.saved): # save images to an HTML file if they haven't been saved.
self.saved = True
# save images to the disk
for label, image in visuals.items():
image_numpy = util.tensor2im(image)
img_path = os.path.join(self.img_dir, 'epoch%.3d_%s.png' % (epoch, label))
util.save_image(image_numpy, img_path)
# update website
webpage = html.HTML(self.web_dir, 'Experiment name = %s' % self.name, refresh=1)
for n in range(epoch, 0, -1):
webpage.add_header('epoch [%d]' % n)
ims, txts, links = [], [], []
for label, image_numpy in visuals.items():
# image_numpy = util.tensor2im(image)
img_path = 'epoch%.3d_%s.png' % (n, label)
ims.append(img_path)
txts.append(label)
links.append(img_path)
webpage.add_images(ims, txts, links, width=self.win_size)
webpage.save()
# def plot_current_losses(self, epoch, counter_ratio, losses):
# """display the current losses on visdom display: dictionary of error labels and values
#
# Parameters:
# epoch (int) -- current epoch
# counter_ratio (float) -- progress (percentage) in the current epoch, between 0 to 1
# losses (OrderedDict) -- training losses stored in the format of (name, float) pairs
# """
# if not hasattr(self, 'plot_data'):
# self.plot_data = {'X': [], 'Y': [], 'legend': list(losses.keys())}
# self.plot_data['X'].append(epoch + counter_ratio)
# self.plot_data['Y'].append([losses[k] for k in self.plot_data['legend']])
# try:
# self.vis.line(
# X=np.stack([np.array(self.plot_data['X'])] * len(self.plot_data['legend']), 1),
# Y=np.array(self.plot_data['Y']),
# opts={
# 'title': self.name + ' loss over time',
# 'legend': self.plot_data['legend'],
# 'xlabel': 'epoch',
# 'ylabel': 'loss'},
# win=self.display_id)
# except VisdomExceptionBase:
# self.create_visdom_connections()
# losses: same format as |losses| of plot_current_losses
def print_current_losses(self, epoch, iters, losses, t_comp, t_data):
"""print current losses on console; also save the losses to the disk
Parameters:
epoch (int) -- current epoch
iters (int) -- current training iteration during this epoch (reset to 0 at the end of every epoch)
losses (OrderedDict) -- training losses stored in the format of (name, float) pairs
t_comp (float) -- computational time per data point (normalized by batch_size)
t_data (float) -- data loading time per data point (normalized by batch_size)
"""
message = '(epoch: %d, iters: %d, time: %.3f, data: %.3f) ' % (epoch, iters, t_comp, t_data)
for k, v in losses.items():
message += '%s: %.3f ' % (k, v)
print(message) # print the message
with open(self.log_name, "a") as log_file:
log_file.write('%s\n' % message) # save the message
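# Illustrative usage (a hedged sketch; `opt` is assumed to be a parsed options object exposing
# the attributes read in __init__, and the loop bounds/loss names are hypothetical):
#
#     vis = Visualizer(opt)
#     for epoch in range(1, 10):
#         vis.METHOD_NAME()   # clear the `saved` flag so results are written once per epoch
#         ...
#         vis.print_current_losses(epoch, 100, {'G_GAN': 0.7, 'D_real': 0.3},
#                                  t_comp=0.05, t_data=0.01)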
|
2,806 | start |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import logging
from nvflare.apis.fl_constant import FLContextKey, ServerCommandKey
from nvflare.apis.fl_context import FLContext
from nvflare.apis.utils.fl_context_utils import get_serializable_data
from nvflare.fuel.f3.cellnet.cell import Cell
from nvflare.fuel.f3.cellnet.core_cell import MessageHeaderKey, ReturnCode, make_reply
from nvflare.fuel.f3.message import Message as CellMessage
from nvflare.private.defs import CellChannel, CellMessageHeaderKeys, new_cell_message
from .server_commands import ServerCommands
class ServerCommandAgent(object):
def __init__(self, engine, cell: Cell) -> None:
"""To init the CommandAgent.
Args:
listen_port: port to listen the command
"""
self.logger = logging.getLogger(self.__class__.__name__)
self.asked_to_stop = False
self.engine = engine
self.cell = cell
def METHOD_NAME(self):
self.cell.register_request_cb(
channel=CellChannel.SERVER_COMMAND,
topic="*",
cb=self.execute_command,
)
self.cell.register_request_cb(
channel=CellChannel.AUX_COMMUNICATION,
topic="*",
cb=self.aux_communicate,
)
self.logger.info(f"ServerCommandAgent cell register_request_cb: {self.cell.get_fqcn()}")
def execute_command(self, request: CellMessage) -> CellMessage:
if not isinstance(request, CellMessage):
raise RuntimeError("request must be CellMessage but got {}".format(type(request)))
command_name = request.get_header(MessageHeaderKey.TOPIC)
# data = fobs.loads(request.payload)
data = request.payload
token = request.get_header(CellMessageHeaderKeys.TOKEN, None)
# client_name = request.get_header(CellMessageHeaderKeys.CLIENT_NAME, None)
client = None
if token:
client = self._get_client(token)
if client:
data.set_header(ServerCommandKey.FL_CLIENT, client)
command = ServerCommands.get_command(command_name)
if command:
if command_name in ServerCommands.client_request_commands_names:
if not client:
return make_reply(
ReturnCode.AUTHENTICATION_ERROR,
"Request from client: missing client token",
None,
)
with self.engine.new_context() as new_fl_ctx:
if command_name in ServerCommands.client_request_commands_names:
state_check = command.get_state_check(new_fl_ctx)
error = self.engine.server.authentication_check(request, state_check)
if error:
return make_reply(ReturnCode.AUTHENTICATION_ERROR, error, None)
reply = command.process(data=data, fl_ctx=new_fl_ctx)
if reply is not None:
return_message = new_cell_message({}, reply)
return_message.set_header(MessageHeaderKey.RETURN_CODE, ReturnCode.OK)
else:
return_message = make_reply(ReturnCode.PROCESS_EXCEPTION, "No process results", None)
return return_message
else:
return make_reply(ReturnCode.INVALID_REQUEST, "No server command found", None)
def _get_client(self, token):
fl_server = self.engine.server
client_manager = fl_server.client_manager
clients = client_manager.clients
return clients.get(token)
def aux_communicate(self, request: CellMessage) -> CellMessage:
assert isinstance(request, CellMessage), "request must be CellMessage but got {}".format(type(request))
data = request.payload
topic = request.get_header(MessageHeaderKey.TOPIC)
with self.engine.new_context() as fl_ctx:
server_state = self.engine.server.server_state
state_check = server_state.aux_communicate(fl_ctx)
error = self.engine.server.authentication_check(request, state_check)
            if error:
                return make_reply(ReturnCode.AUTHENTICATION_ERROR, error, None)
engine = fl_ctx.get_engine()
reply = engine.dispatch(topic=topic, request=data, fl_ctx=fl_ctx)
shared_fl_ctx = FLContext()
shared_fl_ctx.set_public_props(copy.deepcopy(get_serializable_data(fl_ctx).get_all_public_props()))
            if reply is not None:
                reply.set_header(key=FLContextKey.PEER_CONTEXT, value=shared_fl_ctx)
                return_message = new_cell_message({}, reply)
                return_message.set_header(MessageHeaderKey.RETURN_CODE, ReturnCode.OK)
            else:
                return_message = new_cell_message({}, None)
            return return_message
def shutdown(self):
self.asked_to_stop = True
|
2,807 | test scene ex by name |
import sys
import unittest
from sickchill import settings
from sickchill.oldbeard import common, db, name_cache, scene_exceptions, show_name_helpers
from sickchill.tv import TVShow as Show
from tests import conftest
class SceneTests(conftest.SickChillTestDBCase):
"""
Test Scene
"""
def _test_all_possible_show_names(self, name, indexerid=0, expected=None):
"""
Test all possible show names
:param name:
:param indexerid:
:param expected:
:return:
"""
expected = expected or []
show = Show(1, indexerid)
show.name = name
result = show_name_helpers.allPossibleShowNames(show)
assert len(set(expected).intersection(set(result))) == len(expected)
def test_all_possible_show_names(self):
"""
Test all possible show names
"""
# common.sceneExceptions[-1] = ['Exception Test']
test_cache_db_con = db.DBConnection("cache.db")
test_cache_db_con.action("INSERT INTO scene_exceptions (indexer_id, show_name, season) VALUES (?,?,?)", [-1, "Exception Test", -1])
common.countryList["Full Country Name"] = "FCN"
self._test_all_possible_show_names("Show Name", expected=["Show Name"])
self._test_all_possible_show_names("Show Name", -1, expected=["Show Name", "Exception Test"])
self._test_all_possible_show_names("Show Name FCN", expected=["Show Name FCN", "Show Name (Full Country Name)"])
self._test_all_possible_show_names("Show Name (FCN)", expected=["Show Name (FCN)", "Show Name (Full Country Name)"])
self._test_all_possible_show_names("Show Name Full Country Name", expected=["Show Name Full Country Name", "Show Name (FCN)"])
self._test_all_possible_show_names("Show Name (Full Country Name)", expected=["Show Name (Full Country Name)", "Show Name (FCN)"])
def test_filter_bad_releases(self):
"""
Test filtering of bad releases
"""
settings.IGNORE_WORDS = "GermaN"
settings.REQUIRE_WORDS = "STUFF"
assert not show_name_helpers.filter_bad_releases("Show.S02.German.Stuff-Grp")
assert show_name_helpers.filter_bad_releases("Show.S02.Some.Stuff-Core2HD")
assert not show_name_helpers.filter_bad_releases("Show.S02.Some.German.Stuff-Grp")
# assert show_name_helpers.filter_bad_releases('German.Show.S02.Some.Stuff-Grp')
assert not show_name_helpers.filter_bad_releases("Show.S02.This.Is.German")
class SceneExceptionTestCase(conftest.SickChillTestDBCase):
"""
Test scene exceptions test case
"""
def setUp(self):
"""
Set up tests
"""
super().setUp()
scene_exceptions.retrieve_exceptions()
def test_scene_ex_empty(self):
"""
Test empty scene exception
"""
assert scene_exceptions.get_scene_exceptions(0) == []
def test_scene_ex_babylon_5(self):
"""
Test scene exceptions for Babylon 5
"""
assert sorted(scene_exceptions.get_scene_exceptions(70726)) == ["Babylon 5", "Babylon5"]
def METHOD_NAME(self):
"""
Test scene exceptions by name
:return:
"""
assert scene_exceptions.get_scene_exception_by_name("Babylon5") == (70726, -1)
assert scene_exceptions.get_scene_exception_by_name("babylon 5") == (70726, -1)
assert scene_exceptions.get_scene_exception_by_name("Carlos 2010") == (164451, -1)
def test_scene_ex_by_name_empty(self):
"""
Test scene exceptions by name are empty
"""
assert scene_exceptions.get_scene_exception_by_name("nothing useful") == (None, None)
def test_scene_ex_reset_name_cache(self):
"""
Test scene exceptions reset name cache
"""
# clear the exceptions
test_cache_db_con = db.DBConnection("cache.db")
test_cache_db_con.action("DELETE FROM scene_exceptions WHERE 1")
# put something in the cache
name_cache.add_name("Cached Name", 0)
# updating should not clear the cache this time since our exceptions didn't change
scene_exceptions.retrieve_exceptions()
assert name_cache.get_id_from_name("Cached Name") == 0
if __name__ == "__main__":
if len(sys.argv) > 1:
SUITE = unittest.TestLoader().loadTestsFromName("scene_helpers_tests.SceneExceptionTestCase.test_" + sys.argv[1])
unittest.TextTestRunner(verbosity=2).run(SUITE)
else:
SUITE = unittest.TestLoader().loadTestsFromTestCase(SceneTests)
unittest.TextTestRunner(verbosity=2).run(SUITE)
SUITE = unittest.TestLoader().loadTestsFromTestCase(SceneExceptionTestCase)
unittest.TextTestRunner(verbosity=2).run(SUITE)
|
2,808 | query parameters |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"relay namespace show",
)
class Show(AAZCommand):
"""Shows the Relay Service Namespace details.
:example: shows the Namespace details.
az relay namespace show --resource-group myresourcegroup --name mynamespace
"""
_aaz_info = {
"version": "2017-04-01",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.relay/namespaces/{}", "2017-04-01"],
]
}
def _handler(self, command_args):
super()._handler(command_args)
self._execute_operations()
return self._output()
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.name = AAZStrArg(
options=["-n", "--name"],
help="Name of Namespace.",
required=True,
id_part="name",
fmt=AAZStrArgFormat(
max_length=50,
min_length=6,
),
)
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
self.NamespacesGet(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
def _output(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
return result
class NamespacesGet(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [200]:
return self.on_200(session)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/namespaces/{namespaceName}",
**self.url_parameters
)
@property
def method(self):
return "GET"
@property
def error_format(self):
return "ODataV4Format"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"namespaceName", self.ctx.args.name,
required=True,
),
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def METHOD_NAME(self):
parameters = {
**self.serialize_query_param(
"api-version", "2017-04-01",
required=True,
),
}
return parameters
@property
def header_parameters(self):
parameters = {
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
def on_200(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
)
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZObjectType()
_schema_on_200 = cls._schema_on_200
_schema_on_200.id = AAZStrType(
flags={"read_only": True},
)
_schema_on_200.location = AAZStrType(
flags={"required": True},
)
_schema_on_200.name = AAZStrType(
flags={"read_only": True},
)
_schema_on_200.properties = AAZObjectType(
flags={"client_flatten": True},
)
_schema_on_200.sku = AAZObjectType()
_schema_on_200.tags = AAZDictType()
_schema_on_200.type = AAZStrType(
flags={"read_only": True},
)
properties = cls._schema_on_200.properties
properties.created_at = AAZStrType(
serialized_name="createdAt",
flags={"read_only": True},
)
properties.metric_id = AAZStrType(
serialized_name="metricId",
flags={"read_only": True},
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.service_bus_endpoint = AAZStrType(
serialized_name="serviceBusEndpoint",
flags={"read_only": True},
)
properties.updated_at = AAZStrType(
serialized_name="updatedAt",
flags={"read_only": True},
)
sku = cls._schema_on_200.sku
sku.name = AAZStrType(
flags={"required": True},
)
sku.tier = AAZStrType()
tags = cls._schema_on_200.tags
tags.Element = AAZStrType()
return cls._schema_on_200
class _ShowHelper:
"""Helper class for Show"""
__all__ = ["Show"]
|
2,809 | parse op |
#!/usr/bin/env python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: 2020 IBM.
# Author: Nageswara R Sastry <[email protected]>
# Disha Goel <[email protected]>
import os
from avocado import Test
from avocado import skipUnless
from avocado.utils import process, distro, genio, dmesg
from avocado.utils.software_manager.manager import SoftwareManager
IS_POWER_NV = 'PowerNV' in genio.read_file('/proc/cpuinfo').rstrip('\t\r\n\0')
class PerfCoreIMCEvents(Test):
"""
Checking core, thread, trace IMC events
:avocado: tags=privileged,perf
"""
@skipUnless(IS_POWER_NV, "This test is for PowerNV")
def setUp(self):
'''
Install the basic packages to support perf
'''
smg = SoftwareManager()
dist = distro.detect()
if dist.name in ['Ubuntu', 'debian']:
linux_tools = "linux-tools-" + os.uname()[2][3]
pkgs = [linux_tools]
if dist.name in ['Ubuntu']:
pkgs.extend(['linux-tools-common'])
elif dist.name in ['centos', 'fedora', 'rhel', 'SuSE']:
pkgs = ['perf']
else:
self.cancel("perf is not supported on %s" % dist.name)
for pkg in pkgs:
if not smg.check_installed(pkg) and not smg.install(pkg):
self.cancel(
"Package %s is missing/could not be installed" % pkg)
# running some workload in background
process.run("ppc64_cpu --frequency -t 10 &", shell=True,
ignore_status=True, verbose=True, ignore_bg_processes=True)
# collect all imc events
self.list_core_imc = []
self.list_thread_imc = []
self.list_trace_imc = []
for line in process.get_command_output_matching('perf list', 'imc'):
line = "%s" % line.split()[0]
if 'core_imc' in line:
self.list_core_imc.append(line)
elif 'thread_imc' in line:
self.list_thread_imc.append(line)
elif 'trace_imc' in line:
self.list_trace_imc.append(line)
# Clear the dmesg, by that we can capture delta at the end of the test
dmesg.clear_dmesg()
def METHOD_NAME(self, cmd):
# helper function to run events and check for failure
fail_count = 0
result = process.run(cmd, shell=True, sudo=True)
output = result.stdout.decode() + result.stderr.decode()
if ("not counted" in output) or ("not supported" in output):
fail_count = fail_count + 1
if fail_count > 0:
self.fail("%s : command failed" % cmd)
def imc_events(self, event):
# helper function to parse different imc events
for line in event:
if line in self.list_core_imc:
cmd = "perf stat -e %s -I 1000 sleep 5" % line
self.METHOD_NAME(cmd)
# running thread_imc events with workload
if line in self.list_thread_imc:
cmd = "perf stat -e %s -I 1000 ls -R /usr/ > /dev/null" % line
self.METHOD_NAME(cmd)
# running trace_imc events with record/report and
# validating perf.data samples
if line in self.list_trace_imc:
cmd = "perf record -o perf.data -e %s -C 0 sleep 5" % line
process.run(cmd, shell=True, sudo=True)
res = process.run(
"perf report --stdio -i perf.data", shell=True, sudo=True)
if "data has no samples!" in res.stderr.decode():
self.fail("trace_imc perf.data sample not captured")
def test_core_imc(self):
# test function to run each imc events in the list
for event in [self.list_core_imc, self.list_thread_imc,
self.list_trace_imc]:
self.imc_events(event)
# negative testcase running two different imc events parallely
process.run("perf stat -e core_imc/CPM_CCYC/ -I 1000 &",
sudo=True, shell=True, ignore_bg_processes=True)
if not process.system("perf stat -e thread_imc/CPM_CCYC/ -I 1000 ls",
ignore_status=True, sudo=True):
self.fail("test failed because able to run two different imc "
"events parallely")
def tearDown(self):
# kill process running in background
process.system("pkill perf", ignore_status=True)
process.system('pkill ppc64_cpu', ignore_status=True)
# remove perf.data file generated from perf record
if os.path.isfile("perf.data"):
process.run('rm -f perf.data')
# Collect the dmesg
dmesg.collect_dmesg()
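# Illustrative run (a hedged sketch; the file name is hypothetical): the test is meant to be
# driven by avocado, and the helper masked as METHOD_NAME above runs each `perf stat` command,
# failing the test if the output contains "not counted" or "not supported".
#
#     avocado run ./perf_core_imc_events.py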
|
2,810 | name |
from __future__ import annotations
import typing
from river import base, stats
__all__ = ["PreviousImputer", "StatImputer"]
class PreviousImputer(base.Transformer):
"""Imputes missing values by using the most recent value.
Examples
--------
>>> from river import preprocessing
>>> imputer = preprocessing.PreviousImputer()
>>> imputer = imputer.learn_one({'x': 1, 'y': 2})
>>> imputer.transform_one({'y': None})
{'y': 2}
>>> imputer.transform_one({'x': None})
{'x': 1}
"""
def __init__(self):
self._latest = {}
def learn_one(self, x):
for i, v in x.items():
if v is not None:
self._latest[i] = v
return self
def transform_one(self, x):
for i, v in x.items():
if v is None:
x[i] = self._latest.get(i)
return x
class StatImputer(base.Transformer):
"""Replaces missing values with a statistic.
This transformer allows you to replace missing values with the value of a running statistic.
During a call to `learn_one`, for each feature, a statistic is updated whenever a numeric feature
is observed. When `transform_one` is called, each feature with a `None` value is replaced with
the current value of the corresponding statistic.
Parameters
----------
imputers
A list of tuples where each tuple has two elements. The first elements is a
feature name and the second value is an instance of `stats.base.Univariate`. The second
value can also be an arbitrary value, such as -1, in which case the missing values will
be replaced with it.
Examples
--------
>>> from river import preprocessing
>>> from river import stats
For numeric data, we can use a `stats.Mean()` to replace missing values by the running
average of the previously seen values:
>>> X = [
... {'temperature': 1},
... {'temperature': 8},
... {'temperature': 3},
... {'temperature': None},
... {'temperature': 4}
... ]
>>> imp = preprocessing.StatImputer(('temperature', stats.Mean()))
>>> for x in X:
... imp = imp.learn_one(x)
... print(imp.transform_one(x))
{'temperature': 1}
{'temperature': 8}
{'temperature': 3}
{'temperature': 4.0}
{'temperature': 4}
For discrete/categorical data, a common practice is to `stats.Mode` to replace missing
values by the most commonly seen value:
>>> X = [
... {'weather': 'sunny'},
... {'weather': 'rainy'},
... {'weather': 'sunny'},
... {'weather': None},
... {'weather': 'rainy'},
... {'weather': 'rainy'},
... {'weather': None}
... ]
>>> imp = preprocessing.StatImputer(('weather', stats.Mode()))
>>> for x in X:
... imp = imp.learn_one(x)
... print(imp.transform_one(x))
{'weather': 'sunny'}
{'weather': 'rainy'}
{'weather': 'sunny'}
{'weather': 'sunny'}
{'weather': 'rainy'}
{'weather': 'rainy'}
{'weather': 'rainy'}
You can also choose to replace missing values with a constant value, as so:
>>> imp = preprocessing.StatImputer(('weather', 'missing'))
>>> for x in X:
... imp = imp.learn_one(x)
... print(imp.transform_one(x))
{'weather': 'sunny'}
{'weather': 'rainy'}
{'weather': 'sunny'}
{'weather': 'missing'}
{'weather': 'rainy'}
{'weather': 'rainy'}
{'weather': 'missing'}
Multiple imputers can be defined by providing a tuple for each feature which you want to
impute:
>>> X = [
... {'weather': 'sunny', 'temperature': 8},
... {'weather': 'rainy', 'temperature': 3},
... {'weather': 'sunny', 'temperature': None},
... {'weather': None, 'temperature': 4},
... {'weather': 'snowy', 'temperature': -4},
... {'weather': 'snowy', 'temperature': -3},
... {'weather': 'snowy', 'temperature': -3},
... {'weather': None, 'temperature': None}
... ]
>>> imp = preprocessing.StatImputer(
... ('temperature', stats.Mean()),
... ('weather', stats.Mode())
... )
>>> for x in X:
... imp = imp.learn_one(x)
... print(imp.transform_one(x))
{'weather': 'sunny', 'temperature': 8}
{'weather': 'rainy', 'temperature': 3}
{'weather': 'sunny', 'temperature': 5.5}
{'weather': 'sunny', 'temperature': 4}
{'weather': 'snowy', 'temperature': -4}
{'weather': 'snowy', 'temperature': -3}
{'weather': 'snowy', 'temperature': -3}
{'weather': 'snowy', 'temperature': 0.8333}
    A sophisticated way to go about imputation is to condition the statistics on a given feature.
For instance, we might want to replace a missing temperature with the average temperature
of a particular weather condition. As an example, consider the following dataset where the
temperature is missing, but not the weather condition:
>>> X = [
... {'weather': 'sunny', 'temperature': 8},
... {'weather': 'rainy', 'temperature': 3},
... {'weather': 'sunny', 'temperature': None},
... {'weather': 'rainy', 'temperature': 4},
... {'weather': 'sunny', 'temperature': 10},
... {'weather': 'sunny', 'temperature': None},
... {'weather': 'sunny', 'temperature': 12},
... {'weather': 'rainy', 'temperature': None}
... ]
Each missing temperature can be replaced with the average temperature of the corresponding
weather condition as so:
>>> from river import compose
>>> imp = compose.Grouper(
... preprocessing.StatImputer(('temperature', stats.Mean())),
... by='weather'
... )
>>> for x in X:
... imp = imp.learn_one(x)
... print(imp.transform_one(x))
{'weather': 'sunny', 'temperature': 8}
{'weather': 'rainy', 'temperature': 3}
{'weather': 'sunny', 'temperature': 8.0}
{'weather': 'rainy', 'temperature': 4}
{'weather': 'sunny', 'temperature': 10}
{'weather': 'sunny', 'temperature': 9.0}
{'weather': 'sunny', 'temperature': 12}
{'weather': 'rainy', 'temperature': 3.5}
Note that you can also create a `Grouper` with the `*` operator:
>>> imp = preprocessing.StatImputer(('temperature', stats.Mean())) * 'weather'
"""
def __init__(self, *imputers):
self.imputers = imputers
self.stats = {
feature: stat if isinstance(stat, stats.base.Univariate) else Constant(stat)
for feature, stat in imputers
}
def learn_one(self, x):
for i in self.stats:
if x[i] is not None:
self.stats[i].update(x[i])
return self
def transform_one(self, x):
# Transformers are supposed to be pure, therefore we make a copy of the features
x = x.copy()
for i in self.stats:
if x[i] is None:
x[i] = self.stats[i].get()
return x
class Constant(stats.base.Univariate):
"""Implements the `stats.base.Univariate` interface but always returns the same value.
Parameters
----------
value
"""
def __init__(self, value: typing.Any):
self.value = value
def update(self, x):
return self
def get(self):
return self.value
@property
def METHOD_NAME(self):
return self.value
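# Illustrative check (a hedged sketch): Constant ignores updates and always reports its value,
# which is what lets StatImputer accept a plain constant in place of a running statistic.
#
#     c = Constant("missing")
#     c.update("sunny")
#     assert c.get() == "missing"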
|
2,811 | get workload network dns zone output |
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetWorkloadNetworkDnsZoneResult',
'AwaitableGetWorkloadNetworkDnsZoneResult',
'get_workload_network_dns_zone',
'get_workload_network_dns_zone_output',
]
@pulumi.output_type
class GetWorkloadNetworkDnsZoneResult:
"""
NSX DNS Zone
"""
def __init__(__self__, display_name=None, dns_server_ips=None, dns_services=None, domain=None, id=None, name=None, provisioning_state=None, revision=None, source_ip=None, type=None):
if display_name and not isinstance(display_name, str):
raise TypeError("Expected argument 'display_name' to be a str")
pulumi.set(__self__, "display_name", display_name)
if dns_server_ips and not isinstance(dns_server_ips, list):
raise TypeError("Expected argument 'dns_server_ips' to be a list")
pulumi.set(__self__, "dns_server_ips", dns_server_ips)
if dns_services and not isinstance(dns_services, float):
raise TypeError("Expected argument 'dns_services' to be a float")
pulumi.set(__self__, "dns_services", dns_services)
if domain and not isinstance(domain, list):
raise TypeError("Expected argument 'domain' to be a list")
pulumi.set(__self__, "domain", domain)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if revision and not isinstance(revision, float):
raise TypeError("Expected argument 'revision' to be a float")
pulumi.set(__self__, "revision", revision)
if source_ip and not isinstance(source_ip, str):
raise TypeError("Expected argument 'source_ip' to be a str")
pulumi.set(__self__, "source_ip", source_ip)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> Optional[str]:
"""
Display name of the DNS Zone.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter(name="dnsServerIps")
def dns_server_ips(self) -> Optional[Sequence[str]]:
"""
DNS Server IP array of the DNS Zone.
"""
return pulumi.get(self, "dns_server_ips")
@property
@pulumi.getter(name="dnsServices")
def dns_services(self) -> Optional[float]:
"""
Number of DNS Services using the DNS zone.
"""
return pulumi.get(self, "dns_services")
@property
@pulumi.getter
def domain(self) -> Optional[Sequence[str]]:
"""
Domain names of the DNS Zone.
"""
return pulumi.get(self, "domain")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def revision(self) -> Optional[float]:
"""
NSX revision number.
"""
return pulumi.get(self, "revision")
@property
@pulumi.getter(name="sourceIp")
def source_ip(self) -> Optional[str]:
"""
Source IP of the DNS Zone.
"""
return pulumi.get(self, "source_ip")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetWorkloadNetworkDnsZoneResult(GetWorkloadNetworkDnsZoneResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetWorkloadNetworkDnsZoneResult(
display_name=self.display_name,
dns_server_ips=self.dns_server_ips,
dns_services=self.dns_services,
domain=self.domain,
id=self.id,
name=self.name,
provisioning_state=self.provisioning_state,
revision=self.revision,
source_ip=self.source_ip,
type=self.type)
def get_workload_network_dns_zone(dns_zone_id: Optional[str] = None,
private_cloud_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWorkloadNetworkDnsZoneResult:
"""
NSX DNS Zone
:param str dns_zone_id: NSX DNS Zone identifier. Generally the same as the DNS Zone's display name
:param str private_cloud_name: Name of the private cloud
:param str resource_group_name: The name of the resource group. The name is case insensitive.
"""
__args__ = dict()
__args__['dnsZoneId'] = dns_zone_id
__args__['privateCloudName'] = private_cloud_name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:avs/v20230301:getWorkloadNetworkDnsZone', __args__, opts=opts, typ=GetWorkloadNetworkDnsZoneResult).value
return AwaitableGetWorkloadNetworkDnsZoneResult(
display_name=pulumi.get(__ret__, 'display_name'),
dns_server_ips=pulumi.get(__ret__, 'dns_server_ips'),
dns_services=pulumi.get(__ret__, 'dns_services'),
domain=pulumi.get(__ret__, 'domain'),
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
revision=pulumi.get(__ret__, 'revision'),
source_ip=pulumi.get(__ret__, 'source_ip'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_workload_network_dns_zone)
def METHOD_NAME(dns_zone_id: Optional[pulumi.Input[str]] = None,
private_cloud_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetWorkloadNetworkDnsZoneResult]:
"""
NSX DNS Zone
:param str dns_zone_id: NSX DNS Zone identifier. Generally the same as the DNS Zone's display name
:param str private_cloud_name: Name of the private cloud
:param str resource_group_name: The name of the resource group. The name is case insensitive.
"""
...
|
2,812 |
system data
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetMSTIDataConnectorResult',
'AwaitableGetMSTIDataConnectorResult',
'get_msti_data_connector',
'get_msti_data_connector_output',
]
@pulumi.output_type
class GetMSTIDataConnectorResult:
"""
Represents Microsoft Threat Intelligence data connector.
"""
def __init__(__self__, data_types=None, etag=None, id=None, kind=None, name=None, METHOD_NAME=None, tenant_id=None, type=None):
if data_types and not isinstance(data_types, dict):
raise TypeError("Expected argument 'data_types' to be a dict")
pulumi.set(__self__, "data_types", data_types)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if METHOD_NAME and not isinstance(METHOD_NAME, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", METHOD_NAME)
if tenant_id and not isinstance(tenant_id, str):
raise TypeError("Expected argument 'tenant_id' to be a str")
pulumi.set(__self__, "tenant_id", tenant_id)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="dataTypes")
def data_types(self) -> 'outputs.MSTIDataConnectorDataTypesResponse':
"""
The available data types for the connector.
"""
return pulumi.get(self, "data_types")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
Etag of the azure resource
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> str:
"""
Azure resource Id
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def kind(self) -> str:
"""
The kind of the data connector
Expected value is 'MicrosoftThreatIntelligence'.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> str:
"""
Azure resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="systemData")
def METHOD_NAME(self) -> 'outputs.SystemDataResponse':
"""
Azure Resource Manager metadata containing createdBy and modifiedBy information.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> str:
"""
The tenant id to connect to, and get the data from.
"""
return pulumi.get(self, "tenant_id")
@property
@pulumi.getter
def type(self) -> str:
"""
Azure resource type
"""
return pulumi.get(self, "type")
class AwaitableGetMSTIDataConnectorResult(GetMSTIDataConnectorResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetMSTIDataConnectorResult(
data_types=self.data_types,
etag=self.etag,
id=self.id,
kind=self.kind,
name=self.name,
METHOD_NAME=self.METHOD_NAME,
tenant_id=self.tenant_id,
type=self.type)
def get_msti_data_connector(data_connector_id: Optional[str] = None,
operational_insights_resource_provider: Optional[str] = None,
resource_group_name: Optional[str] = None,
workspace_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetMSTIDataConnectorResult:
"""
Gets a data connector.
:param str data_connector_id: Connector ID
:param str operational_insights_resource_provider: The namespace of workspaces resource provider- Microsoft.OperationalInsights.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str workspace_name: The name of the workspace.
"""
__args__ = dict()
__args__['dataConnectorId'] = data_connector_id
__args__['operationalInsightsResourceProvider'] = operational_insights_resource_provider
__args__['resourceGroupName'] = resource_group_name
__args__['workspaceName'] = workspace_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:securityinsights/v20210301preview:getMSTIDataConnector', __args__, opts=opts, typ=GetMSTIDataConnectorResult).value
return AwaitableGetMSTIDataConnectorResult(
data_types=pulumi.get(__ret__, 'data_types'),
etag=pulumi.get(__ret__, 'etag'),
id=pulumi.get(__ret__, 'id'),
kind=pulumi.get(__ret__, 'kind'),
name=pulumi.get(__ret__, 'name'),
METHOD_NAME=pulumi.get(__ret__, 'system_data'),
tenant_id=pulumi.get(__ret__, 'tenant_id'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_msti_data_connector)
def get_msti_data_connector_output(data_connector_id: Optional[pulumi.Input[str]] = None,
operational_insights_resource_provider: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetMSTIDataConnectorResult]:
"""
Gets a data connector.
:param str data_connector_id: Connector ID
:param str operational_insights_resource_provider: The namespace of workspaces resource provider- Microsoft.OperationalInsights.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str workspace_name: The name of the workspace.
"""
...
|
2,813 |
test construct
|
##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import imath
import IECore
import IECoreScene
import Gaffer
import GafferTest
import GafferScene
import GafferSceneTest
class CubeTest( GafferSceneTest.SceneTestCase ) :
def METHOD_NAME( self ) :
c = GafferScene.Cube()
self.assertEqual( c.getName(), "Cube" )
def testCompute( self ) :
c = GafferScene.Cube()
self.assertEqual( c["out"].object( "/" ), IECore.NullObject() )
self.assertEqual( c["out"].transform( "/" ), imath.M44f() )
self.assertEqual( c["out"].bound( "/" ), imath.Box3f( imath.V3f( -0.5, -0.5, -0.5 ), imath.V3f( 0.5, 0.5, 0.5 ) ) )
self.assertEqual( c["out"].childNames( "/" ), IECore.InternedStringVectorData( [ "cube" ] ) )
self.assertEqual( c["out"].object( "/cube" ), IECoreScene.MeshPrimitive.createBox( imath.Box3f( imath.V3f( -0.5 ), imath.V3f( 0.5 ) ) ) )
self.assertEqual( c["out"].transform( "/cube" ), imath.M44f() )
self.assertEqual( c["out"].bound( "/cube" ), imath.Box3f( imath.V3f( -0.5, -0.5, -0.5 ), imath.V3f( 0.5, 0.5, 0.5 ) ) )
self.assertEqual( c["out"].childNames( "/cube" ), IECore.InternedStringVectorData() )
def testPlugs( self ) :
c = GafferScene.Cube()
m = IECoreScene.MeshPrimitive.createBox( imath.Box3f( imath.V3f( -0.5 ), imath.V3f( 0.5 ) ) )
self.assertEqual( c["out"].object( "/cube" ), m )
h = c["out"].objectHash( "/cube" )
c["dimensions"].setValue( imath.V3f( 2.5, 5, 6 ) )
m = IECoreScene.MeshPrimitive.createBox( imath.Box3f( imath.V3f( -1.25, -2.5, -3 ), imath.V3f( 1.25, 2.5, 3 ) ) )
self.assertEqual( c["out"].object( "/cube" ), m )
self.assertNotEqual( c["out"].objectHash( "/cube" ), h )
def testAffects( self ) :
c = GafferScene.Cube()
s = GafferTest.CapturingSlot( c.plugDirtiedSignal() )
c["name"].setValue( "box" )
self.assertEqual(
{ x[0] for x in s if not x[0].getName().startswith( "__" ) },
{ c["name"], c["out"]["childNames"], c["out"]["childBounds"], c["out"]["exists"], c["out"]["set"], c["out"] }
)
del s[:]
c["dimensions"]["x"].setValue( 10 )
found = False
for ss in s :
if ss[0].isSame( c["out"] ) :
found = True
self.assertTrue( found )
def testTransform( self ) :
c = GafferScene.Cube()
c["transform"]["translate"].setValue( imath.V3f( 1, 0, 0 ) )
self.assertEqual( c["out"].transform( "/" ), imath.M44f() )
self.assertEqual( c["out"].transform( "/cube" ), imath.M44f().translate( imath.V3f( 1, 0, 0 ) ) )
self.assertEqual( c["out"].bound( "/" ), imath.Box3f( imath.V3f( 0.5, -0.5, -0.5 ), imath.V3f( 1.5, 0.5, 0.5 ) ) )
self.assertEqual( c["out"].bound( "/cube" ), imath.Box3f( imath.V3f( -0.5, -0.5, -0.5 ), imath.V3f( 0.5, 0.5, 0.5 ) ) )
def testEnabled( self ) :
c = GafferScene.Cube()
c["enabled"].setValue( False )
self.assertSceneValid( c["out"] )
self.assertTrue( c["out"].bound( "/" ).isEmpty() )
self.assertEqual( c["out"].childNames( "/" ), IECore.InternedStringVectorData() )
c["enabled"].setValue( True )
self.assertSceneValid( c["out"] )
self.assertEqual( c["out"].childNames( "/" ), IECore.InternedStringVectorData( [ "cube" ] ) )
def testSerialise( self ) :
s = Gaffer.ScriptNode()
s["c"] = GafferScene.Cube()
ss = s.serialise()
if __name__ == "__main__":
unittest.main()
|
2,814 |
test ok
|
from unittest import TestCase
from zentral.contrib.inventory.conf import (is_apple_os, macos_version_from_build,
os_version_display, os_version_version_display)
class MacOSBuildTestCase(TestCase):
def test_wrong_errors(self):
for build in ("A", "", "9"):
with self.assertRaises(ValueError) as cm:
macos_version_from_build(build)
self.assertEqual(cm.exception.args[0], "Bad build number")
def test_too_old_errors(self):
for build in ("11G56", "9A581"):
with self.assertRaises(ValueError) as cm:
macos_version_from_build(build)
self.assertEqual(cm.exception.args[0], "Cannot parse build str for macos < 10.8")
def METHOD_NAME(self):
for build, (name, major, minor, patch, version), display in (
("12E4022", ("OS X", 10, 8, 4, None), "OS X 10.8.4 (12E4022)"),
("15G31", ("OS X", 10, 11, 6, None), "OS X 10.11.6 (15G31)"),
("19A471t", ("macOS", 10, 15, 0, None), "macOS 10.15 (19A471t)"),
("19D76", ("macOS", 10, 15, 3, None), "macOS 10.15.3 (19D76)"),
("20A2411", ("macOS", 11, 0, 0, None), "macOS 11.0 (20A2411)"),
("20B29", ("macOS", 11, 0, 1, None), "macOS 11.0.1 (20B29)"),
("20B50", ("macOS", 11, 0, 1, None), "macOS 11.0.1 (20B50)"),
("20C69", ("macOS", 11, 1, 0, None), "macOS 11.1 (20C69)"),
("20D74", ("macOS", 11, 2, 1, None), "macOS 11.2.1 (20D74)"),
("20D80", ("macOS", 11, 2, 2, None), "macOS 11.2.2 (20D80)"),
("20D91", ("macOS", 11, 2, 3, None), "macOS 11.2.3 (20D91)"),
("20G95", ("macOS", 11, 5, 2, None), "macOS 11.5.2 (20G95)"),
("20G165", ("macOS", 11, 6, 0, None), "macOS 11.6 (20G165)"),
("20G224", ("macOS", 11, 6, 1, None), "macOS 11.6.1 (20G224)"),
("20G314", ("macOS", 11, 6, 2, None), "macOS 11.6.2 (20G314)"),
("20G415", ("macOS", 11, 6, 3, None), "macOS 11.6.3 (20G415)"),
("20G417", ("macOS", 11, 6, 4, None), "macOS 11.6.4 (20G417)"),
("20G527", ("macOS", 11, 6, 5, None), "macOS 11.6.5 (20G527)"),
("20G624", ("macOS", 11, 6, 6, None), "macOS 11.6.6 (20G624)"),
("20G630", ("macOS", 11, 6, 7, None), "macOS 11.6.7 (20G630)"),
("20G730", ("macOS", 11, 6, 8, None), "macOS 11.6.8 (20G730)"),
("21A5522h", ("macOS", 12, 0, 0, None), "macOS 12.0 (21A5522h)"),
("21A558", ("macOS", 12, 0, 1, None), "macOS 12.0.1 (21A558)"),
("21C5021h", ("macOS", 12, 1, 0, None), "macOS 12.1 (21C5021h)"),
("21D62", ("macOS", 12, 2, 1, None), "macOS 12.2.1 (21D62)"),
("21E5212f", ("macOS", 12, 3, 0, None), "macOS 12.3 (21E5212f)"),
("21E258", ("macOS", 12, 3, 1, None), "macOS 12.3.1 (21E258)"),
("22A5266r", ("macOS", 13, 0, 0, None), "macOS 13.0 (22A5266r)"),
("21G83", ("macOS", 12, 5, 1, None), "macOS 12.5.1 (21G83)"),
("21G115", ("macOS", 12, 6, 0, None), "macOS 12.6 (21G115)"),
("20G817", ("macOS", 11, 7, 0, None), "macOS 11.7 (20G817)"),
("20G918", ("macOS", 11, 7, 1, None), "macOS 11.7.1 (20G918)"),
("20G1008", ("macOS", 11, 7, 2, None), "macOS 11.7.2 (20G1008)"),
("20G1020", ("macOS", 11, 7, 2, None), "macOS 11.7.2 (20G1020)"),
("20G1116", ("macOS", 11, 7, 3, None), "macOS 11.7.3 (20G1116)"),
("21G217", ("macOS", 12, 6, 1, None), "macOS 12.6.1 (21G217)"),
("21G309", ("macOS", 12, 6, 2, None), "macOS 12.6.2 (21G309)"),
("21G320", ("macOS", 12, 6, 2, None), "macOS 12.6.2 (21G320)"),
("21G417", ("macOS", 12, 6, 3, None), "macOS 12.6.3 (21G417)"),
("21G419", ("macOS", 12, 6, 3, None), "macOS 12.6.3 (21G419)"),
("22A400", ("macOS", 13, 0, 1, None), "macOS 13.0.1 (22A400)"),
("22D68", ("macOS", 13, 2, 1, None), "macOS 13.2.1 (22D68)"),
("22E261", ("macOS", 13, 3, 1, None), "macOS 13.3.1 (22E261)"),
("22E772610a", ("macOS", 13, 3, 1, "(a)"), "macOS 13.3.1 (a) (22E772610a)"),
("22F82", ("macOS", 13, 4, 1, None), "macOS 13.4.1 (22F82)"),
("22F770820b", ("macOS", 13, 4, 1, "(a)"), "macOS 13.4.1 (a) (22F770820b)"),
("22F770820d", ("macOS", 13, 4, 1, "(c)"), "macOS 13.4.1 (c) (22F770820d)"),
("22G90", ("macOS", 13, 5, 1, None), "macOS 13.5.1 (22G90)"),
):
expected_version_d = {
"name": name,
"major": major,
"minor": minor,
"patch": patch,
"build": build
}
if version:
expected_version_d["version"] = version
parsed_version_d = macos_version_from_build(build)
self.assertEqual(parsed_version_d, expected_version_d)
self.assertEqual(os_version_display(parsed_version_d), display)
def test_is_apple_os(self):
for os_name, result in (("mACos Beta", True),
("OS X", True),
("iOS", True),
("iPadOS", True),
("tvOS", True),
("watchOS", True),
("", False),
(None, False),
("Windows", False)):
self.assertEqual(is_apple_os(os_name), result)
def test_os_version_version_display(self):
for os_version_d, result in (({"name": "", "major": 10, "minor": 10, "patch": 0}, "10.10.0"),
({"name": "MacOs", "major": 10, "minor": 10, "patch": 0}, "10.10"),
({"name": "MacOs", "major": 10, "minor": 10, "patch": 1}, "10.10.1")):
self.assertEqual(os_version_version_display(os_version_d), result)
|
2,815 |
test scanning empty sr
|
import testlib
import unittest
import unittest.mock as mock
import SRCommand
import HBASR
import xmlrpc.client
import devscan
def create_hba_sr():
command = SRCommand.SRCommand(driver_info=None)
command_parameter = (
{
'device_config': {},
'command': 'irrelevant_some_command',
},
'irrelevant_method'
)
xmlrpc_arg = xmlrpc.client.dumps(command_parameter)
argv_patcher = mock.patch('sys.argv', new=[None, xmlrpc_arg])
argv_patcher.start()
command.parse()
argv_patcher.stop()
sr = HBASR.HBASR(command, '0')
return sr
class TestScan(unittest.TestCase, testlib.XmlMixIn):
@testlib.with_context
def METHOD_NAME(self, context):
sr = create_hba_sr()
sr._init_hbadict()
result = devscan.scan(sr)
self.assertXML("""
<?xml version="1.0" ?>
<Devlist/>
""", result)
@testlib.with_context
def test_scanning_sr_with_devices(self, context):
sr = create_hba_sr()
adapter = context.add_adapter(testlib.SCSIAdapter())
adapter.add_disk()
sr._init_hbadict()
result = devscan.scan(sr)
self.assertXML("""
<?xml version="1.0" ?>
<Devlist>
<Adapter>
<host>host0</host>
<name>Unknown</name>
<manufacturer>Unknown-description</manufacturer>
<id>0</id>
</Adapter>
</Devlist>
""", result)
@testlib.with_context
def test_scanning_sr_includes_parameters(self, context):
sr = create_hba_sr()
adapter = context.add_adapter(testlib.SCSIAdapter())
adapter.add_disk()
sr._init_hbadict()
adapter.add_parameter('fc_host', dict(port_name='VALUE'))
result = devscan.scan(sr)
self.assertXML("""
<?xml version="1.0" ?>
<Devlist>
<Adapter>
<host>host0</host>
<name>Unknown</name>
<manufacturer>Unknown-description</manufacturer>
<id>0</id>
<fc_host>
<port_name>VALUE</port_name>
</fc_host>
</Adapter>
</Devlist>
""", result)
class TestAdapters(unittest.TestCase):
@testlib.with_context
def test_no_adapters(self, context):
result = devscan.adapters()
self.assertEqual({'devs': {}, 'adt': {}}, result)
@mock.patch('devscan.match_hbadevs', autospec=True)
@testlib.with_context
def test_exotic_adapter_with_security_device(self, context, match_hbadevs):
adapter = context.add_adapter(testlib.AdapterWithNonBlockDevice())
adapter.add_disk()
match_hbadevs.return_value = 'lpfc'
result = devscan.adapters()
self.assertEqual(
{
'devs': {},
'adt': {
'host0': 'lpfc'
}
},
result)
@testlib.with_context
def test_adapter_and_disk_added(self, context):
adapter = context.add_adapter(testlib.SCSIAdapter())
adapter.add_disk()
result = devscan.adapters()
self.assertEqual(
{
'devs': {
'sda': {
'procname': 'Unknown',
'host': '0',
'target': '0'
}
},
'adt': {
'host0': 'Unknown'
}
},
result)
class TestUpdateDevsDict(unittest.TestCase):
def test_whencalled_updates_dict(self):
devices = {}
dev = 'dev'
entry = 'entry'
devscan.update_devs_dict(devices, dev, entry)
self.assertEqual({'dev': 'entry'}, devices)
def test_whencalled_with_empty_key_does_not_update_dict(self):
devices = {}
dev = devscan.INVALID_DEVICE_NAME
entry = 'entry'
devscan.update_devs_dict(devices, dev, entry)
self.assertEqual({}, devices)
|
2,816 |
do get
|
# Copyright (C) 2013 Kristoffer Gronlund <[email protected]>
# See COPYING for license information.
import os
from . import command
from . import completers
from . import utils
from . import corosync
from . import parallax
from . import bootstrap
from . import log
logger = log.setup_logger(__name__)
def _push_completer(args):
try:
n = utils.list_cluster_nodes()
n.remove(utils.this_node())
if args[-1] in n:
# continue complete
return [args[-1]]
for item in args:
if item in n:
n.remove(item)
return n
    except:
        # fall back to an empty completion list if the node list cannot be determined
        return []
def _diff_nodes(args):
try:
if len(args) > 3:
return []
n = utils.list_cluster_nodes()
if args[-1] in n:
# continue complete
return [args[-1]]
for item in args:
if item in n:
# remove already complete item
n.remove(item)
return n
except:
return []
class Corosync(command.UI):
'''
Corosync is the underlying messaging layer for most HA clusters.
This level provides commands for editing and managing the corosync
configuration.
'''
name = "corosync"
def requires(self):
return corosync.check_tools()
@command.completers(completers.choice(['ring', 'quorum', 'qdevice', 'qnetd']))
def do_status(self, context, status_type="ring"):
'''
Quick cluster health status. Corosync status or QNetd status
'''
if not utils.service_is_active("corosync.service"):
logger.error("corosync.service is not running!")
return False
try:
corosync.query_status(status_type)
except ValueError as err:
logger.error(str(err))
return False
@command.skill_level('administrator')
def do_reload(self, context):
'''
Reload the corosync configuration
'''
return corosync.cfgtool('-R')[0] == 0
@command.skill_level('administrator')
@command.completers_repeating(_push_completer)
def do_push(self, context, *nodes):
'''
Push corosync configuration to other cluster nodes.
If no nodes are provided, configuration is pushed to
all other cluster nodes.
'''
if not nodes:
nodes = utils.list_cluster_nodes()
nodes.remove(utils.this_node())
return corosync.push_configuration(nodes)
@command.skill_level('administrator')
@command.completers(_push_completer)
def do_pull(self, context, node):
'''
Pull corosync configuration from another node.
'''
return corosync.pull_configuration(node)
@command.completers_repeating(_diff_nodes)
def do_diff(self, context, *nodes):
'''
Compare corosync configuration between nodes.
'''
checksum = False
if nodes and nodes[0] == '--checksum':
checksum = True
nodes = nodes[1:]
if not nodes:
nodes = utils.list_cluster_nodes()
return corosync.diff_configuration(nodes, checksum=checksum)
@command.skill_level('administrator')
def do_edit(self, context):
'''
Edit the corosync configuration.
'''
cfg = corosync.conf()
try:
utils.edit_file_ext(cfg, template='')
except IOError as e:
context.fatal_error(str(e))
def do_show(self, context):
'''
Display the corosync configuration.
'''
cfg = corosync.conf()
if not os.path.isfile(cfg):
context.fatal_error("No corosync configuration found on this node.")
utils.page_string(open(cfg).read())
def do_log(self, context):
'''
Display the corosync log file (if any).
'''
logfile = corosync.get_value('logging.logfile')
if not logfile:
context.fatal_error("No corosync log file configured")
utils.page_file(logfile)
@command.skill_level('administrator')
def METHOD_NAME(self, context, path):
"Get a corosync configuration value"
for v in corosync.get_values(path):
print(v)
@command.skill_level('administrator')
def do_set(self, context, path, value, index: int = 0):
"Set a corosync configuration value"
corosync.set_value(path, value, index)
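# Illustrative usage sketch; the dotted paths below are assumptions, not taken from
# this file. From the crm shell these handlers map to commands such as
#   crm corosync get totem.token
#   crm corosync set totem.token 5000
# where the dotted path names a section.key pair in corosync.conf.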
|
2,817 |
test no project with connected account
|
import unittest
import os
import json
from unittest.mock import patch
import threading
from test.support.os_helper import EnvironmentVarGuard
from urllib.parse import urlparse
from http.server import BaseHTTPRequestHandler, HTTPServer
from google.cloud import bigquery
from google.auth.exceptions import DefaultCredentialsError
from google.cloud.bigquery._http import Connection
from kaggle_gcp import KaggleKernelCredentials, PublicBigqueryClient, _DataProxyConnection, init_bigquery
import kaggle_secrets
class TestBigQuery(unittest.TestCase):
API_BASE_URL = "http://127.0.0.1:2121"
def _test_integration(self, client):
class HTTPHandler(BaseHTTPRequestHandler):
called = False
bearer_header_found = False
def do_HEAD(self):
self.send_response(200)
def do_GET(self):
HTTPHandler.called = True
HTTPHandler.bearer_header_found = any(
k for k in self.headers if k == "authorization" and self.headers[k] == "Bearer secret")
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
sample_dataset = {
"id": "bigqueryproject:datasetname",
"datasetReference": {
"datasetId": "datasetname",
"projectId": "bigqueryproject"
}
}
self.wfile.write(json.dumps({"kind": "bigquery#datasetList", "datasets": [sample_dataset]}).encode("utf-8"))
server_address = urlparse(self.API_BASE_URL)
with HTTPServer((server_address.hostname, server_address.port), HTTPHandler) as httpd:
threading.Thread(target=httpd.serve_forever).start()
for dataset in client.list_datasets():
self.assertEqual(dataset.dataset_id, "datasetname")
httpd.shutdown()
self.assertTrue(
HTTPHandler.called, msg="Fake server was not called from the BQ client, but should have been.")
self.assertTrue(
HTTPHandler.bearer_header_found, msg="authorization header was missing from the BQ request.")
def _setup_mocks(self, api_url_mock):
api_url_mock.__str__.return_value = self.API_BASE_URL
@patch.object(kaggle_secrets.UserSecretsClient, 'get_bigquery_access_token', return_value=('secret',1000))
def test_project_with_connected_account(self, mock_access_token):
env = EnvironmentVarGuard()
env.set('KAGGLE_USER_SECRETS_TOKEN', 'foobar')
with env:
client = bigquery.Client(
project='ANOTHER_PROJECT', credentials=KaggleKernelCredentials(), client_options={"api_endpoint": TestBigQuery.API_BASE_URL})
self._test_integration(client)
@patch.object(kaggle_secrets.UserSecretsClient, 'get_bigquery_access_token', return_value=('secret',1000))
def test_project_with_empty_integrations(self, mock_access_token):
env = EnvironmentVarGuard()
env.set('KAGGLE_USER_SECRETS_TOKEN', 'foobar')
env.set('KAGGLE_KERNEL_INTEGRATIONS', '')
with env:
client = bigquery.Client(
project='ANOTHER_PROJECT', credentials=KaggleKernelCredentials(), client_options={"api_endpoint": TestBigQuery.API_BASE_URL})
self._test_integration(client)
@patch.object(kaggle_secrets.UserSecretsClient, 'get_bigquery_access_token', return_value=('secret',1000))
def test_project_with_connected_account_unrelated_integrations(self, mock_access_token):
env = EnvironmentVarGuard()
env.set('KAGGLE_USER_SECRETS_TOKEN', 'foobar')
env.set('KAGGLE_KERNEL_INTEGRATIONS', 'GCS:ANOTHER_ONE')
with env:
client = bigquery.Client(
project='ANOTHER_PROJECT', credentials=KaggleKernelCredentials(), client_options={"api_endpoint": TestBigQuery.API_BASE_URL})
self._test_integration(client)
@patch.object(kaggle_secrets.UserSecretsClient, 'get_bigquery_access_token', return_value=('secret',1000))
def test_project_with_connected_account_default_credentials(self, mock_access_token):
env = EnvironmentVarGuard()
env.set('KAGGLE_USER_SECRETS_TOKEN', 'foobar')
env.set('KAGGLE_KERNEL_INTEGRATIONS', 'BIGQUERY')
with env:
client = bigquery.Client(project='ANOTHER_PROJECT', client_options={"api_endpoint": TestBigQuery.API_BASE_URL})
self.assertTrue(client._connection.user_agent.startswith("kaggle-gcp-client/1.0"))
self._test_integration(client)
@patch.object(kaggle_secrets.UserSecretsClient, 'get_bigquery_access_token', return_value=('secret',1000))
def test_project_with_env_var_project_default_credentials(self, mock_access_token):
env = EnvironmentVarGuard()
env.set('KAGGLE_USER_SECRETS_TOKEN', 'foobar')
env.set('KAGGLE_KERNEL_INTEGRATIONS', 'BIGQUERY')
env.set('GOOGLE_CLOUD_PROJECT', 'ANOTHER_PROJECT')
with env:
client = bigquery.Client(client_options={"api_endpoint": TestBigQuery.API_BASE_URL})
self._test_integration(client)
@patch.object(kaggle_secrets.UserSecretsClient, 'get_bigquery_access_token', return_value=('secret',1000))
def test_simultaneous_clients(self, mock_access_token):
env = EnvironmentVarGuard()
env.set('KAGGLE_USER_SECRETS_TOKEN', 'foobar')
with env:
proxy_client = bigquery.Client(client_options={"api_endpoint": TestBigQuery.API_BASE_URL})
bq_client = bigquery.Client(
project='ANOTHER_PROJECT', credentials=KaggleKernelCredentials(), client_options={"api_endpoint": TestBigQuery.API_BASE_URL})
self._test_integration(bq_client)
# Verify that proxy client is still going to proxy to ensure global Connection
# isn't being modified.
self.assertNotEqual(type(proxy_client._connection), KaggleKernelCredentials)
self.assertEqual(type(proxy_client._connection), _DataProxyConnection)
def METHOD_NAME(self):
env = EnvironmentVarGuard()
env.set('KAGGLE_USER_SECRETS_TOKEN', 'foobar')
env.set('KAGGLE_KERNEL_INTEGRATIONS', 'BIGQUERY')
with env:
with self.assertRaises(OSError):
# TODO(vimota): Handle this case, either default to Kaggle Proxy or use some default project
# by the user or throw a custom exception.
client = bigquery.Client(client_options={"api_endpoint": TestBigQuery.API_BASE_URL})
self._test_integration(client)
def test_magics_with_connected_account_default_credentials(self):
env = EnvironmentVarGuard()
env.set('KAGGLE_USER_SECRETS_TOKEN', 'foobar')
env.set('KAGGLE_KERNEL_INTEGRATIONS', 'BIGQUERY')
with env:
init_bigquery()
from google.cloud.bigquery import magics
self.assertEqual(type(magics.context._credentials), KaggleKernelCredentials)
magics.context.credentials = None
def test_magics_without_connected_account(self):
env = EnvironmentVarGuard()
env.set('KAGGLE_USER_SECRETS_TOKEN', 'foobar')
with env:
init_bigquery()
from google.cloud.bigquery import magics
self.assertIsNone(magics.context._credentials)
|
2,818 |
print gh commands
|
"""
Script to create a GitHub issue for every unsupported command.
To run this:
- Install pygithub: pip install pygithub
- Set the environment variable `GITHUB_TOKEN` to a GitHub token with permission to create issues.
- Alternatively, create a `.env` file containing `GITHUB_TOKEN`.
"""
import os
from dotenv import load_dotenv
from github import Github
from supported import download_redis_commands, implemented_commands
load_dotenv() # take environment variables from .env.
IGNORE_GROUPS = {
'suggestion', 'tdigest', 'scripting', 'cf', 'topk',
'hyperloglog', 'graph', 'timeseries', 'connection',
'server', 'generic', 'cms', 'bf', 'cluster',
'search', 'bitmap',
}
IGNORE_COMMANDS = {
'PUBSUB HELP', 'OBJECT HELP', 'FUNCTION HELP',
'SCRIPT HELP',
'JSON.DEBUG', 'JSON.DEBUG HELP', 'JSON.DEBUG MEMORY',
'JSON.RESP',
'XINFO', 'XINFO HELP', 'XGROUP', 'XGROUP HELP', 'XSETID',
'ACL HELP', 'COMMAND HELP', 'CONFIG HELP', 'DEBUG',
'MEMORY HELP', 'MODULE HELP', 'CLIENT HELP',
}
def commands_groups(
all_commands: dict, implemented_set: set
) -> tuple[dict[str, list[str]], dict[str, list[str]]]:
implemented, unimplemented = dict(), dict()
for cmd in all_commands:
if cmd.upper() in IGNORE_COMMANDS:
continue
group = all_commands[cmd]['group']
unimplemented.setdefault(group, [])
implemented.setdefault(group, [])
if cmd in implemented_set:
implemented[group].append(cmd)
else:
unimplemented[group].append(cmd)
return implemented, unimplemented
def get_unimplemented_and_implemented_commands() -> tuple[dict[str, list[str]], dict[str, list[str]]]:
"""Returns 2 dictionaries, one of unimplemented commands and another of implemented commands
"""
commands = download_redis_commands()
implemented_commands_set = implemented_commands()
implemented_dict, unimplemented_dict = commands_groups(commands, implemented_commands_set)
groups = sorted(implemented_dict.keys(), key=lambda x: len(unimplemented_dict[x]))
for group in groups:
unimplemented_count = len(unimplemented_dict[group])
total_count = len(implemented_dict.get(group)) + unimplemented_count
print(f'{group} has {unimplemented_count}/{total_count} unimplemented commands')
return unimplemented_dict, implemented_dict
class GithubData:
def __init__(self, dry=False):
token = os.getenv('GITHUB_TOKEN', None)
g = Github(token)
self.dry = dry or (token is None)
self.gh_repo = g.get_repo('cunla/fakeredis')
open_issues = self.gh_repo.get_issues(state='open')
self.issues = {i.title: i.number for i in open_issues}
gh_labels = self.gh_repo.get_labels()
self.labels = {label.name for label in gh_labels}
def create_label(self, name):
if self.dry:
print(f'Creating label "{name}"')
else:
self.gh_repo.create_label(name, "f29513")
self.labels.add(name)
def create_issue(self, group: str, cmd: str, summary: str):
link = f"https://redis.io/commands/{cmd.replace(' ', '-')}/"
title = f"Implement support for `{cmd.upper()}` ({group} command)"
filename = f'{group}_mixin.py'
body = f"""Implement support for command `{cmd.upper()}` in {filename}.
{summary}.
Here is the [Official documentation]({link})"""
labels = [f'{group}-commands', 'enhancement', 'help wanted']
for label in labels:
if label not in self.labels:
self.create_label(label)
if title in self.issues:
return
if self.dry:
print(f'Creating issue with title "{title}" and labels {labels}')
else:
self.gh_repo.create_issue(title, body, labels=labels)
def METHOD_NAME(commands: dict, unimplemented: dict):
gh = GithubData()
for group in unimplemented:
if group in IGNORE_GROUPS:
continue
print(f'### Creating issues for {group} commands')
for cmd in unimplemented[group]:
if cmd.upper() in IGNORE_COMMANDS:
continue
summary = commands[cmd]['summary']
gh.create_issue(group, cmd, summary)
if __name__ == '__main__':
commands = download_redis_commands()
unimplemented_dict, _ = get_unimplemented_and_implemented_commands()
METHOD_NAME(commands, unimplemented_dict)
|
2,819 |
test run
|
from __future__ import unicode_literals
# Copyright (C) 2019 Xin Liang <[email protected]>
# See COPYING for license information.
#
# unit tests for parallax.py
import unittest
from unittest import mock
import crmsh.parallax
import crmsh.prun.prun
class TestParallax(unittest.TestCase):
def setUp(self):
"""
Test setUp.
"""
# Use the setup to create a fresh instance for each test
@mock.patch("crmsh.prun.prun.prun")
def test_call(self, mock_prun: mock.MagicMock):
mock_prun.return_value = {
"node1": crmsh.prun.prun.ProcessResult(0, None, None)
}
result = crmsh.parallax.parallax_call(["node1"], "ls")
self.assertEqual(
result,
[("node1", (0, None, None))],
)
@mock.patch("crmsh.prun.prun.prun")
def test_call_non_zero_exit_code(self, mock_prun: mock.MagicMock):
mock_prun.return_value = {
"node1": crmsh.prun.prun.ProcessResult(1, None, None)
}
with self.assertRaises(ValueError):
crmsh.parallax.parallax_call(["node1"], "ls")
@mock.patch("crmsh.prun.prun.prun")
def test_call_255_exit_code(self, mock_prun: mock.MagicMock):
mock_prun.return_value = {
"node1": crmsh.prun.prun.ProcessResult(255, None, None)
}
with self.assertRaises(ValueError):
crmsh.parallax.parallax_call(["node1"], "ls")
@mock.patch("crmsh.prun.prun.prun")
def METHOD_NAME(self, mock_prun: mock.MagicMock):
mock_prun.return_value = {
"node1": crmsh.prun.prun.ProcessResult(0, None, None)
}
result = crmsh.parallax.parallax_run(["node1"], "ls")
self.assertEqual(
{"node1": (0, None, None)},
result,
)
@mock.patch("crmsh.prun.prun.prun")
def test_run_non_zero_exit_code(self, mock_prun: mock.MagicMock):
mock_prun.return_value = {
"node1": crmsh.prun.prun.ProcessResult(1, None, None)
}
result = crmsh.parallax.parallax_run(["node1"], "ls")
self.assertEqual(
{"node1": (1, None, None)},
result,
)
@mock.patch("crmsh.prun.prun.prun")
def test_run_255_exit_code(self, mock_prun: mock.MagicMock):
mock_prun.return_value = {
"node1": crmsh.prun.prun.SSHError("alice", "node1", "foo")
}
with self.assertRaises(ValueError):
crmsh.parallax.parallax_run(["node1"], "ls")
@mock.patch("crmsh.prun.prun.pfetch_from_remote")
def test_slurp(self, mock_pfetch: mock.MagicMock):
mock_pfetch.return_value = {"node1": "/opt/node1/file.c"}
results = crmsh.parallax.parallax_slurp(["node1"], "/opt", "/opt/file.c")
self.assertListEqual([("node1", "/opt/node1/file.c")], results)
mock_pfetch.assert_called_once_with(["node1"], "/opt/file.c", "/opt")
@mock.patch("crmsh.prun.prun.pfetch_from_remote")
def test_slurp_exception(self, mock_pfetch: mock.MagicMock):
mock_pfetch.return_value = {"node1": crmsh.prun.prun.PRunError("alice", "node1", "foo")}
with self.assertRaises(ValueError):
crmsh.parallax.parallax_slurp(["node1"], "/opt", "/opt/file.c")
mock_pfetch.assert_called_once_with(["node1"], "/opt/file.c", "/opt")
@mock.patch("crmsh.prun.prun.pcopy_to_remote")
def test_copy(self, mock_pcopy: mock.MagicMock):
mock_pcopy.return_value = {"node1": None, "node2": None}
crmsh.parallax.parallax_copy(["node1", "node2"], "/opt/file.c", "/tmp")
mock_pcopy.assert_called_once_with("/opt/file.c", ["node1", "node2"], "/tmp", False, timeout_seconds=-1)
@mock.patch("crmsh.prun.prun.pcopy_to_remote")
def test_copy_exception(self, mock_pcopy: mock.MagicMock):
mock_pcopy.return_value = {"node1": crmsh.prun.prun.PRunError("alice", "node1", "foo"), "node2": None}
with self.assertRaises(ValueError):
crmsh.parallax.parallax_copy(["node1", "node2"], "/opt/file.c", "/tmp")
mock_pcopy.assert_called_once_with("/opt/file.c", ["node1", "node2"], "/tmp", False, timeout_seconds=-1)
|
2,820 |
pad image
|
"""
Copyright (C) 2020-2023 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import cv2
import numpy as np
import math
class Detection:
def __init__(self, xmin, ymin, xmax, ymax, score, id):
self.xmin = xmin
self.ymin = ymin
self.xmax = xmax
self.ymax = ymax
self.score = score
self.id = id
def bottom_left_point(self):
return self.xmin, self.ymin
def top_right_point(self):
return self.xmax, self.ymax
def get_coords(self):
return self.xmin, self.ymin, self.xmax, self.ymax
def clip_detections(detections, size):
for detection in detections:
detection.xmin = max(int(detection.xmin), 0)
detection.ymin = max(int(detection.ymin), 0)
detection.xmax = min(int(detection.xmax), size[1])
detection.ymax = min(int(detection.ymax), size[0])
return detections
class DetectionWithLandmarks(Detection):
def __init__(self, xmin, ymin, xmax, ymax, score, id, landmarks_x, landmarks_y):
super().__init__(xmin, ymin, xmax, ymax, score, id)
self.landmarks = []
for x, y in zip(landmarks_x, landmarks_y):
self.landmarks.append((x, y))
class OutputTransform:
def __init__(self, input_size, output_resolution):
self.output_resolution = output_resolution
if self.output_resolution:
self.new_resolution = self.compute_resolution(input_size)
def compute_resolution(self, input_size):
self.input_size = input_size
size = self.input_size[::-1]
self.scale_factor = min(self.output_resolution[0] / size[0],
self.output_resolution[1] / size[1])
return self.scale(size)
def resize(self, image):
if not self.output_resolution:
return image
curr_size = image.shape[:2]
if curr_size != self.input_size:
self.new_resolution = self.compute_resolution(curr_size)
if self.scale_factor == 1:
return image
return cv2.resize(image, self.new_resolution)
def scale(self, inputs):
if not self.output_resolution or self.scale_factor == 1:
return inputs
return (np.array(inputs) * self.scale_factor).astype(np.int32)
class InputTransform:
def __init__(self, reverse_input_channels=False, mean_values=None, scale_values=None):
self.reverse_input_channels = reverse_input_channels
self.is_trivial = not (reverse_input_channels or mean_values or scale_values)
self.means = np.array(mean_values, dtype=np.float32) if mean_values else np.array([0., 0., 0.])
self.std_scales = np.array(scale_values, dtype=np.float32) if scale_values else np.array([1., 1., 1.])
def __call__(self, inputs):
if self.is_trivial:
return inputs
if self.reverse_input_channels:
inputs = cv2.cvtColor(inputs, cv2.COLOR_BGR2RGB)
return (inputs - self.means) / self.std_scales
def load_labels(label_file):
with open(label_file, 'r') as f:
labels_map = [x.strip() for x in f]
return labels_map
def resize_image(image, size, keep_aspect_ratio=False, interpolation=cv2.INTER_LINEAR):
if not keep_aspect_ratio:
resized_frame = cv2.resize(image, size, interpolation=interpolation)
else:
h, w = image.shape[:2]
scale = min(size[1] / h, size[0] / w)
resized_frame = cv2.resize(image, None, fx=scale, fy=scale, interpolation=interpolation)
return resized_frame
def resize_image_with_aspect(image, size, interpolation=cv2.INTER_LINEAR):
return resize_image(image, size, keep_aspect_ratio=True, interpolation=interpolation)
def METHOD_NAME(image, size):
h, w = image.shape[:2]
if h != size[1] or w != size[0]:
image = np.pad(image, ((0, size[1] - h), (0, size[0] - w), (0, 0)),
mode='constant', constant_values=0)
return image
def resize_image_letterbox(image, size, interpolation=cv2.INTER_LINEAR):
ih, iw = image.shape[0:2]
w, h = size
scale = min(w / iw, h / ih)
nw = int(iw * scale)
nh = int(ih * scale)
image = cv2.resize(image, (nw, nh), interpolation=interpolation)
dx = (w - nw) // 2
dy = (h - nh) // 2
resized_image = np.pad(image, ((dy, dy + (h - nh) % 2), (dx, dx + (w - nw) % 2), (0, 0)),
mode='constant', constant_values=0)
return resized_image
def crop_resize(image, size):
desired_aspect_ratio = size[1] / size[0] # width / height
if desired_aspect_ratio == 1:
if (image.shape[0] > image.shape[1]):
offset = (image.shape[0] - image.shape[1]) // 2
cropped_frame = image[offset:image.shape[1] + offset]
else:
offset = (image.shape[1] - image.shape[0]) // 2
cropped_frame = image[:, offset:image.shape[0] + offset]
elif desired_aspect_ratio < 1:
new_width = math.floor(image.shape[0] * desired_aspect_ratio)
offset = (image.shape[1] - new_width) // 2
cropped_frame = image[:, offset:new_width + offset]
elif desired_aspect_ratio > 1:
new_height = math.floor(image.shape[1] / desired_aspect_ratio)
offset = (image.shape[0] - new_height) // 2
cropped_frame = image[offset:new_height + offset]
return cv2.resize(cropped_frame, size)
RESIZE_TYPES = {
'crop' : crop_resize,
'standard': resize_image,
'fit_to_window': resize_image_with_aspect,
'fit_to_window_letterbox': resize_image_letterbox,
}
INTERPOLATION_TYPES = {
'LINEAR': cv2.INTER_LINEAR,
'CUBIC': cv2.INTER_CUBIC,
'NEAREST': cv2.INTER_NEAREST,
'AREA': cv2.INTER_AREA,
}
def nms(x1, y1, x2, y2, scores, thresh, include_boundaries=False, keep_top_k=None):
b = 1 if include_boundaries else 0
areas = (x2 - x1 + b) * (y2 - y1 + b)
order = scores.argsort()[::-1]
if keep_top_k:
order = order[:keep_top_k]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1 + b)
h = np.maximum(0.0, yy2 - yy1 + b)
intersection = w * h
union = (areas[i] + areas[order[1:]] - intersection)
overlap = np.divide(intersection, union, out=np.zeros_like(intersection, dtype=float), where=union != 0)
order = order[np.where(overlap <= thresh)[0] + 1]
return keep
def softmax(logits, axis=None, keepdims=False):
exp = np.exp(logits - np.max(logits))
return exp / np.sum(exp, axis=axis, keepdims=keepdims)
|
2,821 |
parse
|
from __future__ import absolute_import, division, unicode_literals
from future.builtins import str
""" robotparser.py
Copyright (C) 2000 Bastian Kleineidam
You can choose between two licenses when using this package:
1) GNU GPLv2
2) PSF license for Python 2.2
The robots.txt Exclusion Protocol is implemented as specified in
http://info.webcrawler.com/mak/projects/robots/norobots-rfc.html
"""
# Was: import urllib.parse, urllib.request
from future.backports import urllib
from future.backports.urllib import METHOD_NAME as _parse, request as _request
urllib.METHOD_NAME = _parse
urllib.request = _request
__all__ = ["RobotFileParser"]
class RobotFileParser(object):
""" This class provides a set of methods to read, parse and answer
questions about a single robots.txt file.
"""
def __init__(self, url=''):
self.entries = []
self.default_entry = None
self.disallow_all = False
self.allow_all = False
self.set_url(url)
self.last_checked = 0
def mtime(self):
"""Returns the time the robots.txt file was last fetched.
This is useful for long-running web spiders that need to
check for new robots.txt files periodically.
"""
return self.last_checked
def modified(self):
"""Sets the time the robots.txt file was last fetched to the
current time.
"""
import time
self.last_checked = time.time()
def set_url(self, url):
"""Sets the URL referring to a robots.txt file."""
self.url = url
self.host, self.path = urllib.METHOD_NAME.urlparse(url)[1:3]
def read(self):
"""Reads the robots.txt URL and feeds it to the parser."""
try:
f = urllib.request.urlopen(self.url)
except urllib.error.HTTPError as err:
if err.code in (401, 403):
self.disallow_all = True
elif err.code >= 400:
self.allow_all = True
else:
raw = f.read()
self.METHOD_NAME(raw.decode("utf-8").splitlines())
def _add_entry(self, entry):
if "*" in entry.useragents:
# the default entry is considered last
if self.default_entry is None:
# the first default entry wins
self.default_entry = entry
else:
self.entries.append(entry)
def METHOD_NAME(self, lines):
"""Parse the input lines from a robots.txt file.
We allow that a user-agent: line is not preceded by
one or more blank lines.
"""
# states:
# 0: start state
# 1: saw user-agent line
# 2: saw an allow or disallow line
state = 0
entry = Entry()
for line in lines:
if not line:
if state == 1:
entry = Entry()
state = 0
elif state == 2:
self._add_entry(entry)
entry = Entry()
state = 0
# remove optional comment and strip line
i = line.find('#')
if i >= 0:
line = line[:i]
line = line.strip()
if not line:
continue
line = line.split(':', 1)
if len(line) == 2:
line[0] = line[0].strip().lower()
line[1] = urllib.METHOD_NAME.unquote(line[1].strip())
if line[0] == "user-agent":
if state == 2:
self._add_entry(entry)
entry = Entry()
entry.useragents.append(line[1])
state = 1
elif line[0] == "disallow":
if state != 0:
entry.rulelines.append(RuleLine(line[1], False))
state = 2
elif line[0] == "allow":
if state != 0:
entry.rulelines.append(RuleLine(line[1], True))
state = 2
if state == 2:
self._add_entry(entry)
def can_fetch(self, useragent, url):
"""using the parsed robots.txt decide if useragent can fetch url"""
if self.disallow_all:
return False
if self.allow_all:
return True
# search for given user agent matches
# the first match counts
parsed_url = urllib.METHOD_NAME.urlparse(urllib.METHOD_NAME.unquote(url))
url = urllib.METHOD_NAME.urlunparse(('','',parsed_url.path,
parsed_url.params,parsed_url.query, parsed_url.fragment))
url = urllib.METHOD_NAME.quote(url)
if not url:
url = "/"
for entry in self.entries:
if entry.applies_to(useragent):
return entry.allowance(url)
# try the default entry last
if self.default_entry:
return self.default_entry.allowance(url)
# agent not found ==> access granted
return True
def __str__(self):
return ''.join([str(entry) + "\n" for entry in self.entries])
class RuleLine(object):
"""A rule line is a single "Allow:" (allowance==True) or "Disallow:"
(allowance==False) followed by a path."""
def __init__(self, path, allowance):
if path == '' and not allowance:
# an empty value means allow all
allowance = True
self.path = urllib.METHOD_NAME.quote(path)
self.allowance = allowance
def applies_to(self, filename):
return self.path == "*" or filename.startswith(self.path)
def __str__(self):
return (self.allowance and "Allow" or "Disallow") + ": " + self.path
class Entry(object):
"""An entry has one or more user-agents and zero or more rulelines"""
def __init__(self):
self.useragents = []
self.rulelines = []
def __str__(self):
ret = []
for agent in self.useragents:
ret.extend(["User-agent: ", agent, "\n"])
for line in self.rulelines:
ret.extend([str(line), "\n"])
return ''.join(ret)
def applies_to(self, useragent):
"""check if this entry applies to the specified agent"""
# split the name token and make it lower case
useragent = useragent.split("/")[0].lower()
for agent in self.useragents:
if agent == '*':
# we have the catch-all agent
return True
agent = agent.lower()
if agent in useragent:
return True
return False
def allowance(self, filename):
"""Preconditions:
- our agent applies to this entry
- filename is URL decoded"""
for line in self.rulelines:
if line.applies_to(filename):
return line.allowance
return True
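# Illustrative usage sketch; the URL and user agent below are made up, not taken
# from this module:
#   rp = RobotFileParser("http://www.example.com/robots.txt")
#   rp.read()
#   if rp.can_fetch("MyCrawler", "http://www.example.com/private/page.html"):
#       ...  # the crawler may fetch the page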
|
2,822 |
set soft kill
|
#!/usr/bin/python3
import rospy
from mil_usb_to_can.sub8 import SimulatedCANDevice
from std_srvs.srv import SetBool, SetBoolRequest, SetBoolResponse
from .packets import (
KILL_SEND_ID,
THRUST_SEND_ID,
HeartbeatMessage,
KillMessage,
StatusMessage,
ThrustPacket,
)
class ThrusterAndKillBoardSimulation(SimulatedCANDevice):
"""
Serial simulator for the thruster and kill board,
providing services to simulate physical plug connections/disconnections.
Inherits from :class:`~mil_usb_to_can.SimulatedCANDevice`.
Attributes:
        hard_kill_plug_pulled (bool): Whether the physical hard kill plug has been pulled (via the simulation service).
        hard_kill_mobo (bool): Whether the motherboard experienced a hard kill request.
        soft_kill_plug_pulled (bool): Whether the physical soft kill plug has been pulled (via the simulation service).
        soft_kill_mobo (bool): Whether the motherboard experienced a soft kill request.
"""
HEARTBEAT_TIMEOUT_SECONDS = rospy.Duration(1.0)
def __init__(self, *args, **kwargs):
self.hard_kill_plug_pulled = False
self.hard_kill_mobo = False
self.soft_kill_plug_pulled = False
self.soft_kill_mobo = False
self.go_button = False
self._last_heartbeat = None
super().__init__(*args, **kwargs)
self._update_timer = rospy.Timer(rospy.Duration(1), self.send_updates)
self._soft_kill = rospy.Service(
"/simulate_soft_kill",
SetBool,
self.METHOD_NAME,
)
self._hard_kill = rospy.Service(
"/simulate_hard_kill",
SetBool,
self.set_hard_kill,
)
self._go_srv = rospy.Service("/simulate_go", SetBool, self._on_go_srv)
def _on_go_srv(self, req):
self.go_button = req.data
return {"success": True}
def METHOD_NAME(self, req: SetBoolRequest) -> SetBoolResponse:
"""
Called by the `/simulate_soft_kill` service to set the soft kill state of the
        simulated device.
Args:
req (SetBoolRequest): The request to set the service with.
Returns:
SetBoolResponse: The response to the service that the operation was successful.
"""
self.soft_kill_plug_pulled = req.data
return SetBoolResponse(success=True)
def set_hard_kill(self, req: SetBoolRequest) -> SetBoolResponse:
"""
Called by the `/simulate_hard_kill` service to set the hard kill state of the
        simulated device.
Args:
req (SetBoolRequest): The request to set the service with.
Returns:
SetBoolResponse: The response to the service that the operation was successful.
"""
self.hard_kill_plug_pulled = req.data
return SetBoolResponse(success=True)
@property
def hard_killed(self) -> bool:
"""
Whether the board was hard killed.
Returns:
bool: The status of the board being hard killed.
"""
return self.hard_kill_mobo or self.hard_kill_plug_pulled
@property
def heartbeat_timedout(self) -> bool:
"""
Whether the heartbeat timed out.
Returns:
bool: The status of the heartbeat timing out.
"""
return (
self._last_heartbeat is None
or (rospy.Time.now() - self._last_heartbeat)
> self.HEARTBEAT_TIMEOUT_SECONDS
)
@property
def soft_killed(self) -> bool:
"""
Whether the board was soft killed.
Returns:
bool: The status of the board being soft killed.
"""
return (
self.soft_kill_plug_pulled or self.soft_kill_mobo or self.heartbeat_timedout
)
def send_updates(self, *args) -> None:
"""
Sends data about the class in a new status message.
"""
status = StatusMessage(
self.heartbeat_timedout,
self.soft_kill_mobo,
self.soft_kill_plug_pulled,
self.soft_killed,
self.hard_killed,
False,
self.go_button,
not self.soft_kill_plug_pulled,
not self.hard_kill_plug_pulled,
)
self.send_data(bytes(status))
def on_data(self, data: bytes, can_id: int) -> None:
"""
Serves as the data handler for the device. Handles :class:`KillMessage`,
:class:`ThrustPacket`, and :class:`HeartbeatMessage` types.
"""
assert can_id in (THRUST_SEND_ID, KILL_SEND_ID)
if data[0] == KillMessage.IDENTIFIER:
packet = KillMessage.from_bytes(data)
assert packet.is_command
assert packet.is_hard or packet.is_soft
if packet.is_hard:
self.hard_kill_mobo = packet.is_asserted
elif packet.is_soft:
self.soft_kill_mobo = packet.is_asserted
self.send_updates()
elif data[0] == ThrustPacket.IDENTIFIER:
packet = ThrustPacket.from_bytes(data)
elif data[0] == HeartbeatMessage.IDENTIFIER:
packet = HeartbeatMessage.from_bytes(data)
self._last_heartbeat = rospy.Time.now()
else:
assert False, "No recognized identifier"
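# Illustrative sketch; the client-side wiring below is an assumption, not part of
# this file. A test node could toggle the simulated kill plugs through the services
# registered above:
#   import rospy
#   from std_srvs.srv import SetBool
#   rospy.wait_for_service("/simulate_soft_kill")
#   soft_kill = rospy.ServiceProxy("/simulate_soft_kill", SetBool)
#   soft_kill(True)   # pull the simulated soft-kill plug
#   soft_kill(False)  # plug it back in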
|
2,823 |
test enum attribute
|
# -*- coding: utf-8 -*-
"""Test attribute descriptors.
"""
import enum
import pytest
from pyvisa import constants
from pyvisa.attributes import (
Attribute,
AttrVI_ATTR_ASRL_BAUD,
AttrVI_ATTR_INTF_INST_NAME,
BooleanAttribute,
CharAttribute,
EnumAttribute,
IntAttribute,
RangeAttribute,
ValuesAttribute,
)
from . import BaseTestCase
class FakeResource:
"""Fake resource to test attributes."""
def __init__(self, attr_id, attr_value):
self.attr_id = attr_id
self.attr_value = attr_value
def get_visa_attribute(self, attr_id):
if attr_id == self.attr_id:
return self.attr_value
else:
raise ValueError()
def set_visa_attribute(self, attr_id, value):
if attr_id == self.attr_id:
self.attr_value = value
else:
raise ValueError()
def create_resource_cls(
attribute_name, attribute_type, read=True, write=True, attrs={}
):
"""Create a new attribute class and a resource using it."""
attrs.update({"attribute_id": attribute_name, "read": read, "write": write})
attr_cls = type("CA", (attribute_type,), attrs)
return type("FakeR", (FakeResource,), {"attr": attr_cls()})
class TestAttributeClasses(BaseTestCase):
"""Test the descriptors used to handle VISA attributes."""
def test_in_resource_method(self):
"""Test the in_resource class method."""
assert AttrVI_ATTR_INTF_INST_NAME.in_resource(object())
assert AttrVI_ATTR_ASRL_BAUD.in_resource(
(constants.InterfaceType.asrl, "INSTR")
)
assert not AttrVI_ATTR_ASRL_BAUD.in_resource(object())
def test_Attribute(self):
"""Test the base class Attribute."""
rc = create_resource_cls("attr_id", Attribute)
r = rc("attr_id", 1)
assert r.attr == 1
r.attr = 2
assert r.attr == 2
        # Check we do pass the right ID
r.attr_id = "dummy"
with pytest.raises(ValueError):
r.attr
with pytest.raises(ValueError):
r.attr = 2
# Un-readable attribute
rc = create_resource_cls("attr_id", Attribute, read=False)
r = rc("attr_id", 1)
with pytest.raises(AttributeError):
r.attr
# Un-writable attribute
rc = create_resource_cls("attr_id", Attribute, write=False)
r = rc("attr_id", 1)
with pytest.raises(AttributeError):
r.attr = 1
def test_BooleanAttribute(self):
"""Test BooleanAttribute."""
rc = create_resource_cls("attr_id", BooleanAttribute)
r = rc("attr_id", constants.VI_TRUE)
assert r.attr is True
r.attr = False
assert r.attr is False
assert r.attr_value == constants.VI_FALSE
def test_CharAttribute(self):
"""Test CharAttribute."""
rc = create_resource_cls("attr_id", CharAttribute)
r = rc("attr_id", ord("\n"))
assert r.attr == "\n"
r.attr = "\r"
assert r.attr == "\r"
assert r.attr_value == 13
def METHOD_NAME(self):
"""Test EnumAttribute"""
@enum.unique
class E(enum.IntEnum):
a = 1
b = 2
rc = create_resource_cls("attr_id", EnumAttribute, attrs={"enum_type": E})
r = rc("attr_id", 1)
assert r.attr == E.a
r.attr = E.b
assert r.attr == E.b
assert r.attr_value == 2
with pytest.raises(ValueError):
r.attr = 3
with pytest.raises(ValueError):
r.attr = ""
def test_IntAttribute(self):
"""Test IntAttribute."""
rc = create_resource_cls("attr_id", IntAttribute)
r = rc("attr_id", "1")
assert r.attr == 1
def test_RangeAttribute(self):
"""Test RangeAttribute"""
rc = create_resource_cls(
"attr_id", RangeAttribute, attrs={"min_value": 0, "max_value": 2}
)
r = rc("attr_id", 1)
r.attr = 0
assert r.attr_value == 0
r.attr = 2
assert r.attr_value == 2
r.attr = 1
assert r.attr_value == 1
with pytest.raises(ValueError) as cm:
r.attr = -1
assert "invalid value" in str(cm.exconly())
assert " or " not in str(cm.exconly())
with pytest.raises(ValueError) as cm:
r.attr = 3
assert "invalid value" in str(cm.exconly())
assert " or " not in str(cm.exconly())
rc = create_resource_cls(
"attr_id",
RangeAttribute,
attrs={"min_value": 0, "max_value": 2, "values": [10]},
)
r = rc("attr_id", 1)
r.attr = 10
assert r.attr_value == 10
with pytest.raises(ValueError) as cm:
r.attr = 3
assert "invalid value" in str(cm.exconly())
assert " or " in str(cm.exconly())
def test_ValuesAttribute(self):
"""Test ValuesAttribute"""
rc = create_resource_cls("attr_id", ValuesAttribute, attrs={"values": [10, 20]})
r = rc("attr_id", 1)
r.attr = 10
assert r.attr_value == 10
with pytest.raises(ValueError) as cm:
r.attr = 3
assert "invalid value" in str(cm.exconly())
|
2,824 |
on del player
|
from __future__ import annotations
from collections import Counter
from src.containers import UserDict
from src.functions import get_players, get_main_role, change_role
from src.messages import messages
from src.events import Event, event_listener
from src.cats import Wolf, Category
from src.gamestate import GameState
from src.users import User
__all__ = ["add_lycanthropy", "remove_lycanthropy", "add_lycanthropy_scope", "try_lycanthropy"]
LYCANTHROPES: UserDict[User, str] = UserDict()
SCOPE = set()
# To handle non-standard lycanthropy behavior, you will need to implement the get_role_metadata event
# with the "lycanthropy_role" key. Depending on how you fill out the metadata, implementing the get_lycanthrope_role
# event may also be required. For the metadata event, fill in the data dict as follows:
# evt.data[your_role]["role"] should be all possible wolf roles that your_role can turn into when lycanthropy triggers.
# This can either be a string (indicating one valid role) or an iterable of strings (indicating multiple valid roles).
# If an iterable of strings, you **must** also implement the get_lycanthrope_role event.
# If omitted, it is assumed that the only valid role that your_role can turn into is "wolf".
# evt.data[your_role]["prefix"] should be a message key fragment if set.
# The message "{prefix}_turn" is sent to the user when lycanthropy triggers for them, and you must also create that
# message if it does not already exist. If this data key is set, it will override the prefix specified in
# add_lycanthropy() for the user. By default, the prefix specified in add_lycanthropy() is used.
# evt.data[your_role]["secondary_roles"] should be an iterable of strings for secondary roles to add to the user
# as part of them turning. By default, no secondary roles are added to the user.
# The get_lycanthrope_role event is only fired if multiple valid roles are specified in the metadata (as per above).
# This event requires you to fill in the data dict as follows:
# evt.data["role"] must be set to a role name that is one of the possible roles specified from the metadata.
# By default, if this is not filled in, the bot will error.
# If a role is given that is not one of the possible roles specified in the metadata, the bot will error.
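# A minimal illustration of the metadata contract described above; it is not part of this module,
# "my_custom_role" and the "my_custom" message prefix are hypothetical, and in a real role module
# this function would be registered with @event_listener("get_role_metadata").
def _example_lycanthropy_metadata(evt: Event, var: GameState, kind: str):
    if kind == "lycanthropy_role":
        # my_custom_role can only ever turn into a plain wolf, so get_lycanthrope_role is not
        # needed; a "my_custom_turn" message key would also have to exist for the turn message.
        evt.data["my_custom_role"] = {"role": "wolf", "prefix": "my_custom"}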
def add_lycanthropy(var: GameState, target: User, prefix="lycan"):
"""Effect the target with lycanthropy. Fire the add_lycanthropy event."""
if target in LYCANTHROPES or target not in get_players(var):
return True
if Event("add_lycanthropy", {}).dispatch(var, target):
LYCANTHROPES[target] = prefix
return True
return False
def remove_lycanthropy(var: GameState, target: User):
"""Remove the lycanthropy effect from the target."""
del LYCANTHROPES[:target:]
def add_lycanthropy_scope(var: GameState, scope: Category | set[str]):
"""Add a scope for roles that can effect lycanthropy, for stats."""
SCOPE.update(scope)
def try_lycanthropy(var: GameState, target: User) -> bool:
"""Trigger lycanthropy on the target, if able."""
if target not in LYCANTHROPES:
return False
role = get_main_role(var, target)
if role in Wolf:
return False
evt = Event("get_role_metadata", {})
evt.dispatch(var, "lycanthropy_role")
new_role = "wolf"
prefix = LYCANTHROPES[target]
if role in evt.data:
if "role" in evt.data[role]:
new_role = evt.data[role]["role"]
if not isinstance(evt.data[role]["role"], str):
evt2 = Event("get_lycanthrope_role", {"role": None})
evt2.dispatch(var, target, role, evt.data[role]["role"])
assert evt2.data["role"] in evt.data[role]["role"]
new_role = evt2.data["role"]
if "prefix" in evt.data[role]:
prefix = evt.data[role]["prefix"]
for sec_role in evt.data[role].get("secondary_roles", ()):
var.roles[sec_role].add(target)
to_send = "{0}_notify".format(sec_role.replace(" ", "_"))
target.send(messages[to_send])
change_role(var, target, role, new_role, message=prefix + "_turn")
return True
@event_listener("reconfigure_stats")
def on_reconfigure_stats(evt: Event, var: GameState, roleset: Counter, reason: str):
from src.roles.helper.wolves import get_wolfchat_roles
if reason != "howl" or not SCOPE:
return
evt2 = Event("get_role_metadata", {})
evt2.dispatch(var, "lycanthropy_role")
roles = {}
wolfchat = get_wolfchat_roles()
for role, count in roleset.items():
if role in wolfchat or count == 0 or role not in SCOPE:
continue
if role in evt2.data and "role" in evt2.data[role]:
roles[role] = evt2.data[role]["role"]
else:
roles[role] = "wolf"
if roles and roleset in evt.data["new"]:
evt.data["new"].remove(roleset)
for role, new_roles in roles.items():
if isinstance(new_roles, str):
new_roles = [new_roles]
for new_role in new_roles:
rs = roleset.copy()
rs[role] -= 1
rs[new_role] = rs.get(new_role, 0) + 1
evt.data["new"].append(rs)
@event_listener("del_player")
def METHOD_NAME(evt: Event, var: GameState, player: User, all_roles: set[str], death_triggers: bool):
remove_lycanthropy(var, player)
@event_listener("revealroles")
def on_revealroles(evt: Event, var: GameState):
if LYCANTHROPES:
evt.data["output"].append(messages["lycanthropy_revealroles"].format(LYCANTHROPES))
@event_listener("transition_night_begin")
def on_begin_day(evt: Event, var: GameState):
LYCANTHROPES.clear()
SCOPE.clear()
@event_listener("reset")
def on_reset(evt: Event, var: GameState):
LYCANTHROPES.clear()
SCOPE.clear()
|
2,825 |
imports
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from enum import Enum
from typing import Dict, Optional, Set, Union, Tuple
from ..utils import convert_list_to_tuple
class ImportType(str, Enum):
STDLIB = "stdlib"
THIRDPARTY = "thirdparty"
AZURECORE = "azurecore"
LOCAL = "local"
BYVERSION = "by_version"
class TypingSection(str, Enum):
REGULAR = "regular" # this import is always a typing import
CONDITIONAL = "conditional" # is a typing import when we're dealing with files that py2 will use, else regular
TYPING = "typing" # never a typing import
class FileImport:
def __init__(
self,
METHOD_NAME: Optional[
Dict[
TypingSection,
Dict[
ImportType,
Dict[
str,
Set[
Optional[
Union[
str,
Tuple[
str,
str,
],
Tuple[
str,
Optional[str],
Tuple[
Tuple[Tuple[int, int], str, Optional[str]]
],
],
]
]
],
],
],
]
] = None,
) -> None:
# Basic implementation
# First level dict: TypingSection
# Second level dict: ImportType
# Third level dict: the package name.
# Fourth level set: None if this import is a "import", the name to import if it's a "from"
self._imports: Dict[
TypingSection,
Dict[
ImportType,
Dict[
str,
Set[
Optional[
Union[
str,
Tuple[
str,
str,
],
Tuple[
str,
Optional[str],
Tuple[Tuple[Tuple[int, int], str, Optional[str]]],
],
]
]
],
],
],
] = (
METHOD_NAME or {}
)
def _add_import(
self,
from_section: str,
import_type: ImportType,
name_import: Optional[
Union[
str,
Tuple[
str,
str,
],
Tuple[
str,
Optional[str],
Tuple[Tuple[Tuple[int, int], str, Optional[str]]],
],
]
] = None,
typing_section: TypingSection = TypingSection.REGULAR,
) -> None:
name_input: Optional[
Union[
str,
Tuple[
str,
str,
],
Tuple[
str,
Optional[str],
Tuple[Tuple[Tuple[int, int], str, Optional[str]]],
],
]
] = None
name_input = convert_list_to_tuple(name_import)
self._imports.setdefault(typing_section, {}).setdefault(
import_type, {}
).setdefault(from_section, set()).add(name_input)
def add_submodule_import(
self,
from_section: str,
name_import: str,
import_type: ImportType,
typing_section: TypingSection = TypingSection.REGULAR,
) -> None:
"""Add an import to this import block."""
self._add_import(from_section, import_type, name_import, typing_section)
@property
def METHOD_NAME(
self,
) -> Dict[
TypingSection,
Dict[
ImportType,
Dict[
str,
Set[
Optional[
Union[
str,
Tuple[
str,
str,
],
Tuple[
str,
Optional[str],
Tuple[Tuple[Tuple[int, int], str, Optional[str]]],
],
]
]
],
],
],
]:
return self._imports
def merge(self, file_import: "FileImport") -> None:
"""Merge the given file import format."""
for typing_section, import_type_dict in file_import.METHOD_NAME.items():
for import_type, package_list in import_type_dict.items():
for package_name, module_list in package_list.items():
for module_name in module_list:
self._add_import(
package_name, import_type, module_name, typing_section
)
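# A short usage sketch (not part of the original file); the module and class names below are
# illustrative only. It builds one FileImport holding "from typing import Any" as a conditional
# typing import, then merges in a second FileImport carrying an azure-core import.
def _example_file_import_usage() -> "FileImport":
    imports = FileImport()
    imports.add_submodule_import("typing", "Any", ImportType.STDLIB, TypingSection.CONDITIONAL)
    other = FileImport()
    other.add_submodule_import("azure.core", "PipelineClient", ImportType.AZURECORE)
    imports.merge(other)
    return imports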
|
2,826 |
get setup
|
"""
Copyright 2018 Grid Singularity
This file is part of Grid Singularity Exchange.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from gsy_e.models.area import Area
from gsy_e.models.strategy.market_maker_strategy import MarketMakerStrategy
from gsy_e.models.strategy.load_hours import LoadHoursStrategy
from gsy_e.models.strategy.pv import PVStrategy
from gsy_e.models.strategy.storage import StorageStrategy
from gsy_framework.constants_limits import ConstSettings
def METHOD_NAME(config):
ConstSettings.MASettings.MARKET_TYPE = 2
ConstSettings.GeneralSettings.EXPORT_ENERGY_TRADE_PROFILE_HR = True
ConstSettings.GeneralSettings.EXPORT_OFFER_BID_TRADE_HR = True
area = Area(
"Grid",
[
Area(
"House 1",
[
Area("H1 General Load", strategy=LoadHoursStrategy(avg_power_W=100,
hrs_per_day=6,
hrs_of_day=list(
range(12, 18)),
final_buying_rate=30,
fit_to_limit=True,
update_interval=1)
),
Area("H1 Storage1", strategy=StorageStrategy(initial_soc=50,
min_allowed_soc=10,
battery_capacity_kWh=1.2,
max_abs_battery_power_kW=5,
initial_selling_rate=30,
final_buying_rate=25,
final_selling_rate=25.1,
initial_buying_rate=0,
fit_to_limit=True,
update_interval=1)
),
Area("H1 Storage2", strategy=StorageStrategy(initial_soc=10,
min_allowed_soc=10,
battery_capacity_kWh=1.2,
max_abs_battery_power_kW=5,
initial_selling_rate=31,
final_buying_rate=25,
final_selling_rate=25.1,
initial_buying_rate=5,
fit_to_limit=True,
update_interval=1)
)
]
),
Area(
"House 2",
[
Area("H2 General Load", strategy=LoadHoursStrategy(avg_power_W=100,
hrs_per_day=4,
hrs_of_day=list(range(12,
16)),
final_buying_rate=30,
fit_to_limit=True,
update_interval=1)
),
Area("H2 PV", strategy=PVStrategy(4,
initial_selling_rate=30,
final_selling_rate=0,
fit_to_limit=True,
update_interval=1)
),
]
),
Area("Cell Tower",
strategy=LoadHoursStrategy(avg_power_W=100,
hrs_per_day=24,
hrs_of_day=list(range(0, 24)),
final_buying_rate=30,
fit_to_limit=True,
update_interval=1)
),
Area("Market Maker", strategy=MarketMakerStrategy(energy_rate=30,
grid_connected=True)
),
],
config=config
)
return area
|
2,827 |
device
|
# Copyright (c) Facebook, Inc. and its affiliates.
from __future__ import division
from typing import Any, Dict, List, Optional, Tuple
import torch
from torch import METHOD_NAME
from torch.nn import functional as F
from annotator.oneformer.detectron2.layers.wrappers import move_device_like, shapes_to_tensor
class ImageList(object):
"""
Structure that holds a list of images (of possibly
varying sizes) as a single tensor.
This works by padding the images to the same size.
The original sizes of each image is stored in `image_sizes`.
Attributes:
image_sizes (list[tuple[int, int]]): each tuple is (h, w).
During tracing, it becomes list[Tensor] instead.
"""
def __init__(self, tensor: torch.Tensor, image_sizes: List[Tuple[int, int]]):
"""
Arguments:
tensor (Tensor): of shape (N, H, W) or (N, C_1, ..., C_K, H, W) where K >= 1
image_sizes (list[tuple[int, int]]): Each tuple is (h, w). It can
be smaller than (H, W) due to padding.
"""
self.tensor = tensor
self.image_sizes = image_sizes
def __len__(self) -> int:
return len(self.image_sizes)
def __getitem__(self, idx) -> torch.Tensor:
"""
Access the individual image in its original size.
Args:
idx: int or slice
Returns:
Tensor: an image of shape (H, W) or (C_1, ..., C_K, H, W) where K >= 1
"""
size = self.image_sizes[idx]
return self.tensor[idx, ..., : size[0], : size[1]]
@torch.jit.unused
def to(self, *args: Any, **kwargs: Any) -> "ImageList":
cast_tensor = self.tensor.to(*args, **kwargs)
return ImageList(cast_tensor, self.image_sizes)
@property
def METHOD_NAME(self) -> METHOD_NAME:
return self.tensor.METHOD_NAME
@staticmethod
def from_tensors(
tensors: List[torch.Tensor],
size_divisibility: int = 0,
pad_value: float = 0.0,
padding_constraints: Optional[Dict[str, int]] = None,
) -> "ImageList":
"""
Args:
tensors: a tuple or list of `torch.Tensor`, each of shape (Hi, Wi) or
(C_1, ..., C_K, Hi, Wi) where K >= 1. The Tensors will be padded
to the same shape with `pad_value`.
size_divisibility (int): If `size_divisibility > 0`, add padding to ensure
the common height and width is divisible by `size_divisibility`.
This depends on the model and many models need a divisibility of 32.
pad_value (float): value to pad.
padding_constraints (optional[Dict]): If given, it would follow the format as
{"size_divisibility": int, "square_size": int}, where `size_divisibility` will
overwrite the above one if presented and `square_size` indicates the
square padding size if `square_size` > 0.
Returns:
an `ImageList`.
"""
assert len(tensors) > 0
assert isinstance(tensors, (tuple, list))
for t in tensors:
assert isinstance(t, torch.Tensor), type(t)
assert t.shape[:-2] == tensors[0].shape[:-2], t.shape
image_sizes = [(im.shape[-2], im.shape[-1]) for im in tensors]
image_sizes_tensor = [shapes_to_tensor(x) for x in image_sizes]
max_size = torch.stack(image_sizes_tensor).max(0).values
if padding_constraints is not None:
square_size = padding_constraints.get("square_size", 0)
if square_size > 0:
# pad to square.
max_size[0] = max_size[1] = square_size
if "size_divisibility" in padding_constraints:
size_divisibility = padding_constraints["size_divisibility"]
if size_divisibility > 1:
stride = size_divisibility
# the last two dims are H,W, both subject to divisibility requirement
max_size = (max_size + (stride - 1)).div(stride, rounding_mode="floor") * stride
# handle weirdness of scripting and tracing ...
if torch.jit.is_scripting():
max_size: List[int] = max_size.to(dtype=torch.long).tolist()
else:
if torch.jit.is_tracing():
image_sizes = image_sizes_tensor
if len(tensors) == 1:
# This seems slightly (2%) faster.
# TODO: check whether it's faster for multiple images as well
image_size = image_sizes[0]
padding_size = [0, max_size[-1] - image_size[1], 0, max_size[-2] - image_size[0]]
batched_imgs = F.pad(tensors[0], padding_size, value=pad_value).unsqueeze_(0)
else:
# max_size can be a tensor in tracing mode, therefore convert to list
batch_shape = [len(tensors)] + list(tensors[0].shape[:-2]) + list(max_size)
METHOD_NAME = (
None if torch.jit.is_scripting() else ("cpu" if torch.jit.is_tracing() else None)
)
batched_imgs = tensors[0].new_full(batch_shape, pad_value, METHOD_NAME=METHOD_NAME)
batched_imgs = move_device_like(batched_imgs, tensors[0])
for i, img in enumerate(tensors):
# Use `batched_imgs` directly instead of `img, pad_img = zip(tensors, batched_imgs)`
# Tracing mode cannot capture `copy_()` of temporary locals
batched_imgs[i, ..., : img.shape[-2], : img.shape[-1]].copy_(img)
return ImageList(batched_imgs.contiguous(), image_sizes)
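# A short usage sketch (not part of the original file), assuming two CHW images of different
# spatial sizes; with size_divisibility=32 they are padded into a single (2, 3, 64, 96) batch
# while image_sizes keeps the original (h, w) pairs.
def _example_image_list() -> "ImageList":
    a = torch.rand(3, 50, 70)
    b = torch.rand(3, 64, 96)
    image_list = ImageList.from_tensors([a, b], size_divisibility=32, pad_value=0.0)
    assert tuple(image_list.tensor.shape) == (2, 3, 64, 96)
    assert image_list.image_sizes == [(50, 70), (64, 96)]
    return image_list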
|
2,828 |
main function
|
# """Sorting components: clustering"""
from pathlib import Path
import shutil
import numpy as np
try:
import hdbscan
HAVE_HDBSCAN = True
except:
HAVE_HDBSCAN = False
import random, string, os
from spikeinterface.core import get_global_tmp_folder, get_noise_levels, get_channel_distances
from sklearn.preprocessing import QuantileTransformer, MaxAbsScaler
from spikeinterface.core.waveform_tools import extract_waveforms_to_buffers
from .clustering_tools import remove_duplicates, remove_duplicates_via_matching, remove_duplicates_via_dip
from spikeinterface.core import NumpySorting
from spikeinterface.core import extract_waveforms
from spikeinterface.sortingcomponents.features_from_peaks import compute_features_from_peaks
class PositionAndFeaturesClustering:
"""
hdbscan clustering on peak_locations previously done by localize_peaks()
"""
_default_params = {
"peak_localization_kwargs": {"method": "center_of_mass"},
"hdbscan_kwargs": {
"min_cluster_size": 50,
"allow_single_cluster": True,
"core_dist_n_jobs": -1,
"cluster_selection_method": "leaf",
},
"cleaning_kwargs": {},
"radius_um": 100,
"max_spikes_per_unit": 200,
"selection_method": "random",
"ms_before": 1.5,
"ms_after": 1.5,
"cleaning_method": "dip",
"job_kwargs": {"n_jobs": -1, "chunk_memory": "10M", "verbose": True, "progress_bar": True},
}
@classmethod
def METHOD_NAME(cls, recording, peaks, params):
        assert HAVE_HDBSCAN, "PositionAndFeaturesClustering needs hdbscan to be installed"
if "n_jobs" in params["job_kwargs"]:
if params["job_kwargs"]["n_jobs"] == -1:
params["job_kwargs"]["n_jobs"] = os.cpu_count()
if "core_dist_n_jobs" in params["hdbscan_kwargs"]:
if params["hdbscan_kwargs"]["core_dist_n_jobs"] == -1:
params["hdbscan_kwargs"]["core_dist_n_jobs"] = os.cpu_count()
d = params
peak_dtype = [("sample_index", "int64"), ("unit_index", "int64"), ("segment_index", "int64")]
fs = recording.get_sampling_frequency()
nbefore = int(params["ms_before"] * fs / 1000.0)
nafter = int(params["ms_after"] * fs / 1000.0)
num_samples = nbefore + nafter
position_method = d["peak_localization_kwargs"]["method"]
features_list = [position_method, "ptp", "energy"]
features_params = {
position_method: {"radius_um": params["radius_um"]},
"ptp": {"all_channels": False, "radius_um": params["radius_um"]},
"energy": {"radius_um": params["radius_um"]},
}
features_data = compute_features_from_peaks(
recording, peaks, features_list, features_params, ms_before=1, ms_after=1, **params["job_kwargs"]
)
hdbscan_data = np.zeros((len(peaks), 4), dtype=np.float32)
hdbscan_data[:, 0] = features_data[0]["x"]
hdbscan_data[:, 1] = features_data[0]["y"]
hdbscan_data[:, 2] = features_data[1]
hdbscan_data[:, 3] = features_data[2]
preprocessing = QuantileTransformer(output_distribution="uniform")
hdbscan_data = preprocessing.fit_transform(hdbscan_data)
clusterer = hdbscan.HDBSCAN(**d["hdbscan_kwargs"])
clusterer.fit(X=hdbscan_data)
peak_labels = clusterer.labels_
labels = np.unique(peak_labels)
labels = labels[labels >= 0] # Noisy samples are given the label -1 in hdbscan
best_spikes = {}
num_spikes = 0
all_indices = np.arange(0, peak_labels.size)
max_spikes = params["max_spikes_per_unit"]
selection_method = params["selection_method"]
import sklearn
for unit_ind in labels:
mask = peak_labels == unit_ind
if selection_method == "closest_to_centroid":
data = hdbscan_data[mask]
centroid = np.median(data, axis=0)
distances = sklearn.metrics.pairwise_distances(centroid[np.newaxis, :], data)[0]
best_spikes[unit_ind] = all_indices[mask][np.argsort(distances)[:max_spikes]]
elif selection_method == "random":
best_spikes[unit_ind] = np.random.permutation(all_indices[mask])[:max_spikes]
num_spikes += best_spikes[unit_ind].size
spikes = np.zeros(num_spikes, dtype=peak_dtype)
mask = np.zeros(0, dtype=np.int32)
for unit_ind in labels:
mask = np.concatenate((mask, best_spikes[unit_ind]))
idx = np.argsort(mask)
mask = mask[idx]
spikes["sample_index"] = peaks[mask]["sample_index"]
spikes["segment_index"] = peaks[mask]["segment_index"]
spikes["unit_index"] = peak_labels[mask]
cleaning_method = params["cleaning_method"]
print("We found %d raw clusters, starting to clean with %s..." % (len(labels), cleaning_method))
if cleaning_method == "cosine":
num_chans = recording.get_num_channels()
wfs_arrays = extract_waveforms_to_buffers(
recording,
spikes,
labels,
nbefore,
nafter,
mode="shared_memory",
return_scaled=False,
folder=None,
dtype=recording.get_dtype(),
sparsity_mask=None,
copy=True,
**params["job_kwargs"],
)
noise_levels = get_noise_levels(recording, return_scaled=False)
labels, peak_labels = remove_duplicates(
wfs_arrays, noise_levels, peak_labels, num_samples, num_chans, **params["cleaning_kwargs"]
)
elif cleaning_method == "dip":
wfs_arrays = {}
for label in labels:
mask = label == peak_labels
wfs_arrays[label] = hdbscan_data[mask]
labels, peak_labels = remove_duplicates_via_dip(wfs_arrays, peak_labels, **params["cleaning_kwargs"])
elif cleaning_method == "matching":
name = "".join(random.choices(string.ascii_uppercase + string.digits, k=8))
tmp_folder = Path(os.path.join(get_global_tmp_folder(), name))
sorting = NumpySorting.from_times_labels(spikes["sample_index"], spikes["unit_index"], fs)
we = extract_waveforms(
recording,
sorting,
tmp_folder,
overwrite=True,
ms_before=params["ms_before"],
ms_after=params["ms_after"],
**params["job_kwargs"],
return_scaled=False,
)
labels, peak_labels = remove_duplicates_via_matching(
we, peak_labels, job_kwargs=params["job_kwargs"], **params["cleaning_kwargs"]
)
shutil.rmtree(tmp_folder)
print("We kept %d non-duplicated clusters..." % len(labels))
return labels, peak_labels
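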
|
2,829 |
function at time t
|
"""Animations related to movement."""
from __future__ import annotations
__all__ = [
"Homotopy",
"SmoothedVectorizedHomotopy",
"ComplexHomotopy",
"PhaseFlow",
"MoveAlongPath",
]
from typing import TYPE_CHECKING, Any, Callable
import numpy as np
from ..animation.animation import Animation
from ..utils.rate_functions import linear
if TYPE_CHECKING:
from ..mobject.mobject import Mobject, VMobject
class Homotopy(Animation):
"""A Homotopy.
This is an animation transforming the points of a mobject according
to the specified transformation function. With the parameter :math:`t`
moving from 0 to 1 throughout the animation and :math:`(x, y, z)`
describing the coordinates of the point of a mobject,
the function passed to the ``homotopy`` keyword argument should
transform the tuple :math:`(x, y, z, t)` to :math:`(x', y', z')`,
the coordinates the original point is transformed to at time :math:`t`.
Parameters
----------
homotopy
A function mapping :math:`(x, y, z, t)` to :math:`(x', y', z')`.
mobject
The mobject transformed under the given homotopy.
run_time
The run time of the animation.
apply_function_kwargs
Keyword arguments propagated to :meth:`.Mobject.apply_function`.
kwargs
Further keyword arguments passed to the parent class.
"""
def __init__(
self,
homotopy: Callable[[float, float, float, float], tuple[float, float, float]],
mobject: Mobject,
run_time: float = 3,
apply_function_kwargs: dict[str, Any] | None = None,
**kwargs,
) -> None:
self.homotopy = homotopy
self.apply_function_kwargs = (
apply_function_kwargs if apply_function_kwargs is not None else {}
)
super().__init__(mobject, run_time=run_time, **kwargs)
def METHOD_NAME(self, t: float) -> tuple[float, float, float]:
return lambda p: self.homotopy(*p, t)
def interpolate_submobject(
self,
submobject: Mobject,
starting_submobject: Mobject,
alpha: float,
) -> None:
submobject.points = starting_submobject.points
submobject.apply_function(
self.METHOD_NAME(alpha), **self.apply_function_kwargs
)
class SmoothedVectorizedHomotopy(Homotopy):
def interpolate_submobject(
self,
submobject: Mobject,
starting_submobject: Mobject,
alpha: float,
) -> None:
super().interpolate_submobject(submobject, starting_submobject, alpha)
submobject.make_smooth()
class ComplexHomotopy(Homotopy):
def __init__(
self, complex_homotopy: Callable[[complex], float], mobject: Mobject, **kwargs
) -> None:
"""
        Complex homotopy: a function from C x [0, 1] to C.
"""
def homotopy(
x: float,
y: float,
z: float,
t: float,
) -> tuple[float, float, float]:
c = complex_homotopy(complex(x, y), t)
return (c.real, c.imag, z)
super().__init__(homotopy, mobject, **kwargs)
class PhaseFlow(Animation):
def __init__(
self,
function: Callable[[np.ndarray], np.ndarray],
mobject: Mobject,
virtual_time: float = 1,
suspend_mobject_updating: bool = False,
rate_func: Callable[[float], float] = linear,
**kwargs,
) -> None:
self.virtual_time = virtual_time
self.function = function
super().__init__(
mobject,
suspend_mobject_updating=suspend_mobject_updating,
rate_func=rate_func,
**kwargs,
)
def interpolate_mobject(self, alpha: float) -> None:
if hasattr(self, "last_alpha"):
dt = self.virtual_time * (
self.rate_func(alpha) - self.rate_func(self.last_alpha)
)
self.mobject.apply_function(lambda p: p + dt * self.function(p))
self.last_alpha = alpha
class MoveAlongPath(Animation):
"""Make one mobject move along the path of another mobject.
Example
--------
.. manim:: MoveAlongPathExample
class MoveAlongPathExample(Scene):
def construct(self):
d1 = Dot().set_color(ORANGE)
l1 = Line(LEFT, RIGHT)
l2 = VMobject()
self.add(d1, l1, l2)
l2.add_updater(lambda x: x.become(Line(LEFT, d1.get_center()).set_color(ORANGE)))
self.play(MoveAlongPath(d1, l1), rate_func=linear)
"""
def __init__(
self,
mobject: Mobject,
path: VMobject,
suspend_mobject_updating: bool | None = False,
**kwargs,
) -> None:
self.path = path
super().__init__(
mobject, suspend_mobject_updating=suspend_mobject_updating, **kwargs
)
def interpolate_mobject(self, alpha: float) -> None:
point = self.path.point_from_proportion(self.rate_func(alpha))
self.mobject.move_to(point)
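# A small sketch (not part of the original module) of the (x, y, z, t) convention described in the
# Homotopy docstring: a shear that grows linearly with t. The mobject is supplied by the caller so
# this file does not need to import any concrete mobject class; inside a Scene one would then call
# self.play(_example_shear_homotopy(some_mobject)).
def _example_shear_homotopy(mobject: Mobject, run_time: float = 2) -> Homotopy:
    # At time t, each point (x, y, z) of the mobject is moved to (x + t * y, y, z).
    return Homotopy(lambda x, y, z, t: (x + t * y, y, z), mobject, run_time=run_time)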
|
2,830 |
method test
|
from future import standard_library
standard_library.install_aliases()
from future.utils import viewitems
import hashlib
import hmac
import urllib.parse
from http.client import HTTPConnection
from Utils.Utilities import decodeBytesToUnicodeConditional, encodeUnicodeToBytesConditional
from Utils.PythonVersion import PY3
from WMCore.WebTools.Page import make_rfc_timestamp
def makeRequest(url, values=None, verb='GET', accept="text/plain",
contentType=None, secure=False, secureParam={}):
"""
:rtype: (bytes (both py2 and py3), int, str (native),
(instance httplib.HTTPResponse in py2, 'http.client.HTTPResponse' in py3))
"""
headers = {}
contentType = contentType or "application/x-www-form-urlencoded"
headers = {"content-type": contentType,
"Accept": accept,
"cms-auth-status": "NONE"}
if secure:
headers.update({"cms-auth-status": "OK",
"cms-authn-dn": "/DC=ch/OU=Organic Units/OU=Users/CN=Fake User",
"cms-authn-name": "Fake User",
"cms-authz-%s" % secureParam['role']:
"group:%s site:%s" % (secureParam['group'],
secureParam['site'])})
headers["cms-authn-hmac"] = _generateHash(secureParam["key"], headers)
data = None
if verb == 'GET' and values:
data = urllib.parse.urlencode(values, doseq=True)
elif verb != 'GET' and values:
# needs to test other encoding type
if contentType == "application/x-www-form-urlencoded":
data = urllib.parse.urlencode(values)
else:
# for other encoding scheme values assumed to be encoded already
data = values
parser = urllib.parse.urlparse(url)
uri = parser.path
if parser.query:
uri += "?" + parser.query
if verb == 'GET' and data != None:
uri = '%s?%s' % (uri, data)
# need to specify Content-length for POST method
# TODO: this function needs refactoring - too verb-related branching
if verb != 'GET':
if data:
headers.update({"content-length": len(data)})
else:
headers.update({"content-length": 0})
conn = HTTPConnection(parser.netloc)
conn.connect()
conn.request(verb, uri, data, headers)
response = conn.getresponse()
data = response.read()
conn.close()
cType = response.getheader('content-type').split(';')[0]
# data returned could be something a json like: b'"foo"', so we need to properly load it
#if '/json' in accept:
# data = json.loads(data)
return data, response.status, cType, response
def METHOD_NAME(verb, url, request_input={}, accept='text/json', contentType=None,
output={}, expireTime=0, secure=False, secureParam={}):
data, code, content_type, response = makeRequest(url, request_input, verb,
accept, contentType,
secure, secureParam)
data = decodeBytesToUnicodeConditional(data, condition=PY3)
keyMap = {'code': code, 'data': data, 'type': content_type, 'response': response}
for key, value in viewitems(output):
msg = 'Got a return %s != %s (got %s, type %s) (expected %s, type %s)' \
% (keyMap[key], value, keyMap[key], type(keyMap[key]), value, type(value))
assert keyMap[key] == value, msg
expires = response.getheader('Expires')
if expireTime != 0:
timeStamp = make_rfc_timestamp(expireTime)
assert expires == timeStamp, \
'Expires header incorrect (%s) != (%s)' % (expires, timeStamp)
return data, expires
def _generateHash(keyfile, headers):
prefix = suffix = ""
hkeys = sorted(headers.keys())
for hk in hkeys:
hk = hk.lower()
if hk[0:9] in ["cms-authn", "cms-authz"]:
prefix += "h%xv%x" % (len(hk), len(headers[hk]))
suffix += "%s%s" % (hk, headers[hk])
return hmac.new(keyfile, prefix + "#" + suffix, hashlib.sha1).hexdigest()
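# A short usage sketch (not part of the original module); the URL and query values are illustrative
# and assume some service is actually listening there. For a GET, `values` is URL-encoded into the
# query string and the call returns (raw bytes, HTTP status, content type, response object).
def _example_get_request():
    data, status, content_type, _response = makeRequest(
        "http://localhost:8080/reqmgr/view", values={"name": "job"}, verb="GET",
        accept="text/json")
    return data, status, content_type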
|
2,831 |
set up class
|
import logging
import platform
import sys
import tempfile
from pathlib import Path
from unittest import mock
from unittest import TestCase
from click.testing import CliRunner
from tests import fixtures_dir
from xsdata import __version__
from xsdata.cli import cli
from xsdata.cli import resolve_source
from xsdata.codegen.transformer import SchemaTransformer
from xsdata.codegen.writer import CodeWriter
from xsdata.formats.dataclass.generator import DataclassGenerator
from xsdata.logger import logger
from xsdata.models.config import GeneratorConfig
from xsdata.models.config import StructureStyle
from xsdata.utils.downloader import Downloader
CodeWriter.register_generator("testing", DataclassGenerator)
class CliTests(TestCase):
def setUp(self):
self.runner = CliRunner()
super().setUp()
@classmethod
def METHOD_NAME(cls):
CodeWriter.register_generator("testing", DataclassGenerator)
@classmethod
def tearDownClass(cls):
CodeWriter.unregister_generator("testing")
@mock.patch.object(SchemaTransformer, "process")
@mock.patch.object(SchemaTransformer, "__init__", return_value=None)
def test_generate(self, mock_init, mock_process):
source = fixtures_dir.joinpath("defxmlschema/chapter03.xsd")
result = self.runner.invoke(cli, [str(source), "--package", "foo"])
config = mock_init.call_args[1]["config"]
self.assertIsNone(result.exception)
self.assertFalse(mock_init.call_args[1]["print"])
self.assertEqual("foo", config.output.package)
self.assertEqual("dataclasses", config.output.format.value)
self.assertFalse(config.output.relative_imports)
self.assertEqual(StructureStyle.FILENAMES, config.output.structure_style)
self.assertEqual([source.as_uri()], mock_process.call_args[0][0])
@mock.patch.object(SchemaTransformer, "process")
@mock.patch.object(SchemaTransformer, "__init__", return_value=None)
def test_generate_with_configuration_file(self, mock_init, mock_process):
file_path = Path(tempfile.mktemp())
config = GeneratorConfig()
config.output.package = "foo.bar"
config.output.structure_style = StructureStyle.NAMESPACES
with file_path.open("w") as fp:
config.write(fp, config)
source = fixtures_dir.joinpath("defxmlschema/chapter03.xsd")
result = self.runner.invoke(
cli,
[str(source), "--config", str(file_path), "--no-eq"],
catch_exceptions=False,
)
config = mock_init.call_args[1]["config"]
self.assertIsNone(result.exception)
self.assertFalse(mock_init.call_args[1]["print"])
self.assertEqual("foo.bar", config.output.package)
self.assertEqual("dataclasses", config.output.format.value)
self.assertFalse(config.output.format.eq)
self.assertEqual(StructureStyle.NAMESPACES, config.output.structure_style)
self.assertEqual([source.as_uri()], mock_process.call_args[0][0])
file_path.unlink()
@mock.patch.object(SchemaTransformer, "process")
@mock.patch.object(SchemaTransformer, "__init__", return_value=None)
def test_generate_with_print_mode(self, mock_init, mock_process):
source = fixtures_dir.joinpath("defxmlschema/chapter03.xsd")
result = self.runner.invoke(cli, [str(source), "--package", "foo", "--print"])
self.assertIsNone(result.exception)
self.assertEqual([source.as_uri()], mock_process.call_args[0][0])
self.assertTrue(mock_init.call_args[1]["print"])
@mock.patch.object(SchemaTransformer, "process")
@mock.patch.object(SchemaTransformer, "__init__", return_value=None)
def test_generate_with_debug_mode(self, *args):
self.runner.invoke(cli, ["foo.xsd", "--package", "foo", "--debug"])
self.assertEqual(logging.DEBUG, logger.level)
@mock.patch("xsdata.cli.logger.info")
def test_init_config(self, mock_info):
output = tempfile.mktemp()
output_path = Path(output)
result = self.runner.invoke(cli, ["init-config", str(output_path)])
self.assertIsNone(result.exception)
self.assertEqual(GeneratorConfig.create(), GeneratorConfig.read(output_path))
mock_info.assert_has_calls(
[
mock.call(
"========= xsdata v%s / Python %s / Platform %s =========\n",
__version__,
platform.python_version(),
sys.platform,
),
mock.call("Initializing configuration file %s", str(output_path)),
]
)
output_path.unlink()
@mock.patch("xsdata.cli.logger.info")
def test_init_config_when_file_exists(self, mock_info):
output = tempfile.mktemp()
output_path = Path(output).resolve()
config = GeneratorConfig.create()
config.version = "20.8"
with output_path.open("w") as fp:
config.write(fp, config)
result = self.runner.invoke(cli, ["init-config", str(output_path)])
self.assertIsNone(result.exception)
self.assertNotEqual("20.8", GeneratorConfig.read(output_path))
mock_info.assert_has_calls(
[
mock.call(
"========= xsdata v%s / Python %s / Platform %s =========\n",
__version__,
platform.python_version(),
sys.platform,
),
mock.call("Updating configuration file %s", str(output_path)),
]
)
output_path.unlink()
def test_init_config_with_print_mode(self):
result = self.runner.invoke(cli, ["init-config", "--print"])
self.assertIsNone(result.exception)
self.assertIn('<Config xmlns="http://pypi.org/project/xsdata"', result.output)
@mock.patch.object(Downloader, "wget")
@mock.patch.object(Downloader, "__init__", return_value=None)
def test_download(self, mock_init, mock_wget):
uri = "http://www.w3.org/2009/01/xml.xsd"
result = self.runner.invoke(cli, ["download", uri])
self.assertIsNone(result.exception)
mock_init.assert_called_once_with(output=Path.cwd())
mock_wget.assert_called_once_with(uri)
@mock.patch.object(Downloader, "wget")
@mock.patch.object(Downloader, "__init__", return_value=None)
def test_download_with_custom_output(self, mock_init, mock_wget):
uri = "http://www.w3.org/2009/01/xml.xsd"
result = self.runner.invoke(cli, ["download", uri, "--output", "here/schemas"])
self.assertIsNone(result.exception)
mock_init.assert_called_once_with(output=Path("here/schemas").resolve())
mock_wget.assert_called_once_with(uri)
def test_resolve_source(self):
hello_path = fixtures_dir.joinpath("hello")
file = hello_path.joinpath("hello.xsd")
url = "http://www.xsdata/schema.xsd"
self.assertEqual([file.as_uri()], list(resolve_source(str(file), False)))
self.assertEqual([url], list(resolve_source(url, False)))
self.assertEqual(5, len(list(resolve_source(str(hello_path), False))))
def_xml_path = fixtures_dir.joinpath("calculator")
self.assertEqual(3, len(list(resolve_source(str(def_xml_path), False))))
actual = list(resolve_source(str(fixtures_dir), True))
self.assertEqual(39, len(actual))
|
2,832 |
works not in py
|
import re
import tempfile
import shutil
import logging
import sys
import os
import pytest
import yatest.common
import parso
from parso import cache
from parso.utils import parse_version_string
collect_ignore = ["setup.py"]
VERSIONS_2 = '2.7',
VERSIONS_3 = '3.4', '3.5', '3.6', '3.7', '3.8'
@pytest.fixture(scope='session')
def clean_parso_cache():
"""
Set the default cache directory to a temporary directory during tests.
    Note that you can't use the built-in `tmpdir` and `monkeypatch`
    fixtures here because their scope is 'function', which cannot be
    used from a 'session'-scoped fixture.
This fixture is activated in ../pytest.ini.
"""
old = cache._default_cache_path
tmp = tempfile.mkdtemp(prefix='parso-test-')
cache._default_cache_path = tmp
yield
cache._default_cache_path = old
shutil.rmtree(tmp)
def pytest_addoption(parser):
parser.addoption("--logging", "-L", action='store_true',
help="Enables the logging output.")
def pytest_generate_tests(metafunc):
if 'normalizer_issue_case' in metafunc.fixturenames:
base_dir = os.path.join(yatest.common.test_source_path(), 'normalizer_issue_files')
cases = list(colllect_normalizer_tests(base_dir))
metafunc.parametrize(
'normalizer_issue_case',
cases,
ids=[c.name for c in cases]
)
elif 'each_version' in metafunc.fixturenames:
metafunc.parametrize('each_version', VERSIONS_2 + VERSIONS_3)
elif 'each_py2_version' in metafunc.fixturenames:
metafunc.parametrize('each_py2_version', VERSIONS_2)
elif 'each_py3_version' in metafunc.fixturenames:
metafunc.parametrize('each_py3_version', VERSIONS_3)
elif 'version_ge_py36' in metafunc.fixturenames:
metafunc.parametrize('version_ge_py36', ['3.6', '3.7', '3.8'])
elif 'version_ge_py38' in metafunc.fixturenames:
metafunc.parametrize('version_ge_py38', ['3.8'])
class NormalizerIssueCase(object):
"""
Static Analysis cases lie in the static_analysis folder.
The tests also start with `#!`, like the goto_definition tests.
"""
def __init__(self, path):
self.path = path
self.name = os.path.basename(path)
match = re.search(r'python([\d.]+)\.py', self.name)
self.python_version = match and match.group(1)
def colllect_normalizer_tests(base_dir):
for f_name in os.listdir(base_dir):
if f_name.endswith(".py"):
path = os.path.join(base_dir, f_name)
yield NormalizerIssueCase(path)
def pytest_configure(config):
if config.option.logging:
root = logging.getLogger()
root.setLevel(logging.DEBUG)
#ch = logging.StreamHandler(sys.stdout)
#ch.setLevel(logging.DEBUG)
#formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
#ch.setFormatter(formatter)
#root.addHandler(ch)
class Checker():
def __init__(self, version, is_passing):
self.version = version
self._is_passing = is_passing
self.grammar = parso.load_grammar(version=self.version)
def parse(self, code):
if self._is_passing:
return parso.parse(code, version=self.version, error_recovery=False)
else:
self._invalid_syntax(code)
def _invalid_syntax(self, code):
with pytest.raises(parso.ParserSyntaxError):
module = parso.parse(code, version=self.version, error_recovery=False)
# For debugging
print(module.children)
def get_error(self, code):
errors = list(self.grammar.iter_errors(self.grammar.parse(code)))
assert bool(errors) != self._is_passing
if errors:
return errors[0]
def get_error_message(self, code):
error = self.get_error(code)
if error is None:
return
return error.message
def assert_no_error_in_passing(self, code):
if self._is_passing:
module = self.grammar.parse(code)
assert not list(self.grammar.iter_errors(module))
@pytest.fixture
def METHOD_NAME(each_version):
return Checker(each_version, False)
@pytest.fixture
def works_in_py2(each_version):
return Checker(each_version, each_version.startswith('2'))
@pytest.fixture
def works_ge_py27(each_version):
version_info = parse_version_string(each_version)
return Checker(each_version, version_info >= (2, 7))
@pytest.fixture
def works_ge_py3(each_version):
version_info = parse_version_string(each_version)
return Checker(each_version, version_info >= (3, 0))
@pytest.fixture
def works_ge_py35(each_version):
version_info = parse_version_string(each_version)
return Checker(each_version, version_info >= (3, 5))
@pytest.fixture
def works_ge_py36(each_version):
version_info = parse_version_string(each_version)
return Checker(each_version, version_info >= (3, 6))
@pytest.fixture
def works_ge_py38(each_version):
version_info = parse_version_string(each_version)
return Checker(each_version, version_info >= (3, 8))
@pytest.fixture
def works_ge_py39(each_version):
version_info = parse_version_string(each_version)
return Checker(each_version, version_info >= (3, 9))
|
2,833 |
zip choices
|
from collections import namedtuple
from django.forms import TypedChoiceField
from django.template import Library
from django.utils.translation import gettext_lazy as _
from evap.evaluation.models import BASE_UNIPOLAR_CHOICES, Contribution, Evaluation
from evap.rewards.tools import can_reward_points_be_used_by
from evap.student.forms import HeadingField
# the names displayed for contributors
STATE_NAMES = {
Evaluation.State.NEW: _("new"),
Evaluation.State.PREPARED: _("prepared"),
Evaluation.State.EDITOR_APPROVED: _("editor approved"),
Evaluation.State.APPROVED: _("approved"),
Evaluation.State.IN_EVALUATION: _("in evaluation"),
Evaluation.State.EVALUATED: _("evaluated"),
Evaluation.State.REVIEWED: _("reviewed"),
Evaluation.State.PUBLISHED: _("published"),
}
STR_TO_STATE = {s: i for i, s in Evaluation.STATE_STR_CONVERSION.items()}
# the descriptions used in tooltips for contributors
STATE_DESCRIPTIONS = {
Evaluation.State.NEW: _("The evaluation was newly created and will be prepared by the evaluation team."),
Evaluation.State.PREPARED: _(
"The evaluation was prepared by the evaluation team and is now available for editors."
),
Evaluation.State.EDITOR_APPROVED: _(
"The evaluation was approved by an editor and will now be checked by the evaluation team."
),
Evaluation.State.APPROVED: _(
"All preparations are finished. The evaluation will begin once the defined start date is reached."
),
Evaluation.State.IN_EVALUATION: _("The evaluation is currently running until the defined end date is reached."),
Evaluation.State.EVALUATED: _("The evaluation has finished and will now be reviewed by the evaluation team."),
Evaluation.State.REVIEWED: _(
"The evaluation has finished and was reviewed by the evaluation team. You will receive an email when its results are published."
),
Evaluation.State.PUBLISHED: _("The results for this evaluation have been published."),
}
# values for approval states shown to staff
StateValues = namedtuple("StateValues", ("order", "icon", "filter", "description"))
APPROVAL_STATES = {
Evaluation.State.NEW: StateValues(
0,
"fas fa-circle icon-yellow",
Evaluation.State.NEW,
_("In preparation"),
),
Evaluation.State.PREPARED: StateValues(
2,
"far fa-square icon-gray",
Evaluation.State.PREPARED,
_("Awaiting editor review"),
),
Evaluation.State.EDITOR_APPROVED: StateValues(
1,
"far fa-square-check icon-yellow",
Evaluation.State.EDITOR_APPROVED,
_("Approved by editor, awaiting manager review"),
),
Evaluation.State.APPROVED: StateValues(
3, "far fa-square-check icon-green", Evaluation.State.APPROVED, _("Approved by manager")
),
}
register = Library()
@register.filter(name="zip")
def _zip(a, b):
return zip(a, b)
@register.filter()
def METHOD_NAME(counts, choices):
return zip(counts, choices.names, choices.colors, choices.values)
@register.filter
def ordering_index(evaluation):
if evaluation.state < Evaluation.State.IN_EVALUATION:
return evaluation.days_until_evaluation
if evaluation.state == Evaluation.State.IN_EVALUATION:
return 100000 + evaluation.days_left_for_evaluation
return 200000 + evaluation.days_left_for_evaluation
# from http://www.jongales.com/blog/2009/10/19/percentage-django-template-tag/
@register.filter
def percentage(fraction, population):
try:
return f"{int(float(fraction) / float(population) * 100):.0f}%"
except ValueError:
return None
except ZeroDivisionError:
return None
@register.filter
def percentage_one_decimal(fraction, population):
try:
return f"{float(fraction) / float(population) * 100:.1f}%"
except ValueError:
return None
except ZeroDivisionError:
return None
@register.filter
def to_colors(choices):
if not choices:
# When displaying the course distribution, there are no associated voting choices.
# In that case, we just use the colors of a unipolar scale.
return BASE_UNIPOLAR_CHOICES["colors"]
return choices.colors
@register.filter
def weight_info(evaluation):
try:
course = evaluation.course
except AttributeError:
return None
if course.evaluation_weight_sum and course.evaluation_count > 1:
return percentage(evaluation.weight, course.evaluation_weight_sum)
return None
@register.filter
def statename(state):
return STATE_NAMES.get(state)
@register.filter
def statedescription(state):
return STATE_DESCRIPTIONS.get(state)
@register.filter
def approval_state_values(state):
if state in APPROVAL_STATES:
return APPROVAL_STATES[state]
return APPROVAL_STATES[Evaluation.State.APPROVED]
@register.filter
def approval_state_icon(state):
return approval_state_values(state).icon
@register.filter
def can_results_page_be_seen_by(evaluation, user):
return evaluation.can_results_page_be_seen_by(user)
@register.filter(name="can_reward_points_be_used_by")
def _can_reward_points_be_used_by(user):
return can_reward_points_be_used_by(user)
@register.filter
def is_choice_field(field):
return isinstance(field.field, TypedChoiceField)
@register.filter
def is_heading_field(field):
return isinstance(field.field, HeadingField)
@register.filter
def is_user_editor_or_delegate(evaluation, user):
return evaluation.is_user_editor_or_delegate(user)
@register.filter
def is_user_responsible_or_contributor_or_delegate(evaluation, user):
return evaluation.is_user_responsible_or_contributor_or_delegate(user)
@register.filter
def message_class(level):
return {
"debug": "info",
"info": "info",
"success": "success",
"warning": "warning",
"error": "danger",
}.get(level, "info")
@register.filter
def hours_and_minutes(time_left_for_evaluation):
hours = time_left_for_evaluation.seconds // 3600
minutes = (time_left_for_evaluation.seconds // 60) % 60
return f"{hours:02}:{minutes:02}"
@register.filter
def has_nonresponsible_editor(evaluation):
return (
evaluation.contributions.filter(role=Contribution.Role.EDITOR)
.exclude(contributor__in=evaluation.course.responsibles.all())
.exists()
)
@register.filter
def order_by(iterable, attribute):
return sorted(iterable, key=lambda item: getattr(item, attribute))
@register.filter
def get(dictionary, key):
return dictionary.get(key)
|
2,834 |
test logout redirect url
|
from unittest.mock import patch
from django.http import HttpResponse
from django.test import Client, TestCase
from django.urls import reverse
from .common import less_console_noise
@patch("djangooidc.views.CLIENT", autospec=True)
class ViewsTest(TestCase):
def setUp(self):
self.client = Client()
def say_hi(*args):
return HttpResponse("Hi")
def user_info(*args):
return {
"sub": "TEST",
"email": "[email protected]",
"first_name": "Testy",
"last_name": "Tester",
"phone": "814564000",
}
def test_error_page(self, mock_client):
pass
def test_openid_sets_next(self, mock_client):
# setup
callback_url = reverse("openid_login_callback")
# mock
mock_client.create_authn_request.side_effect = self.say_hi
# test
response = self.client.get(reverse("login"), {"next": callback_url})
# assert
session = mock_client.create_authn_request.call_args[0][0]
self.assertEqual(session["next"], callback_url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, "Hi")
def test_openid_raises(self, mock_client):
# mock
mock_client.create_authn_request.side_effect = Exception("Test")
# test
with less_console_noise():
response = self.client.get(reverse("login"))
# assert
self.assertEqual(response.status_code, 500)
self.assertTemplateUsed(response, "500.html")
self.assertIn("Server Error", response.content.decode("utf-8"))
def test_login_callback_reads_next(self, mock_client):
# setup
session = self.client.session
session["next"] = reverse("logout")
session.save()
# mock
mock_client.callback.side_effect = self.user_info
# test
with less_console_noise():
response = self.client.get(reverse("openid_login_callback"))
# assert
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, reverse("logout"))
@patch("djangooidc.views.authenticate")
def test_login_callback_raises(self, mock_auth, mock_client):
# mock
mock_client.callback.side_effect = self.user_info
mock_auth.return_value = None
# test
with less_console_noise():
response = self.client.get(reverse("openid_login_callback"))
# assert
self.assertEqual(response.status_code, 401)
self.assertTemplateUsed(response, "401.html")
self.assertIn("Unauthorized", response.content.decode("utf-8"))
def METHOD_NAME(self, mock_client):
# setup
session = self.client.session
session["state"] = "TEST" # nosec B105
session.save()
# mock
mock_client.callback.side_effect = self.user_info
mock_client.registration_response = {
"post_logout_redirect_uris": ["http://example.com/back"]
}
mock_client.provider_info = {
"end_session_endpoint": "http://example.com/log_me_out"
}
mock_client.client_id = "TEST"
# test
with less_console_noise():
response = self.client.get(reverse("logout"))
# assert
expected = (
"http://example.com/log_me_out?client_id=TEST&state"
"=TEST&post_logout_redirect_uri=http%3A%2F%2Fexample.com%2Fback"
)
actual = response.url
self.assertEqual(response.status_code, 302)
self.assertEqual(actual, expected)
@patch("djangooidc.views.auth_logout")
def test_logout_always_logs_out(self, mock_logout, _):
# Without additional mocking, logout will always fail.
# Here we test that auth_logout is called regardless
with less_console_noise():
self.client.get(reverse("logout"))
self.assertTrue(mock_logout.called)
def test_logout_callback_redirects(self, _):
# setup
session = self.client.session
session["next"] = reverse("logout")
session.save()
# test
response = self.client.get(reverse("openid_logout_callback"))
# assert
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, reverse("logout"))
|
2,835 |
check connection
|
#
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
import logging
from datetime import datetime
from typing import Any, Iterator, List, Mapping, MutableMapping, Optional, Tuple, Union
import requests
from airbyte_cdk import AirbyteLogger
from airbyte_cdk.models import AirbyteMessage, AirbyteStateMessage, ConfiguredAirbyteCatalog, ConfiguredAirbyteStream
from airbyte_cdk.sources import AbstractSource
from airbyte_cdk.sources.connector_state_manager import ConnectorStateManager
from airbyte_cdk.sources.streams import Stream
from airbyte_cdk.sources.streams.http.auth import TokenAuthenticator
from airbyte_cdk.sources.utils.schema_helpers import InternalConfig
from airbyte_cdk.utils.traced_exception import AirbyteTracedException
from dateutil.relativedelta import relativedelta
from requests import codes, exceptions # type: ignore[import]
from .api import UNSUPPORTED_BULK_API_SALESFORCE_OBJECTS, UNSUPPORTED_FILTERING_STREAMS, Salesforce
from .streams import BulkIncrementalSalesforceStream, BulkSalesforceStream, Describe, IncrementalRestSalesforceStream, RestSalesforceStream
logger = logging.getLogger("airbyte")
class AirbyteStopSync(AirbyteTracedException):
pass
class SourceSalesforce(AbstractSource):
DATETIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
START_DATE_OFFSET_IN_YEARS = 2
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.catalog = None
@staticmethod
def _get_sf_object(config: Mapping[str, Any]) -> Salesforce:
sf = Salesforce(**config)
sf.login()
return sf
def METHOD_NAME(self, logger: AirbyteLogger, config: Mapping[str, Any]) -> Tuple[bool, Optional[str]]:
try:
salesforce = self._get_sf_object(config)
salesforce.describe()
except exceptions.HTTPError as error:
error_msg = f"An error occurred: {error.response.text}"
try:
error_data = error.response.json()[0]
except (KeyError, requests.exceptions.JSONDecodeError):
pass
else:
error_code = error_data.get("errorCode")
if error.response.status_code == codes.FORBIDDEN and error_code == "REQUEST_LIMIT_EXCEEDED":
logger.warn(f"API Call limit is exceeded. Error message: '{error_data.get('message')}'")
error_msg = "API Call limit is exceeded"
return False, error_msg
return True, None
@classmethod
def _get_api_type(cls, stream_name: str, properties: Mapping[str, Any], force_use_bulk_api: bool) -> str:
# Salesforce BULK API currently does not support loading fields with data type base64 and compound data
properties_not_supported_by_bulk = {
key: value for key, value in properties.items() if value.get("format") == "base64" or "object" in value["type"]
}
rest_only = stream_name in UNSUPPORTED_BULK_API_SALESFORCE_OBJECTS
if rest_only:
logger.warning(f"BULK API is not supported for stream: {stream_name}")
return "rest"
if force_use_bulk_api and properties_not_supported_by_bulk:
logger.warning(
f"Following properties will be excluded from stream: {stream_name} due to BULK API limitations: {list(properties_not_supported_by_bulk)}"
)
return "bulk"
if properties_not_supported_by_bulk:
return "rest"
return "bulk"
@classmethod
def generate_streams(
cls,
config: Mapping[str, Any],
stream_objects: Mapping[str, Any],
sf_object: Salesforce,
) -> List[Stream]:
""" "Generates a list of stream by their names. It can be used for different tests too"""
authenticator = TokenAuthenticator(sf_object.access_token)
stream_properties = sf_object.generate_schemas(stream_objects)
streams = []
for stream_name, sobject_options in stream_objects.items():
streams_kwargs = {"sobject_options": sobject_options}
selected_properties = stream_properties.get(stream_name, {}).get("properties", {})
api_type = cls._get_api_type(stream_name, selected_properties, config.get("force_use_bulk_api", False))
if api_type == "rest":
full_refresh, incremental = RestSalesforceStream, IncrementalRestSalesforceStream
elif api_type == "bulk":
full_refresh, incremental = BulkSalesforceStream, BulkIncrementalSalesforceStream
else:
raise Exception(f"Stream {stream_name} cannot be processed by REST or BULK API.")
json_schema = stream_properties.get(stream_name, {})
pk, replication_key = sf_object.get_pk_and_replication_key(json_schema)
streams_kwargs.update(dict(sf_api=sf_object, pk=pk, stream_name=stream_name, schema=json_schema, authenticator=authenticator))
if replication_key and stream_name not in UNSUPPORTED_FILTERING_STREAMS:
start_date = config.get(
"start_date", (datetime.now() - relativedelta(years=cls.START_DATE_OFFSET_IN_YEARS)).strftime(cls.DATETIME_FORMAT)
)
stream = incremental(**streams_kwargs, replication_key=replication_key, start_date=start_date)
else:
stream = full_refresh(**streams_kwargs)
if api_type == "rest" and not stream.primary_key and stream.too_many_properties:
logger.warning(
f"Can not instantiate stream {stream_name}. "
f"It is not supported by the BULK API and can not be implemented via REST because the number of its properties "
f"exceeds the limit and it lacks a primary key."
)
continue
streams.append(stream)
return streams
def streams(self, config: Mapping[str, Any]) -> List[Stream]:
sf = self._get_sf_object(config)
stream_objects = sf.get_validated_streams(config=config, catalog=self.catalog)
streams = self.generate_streams(config, stream_objects, sf)
streams.append(Describe(sf_api=sf, catalog=self.catalog))
return streams
def read(
self,
logger: logging.Logger,
config: Mapping[str, Any],
catalog: ConfiguredAirbyteCatalog,
        state: Optional[Union[List[AirbyteStateMessage], MutableMapping[str, Any]]] = None,
) -> Iterator[AirbyteMessage]:
# save for use inside streams method
self.catalog = catalog
try:
yield from super().read(logger, config, catalog, state)
except AirbyteStopSync:
logger.info(f"Finished syncing {self.name}")
def _read_stream(
self,
logger: logging.Logger,
stream_instance: Stream,
configured_stream: ConfiguredAirbyteStream,
state_manager: ConnectorStateManager,
internal_config: InternalConfig,
) -> Iterator[AirbyteMessage]:
try:
yield from super()._read_stream(logger, stream_instance, configured_stream, state_manager, internal_config)
except exceptions.HTTPError as error:
error_data = error.response.json()[0]
error_code = error_data.get("errorCode")
url = error.response.url
if error.response.status_code == codes.FORBIDDEN and error_code == "REQUEST_LIMIT_EXCEEDED":
logger.warning(f"API Call {url} limit is exceeded. Error message: '{error_data.get('message')}'")
raise AirbyteStopSync() # if got 403 rate limit response, finish the sync with success.
raise error
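# Illustrative sketch only, not part of the connector: shows how _get_api_type falls back to the
# REST API when a stream schema contains base64 or compound fields. The object name and property
# names are hypothetical, and "ExampleObject" is assumed not to be one of the REST-only objects.
if __name__ == "__main__":
    _example_properties = {
        "Body": {"type": ["null", "string"], "format": "base64"},
        "Name": {"type": ["null", "string"]},
    }
    # "Body" cannot be loaded via the BULK API, so "rest" is expected here.
    print(SourceSalesforce._get_api_type("ExampleObject", _example_properties, force_use_bulk_api=False))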
|
2,836 |
test component recreate with long path
|
import functools
import io
import os.path
from typing import Callable
import mock
import pytest
from devtools_testutils import AzureRecordedTestCase
from test_utilities.utils import build_temp_folder
from azure.ai.ml import MLClient, load_component
from azure.ai.ml.entities import CommandComponent
from .._util import _COMPONENT_TIMEOUT_SECOND
def create_component(
client: MLClient,
component_name: str,
path="./tests/test_configs/components/helloworld_component.yml",
params_override=None,
is_anonymous=False,
):
default_param_override = [{"name": component_name}]
if params_override is None:
params_override = default_param_override
else:
params_override += default_param_override
command_component = load_component(
source=path,
params_override=params_override,
)
return client.components.create_or_update(command_component, is_anonymous=is_anonymous)
@pytest.mark.e2etest
@pytest.mark.timeout(_COMPONENT_TIMEOUT_SECOND)
@pytest.mark.usefixtures(
"recorded_test",
"mock_asset_name",
"mock_component_hash",
)
@pytest.mark.pipeline_test
class TestComponentHash(AzureRecordedTestCase):
@classmethod
def _assert_recreated_component_no_change(cls, base_dir, client, randstr, with_code_diff=False):
component_name = randstr("component_name")
component: CommandComponent = create_component(
client, component_name, params_override=[{"code": os.path.join(base_dir, "code")}]
)
# a service error saying that code is immutable will be raised if __pycache__ is not ignored in
        # asset hash calculation
recreated_component: CommandComponent = create_component(
client, component_name, params_override=[{"code": os.path.join(base_dir, "code_copy")}]
)
if not with_code_diff:
# no change, so arm id should be the same
assert recreated_component.id == component.id
assert recreated_component.code == component.code
return component, recreated_component
def test_component_recreated_with_pycache(self, client: MLClient, randstr: Callable[[str], str]) -> None:
with build_temp_folder(
extra_files_to_create={
"code/hello.py": b"def hello():\n print('hello')\n",
"code/__pycache__/__init__.cpython-36.pyc": "hello",
"code_copy/hello.py": b"def hello():\n print('hello')\n",
"code_copy/__pycache__/__init__.cpython-36.pyc": "world",
},
) as base_dir:
self._assert_recreated_component_no_change(base_dir, client, randstr)
@pytest.mark.skip(reason="seems that this still can't trigger the automatic Windows path shortening")
def METHOD_NAME(self, client: MLClient, randstr: Callable[[str], str]) -> None:
long_dir_name = os.path.join(*(["a" * 50] * 10))
with build_temp_folder(
extra_files_to_create={
os.path.join(long_dir_name, "code", "hello.py"): "def hello():\n print('hello')\n",
os.path.join(long_dir_name, "code_copy", "hello.py"): "def hello():\n print('hello')\n",
},
) as base_dir:
self._assert_recreated_component_no_change(os.path.join(base_dir, long_dir_name), client, randstr)
def test_component_recreate_cross_os(self, client: MLClient, randstr: Callable[[str], str]) -> None:
with build_temp_folder(
extra_files_to_create={
"code/hello.py": b"def hello():\n print('hello')\n",
"code_copy/hello.py": b"def hello():\r\n print('hello')\r\n",
},
) as base_dir:
component_name = randstr("component_name")
linux_component: CommandComponent = create_component(
client, component_name, params_override=[{"code": os.path.join(base_dir, "code")}]
)
windows_component: CommandComponent = create_component(
            client, component_name, params_override=[{"code": os.path.join(base_dir, "code_copy")}]
)
# although local hash is different for lf file and CRLF file, the remote hash should be the same
assert linux_component.id == windows_component.id
assert linux_component.code == windows_component.code
|
2,837 |
write to repo
|
# Copyright 2016 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals, absolute_import
from ci.tests import utils as test_utils
from ci.tests import DBTester
import os, subprocess
from django.conf import settings
from ci.recipe import RecipeCreator
from ci.recipe import RecipeRepoReader, RecipeWriter
class RecipeTester(DBTester.DBTester):
def load_recipes(self, recipes_dir):
creator = RecipeCreator.RecipeCreator(recipes_dir)
removed, new, changed = creator.load_recipes()
return creator, removed, new, changed
def write_recipe_to_repo(self, recipes_dir, recipe_dict, recipe_filename):
new_recipe = RecipeWriter.write_recipe_to_string(recipe_dict)
self.METHOD_NAME(recipes_dir, new_recipe, recipe_filename)
def create_recipe_in_repo(self, recipes_dir, test_recipe, repo_recipe, hostname=None):
recipe_file = self.get_recipe(test_recipe)
if hostname:
recipe_file = recipe_file.replace("github.com", hostname)
return self.METHOD_NAME(recipes_dir, recipe_file, repo_recipe)
def write_script_to_repo(self, recipes_dir, file_data, script_name):
fname = os.path.join("scripts", script_name)
full_fname = os.path.join(recipes_dir, fname)
with open(full_fname, "w") as f:
f.write(file_data)
subprocess.check_output(["git", "add", fname], cwd=recipes_dir)
subprocess.check_output(["git", "commit", "-m", "Added %s" % fname], cwd=recipes_dir)
return fname
def remove_recipe_from_repo(self, recipes_dir, script_name):
fname = os.path.join("recipes", script_name)
subprocess.check_output(["git", "rm", fname], cwd=recipes_dir)
subprocess.check_output(["git", "commit", "-m", "Remove %s" % fname], cwd=recipes_dir)
def METHOD_NAME(self, recipes_dir, file_data, repo_recipe):
fname = os.path.join("recipes", repo_recipe)
full_fname = os.path.join(recipes_dir, fname)
with open(full_fname, "w") as f:
f.write(file_data)
subprocess.check_output(["git", "add", fname], cwd=recipes_dir)
subprocess.check_output(["git", "commit", "-m", "Added %s" % repo_recipe], cwd=recipes_dir)
return fname
def get_recipe(self, fname):
p = '{}/{}'.format(os.path.dirname(__file__), fname)
with open(p, 'r') as f:
contents = f.read()
return contents
def create_records(self, recipe, branch):
info = {}
info["owner"] = test_utils.create_user(name=recipe["repository_owner"])
info["build_user"] = test_utils.create_user_with_token(name=recipe["build_user"])
info["repository"] = test_utils.create_repo(user=info["owner"], name=recipe["repository_name"])
info["branch"] = test_utils.create_branch(repo=info["repository"], name=branch)
return info
def create_default_recipes(self, recipes_dir, server_type=settings.GITSERVER_GITHUB):
hostname = "github.com"
if server_type == settings.GITSERVER_GITLAB:
hostname = "gitlab.com"
        self.recipe_file = self.create_recipe_in_repo(recipes_dir, "recipe_all.cfg", "recipe.cfg", hostname=hostname)
        self.recipe_pr_file = self.create_recipe_in_repo(recipes_dir, "pr_dep.cfg", "pr_dep.cfg", hostname=hostname)
        self.recipe_push_file = self.create_recipe_in_repo(recipes_dir, "push_dep.cfg", "push_dep.cfg", hostname=hostname)
self.server = test_utils.create_git_server(host_type=server_type)
self.build_user = test_utils.create_user_with_token(name="moosebuild", server=self.server)
self.owner = test_utils.create_user(name="idaholab", server=self.server)
self.repo = test_utils.create_repo(name="civet", user=self.owner)
self.branch = test_utils.create_branch(name="devel", repo=self.repo)
return self.load_recipes(recipes_dir)
def find_recipe_dict(self, filename):
for r in self.get_recipe_dicts():
if r["filename"] == filename:
return r
def get_recipe_dicts(self):
reader = RecipeRepoReader.RecipeRepoReader(settings.RECIPE_BASE_DIR)
return reader.recipes
|
2,838 |
test awaiting retry without scheduled time defaults
|
from uuid import UUID, uuid4
import pendulum
import pydantic
import pytest
from prefect.deprecated.data_documents import DataDocument
from prefect.server.schemas.states import (
AwaitingRetry,
Completed,
Failed,
Late,
Pending,
Retrying,
Running,
Scheduled,
State,
StateDetails,
StateType,
)
class TestState:
def test_state_takes_name_from_type(self):
state = State(type=StateType.RUNNING)
assert state.name == "Running"
def test_state_raises_validation_error_for_invalid_type(self):
with pytest.raises(
pydantic.ValidationError, match="(value is not a valid enumeration member)"
):
State(type="Running")
def test_state_custom_name(self):
state = State(type=StateType.RUNNING, name="My Running State")
assert state.name == "My Running State"
def test_state_default_timestamp(self):
dt = pendulum.now("UTC")
state = State(type=StateType.RUNNING)
assert state.timestamp >= dt
def test_state_copy_does_not_create_insertable_object(self):
dt = pendulum.now("UTC")
state = State(type=StateType.RUNNING, timestamp=dt, id=uuid4())
new_state = state.copy()
# Same UUID
assert new_state.id == state.id
def test_state_copy_with_field_reset_creates_insertable_object(self):
dt = pendulum.now("UTC")
state = State(type=StateType.RUNNING, timestamp=dt, id=uuid4())
new_state = state.copy(reset_fields=True)
# New UUID
assert new_state.id != state.id
assert isinstance(new_state.id, UUID)
# New state timestamp
assert new_state.timestamp >= dt
def test_state_result_warns_and_uses_client_result(self):
state = State(data=DataDocument(encoding="text", blob=b"abc"), type="COMPLETED")
with pytest.warns(DeprecationWarning, match="`result` is no longer supported"):
assert state.result() == "abc"
class TestStateTypeFunctions:
@pytest.mark.parametrize("state_type", StateType)
def test_is_scheduled(self, state_type):
state = State(type=state_type)
assert state.is_scheduled() == (state_type == StateType.SCHEDULED)
@pytest.mark.parametrize("state_type", StateType)
def test_is_pending(self, state_type):
state = State(type=state_type)
assert state.is_pending() == (state_type == StateType.PENDING)
@pytest.mark.parametrize("state_type", StateType)
def test_is_running(self, state_type):
state = State(type=state_type)
assert state.is_running() == (state_type == StateType.RUNNING)
@pytest.mark.parametrize("state_type", StateType)
def test_is_completed(self, state_type):
state = State(type=state_type)
assert state.is_completed() == (state_type == StateType.COMPLETED)
@pytest.mark.parametrize("state_type", StateType)
def test_is_failed(self, state_type):
state = State(type=state_type)
assert state.is_failed() == (state_type == StateType.FAILED)
@pytest.mark.parametrize("state_type", StateType)
def test_is_cancelled(self, state_type):
state = State(type=state_type)
assert state.is_cancelled() == (state_type == StateType.CANCELLED)
class TestStateConvenienceFunctions:
def test_completed(self):
state = Completed()
assert state.type == StateType.COMPLETED
def test_completed_with_custom_attrs(self):
state = Completed(name="my-state", state_details=StateDetails(cache_key="123"))
assert state.name == "my-state"
assert state.state_details.cache_key == "123"
def test_failed(self):
state = Failed()
assert state.type == StateType.FAILED
def test_running(self):
state = Running()
assert state.type == StateType.RUNNING
def test_pending(self):
state = Pending()
assert state.type == StateType.PENDING
def test_scheduled(self):
dt = pendulum.now("UTC")
state = Scheduled(scheduled_time=dt)
assert state.type == StateType.SCHEDULED
assert state.name == "Scheduled"
assert state.state_details.scheduled_time == dt
def test_scheduled_without_scheduled_time_defaults_to_now(self):
dt1 = pendulum.now("UTC")
state = Scheduled()
dt2 = pendulum.now("UTC")
assert dt1 <= state.state_details.scheduled_time <= dt2
def test_scheduled_with_state_details_cant_provide_scheduled_time(self):
dt = pendulum.now("UTC")
with pytest.raises(ValueError, match="(extra scheduled_time)"):
Scheduled(
scheduled_time=dt,
state_details=StateDetails(scheduled_time=dt),
)
def test_awaiting_retry(self):
dt = pendulum.now("UTC")
state = AwaitingRetry(scheduled_time=dt)
assert state.type == StateType.SCHEDULED
assert state.name == "AwaitingRetry"
assert state.state_details.scheduled_time == dt
def METHOD_NAME(self):
dt1 = pendulum.now("UTC")
state = AwaitingRetry()
dt2 = pendulum.now("UTC")
assert dt1 <= state.state_details.scheduled_time <= dt2
def test_late(self):
dt = pendulum.now("UTC")
state = Late(scheduled_time=dt)
assert state.type == StateType.SCHEDULED
assert state.name == "Late"
assert state.state_details.scheduled_time == dt
def test_late_without_scheduled_time_defaults_to_now(self):
dt1 = pendulum.now("UTC")
state = Late()
dt2 = pendulum.now("UTC")
assert dt1 <= state.state_details.scheduled_time <= dt2
def test_retrying(self):
state = Retrying()
assert state.type == StateType.RUNNING
assert state.name == "Retrying"
class TestRepresentation:
async def test_state_str_includes_message(self):
assert str(Failed(message="abc")) == "Failed('abc')"
async def test_state_str_excludes_null_message(self):
assert str(Failed(message=None)) == "Failed()"
async def test_state_str_excludes_null_message_with_name(self):
assert str(Failed(message=None, name="Test")) == "Test(type=FAILED)"
async def test_state_str_includes_type_if_name_is_custom(self):
assert str(Failed(message="abc", name="Foo")) == "Foo('abc', type=FAILED)"
async def test_state_repr_includes_message_and_type_and_result(self):
data = DataDocument(encoding="text", blob=b"abc")
assert (
repr(Completed(message="I'm done", data=data))
== """Completed(message="I'm done", type=COMPLETED, result='abc')"""
)
|
2,839 |
poetry
|
from __future__ import annotations
import os
from typing import TYPE_CHECKING
import pytest
from cleo.io.null_io import NullIO
from cleo.testers.application_tester import ApplicationTester
from cleo.testers.command_tester import CommandTester
from METHOD_NAME.installation import Installer
from METHOD_NAME.utils.env import MockEnv
from tests.helpers import MOCK_DEFAULT_GIT_REVISION
from tests.helpers import PoetryTestApplication
from tests.helpers import TestExecutor
from tests.helpers import mock_clone
if TYPE_CHECKING:
from collections.abc import Iterator
from pathlib import Path
from pytest_mock import MockerFixture
from METHOD_NAME.installation.executor import Executor
from METHOD_NAME.METHOD_NAME import Poetry
from METHOD_NAME.repositories import Repository
from METHOD_NAME.utils.env import Env
from tests.conftest import Config
from tests.types import CommandTesterFactory
from tests.types import FixtureDirGetter
from tests.types import ProjectFactory
@pytest.fixture
def env(tmp_path: Path) -> MockEnv:
path = tmp_path / ".venv"
path.mkdir(parents=True)
return MockEnv(path=path, is_venv=True)
@pytest.fixture(autouse=True)
def setup(
mocker: MockerFixture,
installed: Repository,
config: Config,
env: MockEnv,
) -> Iterator[None]:
# Do not run pip commands of the executor
mocker.patch("poetry.installation.executor.Executor.run_pip")
p = mocker.patch("poetry.installation.installer.Installer._get_installed")
p.return_value = installed
p = mocker.patch(
"poetry.repositories.installed_repository.InstalledRepository.load"
)
p.return_value = installed
# Patch git module to not actually clone projects
mocker.patch("poetry.vcs.git.Git.clone", new=mock_clone)
p = mocker.patch("poetry.vcs.git.Git.get_revision")
p.return_value = MOCK_DEFAULT_GIT_REVISION
    # Patch the virtual environment creation to actually do nothing
mocker.patch("poetry.utils.env.EnvManager.create_venv", return_value=env)
# Setting terminal width
environ = dict(os.environ)
os.environ["COLUMNS"] = "80"
yield
os.environ.clear()
os.environ.update(environ)
@pytest.fixture
def project_directory() -> str:
return "simple_project"
@pytest.fixture
def METHOD_NAME(
project_directory: str,
project_factory: ProjectFactory,
fixture_dir: FixtureDirGetter,
) -> Poetry:
return project_factory(name="simple", source=fixture_dir(project_directory))
@pytest.fixture
def app(METHOD_NAME: Poetry) -> PoetryTestApplication:
app_ = PoetryTestApplication(METHOD_NAME)
app_._load_plugins()
return app_
@pytest.fixture
def app_tester(app: PoetryTestApplication) -> ApplicationTester:
return ApplicationTester(app)
@pytest.fixture()
def executor(METHOD_NAME: Poetry, config: Config, env: MockEnv) -> TestExecutor:
return TestExecutor(env, METHOD_NAME.pool, config, NullIO())
@pytest.fixture
def command_tester_factory(
app: PoetryTestApplication, env: MockEnv
) -> CommandTesterFactory:
def _tester(
command: str,
METHOD_NAME: Poetry | None = None,
installer: Installer | None = None,
executor: Executor | None = None,
environment: Env | None = None,
) -> CommandTester:
command_obj = app.find(command)
tester = CommandTester(command_obj)
# Setting the formatter from the application
# TODO: Find a better way to do this in Cleo
app_io = app.create_io()
formatter = app_io.output.formatter
tester.io.output.set_formatter(formatter)
tester.io.error_output.set_formatter(formatter)
if METHOD_NAME:
app._poetry = METHOD_NAME
METHOD_NAME = app.METHOD_NAME
if hasattr(command_obj, "set_env"):
command_obj.set_env(environment or env)
if hasattr(command_obj, "set_installer"):
installer = installer or Installer(
tester.io,
env,
METHOD_NAME.package,
METHOD_NAME.locker,
METHOD_NAME.pool,
METHOD_NAME.config,
executor=executor
or TestExecutor(env, METHOD_NAME.pool, METHOD_NAME.config, tester.io),
)
command_obj.set_installer(installer)
return tester
return _tester
@pytest.fixture
def do_lock(command_tester_factory: CommandTesterFactory, METHOD_NAME: Poetry) -> None:
command_tester_factory("lock").execute()
assert METHOD_NAME.locker.lock.exists()
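# Illustrative sketch only, not part of the fixtures: the typical shape of a test that uses the
# command_tester_factory fixture defined above; "lock" is just an example command name.
def _example_command_test(command_tester_factory: CommandTesterFactory) -> None:
    tester = command_tester_factory("lock")
    tester.execute()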
|
2,840 |
bert embeddings from file
|
import numpy as np
from sentence_transformers import SentenceTransformer
import scipy.sparse
import warnings
from octis.models.contextualized_topic_models.datasets.dataset import CTMDataset
import os
import pickle as pkl
def get_bag_of_words(data, min_length):
"""
Creates the bag of words
"""
vect = [np.bincount(x[x != np.array(None)].astype('int'), minlength=min_length)
for x in data if np.sum(x[x != np.array(None)]) != 0]
vect = scipy.sparse.csr_matrix(vect)
return vect
def METHOD_NAME(text_file, sbert_model_to_load, batch_size=200):
"""
Creates SBERT Embeddings from an input file
"""
model = SentenceTransformer(sbert_model_to_load)
with open(text_file, encoding="utf-8") as filino:
        train_text = filino.readlines()
return np.array(model.encode(train_text, show_progress_bar=True, batch_size=batch_size))
def bert_embeddings_from_list(texts, sbert_model_to_load="bert-base-nli-mean-tokens", batch_size=100):
"""
Creates SBERT Embeddings from a list
"""
model = SentenceTransformer(sbert_model_to_load)
return np.array(model.encode(texts, show_progress_bar=True, batch_size=batch_size))
class QuickText:
"""
Integrated class to handle all the text preprocessing needed
"""
def __init__(self, bert_model, text_for_bow, text_for_bert=None, bert_path=None):
"""
:param bert_model: string, bert model to use
        :param text_for_bow: list, list of sentences with the preprocessed text
        :param text_for_bert: list, list of sentences with the unpreprocessed text
        :param bert_path: string, optional path used to cache the computed BERT embeddings
        """
self.vocab_dict = {}
self.vocab = []
self.index_dd = None
self.idx2token = None
self.bow = None
self.bert_model = bert_model
self.text_handler = ""
self.data_bert = None
self.text_for_bow = text_for_bow
if text_for_bert is not None:
self.text_for_bert = text_for_bert
else:
self.text_for_bert = None
self.bert_path = bert_path
def prepare_bow(self):
indptr = [0]
indices = []
data = []
vocabulary = {}
if self.text_for_bow is not None:
docs = self.text_for_bow
else:
docs = self.text_for_bert
for d in docs:
for term in d.split():
index = vocabulary.setdefault(term, len(vocabulary))
indices.append(index)
data.append(1)
indptr.append(len(indices))
self.vocab_dict = vocabulary
self.vocab = list(vocabulary.keys())
warnings.simplefilter('always', DeprecationWarning)
if len(self.vocab) > 2000:
warnings.warn("The vocab you are using has more than 2000 words, reconstructing high-dimensional vectors requires"
"significantly more training epochs and training samples. "
"Consider reducing the number of vocabulary items. "
"See https://github.com/MilaNLProc/contextualized-topic-models#preprocessing "
"and https://github.com/MilaNLProc/contextualized-topic-models#tldr", Warning)
self.idx2token = {v: k for (k, v) in self.vocab_dict.items()}
self.bow = scipy.sparse.csr_matrix((data, indices, indptr), dtype=int)
def load_contextualized_embeddings(self, embeddings):
self.data_bert = embeddings
def load_dataset(self):
self.prepare_bow()
if self.bert_path is not None:
if os.path.exists(self.bert_path):
                self.data_bert = pkl.load(open(self.bert_path, 'rb'))
else:
if self.data_bert is None:
if self.text_for_bert is not None:
self.data_bert = bert_embeddings_from_list(self.text_for_bert, self.bert_model)
else:
self.data_bert = bert_embeddings_from_list(self.text_for_bow, self.bert_model)
                pkl.dump(self.data_bert, open(self.bert_path, 'wb'))
training_dataset = CTMDataset(self.bow, self.data_bert, self.idx2token)
return training_dataset
class TextHandler:
"""
Class used to handle the text preparation and the BagOfWord
"""
def __init__(self, file_name=None, sentences=None):
self.file_name = file_name
self.sentences = sentences
self.vocab_dict = {}
self.vocab = []
self.index_dd = None
self.idx2token = None
self.bow = None
warnings.simplefilter('always', DeprecationWarning)
        warnings.warn("TextHandler class is deprecated and will be removed in version 2.0. Use QuickText.", Warning)
def prepare(self):
indptr = [0]
indices = []
data = []
vocabulary = {}
if self.sentences is None and self.file_name is None:
raise Exception("Sentences and file_names cannot both be none")
if self.sentences is not None:
docs = self.sentences
elif self.file_name is not None:
with open(self.file_name, encoding="utf-8") as filino:
docs = filino.readlines()
else:
raise Exception("One parameter between sentences and file_name should be selected")
for d in docs:
for term in d.split():
index = vocabulary.setdefault(term, len(vocabulary))
indices.append(index)
data.append(1)
indptr.append(len(indices))
self.vocab_dict = vocabulary
self.vocab = list(vocabulary.keys())
warnings.simplefilter('always', DeprecationWarning)
if len(self.vocab) > 2000:
warnings.warn("The vocab you are using has more than 2000 words, reconstructing high-dimensional vectors requires"
"significantly more training epochs and training samples. "
"Consider reducing the number of vocabulary items. "
"See https://github.com/MilaNLProc/contextualized-topic-models#preprocessing "
"and https://github.com/MilaNLProc/contextualized-topic-models#tldr", Warning)
self.idx2token = {v: k for (k, v) in self.vocab_dict.items()}
self.bow = scipy.sparse.csr_matrix((data, indices, indptr), dtype=int)
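# Illustrative sketch only, not part of the library: the same indptr/indices/data pattern used by
# prepare()/prepare_bow(), shown on two toy documents. Term ids are assigned in order of first
# appearance, and repeated terms simply add extra (row, column) entries to the sparse matrix.
def _toy_bow_example():
    docs = ["hello world hello", "world peace"]
    indptr, indices, data, vocabulary = [0], [], [], {}
    for d in docs:
        for term in d.split():
            indices.append(vocabulary.setdefault(term, len(vocabulary)))
            data.append(1)
        indptr.append(len(indices))
    bow = scipy.sparse.csr_matrix((data, indices, indptr), dtype=int)
    return bow, vocabulary  # vocabulary == {'hello': 0, 'world': 1, 'peace': 2}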
|
2,841 |
test uniq grid3d
|
from unittest import TestCase
import numpy as np
from aspire.basis.basis_utils import (
all_besselj_zeros,
besselj_zeros,
lgwt,
norm_assoc_legendre,
real_sph_harmonic,
sph_bessel,
unique_coords_nd,
)
class BesselTestCase(TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testBesselJZeros(self):
zeros = besselj_zeros(5.39, 10)
self.assertTrue(
np.allclose(
zeros,
[
9.22762357,
12.82884915,
16.21119514,
19.50556547,
22.75392676,
25.97476169,
29.17767578,
32.36821427,
35.54982352,
38.72476688,
],
)
)
def testNumBesselJZeros(self):
n, zeros = all_besselj_zeros(10, 20)
self.assertEqual(2, n)
self.assertTrue(np.allclose(zeros, [14.47550069, 18.43346367]))
def testSphBesselj(self):
r = np.array(
[
0,
0.785398163397448,
1.11072073453959,
1.36034952317566,
1.57079632679490,
1.75620368276018,
1.92382474524280,
2.22144146907918,
2.35619449019235,
2.35619449019235,
2.48364706644903,
2.60487101902358,
2.60487101902358,
2.72069904635133,
2.83179334978474,
2.93869083963475,
2.93869083963475,
3.14159265358979,
]
)
sph = sph_bessel(0, r)
self.assertTrue(
np.allclose(
sph,
[
1.000000000000000,
0.900316316157106,
0.806700467600633,
0.718887065276235,
0.636619772367581,
0.559651051304123,
0.487741916756892,
0.358187786013244,
0.300105438719035,
0.300105438719035,
0.246207521717852,
0.196294306927466,
0.196294306927466,
0.150173255502137,
0.107658809425615,
0.068572188169309,
0.068572188169308,
-0.000000000000000,
],
)
)
def testUniqGrid2d(self):
res = unique_coords_nd(8, 2)
self.assertTrue(
np.allclose(
res["r_unique"],
[
0.0,
0.25,
0.35355,
0.5,
0.55902,
0.70711,
0.75,
0.79057,
0.90139,
1.0,
],
)
)
self.assertEqual(res["ang_unique"].shape, (32,))
def METHOD_NAME(self):
res = unique_coords_nd(8, 3)
self.assertTrue(
np.allclose(
res["r_unique"],
[
0.0,
0.25,
0.35355,
0.43301,
0.5,
0.55902,
0.61237,
0.70711,
0.75,
0.79057,
0.82916,
0.86603,
0.90139,
0.93541,
1.0,
],
)
)
self.assertEqual(res["ang_unique"].shape, (2, 218))
def testNormAssocLegendre(self):
res = norm_assoc_legendre(
j=3, # degree
m=-2, # order (abs(m) <= j)
x=np.array(
[
-1.0,
-0.77777778,
-0.55555556,
-0.33333333,
-0.11111111,
0.11111111,
0.33333333,
0.55555556,
0.77777778,
1.0,
]
),
)
self.assertTrue(
np.allclose(
res,
[
-0.0,
-0.78714574,
-0.98393217,
-0.75903339,
-0.28112348,
0.28112348,
0.75903339,
0.98393217,
0.78714574,
0.0,
],
)
)
def testSphHarmonic(self):
res = real_sph_harmonic(
j=3, # degree
m=-2, # order (abs(m) <= j)
theta=np.array([2.1415, 1.492, 0.213]),
phi=np.array([1.45, 0.213, 4.4234]),
)
self.assertTrue(np.allclose(res, [-0.1322862, 0.04672082, 0.03448817]))
def testLGQuad(self):
        resx, resw = lgwt(ndeg=10, a=0.0, b=0.5)  # degree, start x, end x
self.assertTrue(
np.allclose(
resx,
[
0.00652337,
0.03373416,
0.08014761,
0.14165115,
0.21278142,
0.28721858,
0.35834885,
0.41985239,
0.46626584,
0.49347663,
],
)
)
self.assertTrue(
np.allclose(
resw,
[
0.01666784,
0.03736284,
0.05477159,
0.06731668,
0.07388106,
0.07388106,
0.06731668,
0.05477159,
0.03736284,
0.01666784,
],
)
)
|
2,842 |
unregister
|
#**********************************************************************
# Copyright 2020 Advanced Micro Devices, Inc
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#********************************************************************
import os
import bpy
import io
from xml.etree import ElementTree
from .library import RPRMaterialLibrary
from .loader import RPRXMLMaterialCompiler
from .image_loader import MaterialImageLoader
from rprblender.utils.logging import Log
log = Log(tag="material_library")
# The material library instance, referenced by the material browser properties and material import operator.
rpr_material_library = None
def import_xml_material(material: bpy.types.Material, name: str, xml_path: str, copy_textures: bool):
""" Create RPR material at current material slot using xml.
Nodes tree is cleaned if present, new created otherwise.
New output node added if no output found.
Change Blender material name to material library name.
Copy textures locally if requested."""
def clean_material_tree():
""" Remove every node from material tree except for output """
log("Cleaning material nodes tree")
material.node_tree.nodes.clear()
def create_material() -> bpy.types.Material:
""" Create new material and assign to current empty material slot, create slot if none found """
if not bpy.context.object.material_slots.keys():
bpy.ops.object.material_slot_add()
# 2. create material for it
new_material = bpy.data.materials.new(name=name)
# 3. assign material to material slot
bpy.context.object.material_slots[bpy.context.object.active_material_index].material = new_material
new_material.use_nodes = True
return new_material
def create_output_node() -> bpy.types.ShaderNode:
""" Create and return new output node """
output_node = material.node_tree.nodes.new('ShaderNodeOutputMaterial')
log("New output node is {}".format(output_node))
return output_node
if not material:
log("No material tree found, creating new material")
material = create_material()
else:
material.name = name
# overwrite existing nodes tree
clean_material_tree()
output = create_output_node()
root_folder = rpr_material_library.path
material_folder = os.path.dirname(xml_path)
# create images loader
image_loader = MaterialImageLoader(root_folder, material_folder, copy_textures)
# create material by xml
closure = compile_material_from_xml(xml_path, material.node_tree, image_loader)
# Link closure to output node
if closure:
log("Linking closure {} to active output {}".format(closure, output))
material.node_tree.links.new(closure.outputs[0], output.inputs[0])
def iter_materials(root):
for material in root.iter(tag='material'):
material_name = material.get('name')
yield material_name, {node.get('name'): node for node in material.iter(tag='node')}
def compile_material_from_xml(xml_path: str, node_tree, image_loader):
if not xml_path or not os.path.isfile(xml_path):
log.error("Unable to find material xml file '{}'".format(xml_path))
return None
# load material xml
with open(xml_path) as data_file:
if not data_file:
log.error("Unable to open material xml file '{}'".format(xml_path))
return None
else:
xml_tree = ElementTree.parse(io.StringIO(data_file.read()))
materials = [mat for mat in xml_tree.getroot().iter(tag='material')]
if not materials:
log.error("Unable to find material in '{}'".format(xml_path))
return None
# read first material info
material_name = materials[0].get('name')
# Material Library 2.0 uses closure_node to point at output node. For 1.0 use material name.
closure_name = materials[0].get('closure_node', '')
nodes = {node.get('name'): node for node in materials[0].iter(tag='node')}
            if not closure_name:
# MaterialLibrary 1.0 uses the material name for root node name
root_node = nodes.get(material_name)
else:
# MaterialLibrary 2.0 uses attribute "closure_node" to define at root node
root_node = nodes.get(closure_name)
return RPRXMLMaterialCompiler(nodes, node_tree, image_loader).compile(root_node)
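# Illustrative sketch only, not part of the add-on: the minimal XML shape that
# compile_material_from_xml() reads -- a <material> element carrying "name" and (for
# Material Library 2.0) "closure_node" attributes, with child <node> elements identified
# by their "name" attributes. Real library files contain more node data than shown here.
_EXAMPLE_MATERIAL_XML = """
<material name="ExampleMaterial" closure_node="root_shader">
    <node name="root_shader">
        <!-- node parameters omitted; this is only a structural placeholder -->
    </node>
</material>
"""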
def register():
log('material_browser.register')
global rpr_material_library
rpr_material_library = RPRMaterialLibrary()
def METHOD_NAME():
log('material_browser.unregister')
global rpr_material_library
rpr_material_library.clean_up()
rpr_material_library = None
|
2,843 |
get counter update
|
# -*- coding: utf-8 -*-
#
# Copyright 2015 Telefonica Investigación y Desarrollo, S.A.U
#
# This file is part of perseo-fe
#
# perseo-fe is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the License,
# or (at your option) any later version.
#
# perseo-fe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with perseo-fe.
# If not, see http://www.gnu.org/licenses/.
#
# For those usages not covered by the GNU Affero General Public License
# please contact with:
# iot_support at tid.es
#
__author__ = 'Jon Calderín Goñi <[email protected]>'
import requests
class Mock(object):
def __init__(self, host, http_port):
"""
Set the host and the port where the mock is started
:param host:
:param http_port:
:return:
"""
base_url = 'http://{host}:{http_port}'.format(host=host, http_port=http_port)
self.mail_url = '{base_url}/get/email'.format(base_url=base_url)
self.sms_url = '{base_url}/get/sms'.format(base_url=base_url)
self.update_url = '{base_url}/get/update'.format(base_url=base_url)
self.post_url = '{base_url}/get/post'.format(base_url=base_url)
self.mail_counter_url = '{base_url}/counter/email'.format(base_url=base_url)
self.sms_counter_url = '{base_url}/counter/sms'.format(base_url=base_url)
self.update_counter_url = '{base_url}/counter/update'.format(base_url=base_url)
self.post_counter_url = '{base_url}/counter/post'.format(base_url=base_url)
self.reset_mails_url = '{base_url}/reset/email'.format(base_url=base_url)
self.reset_sms_url = '{base_url}/reset/sms'.format(base_url=base_url)
self.reset_update_url = '{base_url}/reset/update'.format(base_url=base_url)
self.reset_post_url = '{base_url}/reset/post'.format(base_url=base_url)
def get_mails(self):
"""
Get all the mails sent from the mock
:return:
"""
return requests.request('get', self.mail_url)
def get_sms(self):
"""
Get all smss sent from the mock
:return:
"""
return requests.request('get', self.sms_url)
def get_update(self):
"""
Get all updates sent from the mock
:return:
"""
return requests.request('get', self.update_url)
def get_post(self):
"""
Get all posts sent from the mock
:return:
"""
return requests.request('get', self.post_url)
def get_counter_mails(self):
"""
Get the counter of the mails from the mock
:return:
"""
return requests.request('get', self.mail_counter_url)
def get_counter_post(self):
"""
Get the counter of the posts from the mock
:return:
"""
return requests.request('get', self.post_counter_url)
def METHOD_NAME(self):
"""
Get the counter of the updates from the mock
:return:
"""
return requests.request('get', self.update_counter_url)
def get_counter_sms(self):
"""
Get the counter of the sms from the mock
:return:
"""
return requests.request('get', self.sms_counter_url)
def reset_mails(self):
"""
        Reset the mails and the counter mails in the mock
:return:
"""
return requests.request('put', self.reset_mails_url)
def reset_post(self):
"""
Reset the posts and the counter posts in the mock
:return:
"""
return requests.request('put', self.reset_post_url)
def reset_update(self):
"""
Reset the updates and the counter updates in the mock
:return:
"""
return requests.request('put', self.reset_update_url)
def reset_sms(self):
"""
        Reset the smss and the counter smss in the mock
:return:
"""
        return requests.request('put', self.reset_sms_url)
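# Illustrative usage sketch, not part of this module: assumes a perseo mock server is already
# listening on the given host and HTTP port; the host and port values below are placeholders.
if __name__ == '__main__':
    mock = Mock('localhost', 9999)
    mock.reset_mails()
    print(mock.get_counter_mails().text)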
|
2,844 |
get
|
import re
import os
from codalab.common import precondition, UsageError
INSTANCE_SEPARATOR = "::"
WORKSHEET_SEPARATOR = "//"
TARGET_KEY_REGEX = r"(?<=^)(?:([^:]*?)\:(?!:))?(.*(?=$))"
TARGET_REGEX = r"(?<=^)(?:(.*?)\:\:)?(?:(.*?)\/\/)?(.+?)(?:\/(.*?))?(?=$)"
# Formatting Constants
ADDRESS_SPEC_FORMAT = "(<alias>|<address>)"
BASIC_SPEC_FORMAT = '(<uuid>|<name>)'
BASIC_BUNDLE_SPEC_FORMAT = '(<uuid>|<name>|^<index>)'
GLOBAL_SPEC_FORMAT = "[%s%s]%s" % (ADDRESS_SPEC_FORMAT, INSTANCE_SEPARATOR, BASIC_SPEC_FORMAT)
WORKSHEET_SPEC_FORMAT = GLOBAL_SPEC_FORMAT
BUNDLE_SPEC_FORMAT = '[%s%s]%s' % (
WORKSHEET_SPEC_FORMAT,
WORKSHEET_SEPARATOR,
BASIC_BUNDLE_SPEC_FORMAT,
)
BUNDLES_URL_SEPARATOR = '/bundles/'
WORKSHEETS_URL_SEPARATOR = '/worksheets/'
TARGET_SPEC_FORMAT = '%s[%s<subpath within bundle>]' % (BUNDLE_SPEC_FORMAT, os.sep)
RUN_TARGET_SPEC_FORMAT = '[<key>]:' + TARGET_SPEC_FORMAT
MAKE_TARGET_SPEC_FORMAT = '[<key>:]' + TARGET_SPEC_FORMAT
GROUP_SPEC_FORMAT = '(<uuid>|<name>|public)'
PERMISSION_SPEC_FORMAT = '((n)one|(r)ead|(a)ll)'
UUID_POST_FUNC = '[0:8]' # Only keep first 8 characters
def nested_dict_get(obj, *args, **kwargs):
"""
Get a value from a nested dictionary.
    Cleans up calls that look like this:
        bundle_info.get('owner', {}).get('user_name', None)
    And turns them into:
        nested_dict_get(bundle_info, 'owner', 'user_name')
:param obj: dict-like object to 'get' value from
:param args: variable list of nested keys
:param kwargs: supports the kwarg 'default' to specify the default value to
return if any of the keys don't exist. (default is None)
Any other kwarg will raise an exception.
:return: retrieved value or default if it doesn't exist
"""
default = kwargs.pop('default', None)
precondition(not kwargs, 'unsupported kwargs %s' % list(kwargs.keys()))
try:
for arg in args:
obj = obj[arg]
return obj
except (KeyError, TypeError):
return default
def parse_key_target(spec):
"""
Parses a keyed target spec into its key and the rest of the target spec.
Raise UsageError when the value of the spec is empty.
:param spec: a target spec in the form of
[[<key>]:][<instance>::][<worksheet_spec>//]<bundle_spec>[/<subpath>]
where <bundle_spec> is required and the rest are optional.
:return: a tuple of the following in that order:
- <key>: (<key> if present,
empty string if ':' in spec but no <key>,
None otherwise)
- <value> (where value is everything after a <key>: (or everything if no key specified)
"""
match = re.match(TARGET_KEY_REGEX, spec)
key, value = match.groups()
# This check covers three usage errors:
# 1. both key and value are empty, e.g. "cl run : 'echo a'"
# 2. key is not empty, value is empty, e.g. "cl run a.txt: 'echo a'"
if value == '':
raise UsageError(
'target_spec (%s) in wrong format. Please provide a valid target_spec in the format of %s.'
% (spec, RUN_TARGET_SPEC_FORMAT)
)
return (key, value)
def parse_target_spec(spec):
"""
Parses a (non-keyed) target spec into its components
:param spec: a target spec in the form of
[<instance>::][<worksheet_spec>//]<bundle_spec>[/<subpath>]
where <bundle_spec> is required and the rest are optional.
:return: a tuple of the following in that order:
- <instance>
- <worksheet_spec>
- <bundle_spec>
- <subpath>
"""
match = re.match(TARGET_REGEX, spec)
return match.groups() if match else (None, None, None, None)
def desugar_command(orig_target_spec, command):
"""
Desugar command, returning mutated target_spec and command.
Examples:
- %a.txt% => [b1:a.txt], b1
- %:a.txt% => [:a.txt], a.txt (implicit key is a.txt)
- %instance::ws//a.txt% => [b1:instance::ws//a.txt], b1
- %corenlp%/run %a.txt% => [b1:corenlp, b2:a.txt], b1/run b2
- %:word-vectors//glove.6B%/vector.txt =>
[glove.6B/vector.txt:word-vectors//glove.6B/vector.txt], glove.6B/vector.txt
"""
# If key is not specified, use b1, b2, b3 by default.
pattern = re.compile('^([^%]*)%([^%]+)%(.*)$')
buf = '' # Build up the modified command
key2val = {} # e.g., b1 => a.txt
val2key = {} # e.g., a.txt => b1 (use first key)
def METHOD_NAME(dep): # Return the key
key, val = parse_key_target(dep)
if key == '':
# key only matches empty string if ':' present
_, _, bundle, subpath = parse_target_spec(val)
key = subpath if subpath is not None else bundle
elif key is None:
# key only returns None if ':' not present in original spec
key = val2key[val] if val in val2key else 'b' + str(len(target_spec) + 1)
if val not in val2key:
val2key[val] = key
if key in key2val:
if key2val[key] != val:
raise UsageError(
'key %s exists with multiple values: %s and %s' % (key, key2val[key], val)
)
else:
key2val[key] = val
target_spec.append(key + ':' + val)
return key
target_spec = []
for spec in orig_target_spec:
METHOD_NAME(spec)
while True:
match = pattern.match(command)
if not match:
break
buf += match.group(1) + METHOD_NAME(match.group(2))
command = match.group(3)
return (target_spec, buf + command)
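# Illustrative sketch only, not part of the module: expected results of the spec parsers on a few
# hand-written target specs; the instance, worksheet and bundle names are made up.
if __name__ == '__main__':
    assert parse_key_target('deps:main.py') == ('deps', 'main.py')
    assert parse_key_target('main.py') == (None, 'main.py')
    assert parse_target_spec('prod::w1//bundle1/sub/path') == ('prod', 'w1', 'bundle1', 'sub/path')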
|
2,845 |
test basic execution group
|
"""Unit tests for the testplan.testing.multitest.suite module."""
import re
import pytest
from testplan.common.utils.exceptions import should_raise
from testplan.common.utils.interface import MethodSignatureMismatch
from testplan.common.utils.strings import format_description
from testplan.testing.multitest import suite
from testplan.testing import tagging
@suite.testsuite
class MySuite1:
def pre_testcase(self, name, env, result):
pass
def post_testcase(self, name, env, result):
pass
@suite.testcase
def case1(self, env, result):
pass
@suite.skip_if(lambda testsuite: True)
@suite.testcase
def case2(self, env, result):
pass
@suite.testcase
def case3(self, env, result):
pass
@suite.testsuite(tags="A")
class MySuite2:
@suite.testcase(tags="B")
def case1(self, env, result):
pass
@suite.skip_if(lambda testsuite: True)
@suite.testcase(tags={"c": "C"})
def case2(self, env, result):
pass
@suite.testcase(tags={"d": ["D1", "D2"]})
def case3(self, env, result):
pass
@suite.testsuite
class MySuite3:
@suite.testcase(parameters=(1, 2, 3))
def case(self, env, result, param):
pass
@suite.testsuite
class MySuite4:
@suite.testcase(execution_group="group_0")
def case1(self, env, result):
pass
@suite.testcase(execution_group="group_1")
def case2(self, env, result):
pass
@suite.testcase(execution_group="group_0")
def case3(self, env, result):
pass
@suite.testcase(execution_group="group_1")
def case4(self, env, result):
pass
@suite.testcase(parameters=(1, 2, 3), execution_group="group_parallel")
def case(self, env, result, param):
pass
def skip_func(testsuite): # pylint: disable=unused-argument
return True
@suite.skip_if_testcase(skip_func)
@suite.testsuite(name="Skipped Suite")
class MySuite5:
@suite.testcase
def case1(self, env, result):
result.equal(1, 2)
@suite.skip_if(skip_func, lambda testsuite: False)
@suite.testcase
def case2(self, env, result):
result.equal(1, 1)
def test_basic_suites():
mysuite = MySuite1()
cases = ("case1", "case2", "case3")
assert tuple(mysuite.__testcases__) == cases
assert "pre_testcase" not in mysuite.__testcases__
assert "post_testcase" not in mysuite.__testcases__
for method in suite.get_testcase_methods(MySuite1):
assert method.__name__ in cases
assert callable(method)
for method in mysuite.get_testcases():
assert method.__name__ in cases
assert callable(method)
def test_basic_suite_tags():
mysuite = MySuite2()
assert mysuite.__tags__ == {"simple": {"A"}}
case_dict = {
"case1": {"simple": {"B"}},
"case2": {"c": {"C"}},
"case3": {"d": {"D2", "D1"}},
}
for method in mysuite.get_testcases():
assert method.__tags__ == case_dict[method.__name__]
assert method.__tags_index__ == tagging.merge_tag_dicts(
case_dict[method.__name__], mysuite.__tags__
)
def test_basic_parametrization():
mysuite = MySuite3()
cases = ("case__param_1", "case__param_2", "case__param_3")
assert tuple(mysuite.__testcases__) == cases
for method in mysuite.get_testcases():
assert method.__name__ in cases
assert callable(method)
def METHOD_NAME():
mysuite = MySuite4()
for i, method in enumerate(mysuite.get_testcases()):
if method.__name__.startswith("case__"):
assert method.execution_group == "group_parallel"
else:
assert method.execution_group == "group_{}".format(i % 2)
def test_skip_if_predicates():
mysuite = MySuite1()
assert len(getattr(mysuite, "case2").__skip__) == 1
assert getattr(mysuite, "case2").__skip__[0](mysuite)
mysuite = MySuite5()
assert len(getattr(mysuite, "case1").__skip__) == 1
assert len(getattr(mysuite, "case2").__skip__) == 3
# ``skip_func`` is added to ``MySuite5.__skip__`` twice
assert (
getattr(mysuite, "case2").__skip__[0]
== getattr(mysuite, "case2").__skip__[2]
)
assert getattr(mysuite, "case1").__skip__[0](mysuite)
assert getattr(mysuite, "case2").__skip__[0](mysuite)
assert not getattr(mysuite, "case2").__skip__[1](mysuite)
def incorrect_case_signature1():
@suite.testsuite
class _:
@suite.testcase
def case1(self, envs, result):
pass
def incorrect_case_signature2():
@suite.testsuite
class _:
@suite.testcase
def case1(self, env, results):
pass
def test_testcase_signature():
pattern = re.compile(
(
r".*Expected arguments for case1 are \['self', 'env', 'result'\], "
r"not \['self', 'envs', 'result'\].*"
)
)
should_raise(
MethodSignatureMismatch, incorrect_case_signature1, pattern=pattern
)
pattern = re.compile(
(
r".*Expected arguments for case1 are \['self', 'env', 'result'\], "
r"not \['self', 'env', 'results'\].*"
)
)
should_raise(
MethodSignatureMismatch, incorrect_case_signature2, pattern=pattern
)
def incorrent_skip_if_signature1():
@suite.testsuite
class _:
@suite.skip_if(lambda _: True)
@suite.testcase
def case1(self, env, result):
pass
def test_skip_if_signature():
pattern = re.compile(
r".*Expected arguments for <lambda> are \['testsuite'\], not \['_'\].*"
)
try:
should_raise(
MethodSignatureMismatch,
incorrent_skip_if_signature1,
pattern=pattern,
)
finally:
# Reset the global __TESTCASES__ list so that it doesn't contain a
# "case1" entry.
suite.__TESTCASES__ = []
@pytest.mark.parametrize(
"text,expected",
(
("", ""),
("foo", "foo"),
(" foo", "foo"),
("foo", "foo"),
(" foo \n bar\n\n", " foo\n bar"),
("\t\tfoo \n bar\n\n", " foo\n bar"),
(" foo\n bar\n\n", " foo\nbar"),
),
)
def test_format_description(text, expected):
    assert format_description(text) == expected
|
2,846 |
from 3 points
|
# bluemira is an integrated inter-disciplinary design tool for future fusion
# reactors. It incorporates several modules, some of which rely on other
# codes, to carry out a range of typical conceptual fusion reactor design
# activities.
#
# Copyright (C) 2021-2023 M. Coleman, J. Cook, F. Franza, I.A. Maione, S. McIntosh,
# J. Morris, D. Short
#
# bluemira is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# bluemira is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with bluemira; if not, see <https://www.gnu.org/licenses/>.
"""
Wrapper for FreeCAD Plane objects
"""
from __future__ import annotations
import copy
from typing import TYPE_CHECKING, Iterable, Optional, Tuple
import numpy as np
if TYPE_CHECKING:
from bluemira.geometry.placement import BluemiraPlacement
import bluemira.codes._freecadapi as cadapi
from bluemira.geometry.constants import VERY_BIG
from bluemira.geometry.face import BluemiraFace
__all__ = ["BluemiraPlane"]
class BluemiraPlane:
"""
Bluemira Plane class.
Parameters
----------
base:
Plane reference point
axis:
        normal vector to the plane
label:
Label of the plane
"""
def __init__(
self,
base: Tuple[float, float, float] = (0.0, 0.0, 0.0),
axis: Tuple[float, float, float] = (0.0, 0.0, 1.0),
label: str = "",
):
if np.allclose(np.array(axis), np.array([0, 0, 0])):
raise ValueError("Axis must to be a vector with non zero norm.")
self._shape = cadapi.make_plane(base, axis)
self.label = label
@classmethod
def METHOD_NAME(
cls,
point_1: Iterable[float],
point_2: Iterable[float],
point_3: Iterable[float],
label: str = "",
):
"""
Instantiate a BluemiraPlane from three points.
Parameters
----------
point_1:
First point
point_2:
Second Point
point_3:
Third point
label:
Label of the plane
"""
plane = BluemiraPlane()
plane._shape = cadapi.make_plane_from_3_points(point_1, point_2, point_3)
plane.label = label
return plane
@property
def base(self) -> np.ndarray:
"""Plane's reference point"""
return cadapi.vector_to_numpy(self._shape.Position)
@base.setter
def base(self, value: Iterable[float]):
"""
Set a new plane base
Parameters
----------
value:
Base vector
"""
self._shape.Position = cadapi.Base.Vector(value)
@property
def axis(self) -> np.ndarray:
"""Plane's normal vector"""
return cadapi.vector_to_numpy(self._shape.Axis)
@axis.setter
def axis(self, value: Iterable[float]):
"""
Set a new plane axis
Parameters
----------
value:
Axis vector
"""
self._shape.Axis = cadapi.Base.Vector(value)
def move(self, vector: Iterable[float]):
"""Moves the Plane along the given vector"""
self.base = self.base + np.array(vector)
def __repr__(self) -> str:
"""
Plane __repr__
"""
return (
f"([{type(self).__name__}] = Label: {self.label},"
f" base: {self.base},"
f" axis: {self.axis})"
)
def copy(self, label: Optional[str] = None):
"""
        Make a copy of the BluemiraPlane.
"""
plane_copy = copy.copy(self)
if label is not None:
plane_copy.label = label
else:
plane_copy.label = self.label
return plane_copy
def deepcopy(self, label: Optional[str] = None):
"""Make a deepcopy of the BluemiraPlane"""
plane_copy = BluemiraPlane(self.base, self.axis)
if label is not None:
plane_copy.label = label
else:
plane_copy.label = self.label
return plane_copy
def to_face(
self, width: float = VERY_BIG, height: float = VERY_BIG, label: str = ""
) -> BluemiraFace:
"""
Convert the plane to a face with dimension (width, height) and centred into
the plane base position.
"""
face = cadapi.face_from_plane(self._shape, width, height)
bmface = BluemiraFace._create(face, label)
return bmface
def to_placement(self) -> BluemiraPlacement:
"""
Convert the plane into a placement
"""
from bluemira.geometry.placement import BluemiraPlacement
return BluemiraPlacement._create(cadapi.placement_from_plane(self._shape))
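# Illustrative usage sketch, not part of the module: builds a plane through three points and turns
# it into a bounded face; running it requires a working FreeCAD-backed bluemira environment.
if __name__ == "__main__":
    xy_plane = BluemiraPlane.METHOD_NAME((0, 0, 0), (1, 0, 0), (0, 1, 0), label="xy")
    print(xy_plane)  # expected: base at the origin, axis along the z direction
    xy_face = xy_plane.to_face(width=10.0, height=10.0, label="xy-face")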
|
2,847 |
create context menu
|
from PyQt5.QtCore import QItemSelection, pyqtSlot
from PyQt5.QtCore import pyqtSignal, QItemSelectionModel, Qt
from PyQt5.QtGui import QContextMenuEvent, QDropEvent, QIcon
from PyQt5.QtWidgets import QTreeView, QAbstractItemView, QMenu
from urh.models.ProtocolTreeModel import ProtocolTreeModel
class ProtocolTreeView(QTreeView):
create_new_group_clicked = pyqtSignal()
selection_changed = pyqtSignal()
files_dropped_on_group = pyqtSignal(list, int)
close_wanted = pyqtSignal(list)
def __init__(self, parent=None):
super().__init__(parent)
self.setSelectionMode(QAbstractItemView.ExtendedSelection)
self.setDragEnabled(True)
self.setAcceptDrops(True)
self.setDropIndicatorShown(True)
self.move_to_group_actions = {}
self.context_menu_pos = None
def model(self) -> ProtocolTreeModel:
return super().model()
def selectionModel(self) -> QItemSelectionModel:
return super().selectionModel()
def METHOD_NAME(self):
menu = QMenu()
new_group_action = menu.addAction(self.tr("Create a new group"))
new_group_action.setIcon(QIcon.fromTheme("list-add"))
new_group_action.triggered.connect(self.on_new_group_action_triggered)
item = self.model().getItem(self.indexAt(self.context_menu_pos))
selected_items = [self.model().getItem(index) for index in self.selectionModel().selectedIndexes()]
selected_protocols = [item.protocol for item in selected_items if not item.is_group]
self.move_to_group_actions.clear()
if item.is_group:
delete_group_action = menu.addAction(self.tr("Delete group"))
delete_group_action.setIcon(QIcon.fromTheme("list-remove"))
delete_group_action.triggered.connect(self.on_delete_group_action_triggered)
elif item != self.model().rootItem:
tree_items = self.model().protocol_tree_items
other_groups = [i for i in tree_items.keys() if item not in tree_items[i]]
if len(selected_protocols) > 0:
menu.addSeparator()
close_action = menu.addAction(self.tr("Close"))
close_action.setIcon(QIcon.fromTheme("window-close"))
close_action.triggered.connect(self.on_close_action_triggered)
if len(other_groups) > 0:
move_to_group_menu = menu.addMenu("Move to Group")
for i in other_groups:
group_name = self.model().rootItem.child(i).data()
move_to_group_action = move_to_group_menu.addAction(group_name)
move_to_group_action.triggered.connect(self.on_move_to_group_action_triggered)
self.move_to_group_actions[move_to_group_action] = i
if item != self.model().rootItem:
menu.addSeparator()
sort_group_elements_action = menu.addAction("Sort Group Elements")
sort_group_elements_action.setIcon(QIcon.fromTheme("view-sort-ascending"))
sort_group_elements_action.triggered.connect(self.on_sort_group_elements_action_triggered)
return menu
def contextMenuEvent(self, event: QContextMenuEvent):
self.context_menu_pos = event.pos()
menu = self.METHOD_NAME()
menu.exec(self.mapToGlobal(event.pos()))
self.context_menu_pos = None
def selectionChanged(self, selection1: QItemSelection, selection2: QItemSelection):
self.selection_changed.emit()
super().selectionChanged(selection1, selection2)
def dropEvent(self, event: QDropEvent):
if len(event.mimeData().urls()) > 0:
group_id = self.model().get_group_id_for_index(self.indexAt(event.pos()))
self.files_dropped_on_group.emit(event.mimeData().urls(), group_id)
else:
super().dropEvent(event)
@pyqtSlot()
def on_new_group_action_triggered(self):
self.model().addGroup()
self.model().update()
@pyqtSlot()
def on_move_to_group_action_triggered(self):
selected_items = [self.model().getItem(index) for index in self.selectionModel().selectedIndexes()]
i = self.move_to_group_actions[self.sender()]
self.model().move_to_group(selected_items, i)
@pyqtSlot()
def on_close_action_triggered(self):
selected_items = [self.model().getItem(index) for index in self.selectionModel().selectedIndexes()]
selected_protocols = [item.protocol for item in selected_items if not item.is_group]
self.close_wanted.emit(selected_protocols)
@pyqtSlot()
def on_delete_group_action_triggered(self):
item = self.model().getItem(self.indexAt(self.context_menu_pos))
self.model().delete_group(item)
@pyqtSlot()
def on_sort_group_elements_action_triggered(self):
item = self.model().getItem(self.indexAt(self.context_menu_pos))
if item.is_group:
sortgroup_id = self.model().rootItem.index_of(item)
else:
sortgroup_id = self.model().rootItem.index_of(item.parent())
self.model().sort_group(sortgroup_id)
|
2,848 |
test initial view
|
import pytest
import os
import numpy as np
from PySide6.QtWidgets import QPushButton
from PySide6.QtGui import QColor
from src.constants import DEFAULT_WINDOW_SIZE
from src.Model.PTCTDictContainer import PTCTDictContainer
from src.Model.CalculateImages import convert_pt_to_heatmap
from src.Controller.GUIController import MainWindow
from src.Model.PatientDictContainer import PatientDictContainer
from src.View.ImageLoader import ImageLoading
from pydicom import dcmread
from pydicom.errors import InvalidDicomError
from pathlib import Path
def get_dicom_files(directory):
"""
Function to find DICOM files in a given folder.
:param directory: File path of folder to search.
:return: List of file paths of DICOM files in given folder.
"""
dicom_files = []
# Walk through directory
for root, dirs, files in os.walk(directory, topdown=True):
for name in files:
# Attempt to open file as a DICOM file
try:
dcmread(os.path.join(root, name))
except (InvalidDicomError, FileNotFoundError):
pass
else:
dicom_files.append(os.path.join(root, name))
return dicom_files
class TestPETCT:
"""
    Class to set up the OnkoDICOM main window for testing the PET/CT tab.
"""
__test__ = False
def __init__(self):
# Load test DICOM files and set path variable
path = Path.cwd().joinpath('test', 'testdata')
files = get_dicom_files(path) # list of DICOM test files
file_path = os.path.dirname(os.path.commonprefix(files))
read_data_dict, file_names_dict = ImageLoading.get_datasets(files)
# Create patient dict container object
patient_dict_container = PatientDictContainer()
patient_dict_container.clear()
patient_dict_container.set_initial_values(
file_path, read_data_dict, file_names_dict)
# Set additional attributes in patient dict container
# This prevents crashes
if "rtss" in file_names_dict:
dataset_rtss = dcmread(file_names_dict['rtss'])
self.rois = ImageLoading.get_roi_info(dataset_rtss)
patient_dict_container.set("rois", self.rois)
# Open the main window
self.main_window = MainWindow()
self.main_window.right_panel.setCurrentWidget(
self.main_window.pet_ct_tab)
self.pet_ct = self.main_window.pet_ct_tab
@pytest.fixture
def test_obj(qtbot):
"""
    Fixture that provides a TestPETCT object with the PET/CT tab selected.
    :return: TestPETCT instance
"""
pet_ct = TestPETCT()
return pet_ct
def METHOD_NAME(test_obj):
"""
    Tests that the initial values are set correctly.
"""
assert not test_obj.pet_ct.initialised
assert isinstance(test_obj.pet_ct.load_pet_ct_button, QPushButton)
test_pc_dict_container = PTCTDictContainer()
assert test_pc_dict_container.is_empty()
def test_color_image():
"""
Tests the heat map function maps correctly
"""
# Generate two images that mirror each other
color_1 = convert_pt_to_heatmap(grey_image(False))
color_2 = convert_pt_to_heatmap(grey_image(True))
# Check values map to their mirrored values
for i in range(DEFAULT_WINDOW_SIZE):
for j in range(DEFAULT_WINDOW_SIZE):
assert color_1.pixel(i, j) \
== color_2.pixel(
DEFAULT_WINDOW_SIZE-i-1, DEFAULT_WINDOW_SIZE-j-1)
# Test end values are black and white
for k in range(DEFAULT_WINDOW_SIZE):
assert QColor(color_1.pixel(0, k)).getRgb() \
== QColor(color_2.pixel(DEFAULT_WINDOW_SIZE-1, k)).getRgb() \
== (0, 0, 0, 255)
assert QColor(color_1.pixel(DEFAULT_WINDOW_SIZE-1, k)).getRgb() \
== QColor(color_2.pixel(0, k)).getRgb() \
== (255, 255, 255, 255)
def grey_image(flipped):
"""
    Generates one greyscale gradient image with dimensions
    DEFAULT_WINDOW_SIZE x DEFAULT_WINDOW_SIZE; the gradient is reversed when flipped is True.
"""
if flipped:
tmp = np.linspace(255, 0, DEFAULT_WINDOW_SIZE)
else:
tmp = np.linspace(0, 255, DEFAULT_WINDOW_SIZE)
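    # Stack DEFAULT_WINDOW_SIZE copies of the 1-D gradient to form a square 2-D image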
tmp_2 = np.stack(DEFAULT_WINDOW_SIZE * (tmp,))
return tmp_2
|
2,849 |
extend with default
|
#!/usr/bin/env python
#
# Copyright (C) 2016 GNS3 Technologies Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import copy
import uuid
import json
import jsonschema
from gns3server.schemas.cloud_template import CLOUD_TEMPLATE_OBJECT_SCHEMA
from gns3server.schemas.ethernet_switch_template import ETHERNET_SWITCH_TEMPLATE_OBJECT_SCHEMA
from gns3server.schemas.ethernet_hub_template import ETHERNET_HUB_TEMPLATE_OBJECT_SCHEMA
from gns3server.schemas.docker_template import DOCKER_TEMPLATE_OBJECT_SCHEMA
from gns3server.schemas.vpcs_template import VPCS_TEMPLATE_OBJECT_SCHEMA
from gns3server.schemas.traceng_template import TRACENG_TEMPLATE_OBJECT_SCHEMA
from gns3server.schemas.virtualbox_template import VIRTUALBOX_TEMPLATE_OBJECT_SCHEMA
from gns3server.schemas.vmware_template import VMWARE_TEMPLATE_OBJECT_SCHEMA
from gns3server.schemas.iou_template import IOU_TEMPLATE_OBJECT_SCHEMA
from gns3server.schemas.qemu_template import QEMU_TEMPLATE_OBJECT_SCHEMA
from gns3server.schemas.dynamips_template import (
DYNAMIPS_TEMPLATE_OBJECT_SCHEMA,
C7200_DYNAMIPS_TEMPLATE_OBJECT_SCHEMA,
C3745_DYNAMIPS_TEMPLATE_OBJECT_SCHEMA,
C3725_DYNAMIPS_TEMPLATE_OBJECT_SCHEMA,
C3600_DYNAMIPS_TEMPLATE_OBJECT_SCHEMA,
C2691_DYNAMIPS_TEMPLATE_OBJECT_SCHEMA,
C2600_DYNAMIPS_TEMPLATE_OBJECT_SCHEMA,
C1700_DYNAMIPS_TEMPLATE_OBJECT_SCHEMA
)
import logging
log = logging.getLogger(__name__)
# Add default values for missing entries in a request, largely taken from jsonschema documentation example
# https://python-jsonschema.readthedocs.io/en/latest/faq/#why-doesn-t-my-schema-s-default-property-set-the-default-on-my-instance
def METHOD_NAME(validator_class):
validate_properties = validator_class.VALIDATORS["properties"]
def set_defaults(validator, properties, instance, schema):
if jsonschema.Draft4Validator(schema).is_valid(instance):
# only add default for the matching sub-schema (e.g. when using 'oneOf')
for property, subschema in properties.items():
if "default" in subschema:
instance.setdefault(property, subschema["default"])
for error in validate_properties(validator, properties, instance, schema,):
yield error
return jsonschema.validators.extend(
validator_class, {"properties" : set_defaults},
)
ValidatorWithDefaults = METHOD_NAME(jsonschema.Draft4Validator)
ID_TO_CATEGORY = {
3: "firewall",
2: "guest",
1: "switch",
0: "router"
}
TEMPLATE_TYPE_TO_SHEMA = {
"cloud": CLOUD_TEMPLATE_OBJECT_SCHEMA,
"ethernet_hub": ETHERNET_HUB_TEMPLATE_OBJECT_SCHEMA,
"ethernet_switch": ETHERNET_SWITCH_TEMPLATE_OBJECT_SCHEMA,
"docker": DOCKER_TEMPLATE_OBJECT_SCHEMA,
"dynamips": DYNAMIPS_TEMPLATE_OBJECT_SCHEMA,
"vpcs": VPCS_TEMPLATE_OBJECT_SCHEMA,
"traceng": TRACENG_TEMPLATE_OBJECT_SCHEMA,
"virtualbox": VIRTUALBOX_TEMPLATE_OBJECT_SCHEMA,
"vmware": VMWARE_TEMPLATE_OBJECT_SCHEMA,
"iou": IOU_TEMPLATE_OBJECT_SCHEMA,
"qemu": QEMU_TEMPLATE_OBJECT_SCHEMA
}
DYNAMIPS_PLATFORM_TO_SHEMA = {
"c7200": C7200_DYNAMIPS_TEMPLATE_OBJECT_SCHEMA,
"c3745": C3745_DYNAMIPS_TEMPLATE_OBJECT_SCHEMA,
"c3725": C3725_DYNAMIPS_TEMPLATE_OBJECT_SCHEMA,
"c3600": C3600_DYNAMIPS_TEMPLATE_OBJECT_SCHEMA,
"c2691": C2691_DYNAMIPS_TEMPLATE_OBJECT_SCHEMA,
"c2600": C2600_DYNAMIPS_TEMPLATE_OBJECT_SCHEMA,
"c1700": C1700_DYNAMIPS_TEMPLATE_OBJECT_SCHEMA
}
class Template:
def __init__(self, template_id, settings, builtin=False):
if template_id is None:
self._id = str(uuid.uuid4())
elif isinstance(template_id, uuid.UUID):
self._id = str(template_id)
else:
self._id = template_id
self._settings = copy.deepcopy(settings)
        # Versions of the GUI before 2.1 used "linked_base"
        # while the server uses "linked_clone"
if "linked_base" in self.settings:
linked_base = self._settings.pop("linked_base")
if "linked_clone" not in self._settings:
self._settings["linked_clone"] = linked_base
# Convert old GUI category to text category
try:
self._settings["category"] = ID_TO_CATEGORY[self._settings["category"]]
except KeyError:
pass
# The "server" setting has been replaced by "compute_id" setting in version 2.2
if "server" in self._settings:
self._settings["compute_id"] = self._settings.pop("server")
# The "node_type" setting has been replaced by "template_type" setting in version 2.2
if "node_type" in self._settings:
self._settings["template_type"] = self._settings.pop("node_type")
# Remove an old IOU setting
if self._settings["template_type"] == "iou" and "image" in self._settings:
del self._settings["image"]
self._builtin = builtin
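        # Builtin templates are trusted as-is; only user-defined templates are validated against the schema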
if builtin is False:
self.validate_and_apply_defaults(TEMPLATE_TYPE_TO_SHEMA[self.template_type])
if self.template_type == "dynamips":
# special case for Dynamips to cover all platform types that contain specific settings
self.validate_and_apply_defaults(DYNAMIPS_PLATFORM_TO_SHEMA[self._settings["platform"]])
log.debug('Template "{name}" [{id}] loaded'.format(name=self.name, id=self._id))
@property
def id(self):
return self._id
@property
def settings(self):
return self._settings
@settings.setter
def settings(self, settings):
self._settings.update(settings)
@property
def name(self):
return self._settings["name"]
@property
def compute_id(self):
return self._settings["compute_id"]
@property
def template_type(self):
return self._settings["template_type"]
@property
def builtin(self):
return self._builtin
def update(self, **kwargs):
from gns3server.controller import Controller
controller = Controller.instance()
Controller.instance().check_can_write_config()
self._settings.update(kwargs)
controller.notification.controller_emit("template.updated", self.__json__())
controller.save()
def validate_and_apply_defaults(self, schema):
validator = ValidatorWithDefaults(schema)
try:
validator.validate(self.__json__())
except jsonschema.ValidationError as e:
message = "JSON schema error {}".format(e.message)
log.error(message)
log.debug("Input schema: {}".format(json.dumps(schema)))
raise
def __json__(self):
"""
Template settings.
"""
settings = self._settings
settings.update({"template_id": self._id,
"builtin": self.builtin})
if self.builtin:
            # builtin templates have compute_id set to None to tell clients
            # to select a compute
settings["compute_id"] = None
else:
settings["compute_id"] = self.compute_id
return settings
|
2,850 |
test slowfast backbone
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import pytest
import torch
from mmaction.models import FeatureHead
from mmaction.registry import MODELS
from mmaction.structures import ActionDataSample
from mmaction.testing import get_recognizer_cfg
from mmaction.utils import register_all_modules
class TestFeatureHead(TestCase):
def test_2d_recognizer(self):
register_all_modules()
config = get_recognizer_cfg(
'tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb.py' # noqa: E501
)
config.model['backbone']['pretrained'] = None
config.model['cls_head'] = dict(
type='FeatureHead', average_clips='score')
recognizer = MODELS.build(config.model)
input_shape = [3, 3, 32, 32]
data_batch = {
'inputs': [torch.randint(0, 256, input_shape)],
'data_samples': [ActionDataSample().set_gt_labels(2)]
}
feat = recognizer.test_step(data_batch)
assert isinstance(feat, torch.Tensor)
assert feat.shape == torch.Size([1, 2048])
def test_3d_recognizer(self):
register_all_modules()
config = get_recognizer_cfg(
'slowonly/slowonly_r50_8xb16-4x16x1-256e_kinetics400-rgb.py')
config.model['backbone']['pretrained'] = None
config.model['backbone']['pretrained2d'] = False
config.model['cls_head'] = dict(
type='FeatureHead', average_clips='score')
recognizer = MODELS.build(config.model)
input_shape = [1, 3, 4, 32, 32]
data_batch = {
'inputs': [torch.randint(0, 256, input_shape)],
'data_samples': [ActionDataSample().set_gt_labels(2)]
}
feat = recognizer.test_step(data_batch)
assert isinstance(feat, torch.Tensor)
assert feat.shape == torch.Size([1, 2048])
def test_3d_backbone(self):
with pytest.raises(NotImplementedError):
head = FeatureHead(spatial_type='test')
head = FeatureHead(average_clips='score')
x = torch.rand(1, 64, 2, 7, 7)
feat = head(x)
assert feat.shape == torch.Size([1, 64])
head = FeatureHead(spatial_type=None, average_clips='score')
feat = head(x)
assert feat.shape == torch.Size([1, 64, 7, 7])
head = FeatureHead(temporal_type=None, average_clips='score')
feat = head(x)
assert feat.shape == torch.Size([1, 64, 2])
head = FeatureHead(
spatial_type=None, temporal_type=None, average_clips='score')
feat = head(x)
assert feat.shape == torch.Size([1, 64, 2, 7, 7])
def METHOD_NAME(self):
head = FeatureHead(backbone_name='slowfast', average_clips='score')
x_slow = torch.rand(1, 64, 2, 7, 7)
x_fast = torch.rand(1, 32, 6, 7, 7)
x = (x_slow, x_fast)
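        # SlowFast returns one feature map per pathway; after pooling they are concatenated: 64 + 32 = 96 channels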
feat = head(x)
assert feat.shape == torch.Size([1, 96])
head = FeatureHead(
backbone_name='slowfast', spatial_type=None, average_clips='score')
feat = head(x)
assert feat.shape == torch.Size([1, 96, 7, 7])
with pytest.raises(AssertionError):
head = FeatureHead(
backbone_name='slowfast',
temporal_type=None,
average_clips='score')
feat = head(x)
def test_2d_backbone(self):
head = FeatureHead(average_clips='score')
x = torch.rand(2, 64, 7, 7)
with pytest.raises(AssertionError):
feat = head(x)
feat = head(x, num_segs=2)
assert feat.shape == torch.Size([1, 64])
x = torch.rand(2, 64, 7, 7)
head = FeatureHead(spatial_type=None, average_clips='score')
feat = head(x, num_segs=2)
assert feat.shape == torch.Size([1, 64, 7, 7])
head = FeatureHead(temporal_type=None, average_clips='score')
feat = head(x, num_segs=2)
assert feat.shape == torch.Size([1, 2, 64])
def test_tsm_backbone(self):
head = FeatureHead(backbone_name='tsm', average_clips='score')
x = torch.rand(2, 64, 7, 7)
with pytest.raises(AssertionError):
feat = head(x)
with pytest.raises(AssertionError):
feat = head(x, num_segs=2)
head = FeatureHead(num_segments=2, average_clips='score')
feat = head(x, num_segs=2)
assert feat.shape == torch.Size([1, 64])
x = torch.rand(2, 64, 7, 7)
head = FeatureHead(
num_segments=2, spatial_type=None, average_clips='score')
feat = head(x, num_segs=2)
assert feat.shape == torch.Size([1, 64, 7, 7])
def test_gcn_backbone(self):
# N, M, C, T, V
head = FeatureHead(backbone_name='gcn', average_clips='score')
x = torch.rand(1, 5, 64, 2, 7)
feat = head(x)
assert feat.shape == torch.Size([1, 64])
|
2,851 |
theme changed
|
# share_window.py
#
# Change the look of Adwaita, with ease
# Copyright (C) 2022 Gradience Team
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import time
from gi.repository import Gtk, Adw, Gio
from gradience.frontend.utils.run_async import RunAsync
from gradience.backend.constants import rootdir, app_id
from gradience.backend.logger import Logger
logging = Logger()
@Gtk.Template(resource_path=f"{rootdir}/ui/share_window.ui")
class GradienceShareWindow(Adw.Window):
__gtype_name__ = "GradienceShareWindow"
settings = Gtk.Settings.get_default()
btn_close = Gtk.Template.Child()
btn_back = Gtk.Template.Child()
btn_next = Gtk.Template.Child()
btn_install = Gtk.Template.Child()
btn_agree = Gtk.Template.Child()
carousel = Gtk.Template.Child()
switch_system = Gtk.Template.Child()
switch_adw_gtk3 = Gtk.Template.Child()
progressbar = Gtk.Template.Child()
img_welcome = Gtk.Template.Child()
label_skip = Gtk.Template.Child()
images = [
f"{rootdir}/images/welcome.svg",
f"{rootdir}/images/welcome-dark.svg",
]
carousel_pages = [
"welcome", # 0
"gradience", # 1
"configure", # 2
"download", # 3
"finish", # 4
]
page_welcome = Gtk.Template.Child()
page_release = Gtk.Template.Child()
def __init__(self, window, **kwargs) -> None:
super().__init__(**kwargs)
self.set_transient_for(window)
# common variables and references
self.window = window
self.gio_settings = Gio.Settings(app_id)
# connect signals
self.carousel.connect("page-changed", self.page_changed)
self.btn_close.connect("clicked", self.close_window)
self.btn_back.connect("clicked", self.previous_page)
self.btn_next.connect("clicked", self.next_page)
self.btn_install.connect("clicked", self.install_runner)
self.settings.connect(
"notify::gtk-application-prefer-dark-theme", self.METHOD_NAME
)
self.connect("close-request", self.quit)
self.btn_close.set_sensitive(False)
if self.settings.get_property("gtk-application-prefer-dark-theme"):
self.img_welcome.set_from_resource(self.images[1])
self.page_changed()
def METHOD_NAME(self, settings, key):
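        # The boolean dark-theme preference indexes the images list: 0 = light variant, 1 = dark variant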
self.img_welcome.set_from_resource(
self.images[settings.get_property(
"gtk-application-prefer-dark-theme")]
)
def get_page(self, index):
return self.carousel_pages[index]
def page_changed(self, widget=False, index=0, *_args):
"""
        This function is called on first load and whenever the user requests
        a page change. It sets the widget states according to the current
        step of the onboarding progress.
"""
page = self.get_page(index)
if page == "finish":
self.btn_back.set_visible(False)
self.btn_next.set_visible(False)
self.carousel.set_interactive(False)
elif page == "download":
self.btn_back.set_visible(True)
self.btn_next.set_visible(False)
self.btn_install.set_visible(True)
self.carousel.set_interactive(False)
elif page == "welcome":
self.btn_back.set_visible(False)
self.btn_next.set_visible(True)
self.carousel.set_interactive(True)
else:
self.btn_back.set_visible(True)
self.btn_next.set_visible(True)
self.btn_install.set_visible(False)
self.carousel.set_interactive(True)
def quit(self, *args):
self.destroy()
def install_runner(self, widget):
def set_completed(result, error=False):
self.label_skip.set_visible(False)
self.btn_close.set_sensitive(True)
self.window.settings.set_boolean("first-run", False)
self.next_page()
self.installing = True
self.set_deletable(False)
def install():
logging.debug("Installing Gradience…")
RunAsync(self.pulse)
RunAsync(
install,
callback=set_completed,
)
def previous_page(self, widget=False, index=None):
if index is None:
index = int(self.carousel.get_position())
previous_page = self.carousel.get_nth_page(index - 1)
self.carousel.scroll_to(previous_page, True)
def next_page(self, widget=False, index=None):
if index is None:
index = int(self.carousel.get_position())
next_page = self.carousel.get_nth_page(index + 1)
self.carousel.scroll_to(next_page, True)
def pulse(self):
        # This function pulses the progress bar every 0.5 s.
while True:
time.sleep(0.5)
self.progressbar.pulse()
def close_window(self, widget):
self.destroy()
self.window.present()
|
2,852 |
wait for pgbench status
|
"""
ScalePodPGSQL workload class for scale
"""
import logging
from ocs_ci.framework import config
from ocs_ci.helpers import helpers
from ocs_ci.ocs import constants, machine, node
from ocs_ci.ocs.exceptions import UnsupportedPlatformError
from ocs_ci.ocs.pgsql import Postgresql
from ocs_ci.utility import templating
from ocs_ci.utility.utils import update_container_with_mirrored_image
log = logging.getLogger(__name__)
class ScalePodPGSQL(Postgresql):
"""
Scale Postgresql workload with scale parameters and functions
"""
def __init__(self, node_selector=constants.SCALE_NODE_SELECTOR, **kwargs):
"""
Initializer function
"""
super().__init__(**kwargs)
Postgresql.deploy(self)
self._node_selector = node_selector
def setup_postgresql(self, replicas, node_selector=None):
# Node selector for postgresql
pgsql_sset = templating.load_yaml(constants.PGSQL_STATEFULSET_YAML)
update_container_with_mirrored_image(pgsql_sset)
if node_selector is not None:
pgsql_sset["spec"]["template"]["spec"]["nodeSelector"] = node_selector
if helpers.storagecluster_independent_check():
pgsql_sset["spec"]["volumeClaimTemplates"][0]["metadata"]["annotations"][
"volume.beta.kubernetes.io/storage-class"
] = constants.DEFAULT_EXTERNAL_MODE_STORAGECLASS_RBD
Postgresql.setup_postgresql(self, replicas=replicas)
def _create_pgbench_benchmark(
self,
replicas,
clients=None,
threads=None,
transactions=None,
scaling_factor=None,
timeout=None,
):
Postgresql.create_pgbench_benchmark(
self,
replicas,
clients=clients,
threads=threads,
transactions=transactions,
scaling_factor=scaling_factor,
timeout=timeout,
)
def METHOD_NAME(self, status, timeout=None):
Postgresql.wait_for_postgres_status(self, status=status, timeout=timeout)
def _get_pgbench_pods(self):
        Postgresql.get_pgbench_pods(self)
def _validate_pgbench_run(self, pgbench_pods, print_table=True):
        Postgresql.validate_pgbench_run(self, pgbench_pods, print_table=print_table)
def cleanup(self):
# Cleanup postgresql
Postgresql.cleanup(self)
# Remove scale label and delete machineset
delete_worker_node()
def add_worker_node(instance_type=None):
global ms_name
ms_name = list()
worker_list = node.get_worker_nodes()
ocs_worker_list = machine.get_labeled_nodes(constants.OPERATOR_NODE_LABEL)
scale_worker = machine.get_labeled_nodes(constants.SCALE_LABEL)
if config.RUN.get("use_ocs_worker_for_scale"):
if not scale_worker:
helpers.label_worker_node(
node_list=worker_list, label_key="scale-label", label_value="app-scale"
)
else:
if not scale_worker:
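            # Exclude OCS-labelled workers before applying the scale label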
for node_item in ocs_worker_list:
worker_list.remove(node_item)
if worker_list:
helpers.label_worker_node(
node_list=worker_list,
label_key="scale-label",
label_value="app-scale",
)
scale_worker_list = machine.get_labeled_nodes(constants.SCALE_LABEL)
log.info(f"Print existing scale worker {scale_worker_list}")
if (
config.ENV_DATA["deployment_type"] == "ipi"
and config.ENV_DATA["platform"].lower() == "aws"
):
log.info("Adding worker nodes on the current cluster")
labels = [("node-role.kubernetes.io/app", "app-scale")]
# Create machineset for app worker nodes on each zone
for obj in machine.get_machineset_objs():
if "app" in obj.name:
ms_name.append(obj.name)
        if instance_type is None:
            instance_type = "m5.4xlarge"
if not ms_name:
if len(machine.get_machineset_objs()) == 3:
for zone in ["a", "b", "c"]:
ms_name.append(
machine.create_custom_machineset(
instance_type=instance_type,
labels=labels,
zone=zone,
)
)
else:
ms_name.append(
machine.create_custom_machineset(
instance_type=instance_type,
labels=labels,
zone="a",
)
)
for ms in ms_name:
machine.wait_for_new_node_to_be_ready(ms)
worker_list = node.get_worker_nodes()
ocs_worker_list = machine.get_labeled_nodes(constants.OPERATOR_NODE_LABEL)
scale_label_worker = machine.get_labeled_nodes(constants.SCALE_LABEL)
ocs_worker_list.extend(scale_label_worker)
final_list = list(dict.fromkeys(ocs_worker_list))
for node_item in final_list:
if node_item in worker_list:
worker_list.remove(node_item)
if worker_list:
helpers.label_worker_node(
node_list=worker_list, label_key="scale-label", label_value="app-scale"
)
return True
elif (
config.ENV_DATA["deployment_type"] == "upi"
and config.ENV_DATA["platform"].lower() == "vsphere"
):
log.info("Running scale test on existing worker nodes.")
elif (
config.ENV_DATA["deployment_type"] == "upi"
and config.ENV_DATA["platform"].lower() == "baremetal"
):
log.info("Running scale test on existing worker nodes.")
elif (
config.ENV_DATA["deployment_type"] == "upi"
and config.ENV_DATA["platform"].lower() == "azure"
):
raise UnsupportedPlatformError("Unsupported Platform")
def delete_worker_node():
# Remove scale label from worker nodes
scale_workers = machine.get_labeled_nodes(constants.SCALE_LABEL)
if scale_workers:
helpers.remove_label_from_worker_node(
node_list=scale_workers, label_key="scale-label"
)
# Delete machineset
if ms_name:
for name in ms_name:
machine.delete_custom_machineset(name)
|
2,853 |
type
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetDatastoreResult',
'AwaitableGetDatastoreResult',
'get_datastore',
'get_datastore_output',
]
@pulumi.output_type
class GetDatastoreResult:
"""
A datastore resource
"""
def __init__(__self__, disk_pool_volume=None, id=None, name=None, net_app_volume=None, provisioning_state=None, status=None, METHOD_NAME=None):
if disk_pool_volume and not isinstance(disk_pool_volume, dict):
raise TypeError("Expected argument 'disk_pool_volume' to be a dict")
pulumi.set(__self__, "disk_pool_volume", disk_pool_volume)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if net_app_volume and not isinstance(net_app_volume, dict):
raise TypeError("Expected argument 'net_app_volume' to be a dict")
pulumi.set(__self__, "net_app_volume", net_app_volume)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if status and not isinstance(status, str):
raise TypeError("Expected argument 'status' to be a str")
pulumi.set(__self__, "status", status)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", METHOD_NAME)
@property
@pulumi.getter(name="diskPoolVolume")
def disk_pool_volume(self) -> Optional['outputs.DiskPoolVolumeResponse']:
"""
An iSCSI volume
"""
return pulumi.get(self, "disk_pool_volume")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="netAppVolume")
def net_app_volume(self) -> Optional['outputs.NetAppVolumeResponse']:
"""
An Azure NetApp Files volume
"""
return pulumi.get(self, "net_app_volume")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The state of the datastore provisioning
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def status(self) -> str:
"""
The operational status of the datastore
"""
return pulumi.get(self, "status")
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetDatastoreResult(GetDatastoreResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetDatastoreResult(
disk_pool_volume=self.disk_pool_volume,
id=self.id,
name=self.name,
net_app_volume=self.net_app_volume,
provisioning_state=self.provisioning_state,
status=self.status,
METHOD_NAME=self.METHOD_NAME)
def get_datastore(cluster_name: Optional[str] = None,
datastore_name: Optional[str] = None,
private_cloud_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDatastoreResult:
"""
A datastore resource
Azure REST API version: 2022-05-01.
:param str cluster_name: Name of the cluster in the private cloud
:param str datastore_name: Name of the datastore in the private cloud cluster
:param str private_cloud_name: Name of the private cloud
:param str resource_group_name: The name of the resource group. The name is case insensitive.
"""
__args__ = dict()
__args__['clusterName'] = cluster_name
__args__['datastoreName'] = datastore_name
__args__['privateCloudName'] = private_cloud_name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:avs:getDatastore', __args__, opts=opts, typ=GetDatastoreResult).value
return AwaitableGetDatastoreResult(
disk_pool_volume=pulumi.get(__ret__, 'disk_pool_volume'),
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
net_app_volume=pulumi.get(__ret__, 'net_app_volume'),
provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
status=pulumi.get(__ret__, 'status'),
METHOD_NAME=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_datastore)
def get_datastore_output(cluster_name: Optional[pulumi.Input[str]] = None,
datastore_name: Optional[pulumi.Input[str]] = None,
private_cloud_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetDatastoreResult]:
"""
A datastore resource
Azure REST API version: 2022-05-01.
:param str cluster_name: Name of the cluster in the private cloud
:param str datastore_name: Name of the datastore in the private cloud cluster
:param str private_cloud_name: Name of the private cloud
:param str resource_group_name: The name of the resource group. The name is case insensitive.
"""
...
|
2,854 |
process request parameters
|
import time
from pyderivationagent import DerivationAgent
from pyderivationagent import DerivationInputs
from pyderivationagent import DerivationOutputs
from rxnoptgoaliteragent.kg_operations import *
from rxnoptgoaliteragent.data_model import *
class RxnOptGoalIterAgent(DerivationAgent):
def __init__(
self,
**kwargs
):
super().__init__(**kwargs)
self.sparql_client = self.get_sparql_client(RxnOptGoalIterSparqlClient)
def agent_input_concepts(self) -> list:
return [ONTOGOAL_GOALSET, ONTOREACTION_CHEMICALREACTION, ONTOREACTION_REACTIONEXPERIMENT, ONTOLAB_LABORATORY]
def agent_output_concepts(self) -> list:
return [ONTOGOAL_RESULT]
def validate_inputs(self, http_request) -> bool:
return super().validate_inputs(http_request)
def METHOD_NAME(self, derivation_inputs: DerivationInputs, derivation_outputs: DerivationOutputs):
# I. Get the goal set
list_goal_set_iri = derivation_inputs.getIris(ONTOGOAL_GOALSET)
if len(list_goal_set_iri) != 1:
raise Exception(f"Exactly one goal set is expected, but found: {list_goal_set_iri}")
goal_set_iri = list_goal_set_iri[0]
goal_set_instance = self.sparql_client.get_goal_set_instance(goal_set_iri)
# II. Get the chemical reaction and reaction experiment
        # NOTE reaction experiment might not be in the derivation inputs as this might be the first reaction where no prior data is available
# Check if the input is in correct format, and return OntoReaction.ReactionExperiment/ReactionVariation instance
list_chemical_reaction_iri = derivation_inputs.getIris(ONTOREACTION_CHEMICALREACTION)
if len(list_chemical_reaction_iri) != 1:
raise Exception(f"Exactly one chemical reaction is expected, but found: {list_chemical_reaction_iri}")
chem_rxn_iri = list_chemical_reaction_iri[0]
chem_rxn_instance = self.sparql_client.get_chemical_reaction_given_iri(chem_rxn_iri)
if chem_rxn_instance.hasDoETemplate is None:
raise Exception(f"ChemicalReaction {chem_rxn_iri} does not have a DoE template")
_full_derivation_inputs = derivation_inputs.getInputs()
list_rxn_exp_instance = None
if ONTOREACTION_REACTIONEXPERIMENT in _full_derivation_inputs:
list_rxn_exp_instance = self.sparql_client.getReactionExperiment(derivation_inputs.getIris(ONTOREACTION_REACTIONEXPERIMENT))
# III. Get the laboratory
        # If not provided as input, the vapourtec schedule agent will assume all laboratories in the knowledge graph are available
list_laboratory_iri = derivation_inputs.getIris(ONTOLAB_LABORATORY) if ONTOLAB_LABORATORY in derivation_inputs.getInputs() else []
# IV. Create a set of derivations
# Create and upload the DesignOfExperiment triples to triple store as pure input
# NOTE the timestamp will be added automatically when marking up the derivations
        # NOTE not all list_rxn_exp_instance passed in will be used, only those that have reaction conditions within the range will be used
doe_instance = self.sparql_client.generate_doe_instance_from_goal(
goal_set=goal_set_instance,
chem_rxn=chem_rxn_instance,
rxn_exp_as_beliefs=list_rxn_exp_instance,
)
g = Graph()
g = doe_instance.create_instance_for_kg(g)
self.sparql_client.uploadGraph(g)
# Get plan and steps
        # TODO: [next iteration] implement a more generic way of processing plan and step, here we took a shortcut that only works when all goals share the same plan and step
plan = goal_set_instance.hasGoal[0].hasPlan[0]
doe_step = plan.get_step(ONTOGOAL_DESIGNOFEXPERIMENT)
exe_step = plan.get_step(ONTOGOAL_RXNEXPEXECUTION)
postpro_step = plan.get_step(ONTOGOAL_POSTPROCESSING)
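        # Chain the derivations so each step consumes the previous step's output: DoE -> execution -> post-processing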
# Create derivation instance for new information, the timestamp of this derivation is 0
# TODO: [next iteration] implement a more generic way of deciding the agent to perform the derivation, here we took a shortcut to use the first agent (Step.canBePerformedBy[0])
        # TODO: consider how DoEAgent should handle the case where list_laboratory_iri contains two or more laboratories when locating possible chemical reactions (input amount, different internal standard)
doe_derivation_iri = self.derivation_client.createAsyncDerivationForNewInfo(doe_step.canBePerformedBy[0], [doe_instance.instance_iri] + list_laboratory_iri)
self.logger.info(f"Initialised successfully, created asynchronous doe derivation instance: {doe_derivation_iri}")
exe_derivation_iri = self.derivation_client.createAsyncDerivationForNewInfo(exe_step.canBePerformedBy[0], [doe_derivation_iri] + list_laboratory_iri)
self.logger.info(f"Initialised successfully, created asynchronous exe derivation instance: {exe_derivation_iri}")
postpro_derivation_iri = self.derivation_client.createAsyncDerivationForNewInfo(postpro_step.canBePerformedBy[0], [exe_derivation_iri])
self.logger.info(f"Initialised successfully, created asynchronous postproc derivation instance: {postpro_derivation_iri}")
# V. Create derivation outputs after experiment is finished
# Monitor the status of the postpro_derivation_iri, until it produced outputs
interested_performance_indicators = [goal.desires().clz for goal in goal_set_instance.hasGoal]
new_rxn_exp = self.sparql_client.detect_postpro_derivation_result(postpro_derivation_iri, interested_performance_indicators)
while not new_rxn_exp:
time.sleep(30)
try:
new_rxn_exp = self.sparql_client.detect_postpro_derivation_result(postpro_derivation_iri, interested_performance_indicators)
except Exception as e:
self.logger.error(f"Error in detecting postpro derivation result: {e}")
new_rxn_exp = None
# Add the Result instances to the derivation_outputs
dct_desires_iri_clz = {goal.desires().instance_iri:goal.desires().clz for goal in goal_set_instance.hasGoal}
list_results = []
for desire_iri in dct_desires_iri_clz:
result_iri = initialiseInstanceIRI(getNameSpace(goal_set_iri), ONTOGOAL_RESULT)
derivation_outputs.createNewEntity(result_iri, ONTOGOAL_RESULT)
_goal = goal_set_instance.get_goal_given_desired_quantity(desire_iri).instance_iri
_quantity = new_rxn_exp.get_performance_indicator(dct_desires_iri_clz[desire_iri], None).instance_iri
derivation_outputs.addTriple(_goal, ONTOGOAL_HASRESULT, _quantity)
derivation_outputs.addTriple(result_iri, ONTOGOAL_REFERSTO, _quantity)
list_results.append(result_iri)
self.logger.info(f"The generated Result for GoalSet <{goal_set_iri}> can be identified as: {list_results}")
# Show an instructional message at the RxnOptGoalIterAgent servlet root
def default():
"""
Instructional message at the app root.
"""
msg = "This is an asynchronous agent that capable of conducting one iteration of pursuring the reaction optimisation goal.<BR>"
msg += "For more information, please visit https://github.com/cambridge-cares/TheWorldAvatar/tree/main/Agents/RxnOptGoalIterAgent#readme<BR>"
return msg
|
2,855 |
create ui commands
|
from openpilot.common.conversions import Conversions as CV
from openpilot.selfdrive.car.honda.values import HondaFlags, HONDA_BOSCH, HONDA_BOSCH_RADARLESS, CAR, CarControllerParams
# CAN bus layout with relay
# 0 = ACC-CAN - radar side
# 1 = F-CAN B - powertrain
# 2 = ACC-CAN - camera side
# 3 = F-CAN A - OBDII port
def get_pt_bus(car_fingerprint):
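  # Bosch cars that keep the stock radar use F-CAN B (bus 1) for powertrain messages; all other cars use bus 0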
return 1 if car_fingerprint in (HONDA_BOSCH - HONDA_BOSCH_RADARLESS) else 0
def get_lkas_cmd_bus(car_fingerprint, radar_disabled=False):
no_radar = car_fingerprint in HONDA_BOSCH_RADARLESS
if radar_disabled or no_radar:
# when radar is disabled, steering commands are sent directly to powertrain bus
return get_pt_bus(car_fingerprint)
# normally steering commands are sent to radar, which forwards them to powertrain bus
return 0
def get_cruise_speed_conversion(car_fingerprint: str, is_metric: bool) -> float:
# on certain cars, CRUISE_SPEED changes to imperial with car's unit setting
return CV.MPH_TO_MS if car_fingerprint in HONDA_BOSCH_RADARLESS and not is_metric else CV.KPH_TO_MS
def create_brake_command(packer, apply_brake, pump_on, pcm_override, pcm_cancel_cmd, fcw, car_fingerprint, stock_brake):
  # TODO: do we lose pressure if we keep the pump off for long?
brakelights = apply_brake > 0
brake_rq = apply_brake > 0
pcm_fault_cmd = False
values = {
"COMPUTER_BRAKE": apply_brake,
"BRAKE_PUMP_REQUEST": pump_on,
"CRUISE_OVERRIDE": pcm_override,
"CRUISE_FAULT_CMD": pcm_fault_cmd,
"CRUISE_CANCEL_CMD": pcm_cancel_cmd,
"COMPUTER_BRAKE_REQUEST": brake_rq,
"SET_ME_1": 1,
"BRAKE_LIGHTS": brakelights,
"CHIME": stock_brake["CHIME"] if fcw else 0, # send the chime for stock fcw
"FCW": fcw << 1, # TODO: Why are there two bits for fcw?
"AEB_REQ_1": 0,
"AEB_REQ_2": 0,
"AEB_STATUS": 0,
}
bus = get_pt_bus(car_fingerprint)
return packer.make_can_msg("BRAKE_COMMAND", bus, values)
def create_acc_commands(packer, enabled, active, accel, gas, stopping_counter, car_fingerprint):
commands = []
bus = get_pt_bus(car_fingerprint)
min_gas_accel = CarControllerParams.BOSCH_GAS_LOOKUP_BP[0]
control_on = 5 if enabled else 0
gas_command = gas if active and accel > min_gas_accel else -30000
accel_command = accel if active else 0
braking = 1 if active and accel < min_gas_accel else 0
standstill = 1 if active and stopping_counter > 0 else 0
standstill_release = 1 if active and stopping_counter == 0 else 0
# common ACC_CONTROL values
acc_control_values = {
'ACCEL_COMMAND': accel_command,
'STANDSTILL': standstill,
}
if car_fingerprint in HONDA_BOSCH_RADARLESS:
acc_control_values.update({
"CONTROL_ON": enabled,
"IDLESTOP_ALLOW": stopping_counter > 200, # allow idle stop after 4 seconds (50 Hz)
})
else:
acc_control_values.update({
# setting CONTROL_ON causes car to set POWERTRAIN_DATA->ACC_STATUS = 1
"CONTROL_ON": control_on,
"GAS_COMMAND": gas_command, # used for gas
"BRAKE_LIGHTS": braking,
"BRAKE_REQUEST": braking,
"STANDSTILL_RELEASE": standstill_release,
})
acc_control_on_values = {
"SET_TO_3": 0x03,
"CONTROL_ON": enabled,
"SET_TO_FF": 0xff,
"SET_TO_75": 0x75,
"SET_TO_30": 0x30,
}
commands.append(packer.make_can_msg("ACC_CONTROL_ON", bus, acc_control_on_values))
commands.append(packer.make_can_msg("ACC_CONTROL", bus, acc_control_values))
return commands
def create_steering_control(packer, apply_steer, lkas_active, car_fingerprint, radar_disabled):
values = {
"STEER_TORQUE": apply_steer if lkas_active else 0,
"STEER_TORQUE_REQUEST": lkas_active,
}
bus = get_lkas_cmd_bus(car_fingerprint, radar_disabled)
return packer.make_can_msg("STEERING_CONTROL", bus, values)
def create_bosch_supplemental_1(packer, car_fingerprint):
# non-active params
values = {
"SET_ME_X04": 0x04,
"SET_ME_X80": 0x80,
"SET_ME_X10": 0x10,
}
bus = get_lkas_cmd_bus(car_fingerprint)
return packer.make_can_msg("BOSCH_SUPPLEMENTAL_1", bus, values)
def METHOD_NAME(packer, CP, enabled, pcm_speed, hud, is_metric, acc_hud, lkas_hud):
commands = []
bus_pt = get_pt_bus(CP.carFingerprint)
radar_disabled = CP.carFingerprint in (HONDA_BOSCH - HONDA_BOSCH_RADARLESS) and CP.openpilotLongitudinalControl
bus_lkas = get_lkas_cmd_bus(CP.carFingerprint, radar_disabled)
if CP.openpilotLongitudinalControl:
acc_hud_values = {
'CRUISE_SPEED': hud.v_cruise,
'ENABLE_MINI_CAR': 1 if enabled else 0,
'HUD_DISTANCE': 0, # max distance setting on display
'IMPERIAL_UNIT': int(not is_metric),
'HUD_LEAD': 2 if enabled and hud.lead_visible else 1 if enabled else 0,
'SET_ME_X01_2': 1,
}
if CP.carFingerprint in HONDA_BOSCH:
acc_hud_values['ACC_ON'] = int(enabled)
acc_hud_values['FCM_OFF'] = 1
acc_hud_values['FCM_OFF_2'] = 1
else:
acc_hud_values['PCM_SPEED'] = pcm_speed * CV.MS_TO_KPH
acc_hud_values['PCM_GAS'] = hud.pcm_accel
acc_hud_values['SET_ME_X01'] = 1
acc_hud_values['FCM_OFF'] = acc_hud['FCM_OFF']
acc_hud_values['FCM_OFF_2'] = acc_hud['FCM_OFF_2']
acc_hud_values['FCM_PROBLEM'] = acc_hud['FCM_PROBLEM']
acc_hud_values['ICONS'] = acc_hud['ICONS']
commands.append(packer.make_can_msg("ACC_HUD", bus_pt, acc_hud_values))
lkas_hud_values = {
'SET_ME_X41': 0x41,
'STEERING_REQUIRED': hud.steer_required,
'SOLID_LANES': hud.lanes_visible,
'BEEP': 0,
}
if CP.carFingerprint in HONDA_BOSCH_RADARLESS:
lkas_hud_values['LANE_LINES'] = 3
lkas_hud_values['DASHED_LANES'] = hud.lanes_visible
# car likely needs to see LKAS_PROBLEM fall within a specific time frame, so forward from camera
lkas_hud_values['LKAS_PROBLEM'] = lkas_hud['LKAS_PROBLEM']
if not (CP.flags & HondaFlags.BOSCH_EXT_HUD):
lkas_hud_values['SET_ME_X48'] = 0x48
if CP.flags & HondaFlags.BOSCH_EXT_HUD and not CP.openpilotLongitudinalControl:
commands.append(packer.make_can_msg('LKAS_HUD_A', bus_lkas, lkas_hud_values))
commands.append(packer.make_can_msg('LKAS_HUD_B', bus_lkas, lkas_hud_values))
else:
commands.append(packer.make_can_msg('LKAS_HUD', bus_lkas, lkas_hud_values))
if radar_disabled:
radar_hud_values = {
'CMBS_OFF': 0x01,
'SET_TO_1': 0x01,
}
commands.append(packer.make_can_msg('RADAR_HUD', bus_pt, radar_hud_values))
if CP.carFingerprint == CAR.CIVIC_BOSCH:
commands.append(packer.make_can_msg("LEGACY_BRAKE_COMMAND", bus_pt, {}))
return commands
def spam_buttons_command(packer, button_val, car_fingerprint):
values = {
'CRUISE_BUTTONS': button_val,
'CRUISE_SETTING': 0,
}
# send buttons to camera on radarless cars
bus = 2 if car_fingerprint in HONDA_BOSCH_RADARLESS else get_pt_bus(car_fingerprint)
return packer.make_can_msg("SCM_BUTTONS", bus, values)
|
2,856 |
test allowed methods list is returned for
|
from unittest.mock import Mock
import pytest
from ariadne.constants import (
CONTENT_TYPE_TEXT_PLAIN,
HTTP_STATUS_200_OK,
HTTP_STATUS_405_METHOD_NOT_ALLOWED,
)
from ariadne.wsgi import GraphQL, GraphQLMiddleware
def test_request_to_app_root_path_is_forwarded(app_mock, middleware):
middleware({"PATH_INFO": "/"}, Mock())
assert app_mock.called
def test_request_to_app_sub_path_is_forwarded(app_mock, middleware):
middleware({"PATH_INFO": "/something/"}, Mock())
assert app_mock.called
def test_request_to_graphql_path_is_handled(app_mock, middleware):
handle_request = middleware.graphql_app.handle_request = Mock()
middleware({"PATH_INFO": "/graphql/"}, Mock())
assert handle_request.called
assert not app_mock.called
def test_request_to_graphql_server_root_path_is_handled(server):
server.handle_request = Mock()
server({"PATH_INFO": "/"}, Mock())
assert server.handle_request.called
def test_request_to_graphql_server_sub_path_is_handled(server):
server.handle_request = Mock()
server({"PATH_INFO": "/something/"}, Mock())
assert server.handle_request.called
def test_app_exceptions_are_not_handled(app_mock, schema):
exception = Exception("Test exception")
app_mock = Mock(side_effect=exception)
middleware = GraphQLMiddleware(app_mock, schema)
handle_request = middleware.graphql_app.handle_request = Mock()
with pytest.raises(Exception) as excinfo:
middleware({"PATH_INFO": "/"}, Mock())
assert excinfo.value is exception
assert not handle_request.called
def test_get_handler_is_called_for_get_request(
middleware, middleware_request, start_response
):
middleware_request["REQUEST_METHOD"] = "GET"
handle_get = middleware.graphql_app.handle_get = Mock()
middleware(middleware_request, start_response)
handle_get.assert_called_once_with(middleware_request, start_response)
def test_post_handler_is_called_for_post_request(
middleware, middleware_request, start_response
):
middleware_request["REQUEST_METHOD"] = "POST"
handle_post = middleware.graphql_app.handle_post = Mock()
middleware(middleware_request, start_response)
handle_post.assert_called_once_with(middleware_request, start_response)
def METHOD_NAME(
middleware, middleware_request, start_response
):
middleware_request["REQUEST_METHOD"] = "OPTIONS"
middleware(middleware_request, start_response)
start_response.assert_called_once_with(
HTTP_STATUS_200_OK,
[
("Content-Type", CONTENT_TYPE_TEXT_PLAIN),
("Content-Length", 0),
("Allow", "OPTIONS, POST, GET"),
],
)
def test_allowed_methods_list_returned_for_options_request_excludes_get(
app_mock, middleware_request, start_response, schema
):
middleware_request["REQUEST_METHOD"] = "OPTIONS"
server = GraphQL(schema, introspection=False)
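    # With introspection disabled, GET is dropped from the allowed methods, so only OPTIONS and POST remain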
middleware = GraphQLMiddleware(app_mock, server)
middleware(middleware_request, start_response)
start_response.assert_called_once_with(
HTTP_STATUS_200_OK,
[
("Content-Type", CONTENT_TYPE_TEXT_PLAIN),
("Content-Length", 0),
("Allow", "OPTIONS, POST"),
],
)
METHOD_NOT_ALLOWED_HEADERS = [
("Content-Type", CONTENT_TYPE_TEXT_PLAIN),
("Content-Length", 0),
("Allow", "OPTIONS, POST, GET"),
]
def test_http_not_allowed_response_is_returned_for_delete_request(
middleware, middleware_request, start_response
):
middleware_request["REQUEST_METHOD"] = "DELETE"
middleware(middleware_request, start_response)
start_response.assert_called_once_with(
HTTP_STATUS_405_METHOD_NOT_ALLOWED, METHOD_NOT_ALLOWED_HEADERS
)
def test_http_not_allowed_response_is_returned_for_head_request(
middleware, middleware_request, start_response
):
middleware_request["REQUEST_METHOD"] = "HEAD"
middleware(middleware_request, start_response)
start_response.assert_called_once_with(
HTTP_STATUS_405_METHOD_NOT_ALLOWED, METHOD_NOT_ALLOWED_HEADERS
)
def test_http_not_allowed_response_is_returned_for_patch_request(
middleware, middleware_request, start_response
):
middleware_request["REQUEST_METHOD"] = "PATCH"
middleware(middleware_request, start_response)
start_response.assert_called_once_with(
HTTP_STATUS_405_METHOD_NOT_ALLOWED, METHOD_NOT_ALLOWED_HEADERS
)
def test_http_not_allowed_response_is_returned_for_put_request(
middleware, middleware_request, start_response
):
middleware_request["REQUEST_METHOD"] = "PUT"
middleware(middleware_request, start_response)
start_response.assert_called_once_with(
HTTP_STATUS_405_METHOD_NOT_ALLOWED, METHOD_NOT_ALLOWED_HEADERS
)
|
2,857 |
send
|
from test import test_support as support
# If we end up with a significant number of tests that don't require
# threading, this test module should be split. Right now we skip
# them all if we don't have threading.
threading = support.import_module('threading')
from contextlib import contextmanager
import imaplib
import os.path
import SocketServer
import time
from test.test_support import reap_threads, verbose, transient_internet
import unittest
try:
import ssl
except ImportError:
ssl = None
CERTFILE = None
class TestImaplib(unittest.TestCase):
def test_that_Time2Internaldate_returns_a_result(self):
# We can check only that it successfully produces a result,
# not the correctness of the result itself, since the result
# depends on the timezone the machine is in.
timevalues = [2000000000, 2000000000.0, time.localtime(2000000000),
'"18-May-2033 05:33:20 +0200"']
for t in timevalues:
imaplib.Time2Internaldate(t)
if ssl:
class SecureTCPServer(SocketServer.TCPServer):
def get_request(self):
newsocket, fromaddr = self.socket.accept()
connstream = ssl.wrap_socket(newsocket,
server_side=True,
certfile=CERTFILE)
return connstream, fromaddr
IMAP4_SSL = imaplib.IMAP4_SSL
else:
class SecureTCPServer:
pass
IMAP4_SSL = None
class SimpleIMAPHandler(SocketServer.StreamRequestHandler):
timeout = 1
def METHOD_NAME(self, message):
if verbose: print "SENT:", message.strip()
self.wfile.write(message)
def handle(self):
# Send a welcome message.
self.METHOD_NAME('* OK IMAP4rev1\r\n')
while 1:
# Gather up input until we receive a line terminator or we timeout.
# Accumulate read(1) because it's simpler to handle the differences
# between naked sockets and SSL sockets.
line = ''
while 1:
try:
part = self.rfile.read(1)
if part == '':
# Naked sockets return empty strings..
return
line += part
except IOError:
# ..but SSLSockets raise exceptions.
return
if line.endswith('\r\n'):
break
if verbose: print 'GOT:', line.strip()
splitline = line.split()
tag = splitline[0]
cmd = splitline[1]
args = splitline[2:]
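            # Dispatch to a cmd_<NAME> handler if the class defines one, otherwise reply BAD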
if hasattr(self, 'cmd_%s' % (cmd,)):
getattr(self, 'cmd_%s' % (cmd,))(tag, args)
else:
self.METHOD_NAME('%s BAD %s unknown\r\n' % (tag, cmd))
def cmd_CAPABILITY(self, tag, args):
self.METHOD_NAME('* CAPABILITY IMAP4rev1\r\n')
self.METHOD_NAME('%s OK CAPABILITY completed\r\n' % (tag,))
class BaseThreadedNetworkedTests(unittest.TestCase):
def make_server(self, addr, hdlr):
class MyServer(self.server_class):
def handle_error(self, request, client_address):
self.close_request(request)
self.server_close()
raise
if verbose: print "creating server"
server = MyServer(addr, hdlr)
self.assertEqual(server.server_address, server.socket.getsockname())
if verbose:
print "server created"
print "ADDR =", addr
print "CLASS =", self.server_class
print "HDLR =", server.RequestHandlerClass
t = threading.Thread(
name='%s serving' % self.server_class,
target=server.serve_forever,
# Short poll interval to make the test finish quickly.
# Time between requests is short enough that we won't wake
# up spuriously too many times.
kwargs={'poll_interval':0.01})
t.daemon = True # In case this function raises.
t.start()
if verbose: print "server running"
return server, t
def reap_server(self, server, thread):
if verbose: print "waiting for server"
server.shutdown()
thread.join()
if verbose: print "done"
@contextmanager
def reaped_server(self, hdlr):
server, thread = self.make_server((support.HOST, 0), hdlr)
try:
yield server
finally:
self.reap_server(server, thread)
@reap_threads
def test_connect(self):
with self.reaped_server(SimpleIMAPHandler) as server:
client = self.imap_class(*server.server_address)
client.shutdown()
@reap_threads
def test_issue5949(self):
class EOFHandler(SocketServer.StreamRequestHandler):
def handle(self):
# EOF without sending a complete welcome message.
self.wfile.write('* OK')
with self.reaped_server(EOFHandler) as server:
self.assertRaises(imaplib.IMAP4.abort,
self.imap_class, *server.server_address)
class ThreadedNetworkedTests(BaseThreadedNetworkedTests):
server_class = SocketServer.TCPServer
imap_class = imaplib.IMAP4
@unittest.skipUnless(ssl, "SSL not available")
class ThreadedNetworkedTestsSSL(BaseThreadedNetworkedTests):
server_class = SecureTCPServer
imap_class = IMAP4_SSL
class RemoteIMAPTest(unittest.TestCase):
host = 'cyrus.andrew.cmu.edu'
port = 143
username = 'anonymous'
password = 'pass'
imap_class = imaplib.IMAP4
def setUp(self):
with transient_internet(self.host):
self.server = self.imap_class(self.host, self.port)
def tearDown(self):
if self.server is not None:
self.server.logout()
def test_logincapa(self):
self.assertTrue('LOGINDISABLED' in self.server.capabilities)
def test_anonlogin(self):
self.assertTrue('AUTH=ANONYMOUS' in self.server.capabilities)
rs = self.server.login(self.username, self.password)
self.assertEqual(rs[0], 'OK')
def test_logout(self):
rs = self.server.logout()
self.server = None
self.assertEqual(rs[0], 'BYE')
@unittest.skipUnless(ssl, "SSL not available")
class RemoteIMAP_SSLTest(RemoteIMAPTest):
port = 993
imap_class = IMAP4_SSL
def test_logincapa(self):
self.assertFalse('LOGINDISABLED' in self.server.capabilities)
self.assertTrue('AUTH=PLAIN' in self.server.capabilities)
def test_main():
tests = [TestImaplib]
if support.is_resource_enabled('network'):
if ssl:
global CERTFILE
CERTFILE = os.path.join(os.path.dirname(__file__) or os.curdir,
"keycert.pem")
if not os.path.exists(CERTFILE):
raise support.TestFailed("Can't read certificate files!")
tests.extend([
ThreadedNetworkedTests, ThreadedNetworkedTestsSSL,
RemoteIMAPTest, RemoteIMAP_SSLTest,
])
support.run_unittest(*tests)
if __name__ == "__main__":
support.use_resources = ['network']
test_main()
|
2,858 |
resolve command
|
import configparser
import os
import click
class Config:
"""The config in this example only holds aliases."""
def __init__(self):
self.path = os.getcwd()
self.aliases = {}
def add_alias(self, alias, cmd):
self.aliases.update({alias: cmd})
def read_config(self, filename):
parser = configparser.RawConfigParser()
parser.read([filename])
try:
self.aliases.update(parser.items("aliases"))
except configparser.NoSectionError:
pass
def write_config(self, filename):
parser = configparser.RawConfigParser()
parser.add_section("aliases")
for key, value in self.aliases.items():
parser.set("aliases", key, value)
with open(filename, "wb") as file:
parser.write(file)
pass_config = click.make_pass_decorator(Config, ensure=True)
class AliasedGroup(click.Group):
"""This subclass of a group supports looking up aliases in a config
file and with a bit of magic.
"""
def get_command(self, ctx, cmd_name):
        # Step one: builtin commands as normal
rv = click.Group.get_command(self, ctx, cmd_name)
if rv is not None:
return rv
# Step two: find the config object and ensure it's there. This
        # will create the config object if it is missing.
cfg = ctx.ensure_object(Config)
# Step three: look up an explicit command alias in the config
if cmd_name in cfg.aliases:
actual_cmd = cfg.aliases[cmd_name]
return click.Group.get_command(self, ctx, actual_cmd)
# Alternative option: if we did not find an explicit alias we
# allow automatic abbreviation of the command. "status" for
# instance will match "st". We only allow that however if
# there is only one command.
matches = [
x for x in self.list_commands(ctx) if x.lower().startswith(cmd_name.lower())
]
if not matches:
return None
elif len(matches) == 1:
return click.Group.get_command(self, ctx, matches[0])
ctx.fail(f"Too many matches: {', '.join(sorted(matches))}")
def METHOD_NAME(self, ctx, args):
# always return the command's name, not the alias
_, cmd, args = super().METHOD_NAME(ctx, args)
return cmd.name, cmd, args
def read_config(ctx, param, value):
"""Callback that is used whenever --config is passed. We use this to
always load the correct config. This means that the config is loaded
even if the group itself never executes so our aliases stay always
available.
"""
cfg = ctx.ensure_object(Config)
if value is None:
value = os.path.join(os.path.dirname(__file__), "aliases.ini")
cfg.read_config(value)
return value
@click.command(cls=AliasedGroup)
@click.option(
"--config",
type=click.Path(exists=True, dir_okay=False),
callback=read_config,
expose_value=False,
help="The config file to use instead of the default.",
)
def cli():
"""An example application that supports aliases."""
@cli.command()
def push():
"""Pushes changes."""
click.echo("Push")
@cli.command()
def pull():
"""Pulls changes."""
click.echo("Pull")
@cli.command()
def clone():
"""Clones a repository."""
click.echo("Clone")
@cli.command()
def commit():
"""Commits pending changes."""
click.echo("Commit")
@cli.command()
@pass_config
def status(config):
"""Shows the status."""
click.echo(f"Status for {config.path}")
@cli.command()
@pass_config
@click.argument("alias_", metavar="ALIAS", type=click.STRING)
@click.argument("cmd", type=click.STRING)
@click.option(
"--config_file", type=click.Path(exists=True, dir_okay=False), default="aliases.ini"
)
def alias(config, alias_, cmd, config_file):
"""Adds an alias to the specified configuration file."""
config.add_alias(alias_, cmd)
config.write_config(config_file)
click.echo(f"Added '{alias_}' as alias for '{cmd}'")
|
2,859 |
add network route
|
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import CsHelper
import logging
class CsRoute:
""" Manage routes """
def __init__(self):
self.table_prefix = "Table_"
def get_tablename(self, name):
return self.table_prefix + name
def add_table(self, devicename):
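        # Table number is derived from the device index (e.g. eth2 -> table 102)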
tablenumber = 100 + int(devicename[3:])
tablename = self.get_tablename(devicename)
str = "%s %s" % (tablenumber, tablename)
filename = "/etc/iproute2/rt_tables"
logging.info("Adding route table: " + str + " to " + filename + " if not present ")
if not CsHelper.definedinfile(filename, str):
CsHelper.execute("sudo echo " + str + " >> /etc/iproute2/rt_tables")
# remove "from all table tablename" if exists, else it will interfer with
# routing of unintended traffic
if self.findRule("from all lookup " + tablename):
CsHelper.execute("sudo ip rule delete from all table " + tablename)
def flush_table(self, tablename):
CsHelper.execute("ip route flush table %s" % (tablename))
CsHelper.execute("ip route flush cache")
def add_route(self, dev, address):
""" Wrapper method that adds table name and device to route statement """
# ip route add dev eth1 table Table_eth1 10.0.2.0/24
table = self.get_tablename(dev)
if not table or not address:
empty_param = "table" if not table else "address"
logging.info("Empty parameter received %s while trying to add route, skipping" % empty_param)
else:
logging.info("Adding route: dev " + dev + " table: " +
table + " network: " + address + " if not present")
cmd = "default via %s table %s proto static" % (address, table)
self.set_route(cmd)
def METHOD_NAME(self, dev, address):
""" Wrapper method that adds table name and device to route statement """
# ip route add dev eth1 table Table_eth1 10.0.2.0/24
table = self.get_tablename(dev)
if not table or not address:
empty_param = "table" if not table else "address"
logging.info("Empty parameter received %s while trying to add network route, skipping" % empty_param)
else:
logging.info("Adding route: dev " + dev + " table: " +
table + " network: " + address + " if not present")
cmd = "throw %s table %s proto static" % (address, table)
self.set_route(cmd)
def set_route(self, cmd, method="add"):
""" Add a route if it is not already defined """
found = False
search = cmd
if "throw" in search:
search = search.replace("throw", "")
for i in CsHelper.execute("ip route show " + search):
found = True
if not found and method == "add":
logging.info("Add " + cmd)
cmd = "ip route add " + cmd
elif found and method == "delete":
logging.info("Delete " + cmd)
cmd = "ip route delete " + cmd
else:
return
CsHelper.execute(cmd)
def add_defaultroute(self, gateway):
""" Add a default route
:param str gateway
:return: bool
"""
if not gateway:
raise Exception("Gateway cannot be None.")
if self.defaultroute_exists():
return False
else:
cmd = "default via " + gateway
logging.info("Adding default route")
self.set_route(cmd)
return True
def defaultroute_exists(self):
""" Return True if a default route is present
:return: bool
"""
logging.info("Checking if default ipv4 route is present")
route_found = CsHelper.execute("ip -4 route list 0/0")
if len(route_found) > 0:
logging.info("Default route found: " + route_found[0])
return True
else:
            logging.warning("No default route found!")
return False
def findRule(self, rule):
for i in CsHelper.execute("ip rule show"):
if rule in i.strip():
return True
return False
def set_route_v6(self, cmd, method="add"):
""" Add a IPv6 route if it is not already defined """
found = False
search = cmd
for i in CsHelper.execute("ip -6 route show " + search):
found = True
if not found and method == "add":
logging.info("Add " + cmd)
cmd = "ip -6 route add " + cmd
elif found and method == "delete":
logging.info("Delete " + cmd)
cmd = "ip -6 route delete " + cmd
else:
return
CsHelper.execute(cmd)
def add_defaultroute_v6(self, gateway):
""" Add a default route
# for example, ip -6 route add default via fd80:20:20:20::1
:param str gateway
:return: bool
"""
if not gateway:
raise Exception("Gateway cannot be None.")
logging.info("Checking if default ipv6 route is present")
route_found = CsHelper.execute("ip -6 route list default")
if len(route_found) > 0:
logging.info("Default IPv6 route found: " + route_found[0])
return False
else:
cmd = "default via " + gateway
logging.info("Adding default IPv6 route")
self.set_route_v6(cmd)
return True
def add_network_route_v6(self, dev, address):
""" Wrapper method that adds table name and device to route statement """
# ip -6 route add dev eth1 2021:10:10:10::1/64
logging.info("Adding IPv6 route: dev " + dev + " network: " + address + " if not present")
cmd = "%s dev %s proto kernel" % (address, dev)
self.set_route_v6(cmd)
def delete_network_route_v6(self, dev, address):
""" Wrapper method that deletes table name and device from route statement """
# ip -6 route del dev eth1 2021:10:10:10::1/64
logging.info("Deleting IPv6 route: dev " + dev + " network: " + address + " if present")
cmd = "%s dev %s" % (address, dev)
self.set_route_v6(cmd, method="delete")
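# --- Editor's illustrative sketch (not part of the original CloudStack module) ---
# Assumes this runs on a CloudStack system VM where CsHelper can execute "ip" commands
# with root privileges; the device name and addresses below are placeholders.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    route = CsRoute()
    route.add_table("eth1")              # ensure Table_eth1 exists in /etc/iproute2/rt_tables
    route.add_route("eth1", "10.0.2.1")  # default route via 10.0.2.1 inside Table_eth1
    route.add_defaultroute("10.0.2.1")   # main-table default route; no-op if one already exists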
|
2,860 |
test single lock
|
from __future__ import annotations
import asyncio
from time import sleep
import pytest
from distributed import MultiLock, get_client
from distributed.metrics import time
from distributed.utils import wait_for
from distributed.utils_test import gen_cluster
@gen_cluster(client=True, nthreads=[("127.0.0.1", 8)] * 2)
async def METHOD_NAME(c, s, a, b):
await c.set_metadata("locked", False)
def f(_):
client = get_client()
with MultiLock(names=["x"]):
assert client.get_metadata("locked") is False
client.set_metadata("locked", True)
sleep(0.05)
assert client.get_metadata("locked") is True
client.set_metadata("locked", False)
futures = c.map(f, range(20))
await c.gather(futures)
ext = s.extensions["multi_locks"]
assert not ext.events
assert not ext.requests
assert not ext.requests_left
assert all(len(l) == 0 for l in ext.locks.values())
@gen_cluster(client=True)
async def test_timeout(c, s, a, b):
ext = s.extensions["multi_locks"]
lock1 = MultiLock(names=["x"])
result = await lock1.acquire()
assert result is True
assert ext.requests_left[lock1.id] == 0
assert ext.locks["x"] == [lock1.id]
assert not ext.events
lock2 = MultiLock(names=["x"])
assert lock1.id != lock2.id
start = time()
result = await lock2.acquire(timeout=0.1)
stop = time()
assert stop - start < 0.3
assert result is False
assert ext.locks["x"] == [lock1.id]
assert not ext.events
await lock1.release()
@gen_cluster(client=True)
async def test_timeout_wake_waiter(c, s, a, b):
l1 = MultiLock(names=["x"])
l2 = MultiLock(names=["x", "y"])
l3 = MultiLock(names=["y"])
await l1.acquire()
l2_acquire = asyncio.ensure_future(l2.acquire(timeout=0.5))
with pytest.raises(asyncio.TimeoutError):
await wait_for(asyncio.shield(l2_acquire), 0.1)
l3_acquire = asyncio.ensure_future(l3.acquire())
with pytest.raises(asyncio.TimeoutError):
await wait_for(asyncio.shield(l3_acquire), 0.1)
assert await l2_acquire is False
assert await l3_acquire
await l1.release()
await l3.release()
@gen_cluster(client=True)
async def test_multiple_locks(c, s, a, b):
ext = s.extensions["multi_locks"]
l1 = MultiLock(names=["l1"])
l2 = MultiLock(names=["l2"])
l3 = MultiLock(names=["l1", "l2"])
# Both `l1` and `l2` are free to acquire
assert await l1.acquire()
assert await l2.acquire()
assert list(ext.locks.keys()) == ["l1", "l2"]
assert list(ext.locks.values()) == [[l1.id], [l2.id]]
assert list(ext.requests.keys()) == [l1.id, l2.id]
assert list(ext.requests_left.values()) == [0, 0]
assert not ext.events
# Since `l3` requires both `l1` and `l2`, it isn't available immediately
l3_acquire = asyncio.ensure_future(l3.acquire())
try:
await wait_for(asyncio.shield(l3_acquire), 0.1)
except asyncio.TimeoutError:
assert list(ext.locks.keys()) == ["l1", "l2"]
assert list(ext.locks.values()) == [[l1.id, l3.id], [l2.id, l3.id]]
assert ext.requests[l3.id] == {"l1", "l2"}
assert ext.requests_left[l3.id] == 2
assert l3.id in ext.events
else:
        assert False  # We expect a TimeoutError since `l3` isn't available
# Releasing `l1` isn't enough since `l3` also requires `l2`
await l1.release()
try:
await wait_for(asyncio.shield(l3_acquire), 0.1)
except asyncio.TimeoutError:
        # `l3` now only waits on `l2`
assert list(ext.locks.keys()) == ["l1", "l2"]
assert list(ext.locks.values()) == [[l3.id], [l2.id, l3.id]]
assert ext.requests[l3.id] == {"l1", "l2"}
assert ext.requests_left[l3.id] == 1
assert l3.id in ext.events
else:
assert False
# Releasing `l2` should make `l3` available
await l2.release()
assert list(ext.locks.keys()) == ["l1", "l2"]
assert list(ext.locks.values()) == [[l3.id], [l3.id]]
assert ext.requests[l3.id] == {"l1", "l2"}
assert ext.requests_left[l3.id] == 0
await l3.release()
assert not ext.events
assert not ext.requests
assert not ext.requests_left
assert all(len(l) == 0 for l in ext.locks.values())
@gen_cluster(client=True)
async def test_num_locks(c, s, a, b):
ext = s.extensions["multi_locks"]
l1 = MultiLock(names=["l1", "l2", "l3"])
l2 = MultiLock(names=["l1", "l2", "l3"])
l3 = MultiLock(names=["l1", "l2", "l3", "l4"])
    # Even though `l1` and `l2` use the same lock names, they
    # only require a subset of the locks
assert await l1.acquire(num_locks=1)
assert await l2.acquire(num_locks=2)
assert list(ext.locks.keys()) == ["l1", "l2", "l3"]
assert list(ext.locks.values()) == [[l1.id], [l2.id], [l2.id]]
assert list(ext.requests.keys()) == [l1.id, l2.id]
assert list(ext.requests_left.values()) == [0, 0]
assert not ext.events
# Since `l3` requires three out of four locks it has to wait
l3_acquire = asyncio.ensure_future(l3.acquire(num_locks=3))
try:
await wait_for(asyncio.shield(l3_acquire), 0.1)
except asyncio.TimeoutError:
assert list(ext.locks.keys()) == ["l1", "l2", "l3", "l4"]
assert list(ext.locks.values()) == [
[l1.id, l3.id],
[l2.id, l3.id],
[l2.id, l3.id],
[l3.id],
]
assert list(ext.requests_left.values()) == [0, 0, 2]
assert l3.id in ext.events
else:
        assert False  # We expect a TimeoutError since `l3` isn't available
# Releasing `l1` isn't enough since `l3` also requires three locks
await l1.release()
try:
await wait_for(asyncio.shield(l3_acquire), 0.1)
except asyncio.TimeoutError:
assert list(ext.locks.keys()) == ["l1", "l2", "l3", "l4"]
assert list(ext.locks.values()) == [
[l3.id],
[l2.id, l3.id],
[l2.id, l3.id],
[l3.id],
]
assert list(ext.requests.keys()) == [l2.id, l3.id]
assert list(ext.requests_left.values()) == [0, 1]
assert l3.id in ext.events
else:
assert False
# Releasing `l2` is enough to release `l3`
await l2.release()
    await asyncio.sleep(0.1)  # Give `l3` a chance to wake up and acquire its locks
assert list(ext.locks.keys()) == ["l1", "l2", "l3", "l4"]
assert list(ext.locks.values()) == [[l3.id], [l3.id], [l3.id], [l3.id]]
assert list(ext.requests.keys()) == [l3.id]
assert list(ext.requests_left.values()) == [0]
assert l3.id not in ext.events
await l3.release()
assert not ext.events
assert not ext.requests
assert not ext.requests_left
assert all(len(l) == 0 for l in ext.locks.values())
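# --- Editor's illustrative sketch (not part of the original test module) ---
# The plain usage pattern the tests above exercise, written as a task body. Assumes the
# function runs inside a Dask task with a client connected to a scheduler that has the
# "multi_locks" extension loaded; the lock names and timeout are placeholders.
def guarded_section():
    lock = MultiLock(names=["resource-a", "resource-b"])
    if lock.acquire(timeout=5):  # waits up to 5 s to hold *both* names
        try:
            pass  # critical section touching both resources
        finally:
            lock.release()
    else:
        raise RuntimeError("could not acquire both locks within 5 s")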
|
2,861 |
test binary ew ops
|
# ----------------------------------------------------------------------------
# - Open3D: www.open3d.org -
# ----------------------------------------------------------------------------
# Copyright (c) 2018-2023 www.open3d.org
# SPDX-License-Identifier: MIT
# ----------------------------------------------------------------------------
import open3d as o3d
import numpy as np
import pytest
import mltest
import torch
# skip all tests if the tf ops were not built and disable warnings caused by
# tensorflow
pytestmark = mltest.default_marks
# the supported dtypes for the values
dtypes = pytest.mark.parametrize('dtype',
[np.int32, np.int64, np.float32, np.float64])
# this class is only available for torch
@dtypes
@mltest.parametrize.ml_torch_only
def test_creation(dtype, ml):
values = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=dtype)
row_splits = np.array([0, 2, 4, 4, 5, 12, 13], dtype=np.int64)
# From numpy arrays
r_tensor = ml.classes.RaggedTensor.from_row_splits(values, row_splits)
for i, tensor in enumerate(r_tensor):
np.testing.assert_equal(mltest.to_numpy(tensor),
values[row_splits[i]:row_splits[i + 1]])
# From List
r_tensor = ml.classes.RaggedTensor.from_row_splits(list(values),
list(row_splits))
for i, tensor in enumerate(r_tensor):
np.testing.assert_equal(mltest.to_numpy(tensor),
values[row_splits[i]:row_splits[i + 1]])
# Incompatible tensors.
# Non zero first element.
row_splits = np.array([1, 2, 4, 4, 5, 12, 13], dtype=np.int64)
with np.testing.assert_raises(RuntimeError):
ml.classes.RaggedTensor.from_row_splits(values, row_splits)
# Rank > 1.
row_splits = np.array([[0, 2, 4, 4, 5, 12, 13]], dtype=np.int64)
with np.testing.assert_raises(RuntimeError):
ml.classes.RaggedTensor.from_row_splits(values, row_splits)
# Not increasing monotonically.
    row_splits = np.array([0, 2, 4, 6, 5, 12, 13], dtype=np.int64)
with np.testing.assert_raises(RuntimeError):
ml.classes.RaggedTensor.from_row_splits(values, row_splits)
# Wrong dtype.
row_splits = np.array([0, 2, 4, 4, 5, 12, 13], dtype=np.float32)
with np.testing.assert_raises(RuntimeError):
ml.classes.RaggedTensor.from_row_splits(values, row_splits)
# test with more dimensions
@dtypes
@mltest.parametrize.ml_torch_only
def test_creation_more_dims(dtype, ml):
values = np.array([[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5], [6, 6],
[7, 7], [8, 8], [9, 9], [10, 10], [11, 11], [12, 12]],
dtype=dtype)
row_splits = np.array([0, 2, 4, 4, 5, 12, 13], dtype=np.int64)
# From numpy arrays
r_tensor = ml.classes.RaggedTensor.from_row_splits(values, row_splits)
for i, tensor in enumerate(r_tensor):
np.testing.assert_equal(mltest.to_numpy(tensor),
values[row_splits[i]:row_splits[i + 1]])
# From List
r_tensor = ml.classes.RaggedTensor.from_row_splits(list(values),
list(row_splits))
for i, tensor in enumerate(r_tensor):
np.testing.assert_equal(mltest.to_numpy(tensor),
values[row_splits[i]:row_splits[i + 1]])
@mltest.parametrize.ml_torch_only
def test_backprop(ml):
# Create 3 different RaggedTensors and torch.tensor
t_1 = torch.randn(10, 3, requires_grad=True)
row_splits = torch.tensor([0, 4, 6, 6, 8, 10])
r_1 = ml.classes.RaggedTensor.from_row_splits(t_1.detach().numpy(),
row_splits)
r_1.requires_grad = True
t_2 = torch.randn(10, 3, requires_grad=True)
r_2 = ml.classes.RaggedTensor.from_row_splits(t_2.detach().numpy(),
row_splits)
r_2.requires_grad = True
t_3 = torch.randn(10, 3, requires_grad=True)
r_3 = ml.classes.RaggedTensor.from_row_splits(t_3.detach().numpy(),
row_splits)
r_3.requires_grad = True
r_ans = (r_1 + r_2) * r_3
t_ans = (t_1 + t_2) * t_3
np.testing.assert_equal(mltest.to_numpy(t_ans),
mltest.to_numpy(r_ans.values))
# Compute gradients
t_ans.sum().backward()
r_ans.values.sum().backward()
np.testing.assert_equal(mltest.to_numpy(t_1.grad),
mltest.to_numpy(r_1.values.grad))
@dtypes
@mltest.parametrize.ml_torch_only
def METHOD_NAME(dtype, ml):
# Binary Ops.
t_1 = torch.from_numpy(
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtype)).to(ml.device)
t_2 = torch.from_numpy(
np.array([2, 3, 6, 3, 11, 3, 43, 12, 8, 15, 12, 87, 45],
dtype=dtype)).to(ml.device)
row_splits = torch.from_numpy(
np.array([0, 2, 4, 4, 5, 12, 13], dtype=np.int64)).to(ml.device)
a = ml.classes.RaggedTensor.from_row_splits(t_1, row_splits)
b = ml.classes.RaggedTensor.from_row_splits(t_2, row_splits)
np.testing.assert_equal(
(a + b).values.cpu().numpy(),
np.array([2, 4, 8, 6, 15, 8, 49, 19, 16, 24, 22, 98, 57]))
np.testing.assert_equal(
(a - b).values.cpu().numpy(),
np.array([-2, -2, -4, 0, -7, 2, -37, -5, 0, -6, -2, -76, -33]))
np.testing.assert_equal(
(a * b).values.cpu().numpy(),
np.array([0, 3, 12, 9, 44, 15, 258, 84, 64, 135, 120, 957, 540]))
np.testing.assert_equal((a / b).values.cpu().numpy(),
(t_1 / t_2).cpu().numpy())
np.testing.assert_equal((a // b).values.cpu().numpy(),
np.array([0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0]))
# Assignment Ops.
a = ml.classes.RaggedTensor.from_row_splits(t_1, row_splits)
a += b
np.testing.assert_equal(
a.values.cpu().numpy(),
np.array([2, 4, 8, 6, 15, 8, 49, 19, 16, 24, 22, 98, 57]))
a = ml.classes.RaggedTensor.from_row_splits(t_1, row_splits)
a -= b
np.testing.assert_equal(
a.values.cpu().numpy(),
np.array([-2, -2, -4, 0, -7, 2, -37, -5, 0, -6, -2, -76, -33]))
a = ml.classes.RaggedTensor.from_row_splits(t_1, row_splits)
a *= b
np.testing.assert_equal(
a.values.cpu().numpy(),
np.array([0, 3, 12, 9, 44, 15, 258, 84, 64, 135, 120, 957, 540]))
a = ml.classes.RaggedTensor.from_row_splits(t_1, row_splits)
a //= b
np.testing.assert_equal(a.values.cpu().numpy(),
np.array([0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0]))
# Failure cases with incompatible shape.
# Different row_splits.
row_splits = [0, 4, 5, 13]
a = ml.classes.RaggedTensor.from_row_splits(t_1, row_splits)
row_splits = [0, 4, 6, 13]
b = ml.classes.RaggedTensor.from_row_splits(t_2, row_splits)
with np.testing.assert_raises(ValueError):
a + b
with np.testing.assert_raises(ValueError):
a += b
# Different length
row_splits = [0, 4, 5, 13]
a = ml.classes.RaggedTensor.from_row_splits(t_1, row_splits)
row_splits = [0, 4, 13]
b = ml.classes.RaggedTensor.from_row_splits(t_2, row_splits)
with np.testing.assert_raises(ValueError):
a + b
with np.testing.assert_raises(ValueError):
a += b
|
2,862 |
build model
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
from tqdm import tqdm
from pytorch_transformers.modeling_utils import CONFIG_NAME, WEIGHTS_NAME
from pytorch_transformers.modeling_bert import (
BertPreTrainedModel,
BertConfig,
BertModel,
)
from pytorch_transformers.modeling_roberta import (
RobertaConfig,
RobertaModel,
)
from pytorch_transformers.tokenization_bert import BertTokenizer
from pytorch_transformers.tokenization_roberta import RobertaTokenizer
from blink.common.ranker_base import BertEncoder, get_model_obj
from blink.common.optimizer import get_bert_optimizer
from blink.common.params import ENT_START_TAG, ENT_END_TAG, ENT_TITLE_TAG
def load_crossencoder(params):
# Init model
crossencoder = CrossEncoderRanker(params)
return crossencoder
class CrossEncoderModule(torch.nn.Module):
def __init__(self, params, tokenizer):
super(CrossEncoderModule, self).__init__()
model_path = params["bert_model"]
if params.get("roberta"):
encoder_model = RobertaModel.from_pretrained(model_path)
else:
encoder_model = BertModel.from_pretrained(model_path)
encoder_model.resize_token_embeddings(len(tokenizer))
self.encoder = BertEncoder(
encoder_model,
params["out_dim"],
layer_pulled=params["pull_from_layer"],
add_linear=params["add_linear"],
)
self.config = self.encoder.bert_model.config
def forward(
self, token_idx_ctxt, segment_idx_ctxt, mask_ctxt,
):
embedding_ctxt = self.encoder(token_idx_ctxt, segment_idx_ctxt, mask_ctxt)
return embedding_ctxt.squeeze(-1)
class CrossEncoderRanker(torch.nn.Module):
def __init__(self, params, shared=None):
super(CrossEncoderRanker, self).__init__()
self.params = params
self.device = torch.device(
"cuda" if torch.cuda.is_available() and not params["no_cuda"] else "cpu"
)
self.n_gpu = torch.cuda.device_count()
if params.get("roberta"):
self.tokenizer = RobertaTokenizer.from_pretrained(params["bert_model"],)
else:
self.tokenizer = BertTokenizer.from_pretrained(
params["bert_model"], do_lower_case=params["lowercase"]
)
special_tokens_dict = {
"additional_special_tokens": [
ENT_START_TAG,
ENT_END_TAG,
ENT_TITLE_TAG,
],
}
self.tokenizer.add_special_tokens(special_tokens_dict)
self.NULL_IDX = self.tokenizer.pad_token_id
self.START_TOKEN = self.tokenizer.cls_token
self.END_TOKEN = self.tokenizer.sep_token
# init model
self.METHOD_NAME()
if params["path_to_model"] is not None:
self.load_model(params["path_to_model"])
self.model = self.model.to(self.device)
self.data_parallel = params.get("data_parallel")
if self.data_parallel:
self.model = torch.nn.DataParallel(self.model)
def load_model(self, fname, cpu=False):
if cpu:
            state_dict = torch.load(fname, map_location="cpu")
else:
state_dict = torch.load(fname)
self.model.load_state_dict(state_dict)
def save(self, output_dir):
self.save_model(output_dir)
self.tokenizer.save_vocabulary(output_dir)
def METHOD_NAME(self):
self.model = CrossEncoderModule(self.params, self.tokenizer)
def save_model(self, output_dir):
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = get_model_obj(self.model)
output_model_file = os.path.join(output_dir, WEIGHTS_NAME)
output_config_file = os.path.join(output_dir, CONFIG_NAME)
torch.save(model_to_save.state_dict(), output_model_file)
model_to_save.config.to_json_file(output_config_file)
def get_optimizer(self, optim_states=None, saved_optim_type=None):
return get_bert_optimizer(
[self.model],
self.params["type_optimization"],
self.params["learning_rate"],
fp16=self.params.get("fp16"),
)
def score_candidate(self, text_vecs, context_len):
# Encode contexts first
num_cand = text_vecs.size(1)
text_vecs = text_vecs.view(-1, text_vecs.size(-1))
token_idx_ctxt, segment_idx_ctxt, mask_ctxt = to_bert_input(
text_vecs, self.NULL_IDX, context_len,
)
embedding_ctxt = self.model(token_idx_ctxt, segment_idx_ctxt, mask_ctxt,)
return embedding_ctxt.view(-1, num_cand)
def forward(self, input_idx, label_input, context_len):
scores = self.score_candidate(input_idx, context_len)
loss = F.cross_entropy(scores, label_input, reduction="mean")
return loss, scores
def to_bert_input(token_idx, null_idx, segment_pos):
""" token_idx is a 2D tensor int.
return token_idx, segment_idx and mask
"""
segment_idx = token_idx * 0
if segment_pos > 0:
segment_idx[:, segment_pos:] = token_idx[:, segment_pos:] > 0
mask = token_idx != null_idx
# nullify elements in case self.NULL_IDX was not 0
# token_idx = token_idx * mask.long()
return token_idx, segment_idx, mask
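# --- Editor's illustrative sketch (not part of the original BLINK module) ---
# Minimal wiring of the ranker; every parameter value below is a placeholder, and real
# runs populate them from blink.common.params / the command line.
if __name__ == "__main__":
    example_params = {
        "bert_model": "bert-base-uncased",
        "out_dim": 1,
        "pull_from_layer": -1,
        "add_linear": True,
        "lowercase": True,
        "no_cuda": True,
        "path_to_model": None,
        "data_parallel": False,
        "type_optimization": "all_encoder_layers",
        "learning_rate": 2e-5,
        "fp16": False,
    }
    ranker = load_crossencoder(example_params)
    optimizer = ranker.get_optimizer()
    print(type(ranker.model).__name__, type(optimizer).__name__)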
|
2,863 |
test categorical1
|
# ***************************************************************
# Copyright (c) 2023 Jittor. All Rights Reserved.
# Maintainers:
# Wenyang Zhou <[email protected]>
# Dun Liang <[email protected]>.
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************
import unittest
import jittor as jt
import numpy as np
import jittor.distributions as jd
skip_this_test = False
try:
jt.dirty_fix_pytorch_runtime_error()
import torch
except:
torch = None
skip_this_test = True
class TestOneHot(unittest.TestCase):
def test_presum(self):
a = jt.array([[1,2,3,4]])
b = jd.simple_presum(a)
assert (b.data == [[0,1,3,6,10]]).all()
@unittest.skipIf(skip_this_test, "No Torch Found")
def test_one_hot(self):
a = jd.OneHotCategorical(jt.array([0.25, 0.25, 0.25, 0.25]))
x = a.sample().numpy()
for i in range(1000):
x += a.sample().numpy()
assert (x > 200).all()
y = a.sample([2,3])
y.sync()
assert y.shape == [2,3,4]
probs,probs2 = np.random.uniform(0,1,(10)), np.random.uniform(0,1,(10))
probs,probs2 = probs / probs.sum(),probs2 / probs2.sum()
jc, jc2 = jd.OneHotCategorical(jt.array(probs)),jd.OneHotCategorical(jt.array(probs2))
tc, tc2 = torch.distributions.OneHotCategorical(torch.tensor(probs)),torch.distributions.OneHotCategorical(torch.tensor(probs2))
assert np.allclose(jc.entropy().data,tc.entropy().numpy())
x = np.zeros((4,10))
for _ in range(4):
nx = np.random.randint(0,9)
x[_,nx] = 1
np.testing.assert_allclose(jc.log_prob(jt.array(x)),tc.log_prob(torch.tensor(x)), atol=1e-5)
assert np.allclose(jd.kl_divergence(jc,jc2),torch.distributions.kl_divergence(tc,tc2))
def test_cate(self):
a = jd.Categorical(jt.array([0.25, 0.25, 0.25, 0.25]))
        x = np.array([0, 0, 0, 0])
        for i in range(1000):
            x[a.sample().item()] += 1
assert (x > 200).all()
y = a.sample([2,3])
y.sync()
assert y.shape == [2,3]
@unittest.skipIf(skip_this_test, "No Torch Found")
def test_normal(self):
for _ in range(4):
mu = np.random.uniform(-1,1)
sigma = np.random.uniform(0,2)
jn = jd.Normal(mu,sigma)
tn = torch.distributions.Normal(mu,sigma)
assert np.allclose(jn.entropy().data,tn.entropy().numpy())
x = np.random.uniform(-1,1)
np.testing.assert_allclose(jn.log_prob(x),tn.log_prob(torch.tensor(x)))
mu2 = np.random.uniform(-1,1)
sigma2 = np.random.uniform(0,2)
jn2 = jd.Normal(mu2,sigma2)
tn2 = torch.distributions.Normal(mu2,sigma2)
assert np.allclose(jd.kl_divergence(jn,jn2).data,torch.distributions.kl_divergence(tn,tn2).numpy())
@unittest.skipIf(skip_this_test, "No Torch Found")
def METHOD_NAME(self):
for _ in range(4):
probs,probs2 = np.random.uniform(0,1,(10)), np.random.uniform(0,1,(10))
probs,probs2 = probs / probs.sum(),probs2 / probs2.sum()
jc, jc2 = jd.Categorical(jt.array(probs)),jd.Categorical(jt.array(probs2))
tc, tc2 = torch.distributions.Categorical(torch.tensor(probs)),torch.distributions.Categorical(torch.tensor(probs2))
assert np.allclose(jc.entropy().data, tc.entropy().numpy()), (jc.entropy().data, tc.entropy().numpy())
x = np.random.randint(0,10,(4))
np.testing.assert_allclose(jc.log_prob(x), tc.log_prob(torch.tensor(x)), atol=1e-5)
assert np.allclose(jd.kl_divergence(jc,jc2),torch.distributions.kl_divergence(tc,tc2))
@unittest.skipIf(skip_this_test, "No Torch Found")
def test_categorical2(self):
def check(prob_shape, sample_shape):
for _ in range(4):
probs,probs2 = np.random.uniform(0,1,prob_shape), np.random.uniform(0,1, prob_shape)
jc, jc2 = jd.Categorical(jt.array(probs)),jd.Categorical(jt.array(probs2))
tc, tc2 = torch.distributions.Categorical(torch.tensor(probs)),torch.distributions.Categorical(torch.tensor(probs2))
assert np.allclose(jc.entropy().data, tc.entropy().numpy()), (jc.entropy().data, tc.entropy().numpy())
x1 = jc.sample(sample_shape)
x2 = tc.sample(sample_shape)
assert tuple(x1.shape) == tuple(x2.shape)
x = np.random.randint(0,prob_shape[-1], tuple(x1.shape))
np.testing.assert_allclose(jc.log_prob(x), tc.log_prob(torch.tensor(x)), atol=1e-5)
np.testing.assert_allclose(jd.kl_divergence(jc,jc2), torch.distributions.kl_divergence(tc,tc2), atol=1e-5)
check((10,), (4,))
check((2,3), (4,))
check((3,4,5,6), (2,))
@unittest.skipIf(skip_this_test, "No Torch Found")
def test_one_hot_categorical2(self):
def check(prob_shape, sample_shape):
for _ in range(4):
probs,probs2 = np.random.uniform(0,1,prob_shape), np.random.uniform(0,1, prob_shape)
jc, jc2 = jd.OneHotCategorical(jt.array(probs)),jd.OneHotCategorical(jt.array(probs2))
tc, tc2 = torch.distributions.OneHotCategorical(torch.tensor(probs)),torch.distributions.OneHotCategorical(torch.tensor(probs2))
assert np.allclose(jc.entropy().data, tc.entropy().numpy()), (jc.entropy().data, tc.entropy().numpy())
x1 = jc.sample(sample_shape)
x2 = tc.sample(sample_shape)
assert tuple(x1.shape) == tuple(x2.shape)
x = np.random.randint(0,prob_shape[-1], tuple(x1.shape))
np.testing.assert_allclose(jc.log_prob(x), tc.log_prob(torch.tensor(x)), atol=1e-5)
np.testing.assert_allclose(jd.kl_divergence(jc,jc2), torch.distributions.kl_divergence(tc,tc2), atol=1e-5)
check((10,), (4,))
check((2,3), (4,))
check((3,4,5,6), (2,))
@unittest.skipIf(skip_this_test, "No Torch Found")
def test_uniform(self):
for _ in range(4):
low, low2 = np.random.randint(-1,2), np.random.randint(-1,2)
leng, leng2 = np.random.uniform(0,2), np.random.uniform(0,2)
high, high2 = low + leng, low2 + leng2
ju, ju2 = jd.Uniform(low,high),jd.Uniform(low2,high2)
tu, tu2 = torch.distributions.Uniform(low,high),torch.distributions.Uniform(low2,high2)
assert np.allclose(ju.entropy().data,tu.entropy().numpy())
x = np.random.uniform(low,high)
assert np.allclose(ju.log_prob(x),tu.log_prob(torch.tensor(x)))
assert np.allclose(jd.kl_divergence(ju,ju2),torch.distributions.kl_divergence(tu,tu2))
@unittest.skipIf(skip_this_test, "No Torch Found")
def test_geometric(self):
for _ in range(4):
prob, prob2 = np.random.uniform(0,1), np.random.uniform(0,1)
jg, jg2 = jd.Geometric(prob),jd.Geometric(prob2)
tg, tg2 = torch.distributions.Geometric(prob),torch.distributions.Geometric(prob2)
np.testing.assert_allclose(jg.entropy().data,tg.entropy().numpy(), atol=1e-4)
x = np.random.randint(1,10)
np.testing.assert_allclose(jg.log_prob(x),tg.log_prob(torch.tensor(x)), atol=1e-4)
# print(jd.kl_divergence(jg,jg2),torch.distributions.kl_divergence(tg,tg2))
np.testing.assert_allclose(jd.kl_divergence(jg,jg2),torch.distributions.kl_divergence(tg,tg2), atol=1e-4)
if __name__ == "__main__":
unittest.main(
|
2,864 |
test log iteration
|
# ___________________________________________________________________________
#
# Pyomo: Python Optimization Modeling Objects
# Copyright (c) 2008-2022
# National Technology and Engineering Solutions of Sandia, LLC
# Under the terms of Contract DE-NA0003525 with National Technology and
# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
# rights in this software.
#
# Development of this module was conducted as part of the Institute for
# the Design of Advanced Energy Systems (IDAES) with support through the
# Simulation-Based Engineering, Crosscutting Research Program within the
# U.S. Department of Energy’s Office of Fossil Energy and Carbon Management.
#
# This software is distributed under the 3-clause BSD License.
# ___________________________________________________________________________
from io import StringIO
import sys
import logging
import pyomo.common.unittest as unittest
from pyomo.contrib.trustregion.util import IterationLogger, minIgnoreNone, maxIgnoreNone
from pyomo.common.log import LoggingIntercept
class TestUtil(unittest.TestCase):
def setUp(self):
self.iterLogger = IterationLogger()
self.iteration = 0
self.thetak = 10.0
self.objk = 5.0
self.radius = 1.0
self.stepNorm = 0.25
def tearDown(self):
pass
def test_minIgnoreNone(self):
a = 1
b = 2
self.assertEqual(minIgnoreNone(a, b), a)
a = None
self.assertEqual(minIgnoreNone(a, b), b)
a = 1
b = None
self.assertEqual(minIgnoreNone(a, b), a)
a = None
self.assertEqual(minIgnoreNone(a, b), None)
def test_maxIgnoreNone(self):
a = 1
b = 2
self.assertEqual(maxIgnoreNone(a, b), b)
a = None
self.assertEqual(maxIgnoreNone(a, b), b)
a = 1
b = None
self.assertEqual(maxIgnoreNone(a, b), a)
a = None
self.assertEqual(maxIgnoreNone(a, b), None)
def test_IterationRecord(self):
self.iterLogger.newIteration(
self.iteration, self.thetak, self.objk, self.radius, self.stepNorm
)
self.assertEqual(len(self.iterLogger.iterations), 1)
self.assertEqual(self.iterLogger.iterations[0].objectiveValue, 5.0)
def METHOD_NAME(self):
self.iterLogger.newIteration(
self.iteration, self.thetak, self.objk, self.radius, self.stepNorm
)
OUTPUT = StringIO()
with LoggingIntercept(OUTPUT, 'pyomo.contrib.trustregion', logging.INFO):
self.iterLogger.logIteration()
self.assertIn('Iteration 0', OUTPUT.getvalue())
self.assertIn('feasibility =', OUTPUT.getvalue())
self.assertIn('stepNorm =', OUTPUT.getvalue())
def test_updateIteration(self):
self.iterLogger.newIteration(
self.iteration, self.thetak, self.objk, self.radius, self.stepNorm
)
self.assertEqual(self.iterLogger.iterations[0].objectiveValue, self.objk)
self.assertEqual(self.iterLogger.iterations[0].feasibility, self.thetak)
self.assertEqual(self.iterLogger.iterations[0].trustRadius, self.radius)
self.assertEqual(self.iterLogger.iterations[0].stepNorm, self.stepNorm)
self.iterLogger.updateIteration(feasibility=5.0)
self.assertEqual(self.iterLogger.iterations[0].objectiveValue, self.objk)
self.assertEqual(self.iterLogger.iterations[0].feasibility, 5.0)
self.assertEqual(self.iterLogger.iterations[0].trustRadius, self.radius)
self.assertEqual(self.iterLogger.iterations[0].stepNorm, self.stepNorm)
self.iterLogger.updateIteration(objectiveValue=0.1)
self.assertEqual(self.iterLogger.iterations[0].objectiveValue, 0.1)
self.assertEqual(self.iterLogger.iterations[0].feasibility, 5.0)
self.assertEqual(self.iterLogger.iterations[0].trustRadius, self.radius)
self.assertEqual(self.iterLogger.iterations[0].stepNorm, self.stepNorm)
self.iterLogger.updateIteration(trustRadius=100)
self.assertEqual(self.iterLogger.iterations[0].objectiveValue, 0.1)
self.assertEqual(self.iterLogger.iterations[0].feasibility, 5.0)
self.assertEqual(self.iterLogger.iterations[0].trustRadius, 100)
self.assertEqual(self.iterLogger.iterations[0].stepNorm, self.stepNorm)
self.iterLogger.updateIteration(stepNorm=1)
self.assertEqual(self.iterLogger.iterations[0].objectiveValue, 0.1)
self.assertEqual(self.iterLogger.iterations[0].feasibility, 5.0)
self.assertEqual(self.iterLogger.iterations[0].trustRadius, 100)
self.assertEqual(self.iterLogger.iterations[0].stepNorm, 1)
self.iterLogger.updateIteration(
feasibility=10.0, objectiveValue=0.2, trustRadius=1000, stepNorm=10
)
self.assertEqual(self.iterLogger.iterations[0].objectiveValue, 0.2)
self.assertEqual(self.iterLogger.iterations[0].feasibility, 10.0)
self.assertEqual(self.iterLogger.iterations[0].trustRadius, 1000)
self.assertEqual(self.iterLogger.iterations[0].stepNorm, 10)
def test_printIteration(self):
self.iterLogger.newIteration(
self.iteration, self.thetak, self.objk, self.radius, self.stepNorm
)
OUTPUT = StringIO()
sys.stdout = OUTPUT
self.iterLogger.printIteration()
sys.stdout = sys.__stdout__
self.assertIn(str(self.radius), OUTPUT.getvalue())
self.assertIn(str(self.iteration), OUTPUT.getvalue())
self.assertIn(str(self.thetak), OUTPUT.getvalue())
self.assertIn(str(self.objk), OUTPUT.getvalue())
self.assertIn(str(self.stepNorm), OUTPUT.getvalue())
|
2,865 |
prepare request
|
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(**kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2015-08-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/providers/Microsoft.EventHub/operations")
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class Operations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.eventhub.v2015_08_01.EventHubManagementClient`'s
:attr:`operations` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list(self, **kwargs: Any) -> Iterable["_models.Operation"]:
"""Lists all of the available Event Hub REST API operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Operation or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.eventhub.v2015_08_01.models.Operation]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2015-08-01"))
cls: ClsType[_models.OperationListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def METHOD_NAME(next_link=None):
if not next_link:
request = build_list_request(
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("OperationListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = METHOD_NAME(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list.metadata = {"url": "/providers/Microsoft.EventHub/operations"}
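# --- Editor's illustrative sketch (not part of the generated module) ---
# Normal use goes through the management client named in the class docstring above;
# the subscription id is a placeholder and azure-identity is assumed to be available.
def example_list_operations():
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.eventhub.v2015_08_01 import EventHubManagementClient
    client = EventHubManagementClient(DefaultAzureCredential(), "<subscription-id>")
    for operation in client.operations.list():
        print(operation.name)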
|
2,866 |
critical
|
"""
This type stub file was generated by pyright.
"""
"""
Logger implementation loosely modeled on PEP 282. We don't use the
PEP 282 logger implementation in the stdlib ('logging') because it's
idiosyncratic and a bit slow for our purposes (we don't use threads).
"""
class LevelsByName:
CRIT = ...
ERRO = ...
WARN = ...
INFO = ...
DEBG = ...
TRAC = ...
BLAT = ...
class LevelsByDescription:
METHOD_NAME = ...
error = ...
warn = ...
info = ...
debug = ...
trace = ...
blather = ...
LOG_LEVELS_BY_NUM = ...
def getLevelNumByDescription(description): # -> Any | None:
...
class Handler:
fmt = ...
level = ...
def __init__(self, stream=...) -> None:
...
def setFormat(self, fmt): # -> None:
...
def setLevel(self, level): # -> None:
...
def flush(self): # -> None:
...
def close(self): # -> None:
...
def emit(self, record): # -> None:
...
def handleError(self): # -> None:
...
class StreamHandler(Handler):
def __init__(self, strm=...) -> None:
...
def remove(self): # -> None:
...
def reopen(self): # -> None:
...
class BoundIO:
def __init__(self, maxbytes, buf=...) -> None:
...
def flush(self): # -> None:
...
def close(self): # -> None:
...
def write(self, b): # -> None:
...
def getvalue(self): # -> Unknown | bytes:
...
def clear(self): # -> None:
...
class FileHandler(Handler):
"""File handler which supports reopening of logs.
"""
def __init__(self, filename, mode=...) -> None:
...
def reopen(self): # -> None:
...
def remove(self): # -> None:
...
class RotatingFileHandler(FileHandler):
def __init__(self, filename, mode=..., maxBytes=..., backupCount=...) -> None:
"""
Open the specified file and use it as the stream for logging.
By default, the file grows indefinitely. You can specify particular
values of maxBytes and backupCount to allow the file to rollover at
a predetermined size.
Rollover occurs whenever the current log file is nearly maxBytes in
length. If backupCount is >= 1, the system will successively create
new files with the same pathname as the base file, but with extensions
".1", ".2" etc. appended to it. For example, with a backupCount of 5
and a base file name of "app.log", you would get "app.log",
"app.log.1", "app.log.2", ... through to "app.log.5". The file being
written to is always "app.log" - when it gets filled up, it is closed
and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc.
exist, then they are renamed to "app.log.2", "app.log.3" etc.
respectively.
If maxBytes is zero, rollover never occurs.
"""
...
def emit(self, record): # -> None:
"""
Emit a record.
Output the record to the file, catering for rollover as described
in doRollover().
"""
...
def removeAndRename(self, sfn, dfn): # -> None:
...
def doRollover(self): # -> None:
"""
Do a rollover, as described in __init__().
"""
...
class LogRecord:
def __init__(self, level, msg, **kw) -> None:
...
def asdict(self): # -> dict[str, str | Unknown]:
...
class Logger:
def __init__(self, level=..., handlers=...) -> None:
...
def close(self): # -> None:
...
def blather(self, msg, **kw): # -> None:
...
def trace(self, msg, **kw): # -> None:
...
def debug(self, msg, **kw): # -> None:
...
def info(self, msg, **kw): # -> None:
...
def warn(self, msg, **kw): # -> None:
...
def error(self, msg, **kw): # -> None:
...
def METHOD_NAME(self, msg, **kw): # -> None:
...
def log(self, level, msg, **kw): # -> None:
...
def addHandler(self, hdlr): # -> None:
...
def getvalue(self):
...
class SyslogHandler(Handler):
def __init__(self) -> None:
...
def close(self): # -> None:
...
def reopen(self): # -> None:
...
def emit(self, record): # -> None:
...
def getLogger(level=...): # -> Logger:
...
_2MB = ...
def handle_boundIO(logger, fmt, maxbytes=...): # -> None:
"""Attach a new BoundIO handler to an existing Logger"""
...
def handle_stdout(logger, fmt): # -> None:
"""Attach a new StreamHandler with stdout handler to an existing Logger"""
...
def handle_syslog(logger, fmt): # -> None:
"""Attach a new Syslog handler to an existing Logger"""
...
def handle_file(logger, filename, fmt, rotating=..., maxbytes=..., backups=...): # -> None:
"""Attach a new file handler to an existing Logger. If the filename
is the magic name of 'syslog' then make it a syslog handler instead."""
...
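# --- Editor's illustrative sketch (not part of the generated stub) ---
# This looks like a stub of supervisor's "loggers" module; assuming that, a rotating
# file log (per the RotatingFileHandler docstring above) is typically wired up roughly
# as follows. The format string, path and sizes are placeholders.
def _example_rotating_logger():
    logger = getLogger(LevelsByName.INFO)
    handle_file(
        logger,
        "/tmp/app.log",
        "%(asctime)s %(message)s\n",
        rotating=True,
        maxbytes=1 << 20,   # roll over near 1 MiB
        backups=5,          # keep app.log.1 ... app.log.5
    )
    logger.info("rotating file logging configured")
    return logger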
|
2,867 |
convert test
|
# -*- coding: utf-8 -*-
# flake8: noqa T001
import argparse
import os
import glob
from ruamel.yaml.comments import CommentedSeq
from openfisca_core.scripts import (
add_tax_benefit_system_arguments,
build_tax_benefit_system,
)
from ruamel.yaml import YAML
yaml = YAML()
yaml.default_flow_style = False
yaml.width = 4096
TEST_METADATA = {
"period",
"name",
"reforms",
"only_variables",
"ignore_variables",
"absolute_error_margin",
"relative_error_margin",
"description",
"keywords",
}
def build_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"path", help="paths (files or directories) of tests to execute", nargs="+"
)
parser = add_tax_benefit_system_arguments(parser)
return parser
class Migrator(object):
def __init__(self, tax_benefit_system):
self.tax_benefit_system = tax_benefit_system
self.entities_by_plural = {
entity.plural: entity for entity in self.tax_benefit_system.entities
}
def migrate(self, path):
if isinstance(path, list):
for item in path:
self.migrate(item)
return
if os.path.isdir(path):
yaml_paths = glob.glob(os.path.join(path, "*.yaml"))
subdirectories = glob.glob(os.path.join(path, "*/"))
for yaml_path in yaml_paths:
self.migrate(yaml_path)
for subdirectory in subdirectories:
self.migrate(subdirectory)
return
print("Migrating {}.".format(path))
with open(path) as yaml_file:
            tests = yaml.load(yaml_file)
if isinstance(tests, CommentedSeq):
migrated_tests = [self.METHOD_NAME(test) for test in tests]
else:
migrated_tests = self.METHOD_NAME(tests)
with open(path, "w") as yaml_file:
yaml.dump(migrated_tests, yaml_file)
def METHOD_NAME(self, test):
if test.get("output"):
# This test is already converted, ignoring it
return test
result = {}
outputs = test.pop("output_variables")
inputs = test.pop("input_variables", {})
for key, value in test.items():
if key in TEST_METADATA:
result[key] = value
else:
inputs[key] = value
result["input"] = self.convert_inputs(inputs)
result["output"] = outputs
return result
def convert_inputs(self, inputs):
first_key = next(iter(inputs.keys()), None)
if first_key not in self.entities_by_plural:
return inputs
results = {}
for entity_plural, entities_description in inputs.items():
entity = self.entities_by_plural[entity_plural]
if not isinstance(entities_description, (CommentedSeq, list)):
entities_description = [entities_description]
if not entity.is_person and len(entities_description) == 1:
results[entity.key] = remove_id(entities_description[0])
continue
results[entity_plural] = self.convert_entities(entity, entities_description)
results = self.generate_missing_entities(results)
return results
def convert_entities(self, entity, entities_description):
return {
entity_description.get("id", "{}_{}".format(entity.key, index)): remove_id(
entity_description
)
for index, entity_description in enumerate(entities_description)
}
def generate_missing_entities(self, inputs):
for entity in self.tax_benefit_system.entities:
if entity.plural in inputs or entity.key in inputs:
continue
persons = inputs[self.tax_benefit_system.person_entity.plural]
if len(persons) == 1:
person_id = next(iter(persons))
inputs[entity.key] = {
entity.roles[0].plural or entity.roles[0].key: [person_id]
}
else:
inputs[entity.plural] = {
"{}_{}".format(entity.key, index): {
entity.roles[0].plural or entity.roles[0].key: [person_id]
}
for index, person_id in enumerate(persons.keys())
}
return inputs
def remove_id(input_dict):
return {key: value for (key, value) in input_dict.items() if key != "id"}
def main():
parser = build_parser()
args = parser.parse_args()
paths = [os.path.abspath(path) for path in args.path]
tax_benefit_system = build_tax_benefit_system(
args.country_package, args.extensions, args.reforms
)
Migrator(tax_benefit_system).migrate(paths)
if __name__ == "__main__":
main()
|
2,868 |
get private link scope
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetPrivateLinkScopeResult',
'AwaitableGetPrivateLinkScopeResult',
'get_private_link_scope',
'get_private_link_scope_output',
]
@pulumi.output_type
class GetPrivateLinkScopeResult:
"""
An Azure Arc PrivateLinkScope definition.
"""
def __init__(__self__, id=None, location=None, name=None, private_endpoint_connections=None, provisioning_state=None, public_network_access=None, tags=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if private_endpoint_connections and not isinstance(private_endpoint_connections, list):
raise TypeError("Expected argument 'private_endpoint_connections' to be a list")
pulumi.set(__self__, "private_endpoint_connections", private_endpoint_connections)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if public_network_access and not isinstance(public_network_access, str):
raise TypeError("Expected argument 'public_network_access' to be a str")
pulumi.set(__self__, "public_network_access", public_network_access)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Azure resource Id
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> str:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Azure resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="privateEndpointConnections")
def private_endpoint_connections(self) -> Sequence['outputs.PrivateEndpointConnectionResponse']:
"""
List of private endpoint connections.
"""
return pulumi.get(self, "private_endpoint_connections")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
        Current state of this PrivateLinkScope: whether or not it has been provisioned within the resource group it is defined in. Users cannot change this value but are able to read from it. Values will include Provisioning, Succeeded, Canceled and Failed.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="publicNetworkAccess")
def public_network_access(self) -> Optional[str]:
"""
Indicates whether machines associated with the private link scope can also use public Azure Arc service endpoints.
"""
return pulumi.get(self, "public_network_access")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Azure resource type
"""
return pulumi.get(self, "type")
class AwaitableGetPrivateLinkScopeResult(GetPrivateLinkScopeResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetPrivateLinkScopeResult(
id=self.id,
location=self.location,
name=self.name,
private_endpoint_connections=self.private_endpoint_connections,
provisioning_state=self.provisioning_state,
public_network_access=self.public_network_access,
tags=self.tags,
type=self.type)
def METHOD_NAME(resource_group_name: Optional[str] = None,
scope_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPrivateLinkScopeResult:
"""
    Returns an Azure Arc PrivateLinkScope.
:param str resource_group_name: The name of the resource group.
:param str scope_name: The name of the Azure Arc PrivateLinkScope resource.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['scopeName'] = scope_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:hybridcompute/v20200815preview:getPrivateLinkScope', __args__, opts=opts, typ=GetPrivateLinkScopeResult).value
return AwaitableGetPrivateLinkScopeResult(
id=pulumi.get(__ret__, 'id'),
location=pulumi.get(__ret__, 'location'),
name=pulumi.get(__ret__, 'name'),
private_endpoint_connections=pulumi.get(__ret__, 'private_endpoint_connections'),
provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
public_network_access=pulumi.get(__ret__, 'public_network_access'),
tags=pulumi.get(__ret__, 'tags'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(METHOD_NAME)
def get_private_link_scope_output(resource_group_name: Optional[pulumi.Input[str]] = None,
scope_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetPrivateLinkScopeResult]:
"""
    Returns an Azure Arc PrivateLinkScope.
:param str resource_group_name: The name of the resource group.
:param str scope_name: The name of the Azure Arc PrivateLinkScope resource.
"""
...
|
2,869 |
build sep conv
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import tensorflow as tf
from tensorflow.keras import Model, Sequential
from tensorflow.keras.layers import (
AveragePooling2D,
BatchNormalization,
Conv2D,
Dense,
Dropout,
GlobalAveragePooling2D,
MaxPool2D,
ReLU,
SeparableConv2D,
)
from nni.nas.tensorflow.mutables import InputChoice, LayerChoice, MutableScope
def build_conv_1x1(filters, name=None):
return Sequential([
Conv2D(filters, kernel_size=1, use_bias=False),
BatchNormalization(trainable=False),
ReLU(),
], name)
def METHOD_NAME(filters, kernel_size, name=None):
return Sequential([
ReLU(),
SeparableConv2D(filters, kernel_size, padding='same'),
BatchNormalization(trainable=True),
], name)
class FactorizedReduce(Model):
def __init__(self, filters):
super().__init__()
self.conv1 = Conv2D(filters // 2, kernel_size=1, strides=2, use_bias=False)
self.conv2 = Conv2D(filters // 2, kernel_size=1, strides=2, use_bias=False)
self.bn = BatchNormalization(trainable=False)
def call(self, x):
out1 = self.conv1(x)
out2 = self.conv2(x[:, 1:, 1:, :])
out = tf.concat([out1, out2], axis=3)
out = self.bn(out)
return out
class ReductionLayer(Model):
def __init__(self, filters):
super().__init__()
self.reduce0 = FactorizedReduce(filters)
self.reduce1 = FactorizedReduce(filters)
def call(self, prevprev, prev):
return self.reduce0(prevprev), self.reduce1(prev)
class Calibration(Model):
def __init__(self, filters):
super().__init__()
self.filters = filters
self.process = None
def build(self, shape):
assert len(shape) == 4 # batch_size, width, height, filters
if shape[3] != self.filters:
self.process = build_conv_1x1(self.filters)
def call(self, x):
if self.process is None:
return x
return self.process(x)
class Cell(Model):
def __init__(self, cell_name, prev_labels, filters):
super().__init__()
self.input_choice = InputChoice(choose_from=prev_labels, n_chosen=1, return_mask=True, key=cell_name + '_input')
self.op_choice = LayerChoice([
METHOD_NAME(filters, 3),
METHOD_NAME(filters, 5),
AveragePooling2D(pool_size=3, strides=1, padding='same'),
MaxPool2D(pool_size=3, strides=1, padding='same'),
Sequential(), # Identity
], key=cell_name + '_op')
def call(self, prev_layers):
chosen_input, chosen_mask = self.input_choice(prev_layers)
cell_out = self.op_choice(chosen_input)
return cell_out, chosen_mask
class Node(MutableScope):
def __init__(self, node_name, prev_node_names, filters):
super().__init__(node_name)
self.cell_x = Cell(node_name + '_x', prev_node_names, filters)
self.cell_y = Cell(node_name + '_y', prev_node_names, filters)
def call(self, prev_layers):
out_x, mask_x = self.cell_x(prev_layers)
out_y, mask_y = self.cell_y(prev_layers)
return out_x + out_y, mask_x | mask_y
class ENASLayer(Model):
def __init__(self, num_nodes, filters, reduction):
super().__init__()
self.preproc0 = Calibration(filters)
self.preproc1 = Calibration(filters)
self.nodes = []
node_labels = [InputChoice.NO_KEY, InputChoice.NO_KEY]
name_prefix = 'reduce' if reduction else 'normal'
for i in range(num_nodes):
node_labels.append('{}_node_{}'.format(name_prefix, i))
self.nodes.append(Node(node_labels[-1], node_labels[:-1], filters))
self.conv_ops = [Conv2D(filters, kernel_size=1, padding='same', use_bias=False) for _ in range(num_nodes + 2)]
self.bn = BatchNormalization(trainable=False)
def call(self, prevprev, prev):
prev_nodes_out = [self.preproc0(prevprev), self.preproc1(prev)]
nodes_used_mask = tf.zeros(len(self.nodes) + 2, dtype=tf.bool)
for i, node in enumerate(self.nodes):
node_out, mask = node(prev_nodes_out)
nodes_used_mask |= tf.pad(mask, [[0, nodes_used_mask.shape[0] - mask.shape[0]]])
prev_nodes_out.append(node_out)
outputs = []
for used, out, conv in zip(nodes_used_mask.numpy(), prev_nodes_out, self.conv_ops):
if not used:
outputs.append(conv(out))
out = tf.add_n(outputs)
return prev, self.bn(out)
class MicroNetwork(Model):
def __init__(self, num_layers=6, num_nodes=5, out_channels=20, num_classes=10, dropout_rate=0.1):
super().__init__()
self.num_layers = num_layers
self.stem = Sequential([
Conv2D(out_channels * 3, kernel_size=3, padding='same', use_bias=False),
BatchNormalization(),
])
pool_distance = num_layers // 3
pool_layer_indices = [pool_distance, 2 * pool_distance + 1]
self.enas_layers = []
filters = out_channels
for i in range(num_layers + 2):
if i in pool_layer_indices:
reduction = True
filters *= 2
self.enas_layers.append(ReductionLayer(filters))
else:
reduction = False
self.enas_layers.append(ENASLayer(num_nodes, filters, reduction))
self.gap = GlobalAveragePooling2D()
self.dropout = Dropout(dropout_rate)
self.dense = Dense(num_classes)
def call(self, x):
prev = cur = self.stem(x)
for layer in self.enas_layers:
prev, cur = layer(prev, cur)
cur = tf.keras.activations.relu(cur)
cur = self.gap(cur)
cur = self.dropout(cur)
logits = self.dense(cur)
return logits
|
2,870 |
add
|
import datetime
from pathlib import Path
from sqlalchemy import Column, event, Integer, String
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.sql.sqltypes import DateTime, JSON
from pbench.server.database.database import Database
from pbench.server.database.models import decode_sql_error
class TemplateError(Exception):
"""A base class for errors reported by the Template class.
It is never raised directly, but may be used in "except" clauses.
"""
pass
class TemplateSqlError(TemplateError):
"""SQLAlchemy errors reported through Template operations.
The exception will identify the base name of the template index, along
with the operation being attempted; the __cause__ will specify the
original SQLAlchemy exception.
"""
def __init__(self, cause: Exception, **kwargs):
super().__init__(
f"Error on {kwargs.get('operation')} index {kwargs.get('name')!r}: '{cause}'"
)
self.cause = cause
self.kwargs = kwargs
class TemplateFileMissing(TemplateError):
"""Template requires a file name."""
def __init__(self, name: str):
super().__init__(f"Template {name!r} is missing required file")
self.name = name
class TemplateNotFound(TemplateError):
"""Attempt to find a Template that doesn't exist."""
def __init__(self, name: str):
super().__init__(
f"Document template for index {name!r} not found in the database"
)
self.name = name
class TemplateDuplicate(TemplateError):
"""Attempt to commit a duplicate Template."""
def __init__(self, cause: Exception, **kwargs):
super().__init__(f"Duplicate template {kwargs.get('name')!r}: '{cause}'")
self.cause = cause
self.kwargs = kwargs
class TemplateMissingParameter(TemplateError):
"""Attempt to commit a Template with missing parameters."""
def __init__(self, cause: Exception, **kwargs):
super().__init__(
f"Missing required parameters in {kwargs.get('name')!r}: '{cause}'"
)
self.cause = cause
self.kwargs = kwargs
class Template(Database.Base):
"""Identify a Pbench Elasticsearch document template.
Columns:
id Generated unique ID of table row
name Index name key (e.g., "fio")
idxname Base index name (e.g., "tool-data-fio")
template_name The Elasticsearch template name
file The source JSON mapping file
mtime Template file modification timestamp
template_pattern The template for the Elasticsearch index name
index_template The full index name template "p.v.i.y-m[-d]"
settings The JSON settings payload
mappings The JSON mappings payload
version The template version metadata
"""
__tablename__ = "templates"
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String(255), unique=True, nullable=False)
idxname = Column(String(255), unique=True, nullable=False)
template_name = Column(String(255), unique=True, nullable=False)
file = Column(String(255), unique=False, nullable=False)
mtime = Column(DateTime, unique=False, nullable=False)
template_pattern = Column(String(255), unique=False, nullable=False)
index_template = Column(String(225), unique=False, nullable=False)
settings = Column(JSON, unique=False, nullable=False)
mappings = Column(JSON, unique=False, nullable=False)
version = Column(String(255), unique=False, nullable=False)
@staticmethod
def create(**kwargs) -> "Template":
"""A simple factory method to construct a new Template object and
add it to the database.
Args:
kwargs : any of the column names defined above
Returns:
A new Template object initialized with the keyword parameters.
"""
template = Template(**kwargs)
template.METHOD_NAME()
return template
@staticmethod
def find(name: str) -> "Template":
"""Return a Template object with the specified base name.
For example, find("run-data").
Args:
name : Base index name
Raises:
TemplateSqlError : problem interacting with Database
TemplateNotFound : the specified template doesn't exist
Returns:
Template : a template object with the specified base name
"""
try:
template = Database.db_session.query(Template).filter_by(name=name).first()
except SQLAlchemyError as e:
raise TemplateSqlError(e, operation="find", name=name)
if template is None:
raise TemplateNotFound(name)
return template
def __str__(self) -> str:
"""Return a string representation of the template.
Returns:
A string representation of the template.
"""
return f"{self.name}: {self.index_template}"
def METHOD_NAME(self):
"""Add the Template object to the database."""
try:
Database.db_session.METHOD_NAME(self)
Database.db_session.commit()
except Exception as e:
Database.db_session.rollback()
raise decode_sql_error(
e,
on_duplicate=TemplateDuplicate,
on_null=TemplateMissingParameter,
fallback=TemplateSqlError,
operation="add",
name=self.name,
) from e
def update(self):
"""Update the database row with the modified version of the
Template object.
"""
try:
Database.db_session.commit()
except Exception as e:
Database.db_session.rollback()
raise decode_sql_error(
e,
on_duplicate=TemplateDuplicate,
on_null=TemplateMissingParameter,
fallback=TemplateSqlError,
operation="update",
name=self.name,
) from e
@event.listens_for(Template, "init")
def check_required(target, args, kwargs):
"""Listen for an init event on Template to validate that a filename was
specified.
Automatically capture the file's modification timestamp if it wasn't given.
"""
if "file" not in kwargs:
raise TemplateFileMissing(kwargs["name"])
if "mtime" not in kwargs:
kwargs["mtime"] = datetime.datetime.fromtimestamp(
Path(kwargs["file"]).stat().st_mtime
)
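# Hedged usage sketch (editor addition, not from the original repo): driving the
# factory and lookup methods above. All column values below are made-up
# placeholders, and a configured Database.db_session is assumed.
def _example_template_usage():
    tmpl = Template.create(
        name="run-data",
        idxname="run-data",
        template_name="pbench.run-data",
        file="/srv/pbench/templates/run-data.json",
        mtime=datetime.datetime.now(),
        template_pattern="pbench.*.run-data.*",
        index_template="p.v.run-data.y-m",
        settings={"number_of_shards": 1},
        mappings={"properties": {}},
        version="1",
    )
    assert Template.find("run-data") is not None
    return tmpl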
|
2,871 |
makedirs
|
# type: ignore
import posixpath
import shutil
from pathlib import Path
from unittest.mock import patch
import fsspec
import pytest
from fsspec.implementations.local import (
AbstractFileSystem,
LocalFileSystem,
stringify_path,
)
class MockFileSystem(AbstractFileSystem):
protocol = "mock"
def __init__(self, *args, local_root_dir, **kwargs):
super().__init__()
self._fs = LocalFileSystem(*args, **kwargs)
self.local_root_dir = Path(local_root_dir).resolve().as_posix() + "/"
def mkdir(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.mkdir(path, *args, **kwargs)
def METHOD_NAME(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.METHOD_NAME(path, *args, **kwargs)
def rmdir(self, path):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.rmdir(path)
def ls(self, path, detail=True, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
out = self._fs.ls(path, detail=detail, *args, **kwargs)
if detail:
return [{**info, "name": info["name"][len(self.local_root_dir) :]} for info in out] # noqa: E203
else:
return [name[len(self.local_root_dir) :] for name in out] # noqa: E203
def info(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
out = dict(self._fs.info(path, *args, **kwargs))
out["name"] = out["name"][len(self.local_root_dir) :] # noqa: E203
return out
def cp_file(self, path1, path2, *args, **kwargs):
path1 = posixpath.join(self.local_root_dir, self._strip_protocol(path1))
path2 = posixpath.join(self.local_root_dir, self._strip_protocol(path2))
return self._fs.cp_file(path1, path2, *args, **kwargs)
def rm_file(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.rm_file(path, *args, **kwargs)
def rm(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.rm(path, *args, **kwargs)
def _open(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs._open(path, *args, **kwargs)
def created(self, path):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.created(path)
def modified(self, path):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.modified(path)
@classmethod
def _strip_protocol(cls, path):
path = stringify_path(path)
if path.startswith("mock://"):
path = path[7:]
return path
class TmpDirFileSystem(MockFileSystem):
protocol = "tmp"
tmp_dir = None
def __init__(self, *args, **kwargs):
assert self.tmp_dir is not None, "TmpDirFileSystem.tmp_dir is not set"
super().__init__(*args, **kwargs, local_root_dir=self.tmp_dir, auto_mkdir=True)
@classmethod
def _strip_protocol(cls, path):
path = stringify_path(path)
if path.startswith("tmp://"):
path = path[6:]
return path
@pytest.fixture
def mock_fsspec():
original_registry = fsspec.registry.copy()
fsspec.register_implementation("mock", MockFileSystem)
fsspec.register_implementation("tmp", TmpDirFileSystem)
yield
fsspec.registry = original_registry
@pytest.fixture
def mockfs(tmp_path_factory, mock_fsspec):
local_fs_dir = tmp_path_factory.mktemp("mockfs")
return MockFileSystem(local_root_dir=local_fs_dir, auto_mkdir=True)
@pytest.fixture
def tmpfs(tmp_path_factory, mock_fsspec):
tmp_fs_dir = tmp_path_factory.mktemp("tmpfs")
with patch.object(TmpDirFileSystem, "tmp_dir", tmp_fs_dir):
yield TmpDirFileSystem()
shutil.rmtree(tmp_fs_dir)
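# Hedged usage sketch (editor addition): a test built on the fixtures above.
# The path and file contents are made up for illustration.
def test_mockfs_roundtrip(mockfs):
    with mockfs.open("mock://data/hello.txt", "wb") as f:
        f.write(b"hi")
    assert "data/hello.txt" in mockfs.ls("mock://data", detail=False)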
|
2,872 |
build arguments schema
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"communication link-notification-hub",
)
class LinkNotificationHub(AAZCommand):
"""Links an Azure Notification Hub to this communication service.
:example: Create a communication resource
az communication link-notification-hub -n ResourceName -g ResourceGroup --resource-id /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/testResource/providers/Microsoft.NotificationHubs/namespaces/my-hub/NotificationHubs/testHub
"""
_aaz_info = {
"version": "2023-04-01-preview",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.communication/communicationservices/{}/linknotificationhub", "2023-04-01-preview"],
]
}
def _handler(self, command_args):
super()._handler(command_args)
self._execute_operations()
return self._output()
_args_schema = None
@classmethod
def METHOD_NAME(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super().METHOD_NAME(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.name = AAZStrArg(
options=["-n", "--name"],
help="The name of the CommunicationService resource.",
required=True,
id_part="name",
fmt=AAZStrArgFormat(
pattern="^[-\w]+$",
max_length=63,
min_length=1,
),
)
_args_schema.resource_group = AAZResourceGroupNameArg(
help="Name of resource group. You can configure the default group using `az configure --defaults group=<name>`.",
required=True,
)
# define Arg Group "LinkNotificationHubParameters"
_args_schema = cls._args_schema
_args_schema.connection_string = AAZStrArg(
options=["--connection-string"],
arg_group="LinkNotificationHubParameters",
help="Connection string for the notification hub",
)
_args_schema.resource_id = AAZStrArg(
options=["--resource-id"],
arg_group="LinkNotificationHubParameters",
help="The resource ID of the notification hub",
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
self.CommunicationServicesLinkNotificationHub(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
def _output(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
return result
class CommunicationServicesLinkNotificationHub(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [200]:
return self.on_200(session)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Communication/communicationServices/{communicationServiceName}/linkNotificationHub",
**self.url_parameters
)
@property
def method(self):
return "POST"
@property
def error_format(self):
return "MgmtErrorFormat"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"communicationServiceName", self.ctx.args.name,
required=True,
),
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2023-04-01-preview",
required=True,
),
}
return parameters
@property
def header_parameters(self):
parameters = {
**self.serialize_header_param(
"Content-Type", "application/json",
),
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
@property
def content(self):
_content_value, _builder = self.new_content_builder(
self.ctx.args,
typ=AAZObjectType,
typ_kwargs={"flags": {"client_flatten": True}}
)
_builder.set_prop("connectionString", AAZStrType, ".connection_string", typ_kwargs={"flags": {"required": True}})
_builder.set_prop("resourceId", AAZStrType, ".resource_id", typ_kwargs={"flags": {"required": True}})
return self.serialize_content(_content_value)
def on_200(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
)
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZObjectType()
_schema_on_200 = cls._schema_on_200
_schema_on_200.resource_id = AAZStrType(
serialized_name="resourceId",
)
return cls._schema_on_200
class _LinkNotificationHubHelper:
"""Helper class for LinkNotificationHub"""
__all__ = ["LinkNotificationHub"]
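# Hedged usage note (editor addition): from the Azure CLI this command is invoked
# roughly as in its docstring example, e.g.
#   az communication link-notification-hub -n MyResource -g MyGroup \
#       --connection-string "<hub connection string>" \
#       --resource-id "/subscriptions/.../notificationHubs/myHub"
# Note that the request-body builder above flags both connectionString and
# resourceId as required.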
|
2,873 |
inject or
|
"""Injection context implementation."""
from collections import namedtuple
import copy
from typing import Mapping, Optional, Type
from .base import BaseInjector, InjectionError
from .injector import Injector, InjectType
from .settings import Settings
Scope = namedtuple("Scope", "name injector")
class InjectionContextError(InjectionError):
"""Base class for issues in the injection context."""
class InjectionContext(BaseInjector):
"""Manager for configuration settings and class providers."""
ROOT_SCOPE = "application"
def __init__(
self, *, settings: Mapping[str, object] = None, enforce_typing: bool = True
):
"""Initialize a `ServiceConfig`."""
self._injector = Injector(settings, enforce_typing=enforce_typing)
self._scope_name = InjectionContext.ROOT_SCOPE
self._scopes = []
@property
def injector(self) -> Injector:
"""Accessor for scope-specific injector."""
return self._injector
@injector.setter
def injector(self, injector: Injector):
"""Setter for scope-specific injector."""
self._injector = injector
@property
def scope_name(self) -> str:
"""Accessor for the current scope name."""
return self._scope_name
@scope_name.setter
def scope_name(self, scope_name: str):
"""Accessor for the current scope name."""
self._scope_name = scope_name
@property
def settings(self) -> Settings:
"""Accessor for scope-specific settings."""
return self.injector.settings
@settings.setter
def settings(self, settings: Settings):
"""Setter for scope-specific settings."""
self.injector.settings = settings
def update_settings(self, settings: Mapping[str, object]):
"""Update the scope with additional settings."""
if settings:
self.injector.settings.update(settings)
def start_scope(
self, scope_name: str, settings: Mapping[str, object] = None
) -> "InjectionContext":
"""Begin a new named scope.
Args:
scope_name: The unique name for the scope being entered
settings: An optional mapping of additional settings to apply
Returns:
A new injection context representing the scope
"""
if not scope_name:
raise InjectionContextError("Scope name must be non-empty")
if self._scope_name == scope_name:
raise InjectionContextError("Cannot re-enter scope: {}".format(scope_name))
for scope in self._scopes:
if scope.name == scope_name:
raise InjectionContextError(
"Cannot re-enter scope: {}".format(scope_name)
)
result = self.copy()
result._scopes.append(Scope(name=self.scope_name, injector=self.injector))
result._scope_name = scope_name
if settings:
result.update_settings(settings)
return result
def injector_for_scope(self, scope_name: str) -> Injector:
"""Fetch the injector for a specific scope.
Args:
scope_name: The unique scope identifier
"""
if scope_name == self.scope_name:
return self.injector
for scope in self._scopes:
if scope.name == scope_name:
return scope.injector
return None
def inject(
self,
base_cls: Type[InjectType],
settings: Mapping[str, object] = None,
) -> InjectType:
"""Get the provided instance of a given class identifier.
Args:
            base_cls: The base class to retrieve an instance of
settings: An optional mapping providing configuration to the provider
Returns:
An instance of the base class, or None
"""
return self.injector.inject(base_cls, settings)
def METHOD_NAME(
self,
base_cls: Type[InjectType],
settings: Mapping[str, object] = None,
default: Optional[InjectType] = None,
) -> Optional[InjectType]:
"""Get the provided instance of a given class identifier or default if not found.
Args:
base_cls: The base class to retrieve an instance of
settings: An optional dict providing configuration to the provider
default: default return value if no instance is found
Returns:
            An instance of the base class, or the provided default if no instance is found
"""
return self.injector.METHOD_NAME(base_cls, settings, default)
def copy(self) -> "InjectionContext":
"""Produce a copy of the injector instance."""
result = copy.copy(self)
result._injector = self.injector.copy()
result._scopes = self._scopes.copy()
return result
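# Hedged usage sketch (editor addition): nesting scopes with the classes defined
# above. The setting keys are made-up examples.
def _example_scopes():
    root = InjectionContext(settings={"debug": True})
    child = root.start_scope("request", {"request.id": "abc"})
    assert child.scope_name == "request"
    assert child.settings["request.id"] == "abc"
    # the injector registered for the outer scope remains reachable
    assert child.injector_for_scope(InjectionContext.ROOT_SCOPE) is root.injector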
|
2,874 |
test 1d
|
import unittest
import numpy
import cupy
from cupy import testing
from cupy.lib import stride_tricks
import pytest
class TestAsStrided(unittest.TestCase):
def test_as_strided(self):
a = cupy.array([1, 2, 3, 4])
a_view = stride_tricks.as_strided(
a, shape=(2,), strides=(2 * a.itemsize,))
expected = cupy.array([1, 3])
testing.assert_array_equal(a_view, expected)
a = cupy.array([1, 2, 3, 4])
a_view = stride_tricks.as_strided(
a, shape=(3, 4), strides=(0, 1 * a.itemsize))
expected = cupy.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]])
testing.assert_array_equal(a_view, expected)
@testing.numpy_cupy_array_equal()
def test_rolling_window(self, xp):
a = testing.shaped_arange((3, 4), xp)
a_rolling = rolling_window(a, 2, 0)
return a_rolling
class TestSlidingWindowView(unittest.TestCase):
@testing.numpy_cupy_array_equal()
def METHOD_NAME(self, xp):
arr = testing.shaped_arange((3, 4), xp)
window_size = 2
arr_view = xp.lib.stride_tricks.sliding_window_view(
arr, window_size, 0)
assert arr_view.strides == (16, 4, 16)
return arr_view
@testing.numpy_cupy_array_equal()
def test_2d(self, xp):
arr = testing.shaped_arange((3, 4), xp)
window_shape = (2, 2)
arr_view = xp.lib.stride_tricks.sliding_window_view(
arr, window_shape=window_shape
)
assert arr_view.strides == (16, 4, 16, 4)
return arr_view
@testing.numpy_cupy_array_equal()
def test_2d_with_axis(self, xp):
arr = testing.shaped_arange((3, 4), xp)
window_shape = 3
axis = 1
arr_view = xp.lib.stride_tricks.sliding_window_view(
arr, window_shape, axis)
assert arr_view.strides == (16, 4, 4)
return arr_view
@testing.numpy_cupy_array_equal()
def test_2d_multi_axis(self, xp):
arr = testing.shaped_arange((3, 4), xp)
window_shape = (2, 3)
axis = (0, 1)
arr_view = xp.lib.stride_tricks.sliding_window_view(
arr, window_shape, axis)
assert arr_view.strides == (16, 4, 16, 4)
return arr_view
def test_0d(self):
for xp in (numpy, cupy):
# Create a 0-D array (scalar) for testing
arr = xp.array(42)
# Sliding window with window size 1
window_size = 1
# Test if the correct ValueError is raised!
with pytest.raises(ValueError, match="axis 0 is out of bounds"):
xp.lib.stride_tricks.sliding_window_view(arr, window_size, 0)
def test_window_shape_axis_length_mismatch(self):
for xp in (numpy, cupy):
x = xp.arange(24).reshape((2, 3, 4))
window_shape = (2, 2)
axis = None
# Test if ValueError is raised when len(window_shape) != len(axis)
with pytest.raises(ValueError, match="Since axis is `None`"):
xp.lib.stride_tricks.sliding_window_view(x, window_shape, axis)
@testing.numpy_cupy_array_equal()
def test_arraylike_input(self, xp):
x = [0., 1., 2., 3., 4.]
arr_view = xp.lib.stride_tricks.sliding_window_view(x, 2)
assert arr_view.strides == (8, 8)
return arr_view
def test_writeable_views_not_supported(self):
x = cupy.arange(24).reshape((2, 3, 4))
window_shape = (2, 2)
axis = None
writeable = True
with self.assertRaises(NotImplementedError):
stride_tricks.sliding_window_view(
x, window_shape, axis, writeable=writeable
)
def rolling_window(a, window, axis=-1):
"""
Make an ndarray with a rolling window along axis.
This function is taken from https://github.com/numpy/numpy/pull/31
but slightly modified to accept axis option.
"""
a = numpy.swapaxes(a, axis, -1)
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
if isinstance(a, numpy.ndarray):
rolling = numpy.lib.stride_tricks.as_strided(
a, shape=shape, strides=strides)
elif isinstance(a, cupy.ndarray):
rolling = stride_tricks.as_strided(a, shape=shape, strides=strides)
return rolling.swapaxes(-2, axis)
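# Hedged illustration (editor addition): the shape behaviour the tests above rely
# on, shown with plain NumPy. One trailing window axis is appended per entry in
# window_shape, and each windowed axis shrinks to size - window + 1.
def _example_sliding_window_shapes():
    x = numpy.arange(12).reshape(3, 4)
    assert numpy.lib.stride_tricks.sliding_window_view(x, 2, axis=0).shape == (2, 4, 2)
    assert numpy.lib.stride_tricks.sliding_window_view(x, (2, 2)).shape == (2, 3, 2, 2)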
|
2,875 |
check file
|
from __future__ import annotations
import argparse
import concurrent.futures
import json
import logging
import os
import re
import subprocess
import sys
import time
from enum import Enum
from typing import Any, BinaryIO, List, NamedTuple, Optional, Pattern
IS_WINDOWS: bool = os.name == "nt"
def eprint(*args: Any, **kwargs: Any) -> None:
print(*args, file=sys.stderr, flush=True, **kwargs)
class LintSeverity(str, Enum):
ERROR = "error"
WARNING = "warning"
ADVICE = "advice"
DISABLED = "disabled"
class LintMessage(NamedTuple):
path: str
line: Optional[int]
char: Optional[int]
code: str
severity: LintSeverity
name: str
original: Optional[str]
replacement: Optional[str]
description: Optional[str]
def as_posix(name: str) -> str:
return name.replace("\\", "/") if IS_WINDOWS else name
SYNTAX_ERROR_ARROW_RE: Pattern[str] = re.compile(
r"(?m)^( +--> )(.+)(:(?P<line>\d+):(?P<column>\d+))\n"
)
SYNTAX_ERROR_PARSE_RE: Pattern[str] = re.compile(r"(?m)^failed to parse .*\n")
def strip_path_from_error(error: str) -> str:
# Remove full paths from the description to have deterministic messages.
error = SYNTAX_ERROR_ARROW_RE.sub("", error, count=1)
error = SYNTAX_ERROR_PARSE_RE.sub("", error, count=1)
return error
def run_command(
args: list[str],
*,
stdin: BinaryIO | None = None,
check: bool = False,
) -> subprocess.CompletedProcess[bytes]:
logging.debug("$ %s", " ".join(args))
start_time = time.monotonic()
try:
return subprocess.run(
args,
capture_output=True,
shell=False,
stdin=stdin,
check=check,
)
finally:
end_time = time.monotonic()
logging.debug("took %dms", (end_time - start_time) * 1000)
def METHOD_NAME(
filename: str,
binary: str,
config_path: str,
) -> List[LintMessage]:
try:
with open(filename, "rb") as f:
original = f.read()
with open(filename, "rb") as f:
proc = run_command(
[
binary,
"--config-path",
config_path,
"--emit=stdout",
"--quiet",
],
stdin=f,
check=True,
)
except (OSError, subprocess.CalledProcessError) as err:
# https://github.com/rust-lang/rustfmt#running
# TODO: Fix the syntax error regexp to handle multiple issues and
# to handle the empty result case.
if (
isinstance(err, subprocess.CalledProcessError)
and err.returncode == 1
and err.stderr
):
line = None
char = None
description = err.stderr.decode("utf-8")
match = SYNTAX_ERROR_ARROW_RE.search(description)
if match:
line = int(match["line"])
char = int(match["column"])
description = strip_path_from_error(description)
return [
LintMessage(
path=filename,
line=line,
char=char,
code="RUSTFMT",
severity=LintSeverity.ERROR,
name="parsing-error",
original=None,
replacement=None,
description=description,
)
]
return [
LintMessage(
path=filename,
line=None,
char=None,
code="RUSTFMT",
severity=LintSeverity.ERROR,
name="command-failed",
original=None,
replacement=None,
description=(
f"Failed due to {err.__class__.__name__}:\n{err}"
if not isinstance(err, subprocess.CalledProcessError)
else (
"COMMAND (exit code {returncode})\n"
"{command}\n\n"
"STDERR\n{stderr}\n\n"
"STDOUT\n{stdout}"
).format(
returncode=err.returncode,
command=" ".join(as_posix(x) for x in err.cmd),
stderr=err.stderr.decode("utf-8").strip() or "(empty)",
stdout=err.stdout.decode("utf-8").strip() or "(empty)",
)
),
)
]
replacement = proc.stdout
if original == replacement:
return []
if proc.stderr.startswith(b"error: "):
clean_err = strip_path_from_error(proc.stderr.decode("utf-8")).strip()
return [
LintMessage(
path=filename,
line=None,
char=None,
code="RUSTFMT",
severity=LintSeverity.WARNING,
name="rustfmt-bug",
original=None,
replacement=None,
description=(
"Possible rustfmt bug. "
"rustfmt returned error output but didn't fail:\n{}"
).format(clean_err),
)
]
return [
LintMessage(
path=filename,
line=1,
char=1,
code="RUSTFMT",
severity=LintSeverity.WARNING,
name="format",
original=original.decode("utf-8"),
replacement=replacement.decode("utf-8"),
description="See https://github.com/rust-lang/rustfmt#tips",
)
]
def main() -> None:
parser = argparse.ArgumentParser(
description="Format rust files with rustfmt.",
fromfile_prefix_chars="@",
)
parser.add_argument(
"--binary",
required=True,
help="rustfmt binary path",
)
parser.add_argument(
"--config-path",
required=True,
help="rustfmt config path",
)
parser.add_argument(
"--verbose",
action="store_true",
help="verbose logging",
)
parser.add_argument(
"filenames",
nargs="+",
help="paths to lint",
)
args = parser.parse_args()
logging.basicConfig(
format="<%(threadName)s:%(levelname)s> %(message)s",
level=logging.NOTSET
if args.verbose
else logging.DEBUG
if len(args.filenames) < 1000
else logging.INFO,
stream=sys.stderr,
)
with concurrent.futures.ThreadPoolExecutor(
max_workers=os.cpu_count(),
thread_name_prefix="Thread",
) as executor:
futures = {
executor.submit(METHOD_NAME, x, args.binary, args.config_path): x
for x in args.filenames
}
for future in concurrent.futures.as_completed(futures):
try:
for lint_message in future.result():
print(json.dumps(lint_message._asdict()), flush=True)
except Exception:
logging.critical('Failed at "%s".', futures[future])
raise
if __name__ == "__main__":
main()
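# Hedged usage note (editor addition): the linter is driven from the command line,
# e.g. (the script name depends on where this module is saved):
#   python rustfmt_linter.py --binary rustfmt --config-path rustfmt.toml src/lib.rs
# Each LintMessage is printed as one JSON object per line on stdout.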
|
2,876 |
app
|
import io
import json
import uuid
from contextlib import redirect_stdout
from datetime import datetime
from pathlib import Path
import boltons.urlutils
import jwt
import pytest
import yaml
from decouple import config
from psycopg2 import IntegrityError
from sqlalchemy.exc import ProgrammingError
from ereuse_devicehub import ereuse_utils
from ereuse_devicehub.api.views import api
from ereuse_devicehub.client import Client, UserClient, UserClientFlask
from ereuse_devicehub.config import DevicehubConfig
from ereuse_devicehub.db import db
from ereuse_devicehub.devicehub import Devicehub
from ereuse_devicehub.inventory.views import devices
from ereuse_devicehub.labels.views import labels
from ereuse_devicehub.mail.flask_mail import Mail
from ereuse_devicehub.resources.agent.models import Person
from ereuse_devicehub.resources.enums import SessionType
from ereuse_devicehub.resources.tag import Tag
from ereuse_devicehub.resources.user.models import Session, User
from ereuse_devicehub.views import core
from ereuse_devicehub.workbench.views import workbench
STARTT = datetime(year=2000, month=1, day=1, hour=1)
"""A dummy starting time to use in tests."""
ENDT = datetime(year=2000, month=1, day=1, hour=2)
"""A dummy ending time to use in tests."""
T = {'start_time': STARTT, 'end_time': ENDT}
"""A dummy start_time/end_time to use as function keywords."""
P = config('JWT_PASS', '')
class TestConfig(DevicehubConfig):
SQLALCHEMY_DATABASE_URI = 'postgresql://dhub:ereuse@localhost/dh_test'
TESTING = True
SERVER_NAME = 'localhost'
TMP_SNAPSHOTS = '/tmp/snapshots'
TMP_LIVES = '/tmp/lives'
EMAIL_ADMIN = '[email protected]'
PATH_DOCUMENTS_STORAGE = '/tmp/trade_documents'
JWT_PASS = config('JWT_PASS', '')
MAIL_SUPPRESS_SEND = True
@pytest.fixture(scope='session')
def config():
return TestConfig()
@pytest.fixture(scope='session')
def _app(config: TestConfig) -> Devicehub:
# dh_config = DevicehubConfig()
# config = TestConfig(dh_config)
METHOD_NAME = Devicehub(inventory='test', config=config, db=db)
METHOD_NAME.register_blueprint(core)
METHOD_NAME.register_blueprint(devices)
METHOD_NAME.register_blueprint(labels)
METHOD_NAME.register_blueprint(api)
METHOD_NAME.register_blueprint(workbench)
METHOD_NAME.config["SQLALCHEMY_RECORD_QUERIES"] = True
METHOD_NAME.config['PROFILE'] = True
METHOD_NAME.config['SCHEMA'] = 'test'
# app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[30])
mail = Mail(METHOD_NAME)
METHOD_NAME.mail = mail
return METHOD_NAME
@pytest.fixture(scope='session')
def _app2(config: TestConfig) -> Devicehub:
return Devicehub(inventory='test', config=config, db=db)
@pytest.fixture()
def METHOD_NAME(request, _app: Devicehub) -> Devicehub:
# More robust than 'yield'
def _drop(*args, **kwargs):
with _app.app_context():
db.drop_all()
def _init():
_app.init_db(
name='Test Inventory',
org_name='FooOrg',
org_id='foo-org-id',
tag_url=boltons.urlutils.URL('https://example.com'),
tag_token=uuid.UUID('52dacef0-6bcb-4919-bfed-f10d2c96ecee'),
erase=False,
common=True,
)
with _app.app_context():
try:
with redirect_stdout(io.StringIO()):
_init()
except (ProgrammingError, IntegrityError, AssertionError):
            print('Database was not correctly emptied. Re-emptying and re-installing...')
_drop()
_init()
request.addfinalizer(_drop)
return _app
@pytest.fixture()
def client(METHOD_NAME: Devicehub) -> Client:
return METHOD_NAME.test_client()
@pytest.fixture()
def app_context(METHOD_NAME: Devicehub):
with METHOD_NAME.app_context():
yield
@pytest.fixture()
def user(METHOD_NAME: Devicehub) -> UserClient:
"""Gets a client with a logged-in dummy user."""
with METHOD_NAME.app_context():
password = 'foo'
user = create_user(password=password)
client = UserClient(
METHOD_NAME, user.email, password, response_wrapper=METHOD_NAME.response_class
)
client.login()
return client
@pytest.fixture()
def user2(METHOD_NAME: Devicehub) -> UserClient:
"""Gets a client with a logged-in dummy user."""
with METHOD_NAME.app_context():
password = 'foo'
email = '[email protected]'
user = create_user(email=email, password=password)
client = UserClient(
METHOD_NAME, user.email, password, response_wrapper=METHOD_NAME.response_class
)
client.login()
return client
@pytest.fixture()
def user3(METHOD_NAME: Devicehub) -> UserClientFlask:
"""Gets a client with a logged-in dummy user."""
with METHOD_NAME.app_context():
password = 'foo'
user = create_user(password=password)
client = UserClientFlask(METHOD_NAME, user.email, password)
return client
@pytest.fixture()
def user4(METHOD_NAME: Devicehub) -> UserClient:
"""Gets a client with a logged-in dummy user."""
with METHOD_NAME.app_context():
password = 'foo'
email = '[email protected]'
user = create_user(email=email, password=password)
client = UserClientFlask(METHOD_NAME, user.email, password)
return client
def create_user(email='[email protected]', password='foo') -> User:
user = User(email=email, password=password)
user.individuals.add(Person(name='Timmy'))
session_external = Session(user=user, type=SessionType.External)
session_internal = Session(user=user, type=SessionType.Internal)
db.session.add(user)
db.session.add(session_internal)
db.session.add(session_external)
db.session.commit()
return user
@pytest.fixture()
def auth_app_context(METHOD_NAME: Devicehub):
"""Creates an app context with a set user."""
with METHOD_NAME.app_context():
user = create_user()
class Auth: # Mock
username = user.token
password = ''
METHOD_NAME.auth.perform_auth(Auth())
yield METHOD_NAME
def json_encode(dev: dict) -> dict:
    """JWT-encode a snapshot payload for submission."""
data = {"type": "Snapshot"}
data['data'] = jwt.encode(
dev, P, algorithm="HS256", json_encoder=ereuse_utils.JSONEncoder
)
return data
def yaml2json(name: str) -> dict:
"""Opens and parses a YAML file from the ``files`` subdir."""
with Path(__file__).parent.joinpath('files').joinpath(name + '.yaml').open() as f:
return yaml.load(f)
def file(name: str) -> dict:
"""Opens and parses a YAML file from the ``files`` subdir. And decode"""
return json_encode(yaml2json(name))
def file_json(name):
with Path(__file__).parent.joinpath('files').joinpath(name).open() as f:
return json.loads(f.read())
def file_workbench(name: str) -> dict:
"""Opens and parses a YAML file from the ``files`` subdir."""
with Path(__file__).parent.joinpath('workbench_files').joinpath(
name + '.json'
).open() as f:
return yaml.load(f)
@pytest.fixture()
def tag_id(METHOD_NAME: Devicehub) -> str:
"""Creates a tag and returns its id."""
with METHOD_NAME.app_context():
if User.query.count():
user = User.query.one()
else:
user = create_user()
t = Tag(id='foo', owner_id=user.id)
db.session.add(t)
db.session.commit()
return t.id
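# Hedged usage sketch (editor addition): a minimal test using the fixtures above.
# The email address is an arbitrary example value.
def test_create_user_example(app_context):
    u = create_user(email='[email protected]')
    assert u.email == '[email protected]'
    assert User.query.filter_by(email='[email protected]').count() == 1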
|
2,877 |
invoices search results line
|
from django.template import Library
from django.conf import settings
from tendenci.apps.site_settings.utils import get_setting
register = Library()
@register.inclusion_tag("invoices/nav.html", takes_context=True)
def invoice_nav(context, invoice=None):
context.update({
"nav_object": invoice,
})
return context
@register.inclusion_tag("invoices/invoice_item.html")
def METHOD_NAME(request, invoice):
obj = invoice.get_object()
search_line_display = None
if invoice.object_type:
from django.template.loader import render_to_string
from django.template import TemplateDoesNotExist
app_label = invoice.object_type.app_label
template_name = "%s/invoice_search_result_line.html" % (app_label)
try:
search_line_display = render_to_string(
template_name=template_name,
context={'obj':obj,'invoice':invoice},
request=request
)
except (TemplateDoesNotExist, IOError):
pass
return {'request':request, 'invoice':invoice, 'obj':obj, 'search_line_display':search_line_display}
@register.inclusion_tag("invoices/search_line_header.html", takes_context=True)
def invoices_search_line_header(context, request, invoice, obj_color):
context.update({'request': request,
'invoice': invoice,
'obj_color': obj_color})
return context
@register.inclusion_tag("invoices/search-form.html", takes_context=True)
def invoice_search(context):
return context
@register.inclusion_tag("invoices/top_nav_items.html", takes_context=True)
def invoice_current_app(context, user, invoice=None):
context.update({
"app_object": invoice,
"user": user
})
return context
# display object on invoice view
@register.inclusion_tag("invoices/object_display.html")
def invoice_object_display(request, invoice):
obj = invoice.get_object()
object_display = None
if invoice.object_type:
from django.template.loader import render_to_string
from django.template import TemplateDoesNotExist
app_label = invoice.object_type.app_label
model = invoice.object_type.model
# since membership app has 2 different associated invoices
if app_label == 'memberships' and model == 'membershipset':
template_name = "%s/invoice_view_display2.html" % (app_label)
else:
template_name = "%s/invoice_view_display.html" % (app_label)
try:
object_display = render_to_string(
template_name=template_name,
context={'obj': obj, 'invoice': invoice},
request=request
)
except (TemplateDoesNotExist, IOError):
pass
context = {
'request': request,
'invoice': invoice,
'obj': obj,
'object_display': object_display
}
return context
# display invoice total on invoice view
@register.inclusion_tag("invoices/total_display.html")
def invoice_total_display(request, invoice):
tmp_total = 0
payment_method = ""
if invoice.variance and invoice.variance != 0:
tmp_total = invoice.subtotal
if invoice.tax:
tmp_total += invoice.tax
if invoice.shipping:
tmp_total += invoice.shipping
if invoice.shipping_surcharge:
tmp_total += invoice.shipping_surcharge
if invoice.box_and_packing:
tmp_total += invoice.box_and_packing
if invoice.balance <= 0:
if invoice.payment_set:
payment_set = invoice.payment_set.order_by('-id')
if payment_set:
payment = payment_set[0]
payment_method = payment.method
if payment.check_number:
payment_method = f'{payment_method} ({payment.check_number})'
merchant_login = get_setting("site", "global", "merchantaccount") != 'asdf asdf asdf'
context = {
'request': request,
'invoice': invoice,
'tmp_total': tmp_total,
'payment_method': payment_method,
'merchant_login': merchant_login
}
return context
# display payment history on invoice view
@register.inclusion_tag("invoices/payment_history.html")
def payment_history_display(request, invoice):
payments = invoice.payment_set.order_by('-id')
refunds = invoice.refund_set.order_by('-id')
return {'request':request,
'invoice':invoice,
'payments': payments,
'refunds': refunds}
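# Hedged usage note (editor addition): these inclusion tags are used from Django
# templates roughly as below; the {% load %} name depends on this module's file
# name and is an assumption.
#   {% load invoice_tags %}
#   {% invoice_nav invoice %}
#   {% invoice_total_display request invoice %}
#   {% payment_history_display request invoice %}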
|
2,878 |
visit function def
|
"""
Patch Hygiene Lint
^^^^^^^^^^^^^^^^^^
Ensures that the order of identifiers between each hardfork is consistent.
"""
import ast
from typing import List, Optional, OrderedDict, Sequence
from ethereum_spec_tools.forks import Hardfork
from ethereum_spec_tools.lint import Diagnostic, Lint, walk_sources
class PatchHygiene(Lint):
"""
Ensures that the order of identifiers between each hardfork is consistent.
"""
def lint(
self, forks: List[Hardfork], position: int
) -> Sequence[Diagnostic]:
"""
Walks the sources for each hardfork and emits Diagnostic messages.
"""
if position == 0:
# Nothing to compare against!
return []
all_previous = dict(walk_sources(forks[position - 1]))
all_current = dict(walk_sources(forks[position]))
items = (
(k, v, all_previous.get(k, None)) for (k, v) in all_current.items()
)
diagnostics: List[Diagnostic] = []
for (name, current, previous) in items:
diagnostics += self.compare(name, current, previous)
return diagnostics
def compare(
self, name: str, current_source: str, previous_source: Optional[str]
) -> List[Diagnostic]:
"""
Compares two strings containing Python source and emits diagnostic
messages if any identifiers have changed relative positions.
"""
if previous_source is None:
# Entire file is new, so nothing to compare!
return []
current_nodes = self._parse(current_source, _Visitor(), "items")
previous_nodes = {
item: idx
for (idx, item) in enumerate(
self._parse(previous_source, _Visitor(), "items")
)
}
diagnostics: List[Diagnostic] = []
maximum = None
for item in current_nodes:
previous_position = previous_nodes.get(item)
if previous_position is None:
continue
if maximum is None or previous_position > maximum:
maximum = previous_position
elif previous_position <= maximum:
diagnostic = Diagnostic(
message=(
f"the item `{item}` in `{name}` has changed "
"relative positions"
)
)
diagnostics.append(diagnostic)
return diagnostics
class _Visitor(ast.NodeVisitor):
"""
Visits nodes in a syntax tree and collects functions, classes, and
assignments.
"""
path: List[str]
_items: "OrderedDict[str, None]"
in_assign: int
def __init__(self) -> None:
self.path = []
self._items = OrderedDict()
self.in_assign = 0
def _insert(self, item: str) -> None:
item = ".".join(self.path + [item])
if item in self._items:
raise ValueError(f"duplicate path {item}")
self._items[item] = None
@property
def items(self) -> Sequence[str]:
"""
Sequence of all identifiers found while visiting the source.
"""
return list(self._items.keys())
def visit_AsyncFunctionDef(self, function: ast.AsyncFunctionDef) -> None:
"""
Visit an asynchronous function.
"""
self._insert(function.name)
# Explicitly don't visit the children of functions.
def METHOD_NAME(self, function: ast.FunctionDef) -> None:
"""
Visit a function.
"""
self._insert(function.name)
# Explicitly don't visit the children of functions.
def visit_ClassDef(self, klass: ast.ClassDef) -> None:
"""
Visit a class.
"""
self._insert(klass.name)
self.path.append(klass.name)
self.generic_visit(klass)
got = self.path.pop()
assert klass.name == got
def visit_Assign(self, assign: ast.Assign) -> None:
"""
Visit an assignment.
"""
self.in_assign += 1
for target in assign.targets:
self.visit(target)
self.in_assign -= 1
self.visit(assign.value)
def visit_AnnAssign(self, assign: ast.AnnAssign) -> None:
"""
Visit an annotated assignment.
"""
self.in_assign += 1
self.visit(assign.target)
self.in_assign -= 1
self.visit(assign.annotation)
if assign.value is not None:
self.visit(assign.value)
def visit_Name(self, identifier: ast.Name) -> None:
"""
Visit an identifier.
"""
if self.in_assign > 0:
self._insert(identifier.id)
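# Hedged usage sketch (editor addition): what the visitor collects for a tiny,
# made-up module (a module-level assignment plus a class attribute).
def _example_visitor():
    tree = ast.parse("X = 1\nclass C:\n    y = 2\n")
    visitor = _Visitor()
    visitor.visit(tree)
    assert visitor.items == ["X", "C", "C.y"]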
|
2,879 |
register elementwise op
|
import torch
import torch.nn.functional as F
from torch import Tensor
from colossalai.tensor import ColoTensor, ColoTensorSpec
from colossalai.tensor.op_wrapper import colo_op_impl
from ._utils import GeneralTensor, convert_to_colo_tensor
def METHOD_NAME(op):
@colo_op_impl(op)
def elementwise_op(input_tensor: GeneralTensor, *args, **kwargs):
"""
Handles ``__torch_function__`` dispatch for the elementwise op such
as ``torch.nn.functional.gelu`` or ``torch.nn.functional.relu``.
This method computes on either a normal tensor or a sharded tensor.
"""
if 'inplace' in kwargs:
# TODO(jiaruifang) inplace will cause bugs
input_tensor = input_tensor.clone()
return op(input_tensor, *args, **kwargs)
else:
output = op(input_tensor, *args, **kwargs)
# return output
if isinstance(input_tensor, ColoTensor):
if isinstance(output, str):
return output
if not isinstance(output, torch.Tensor):
raise NotImplementedError
return ColoTensor.from_torch_tensor(output,
spec=ColoTensorSpec(input_tensor.get_process_group(),
dist_attr=input_tensor.dist_spec))
# @colo_op_impl(torch.relu_)
# def elementwise_op(input_tensor):
# torch.relu_(input_tensor.data)
# return input_tensor
# @colo_op_impl(Tensor.add_)
# def elementwise_op(input_tensor: ColoTensor, *args, **kwargs):
# input_tensor = input_tensor.data.add_(*args, **kwargs)
# return input_tensor
# Tensor op
METHOD_NAME(Tensor.abs)
METHOD_NAME(Tensor.absolute)
METHOD_NAME(Tensor.acos)
METHOD_NAME(Tensor.arccos)
METHOD_NAME(Tensor.angle)
METHOD_NAME(Tensor.asin)
METHOD_NAME(Tensor.arcsin)
METHOD_NAME(Tensor.atan)
METHOD_NAME(Tensor.arctan)
METHOD_NAME(Tensor.all)
METHOD_NAME(Tensor.any)
METHOD_NAME(Tensor.bernoulli)
METHOD_NAME(Tensor.bfloat16)
METHOD_NAME(Tensor.bitwise_not)
METHOD_NAME(Tensor.bool)
METHOD_NAME(Tensor.byte)
METHOD_NAME(Tensor.ceil)
METHOD_NAME(Tensor.char)
METHOD_NAME(Tensor.clamp)
METHOD_NAME(Tensor.clamp_max)
METHOD_NAME(Tensor.clamp_min)
METHOD_NAME(Tensor.clip)
METHOD_NAME(Tensor.clone)
METHOD_NAME(Tensor.contiguous)
METHOD_NAME(Tensor.copysign)
METHOD_NAME(Tensor.cos)
METHOD_NAME(Tensor.cosh)
METHOD_NAME(Tensor.acosh)
METHOD_NAME(Tensor.arccosh)
METHOD_NAME(Tensor.cpu)
METHOD_NAME(Tensor.cuda)
METHOD_NAME(Tensor.deg2rad)
METHOD_NAME(Tensor.detach)
METHOD_NAME(Tensor.digamma)
METHOD_NAME(Tensor.double)
METHOD_NAME(Tensor.erf)
METHOD_NAME(Tensor.erfc)
METHOD_NAME(Tensor.erfinv)
METHOD_NAME(Tensor.exp)
METHOD_NAME(Tensor.expm1)
METHOD_NAME(Tensor.fix)
METHOD_NAME(Tensor.trunc)
METHOD_NAME(Tensor.float)
METHOD_NAME(Tensor.float_power)
METHOD_NAME(Tensor.floor)
METHOD_NAME(Tensor.frac)
METHOD_NAME(Tensor.half)
METHOD_NAME(Tensor.hardshrink)
METHOD_NAME(Tensor.heaviside)
METHOD_NAME(Tensor.i0)
METHOD_NAME(Tensor.int)
METHOD_NAME(Tensor.isfinite)
METHOD_NAME(Tensor.isinf)
METHOD_NAME(Tensor.isposinf)
METHOD_NAME(Tensor.isneginf)
METHOD_NAME(Tensor.isnan)
METHOD_NAME(Tensor.lgamma)
METHOD_NAME(Tensor.log)
METHOD_NAME(Tensor.log10)
METHOD_NAME(Tensor.log1p)
METHOD_NAME(Tensor.log2)
METHOD_NAME(Tensor.logical_not)
METHOD_NAME(Tensor.logit)
METHOD_NAME(Tensor.long)
METHOD_NAME(Tensor.nan_to_num)
METHOD_NAME(Tensor.neg)
METHOD_NAME(Tensor.negative)
METHOD_NAME(Tensor.positive)
METHOD_NAME(Tensor.pow)
METHOD_NAME(Tensor.rad2deg)
METHOD_NAME(Tensor.reciprocal)
METHOD_NAME(Tensor.round)
METHOD_NAME(Tensor.rsqrt)
METHOD_NAME(Tensor.short)
METHOD_NAME(Tensor.sigmoid)
METHOD_NAME(Tensor.sign)
METHOD_NAME(Tensor.signbit)
METHOD_NAME(Tensor.sgn)
METHOD_NAME(Tensor.sin)
METHOD_NAME(Tensor.sinc)
METHOD_NAME(Tensor.sinh)
METHOD_NAME(Tensor.asinh)
METHOD_NAME(Tensor.arcsinh)
METHOD_NAME(Tensor.sqrt)
METHOD_NAME(Tensor.square)
METHOD_NAME(Tensor.to)
METHOD_NAME(Tensor.tan)
METHOD_NAME(Tensor.tanh)
METHOD_NAME(Tensor.atanh)
METHOD_NAME(Tensor.arctanh)
METHOD_NAME(Tensor.type)
METHOD_NAME(Tensor.type_as)
# torch OP
METHOD_NAME(torch.abs)
METHOD_NAME(torch.absolute)
METHOD_NAME(torch.acos)
METHOD_NAME(torch.arccos)
METHOD_NAME(torch.angle)
METHOD_NAME(torch.asin)
METHOD_NAME(torch.arcsin)
METHOD_NAME(torch.atan)
METHOD_NAME(torch.arctan)
METHOD_NAME(torch.all)
METHOD_NAME(torch.any)
METHOD_NAME(torch.bernoulli)
METHOD_NAME(torch.bitwise_not)
METHOD_NAME(torch.ceil)
METHOD_NAME(torch.clamp)
METHOD_NAME(torch.clamp_max)
METHOD_NAME(torch.clamp_min)
METHOD_NAME(torch.clip)
METHOD_NAME(torch.clone)
METHOD_NAME(torch.copysign)
METHOD_NAME(torch.cos)
METHOD_NAME(torch.cosh)
METHOD_NAME(torch.acosh)
METHOD_NAME(torch.arccosh)
METHOD_NAME(torch.deg2rad)
METHOD_NAME(torch.digamma)
METHOD_NAME(torch.erf)
METHOD_NAME(torch.erfc)
METHOD_NAME(torch.erfinv)
METHOD_NAME(torch.exp)
METHOD_NAME(torch.expm1)
METHOD_NAME(torch.fix)
METHOD_NAME(torch.trunc)
METHOD_NAME(torch.float_power)
METHOD_NAME(torch.floor)
METHOD_NAME(torch.frac)
METHOD_NAME(torch.hardshrink)
METHOD_NAME(torch.heaviside)
METHOD_NAME(torch.i0)
METHOD_NAME(torch.isfinite)
METHOD_NAME(torch.isinf)
METHOD_NAME(torch.isposinf)
METHOD_NAME(torch.isneginf)
METHOD_NAME(torch.isnan)
METHOD_NAME(torch.lgamma)
METHOD_NAME(torch.log)
METHOD_NAME(torch.log10)
METHOD_NAME(torch.log1p)
METHOD_NAME(torch.log2)
METHOD_NAME(torch.logical_not)
METHOD_NAME(torch.logit)
METHOD_NAME(torch.nan_to_num)
METHOD_NAME(torch.neg)
METHOD_NAME(torch.negative)
METHOD_NAME(torch.positive)
METHOD_NAME(torch.pow)
METHOD_NAME(torch.rad2deg)
METHOD_NAME(torch.reciprocal)
METHOD_NAME(torch.round)
METHOD_NAME(torch.rsqrt)
METHOD_NAME(torch.sigmoid)
METHOD_NAME(torch.sign)
METHOD_NAME(torch.signbit)
METHOD_NAME(torch.sgn)
METHOD_NAME(torch.sin)
METHOD_NAME(torch.sinc)
METHOD_NAME(torch.sinh)
METHOD_NAME(torch.asinh)
METHOD_NAME(torch.arcsinh)
METHOD_NAME(torch.sqrt)
METHOD_NAME(torch.square)
METHOD_NAME(torch.tan)
METHOD_NAME(torch.tanh)
METHOD_NAME(torch.atanh)
METHOD_NAME(torch.arctanh)
METHOD_NAME(torch.zeros_like)
# nn.functional OP
METHOD_NAME(F.threshold)
METHOD_NAME(F.relu)
METHOD_NAME(F.hardtanh)
METHOD_NAME(F.hardswish)
METHOD_NAME(F.relu6)
METHOD_NAME(F.elu)
METHOD_NAME(F.selu)
METHOD_NAME(F.celu)
METHOD_NAME(F.leaky_relu)
METHOD_NAME(F.prelu)
METHOD_NAME(F.rrelu)
METHOD_NAME(F.gelu)
METHOD_NAME(F.logsigmoid)
METHOD_NAME(F.hardshrink)
METHOD_NAME(F.tanhshrink)
METHOD_NAME(F.softsign)
METHOD_NAME(F.softplus)
METHOD_NAME(F.softmin)
METHOD_NAME(F.softmax)
METHOD_NAME(F.softshrink)
METHOD_NAME(F.gumbel_softmax)
METHOD_NAME(F.log_softmax)
METHOD_NAME(F.tanh)
METHOD_NAME(F.sigmoid)
METHOD_NAME(F.hardsigmoid)
METHOD_NAME(F.silu)
METHOD_NAME(F.mish)
# TODO(ver217): dropout handles seed
METHOD_NAME(F.dropout)
METHOD_NAME(F.alpha_dropout)
METHOD_NAME(F.feature_alpha_dropout)
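# Hedged note (editor addition): each registration above wraps one op so that,
# when called on a ColoTensor, the plain torch result is re-wrapped as a
# ColoTensor carrying the input's process group and dist spec (and the input is
# cloned first when an ``inplace`` kwarg is present). A rough illustration, with
# colossalai initialisation elided:
#   x = ColoTensor.from_torch_tensor(torch.randn(4), spec=some_spec)
#   y = torch.relu(x)   # dispatched to the wrapper registered above
#   assert isinstance(y, ColoTensor)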
|
2,880 |
test get
|
# SPDX-License-Identifier: GPL-2.0-or-later
import unittest
import openrazer_daemon.device
DEVICE1_SERIAL = 'XX000000'
DEVICE1_ID = '0000:0000:0000.0000'
DEVICE2_SERIAL = 'XX000001'
DEVICE2_ID = '0000:0000:0000.0001'
class DummyDBusObject(object):
def __init__(self):
self.notify_msg = None
self.parent = None
def notify(self, msg):
self.notify_msg = msg
def register_parent(self, parent):
self.parent = parent
def notify_parent(self, msg):
self.parent.notify_parent(msg)
class DummyParentObject(object):
def __init__(self):
self.notify_msg = None
self.notify_device = None
def notify(self, device_object, msg):
self.notify_device = device_object
self.notify_msg = msg
# TODO move device_object creation to setUp
class DeviceTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_device_properties(self):
dbus_object = DummyDBusObject()
device_object = openrazer_daemon.device.Device(DEVICE1_ID, DEVICE1_SERIAL, dbus_object)
self.assertEqual(device_object.device_id, DEVICE1_ID)
self.assertEqual(device_object.serial, DEVICE1_SERIAL)
self.assertEqual(device_object.dbus, dbus_object)
def test_device_register_parent(self):
dbus_object = DummyDBusObject()
parent_object = DummyParentObject()
device_object = openrazer_daemon.device.Device(DEVICE1_ID, DEVICE1_SERIAL, dbus_object)
device_object.register_parent(parent_object)
self.assertEqual(device_object._parent, parent_object)
def test_device_notify_child(self):
msg = ('test', 1)
dbus_object = DummyDBusObject()
device_object = openrazer_daemon.device.Device(DEVICE1_ID, DEVICE1_SERIAL, dbus_object)
device_object.notify_child(msg)
self.assertEqual(dbus_object.notify_msg, msg)
def test_device_notify_parent(self):
msg = ('test', 1)
dbus_object = DummyDBusObject()
parent_object = DummyParentObject()
device_object = openrazer_daemon.device.Device(DEVICE1_ID, DEVICE1_SERIAL, dbus_object)
device_object.register_parent(parent_object)
device_object.notify_parent(msg)
self.assertEqual(parent_object.notify_msg, msg)
self.assertEqual(parent_object.notify_device, device_object)
class DeviceCollectionTest(unittest.TestCase):
def setUp(self):
self.device_collection = openrazer_daemon.device.DeviceCollection()
def test_add(self):
dbus_object = DummyDBusObject()
self.device_collection.add(DEVICE1_ID, DEVICE1_SERIAL, dbus_object)
self.assertIn(DEVICE1_ID, self.device_collection._id_map)
self.assertIn(DEVICE1_SERIAL, self.device_collection._serial_map)
device_obj_from_id = self.device_collection._id_map[DEVICE1_ID]
device_obj_from_serial = self.device_collection._serial_map[DEVICE1_SERIAL]
self.assertIs(device_obj_from_id, device_obj_from_serial)
def METHOD_NAME(self):
dbus_object = DummyDBusObject()
self.device_collection.add(DEVICE1_ID, DEVICE1_SERIAL, dbus_object)
device_obj_by_id = self.device_collection[DEVICE1_ID]
device_obj_by_serial = self.device_collection[DEVICE1_SERIAL]
self.assertIs(device_obj_by_id, device_obj_by_serial)
def test_invalid_get(self):
try:
device = self.device_collection.get('INVALID')
except IndexError:
pass
def test_contains(self):
dbus_object = DummyDBusObject()
self.device_collection.add(DEVICE1_ID, DEVICE1_SERIAL, dbus_object)
self.assertIn(DEVICE1_ID, self.device_collection)
self.assertIn(DEVICE1_SERIAL, self.device_collection)
def test_remove(self):
dbus_object = DummyDBusObject()
self.device_collection.add(DEVICE1_ID, DEVICE1_SERIAL, dbus_object)
self.assertIn(DEVICE1_ID, self.device_collection)
self.device_collection.remove(DEVICE1_ID)
self.assertNotIn(DEVICE1_ID, self.device_collection)
self.device_collection.add(DEVICE1_ID, DEVICE1_SERIAL, dbus_object)
self.assertIn(DEVICE1_ID, self.device_collection)
self.device_collection.remove(DEVICE1_SERIAL)
self.assertNotIn(DEVICE1_SERIAL, self.device_collection)
def test_items(self):
dbus_object = DummyDBusObject()
self.device_collection.add(DEVICE1_ID, DEVICE1_SERIAL, dbus_object)
device_id, device_obj1 = list(self.device_collection.id_items())[0]
device_serial, device_obj2 = list(self.device_collection.serial_items())[0]
self.assertEqual(device_id, DEVICE1_ID)
self.assertEqual(device_serial, DEVICE1_SERIAL)
self.assertIs(device_obj1, device_obj2)
def test_iter(self):
dbus_object = DummyDBusObject()
self.device_collection.add(DEVICE1_ID, DEVICE1_SERIAL, dbus_object)
devices = [self.device_collection.get(DEVICE1_ID)]
for device in self.device_collection:
devices.remove(device)
self.assertEqual(len(devices), 0)
def test_serials(self):
dbus_object1 = DummyDBusObject()
dbus_object2 = DummyDBusObject()
self.device_collection.add(DEVICE1_ID, DEVICE1_SERIAL, dbus_object1)
self.device_collection.add(DEVICE2_ID, DEVICE2_SERIAL, dbus_object2)
serials = self.device_collection.serials()
self.assertIn(DEVICE1_SERIAL, serials)
self.assertIn(DEVICE2_SERIAL, serials)
def test_devices(self):
dbus_object1 = DummyDBusObject()
dbus_object2 = DummyDBusObject()
self.device_collection.add(DEVICE1_ID, DEVICE1_SERIAL, dbus_object1)
self.device_collection.add(DEVICE2_ID, DEVICE2_SERIAL, dbus_object2)
device_list = self.device_collection.devices
available_dbus = [dbus_object1, dbus_object2]
for device in device_list:
available_dbus.remove(device.dbus)
# Ensure both dbus objects have been seen
self.assertEqual(len(available_dbus), 0)
def test_cross_device_notify(self):
dbus_object1 = DummyDBusObject()
dbus_object2 = DummyDBusObject()
msg = ('test', 1)
self.device_collection.add(DEVICE1_ID, DEVICE1_SERIAL, dbus_object1)
self.device_collection.add(DEVICE2_ID, DEVICE2_SERIAL, dbus_object2)
self.assertIs(dbus_object1.notify_msg, None)
self.assertIs(dbus_object2.notify_msg, None)
dbus_object1.notify_parent(msg)
# Ensure message gets sent to other devices and not itself
self.assertIs(dbus_object1.notify_msg, None)
self.assertIs(dbus_object2.notify_msg, msg)
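# Editor addition: allow the suite to be run directly as a script as well as via
# ``python -m unittest``.
if __name__ == '__main__':
    unittest.main()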
|
2,881 |
patch redis client
|
from __future__ import absolute_import
from sentry_sdk import Hub
from sentry_sdk.consts import OP, SPANDATA
from sentry_sdk.hub import _should_send_default_pii
from sentry_sdk.utils import (
SENSITIVE_DATA_SUBSTITUTE,
capture_internal_exceptions,
logger,
)
from sentry_sdk.integrations import Integration, DidNotEnable
from sentry_sdk._types import TYPE_CHECKING
if TYPE_CHECKING:
from typing import Any, Sequence
from sentry_sdk.tracing import Span
_SINGLE_KEY_COMMANDS = frozenset(
["decr", "decrby", "get", "incr", "incrby", "pttl", "set", "setex", "setnx", "ttl"]
)
_MULTI_KEY_COMMANDS = frozenset(["del", "touch", "unlink"])
_COMMANDS_INCLUDING_SENSITIVE_DATA = [
"auth",
]
_MAX_NUM_ARGS = 10 # Trim argument lists to this many values
_MAX_NUM_COMMANDS = 10 # Trim command lists to this many values
_DEFAULT_MAX_DATA_SIZE = 1024
def _get_safe_command(name, args):
# type: (str, Sequence[Any]) -> str
command_parts = [name]
for i, arg in enumerate(args):
if i > _MAX_NUM_ARGS:
break
name_low = name.lower()
if name_low in _COMMANDS_INCLUDING_SENSITIVE_DATA:
command_parts.append(SENSITIVE_DATA_SUBSTITUTE)
continue
arg_is_the_key = i == 0
if arg_is_the_key:
command_parts.append(repr(arg))
else:
if _should_send_default_pii():
command_parts.append(repr(arg))
else:
command_parts.append(SENSITIVE_DATA_SUBSTITUTE)
command = " ".join(command_parts)
return command
def _set_pipeline_data(
span, is_cluster, get_command_args_fn, is_transaction, command_stack
):
# type: (Span, bool, Any, bool, Sequence[Any]) -> None
span.set_tag("redis.is_cluster", is_cluster)
transaction = is_transaction if not is_cluster else False
span.set_tag("redis.transaction", transaction)
commands = []
for i, arg in enumerate(command_stack):
if i >= _MAX_NUM_COMMANDS:
break
command = get_command_args_fn(arg)
commands.append(_get_safe_command(command[0], command[1:]))
span.set_data(
"redis.commands",
{
"count": len(command_stack),
"first_ten": commands,
},
)
def patch_redis_pipeline(pipeline_cls, is_cluster, get_command_args_fn):
# type: (Any, bool, Any) -> None
old_execute = pipeline_cls.execute
def sentry_patched_execute(self, *args, **kwargs):
# type: (Any, *Any, **Any) -> Any
hub = Hub.current
if hub.get_integration(RedisIntegration) is None:
return old_execute(self, *args, **kwargs)
with hub.start_span(
op=OP.DB_REDIS, description="redis.pipeline.execute"
) as span:
with capture_internal_exceptions():
_set_pipeline_data(
span,
is_cluster,
get_command_args_fn,
self.transaction,
self.command_stack,
)
span.set_data(SPANDATA.DB_SYSTEM, "redis")
return old_execute(self, *args, **kwargs)
pipeline_cls.execute = sentry_patched_execute
def _get_redis_command_args(command):
# type: (Any) -> Sequence[Any]
return command[0]
def _parse_rediscluster_command(command):
# type: (Any) -> Sequence[Any]
return command.args
def _patch_redis(StrictRedis, client): # noqa: N803
# type: (Any, Any) -> None
METHOD_NAME(StrictRedis, is_cluster=False)
patch_redis_pipeline(client.Pipeline, False, _get_redis_command_args)
try:
strict_pipeline = client.StrictPipeline
except AttributeError:
pass
else:
patch_redis_pipeline(strict_pipeline, False, _get_redis_command_args)
try:
import redis.asyncio
except ImportError:
pass
else:
from sentry_sdk.integrations.redis.asyncio import (
patch_redis_async_client,
patch_redis_async_pipeline,
)
patch_redis_async_client(redis.asyncio.client.StrictRedis)
patch_redis_async_pipeline(redis.asyncio.client.Pipeline)
def _patch_rb():
# type: () -> None
try:
import rb.clients # type: ignore
except ImportError:
pass
else:
METHOD_NAME(rb.clients.FanoutClient, is_cluster=False)
METHOD_NAME(rb.clients.MappingClient, is_cluster=False)
METHOD_NAME(rb.clients.RoutingClient, is_cluster=False)
def _patch_rediscluster():
# type: () -> None
try:
import rediscluster # type: ignore
except ImportError:
return
METHOD_NAME(rediscluster.RedisCluster, is_cluster=True)
# up to v1.3.6, __version__ attribute is a tuple
# from v2.0.0, __version__ is a string and VERSION a tuple
version = getattr(rediscluster, "VERSION", rediscluster.__version__)
# StrictRedisCluster was introduced in v0.2.0 and removed in v2.0.0
# https://github.com/Grokzen/redis-py-cluster/blob/master/docs/release-notes.rst
if (0, 2, 0) < version < (2, 0, 0):
pipeline_cls = rediscluster.pipeline.StrictClusterPipeline
METHOD_NAME(rediscluster.StrictRedisCluster, is_cluster=True)
else:
pipeline_cls = rediscluster.pipeline.ClusterPipeline
patch_redis_pipeline(pipeline_cls, True, _parse_rediscluster_command)
class RedisIntegration(Integration):
identifier = "redis"
def __init__(self, max_data_size=_DEFAULT_MAX_DATA_SIZE):
# type: (int) -> None
self.max_data_size = max_data_size
@staticmethod
def setup_once():
# type: () -> None
try:
from redis import StrictRedis, client
except ImportError:
raise DidNotEnable("Redis client not installed")
_patch_redis(StrictRedis, client)
_patch_rb()
try:
_patch_rediscluster()
except Exception:
logger.exception("Error occurred while patching `rediscluster` library")
def _get_span_description(name, *args):
# type: (str, *Any) -> str
description = name
with capture_internal_exceptions():
description = _get_safe_command(name, args)
return description
def _set_client_data(span, is_cluster, name, *args):
# type: (Span, bool, str, *Any) -> None
span.set_data(SPANDATA.DB_SYSTEM, "redis")
span.set_tag("redis.is_cluster", is_cluster)
if name:
span.set_tag("redis.command", name)
span.set_tag(SPANDATA.DB_OPERATION, name)
if name and args:
name_low = name.lower()
if (name_low in _SINGLE_KEY_COMMANDS) or (
name_low in _MULTI_KEY_COMMANDS and len(args) == 1
):
span.set_tag("redis.key", args[0])
def METHOD_NAME(cls, is_cluster):
# type: (Any, bool) -> None
"""
This function can be used to instrument custom redis client classes or
subclasses.
"""
old_execute_command = cls.execute_command
def sentry_patched_execute_command(self, name, *args, **kwargs):
# type: (Any, str, *Any, **Any) -> Any
hub = Hub.current
integration = hub.get_integration(RedisIntegration)
if integration is None:
return old_execute_command(self, name, *args, **kwargs)
description = _get_span_description(name, *args)
data_should_be_truncated = (
integration.max_data_size and len(description) > integration.max_data_size
)
if data_should_be_truncated:
description = description[: integration.max_data_size - len("...")] + "..."
with hub.start_span(op=OP.DB_REDIS, description=description) as span:
_set_client_data(span, is_cluster, name, *args)
return old_execute_command(self, name, *args, **kwargs)
cls.execute_command = sentry_patched_execute_command
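# --- Illustrative usage sketch (added for clarity; not part of the SDK module) ---
# A minimal example of enabling this integration; the DSN is a placeholder and the
# helper below is hypothetical, defined only to show the call shape.
def _example_enable_redis_integration():
    import sentry_sdk
    sentry_sdk.init(
        dsn="https://public@sentry.example.invalid/1",  # placeholder DSN
        traces_sample_rate=1.0,
        integrations=[RedisIntegration(max_data_size=1024)],
    )
    # After init, redis execute_command calls and pipeline.execute calls are
    # recorded as OP.DB_REDIS spans with descriptions truncated to max_data_size.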
|
2,882 |
list by invoice
|
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._transactions_operations import build_list_by_invoice_request
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class TransactionsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.billing.aio.BillingManagementClient`'s
:attr:`transactions` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def METHOD_NAME(
self, billing_account_name: str, invoice_name: str, **kwargs: Any
) -> AsyncIterable["_models.Transaction"]:
"""Lists the transactions for an invoice. Transactions include purchases, refunds and Azure usage
charges.
:param billing_account_name: The ID that uniquely identifies a billing account. Required.
:type billing_account_name: str
:param invoice_name: The ID that uniquely identifies an invoice. Required.
:type invoice_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Transaction or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.billing.models.Transaction]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-05-01")) # type: Literal["2020-05-01"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.TransactionListResult]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_invoice_request(
billing_account_name=billing_account_name,
invoice_name=invoice_name,
api_version=api_version,
template_url=self.METHOD_NAME.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
request = HttpRequest("GET", next_link)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("TransactionListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
METHOD_NAME.metadata = {"url": "/providers/Microsoft.Billing/billingAccounts/{billingAccountName}/invoices/{invoiceName}/transactions"} # type: ignore
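# Illustrative usage sketch (added for clarity; not part of the generated client).
# The billing account and invoice names are placeholders; the paged operation is the
# masked method defined above, reached through the client's `transactions` attribute
# as described in the class docstring.
async def _example_iterate_invoice_transactions(client) -> None:
    async for transaction in client.transactions.METHOD_NAME(
        billing_account_name="<billing-account-id>",
        invoice_name="<invoice-id>",
    ):
        print(transaction)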
|
2,883 |
callback
|
#!/usr/bin/env python3
import enum
import os
from math import nan
import numpy as np
from matplotlib import pyplot as plt
from rospy.impl.tcpros_base import DEFAULT_BUFF_SIZE
if "ROS_NAMESPACE" not in os.environ:
os.environ["ROS_NAMESPACE"] = "/robot1"
from argparse import ArgumentParser
import cv2
import rospy
import torch
from cv_bridge import CvBridge
from rospy import ROSException
from sensor_msgs.msg import Image
from soccer_common.camera import Camera
from soccer_msgs.msg import BoundingBox, BoundingBoxes, GameState, RobotState
class Label(enum.IntEnum):
# Defines output channels of model
# Refer to class name enumeration in soccer_object_detection/config/Torso21.yaml
BALL = 0
GOALPOST = 1
ROBOT = 2
L_INTERSECTION = 3
T_INTERSECTION = 4
X_INTERSECTION = 5
TOPBAR = 6
class bcolors:
HEADER = "\033[95m"
OKBLUE = "\033[94m"
OKCYAN = "\033[96m"
OKGREEN = "\033[92m"
MAGENTA = "\u001b[35m"
WARNING = "\033[93m"
FAIL = "\033[91m"
ENDC = "\033[0m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
class ObjectDetectionNode(object):
"""
Detects balls and robots and publishes their bounding boxes.
input: 480x640x4 bgra8 -> output: 3x200x150
"""
def __init__(self, model_path):
self.SOCCER_BALL = 0
self.CONFIDENCE_THRESHOLD = rospy.get_param("~ball_confidence_threshold", 0.75)
torch.hub._validate_not_a_forked_repo = (
lambda a, b, c: True
) # https://discuss.pytorch.org/t/help-for-http-error-403-rate-limit-exceeded/125907
self.model = torch.hub.load("ultralytics/yolov5", "custom", path=model_path)
if torch.cuda.is_available():
rospy.loginfo(f"{bcolors.OKGREEN}Using CUDA for object detection{bcolors.ENDC}")
self.model.cuda()
else:
rospy.logwarn("Not using CUDA")
self.robot_name = rospy.get_namespace()[1:-1] # remove '/'
self.camera = Camera(self.robot_name)
self.camera.reset_position()
# Params
self.br = CvBridge()
self.pub_detection = rospy.Publisher("detection_image", Image, queue_size=1, latch=True)
self.pub_boundingbox = rospy.Publisher("object_bounding_boxes", BoundingBoxes, queue_size=1, latch=True)
self.image_subscriber = rospy.Subscriber(
"camera/image_raw", Image, self.METHOD_NAME, queue_size=1, buff_size=DEFAULT_BUFF_SIZE * 64
) # Large buff size (https://answers.ros.org/question/220502/image-subscriber-lag-despite-queue-1/)
self.robot_state_subscriber = rospy.Subscriber("state", RobotState, self.robot_state_callback)
self.robot_state = RobotState()
self.game_state_subscriber = rospy.Subscriber("gamestate", GameState, self.game_state_callback)
self.game_state = GameState()
def game_state_callback(self, game_state: GameState):
self.game_state = game_state
def robot_state_callback(self, robot_state: RobotState):
self.robot_state = robot_state
def METHOD_NAME(self, msg: Image):
# webots: 480x640x4 pixels
if self.robot_state.status not in [
RobotState.STATUS_LOCALIZING,
RobotState.STATUS_READY,
RobotState.ROLE_UNASSIGNED,
]:
return
if self.game_state.gameState != GameState.GAMESTATE_PLAYING:
return
rospy.loginfo_once("Object Detection Receiving image")
# width x height x channels (bgra8)
image = self.br.imgmsg_to_cv2(msg)
self.camera.reset_position(timestamp=msg.header.stamp)
# cover horizon to help robot ignore things outside field
cover_horizon_up_threshold = rospy.get_param("cover_horizon_up_threshold", 30)
h = max(self.camera.calculateHorizonCoverArea() - cover_horizon_up_threshold, 0)
if image is not None:
# 1. preprocess image
img = image[:, :, :3] # get rid of alpha channel
img = img[..., ::-1] # convert bgr to rgb
img = img[max(0, h + 1) :, :]
# 2. inference
results = self.model(img)
bbs_msg = BoundingBoxes()
id = 0
for prediction in results.xyxy[0]:
x1, y1, x2, y2, confidence, img_class = prediction.cpu().numpy()
y1 += h + 1
y2 += h + 1
if img_class in [label.value for label in Label] and confidence > self.CONFIDENCE_THRESHOLD:
bb_msg = BoundingBox()
bb_msg.xmin = round(x1) # top left of bounding box
bb_msg.ymin = round(y1)
bb_msg.xmax = round(x2) # bottom right of bounding box
bb_msg.ymax = round(y2)
bb_msg.probability = confidence
bb_msg.id = id
bb_msg.Class = str(int(img_class))
# TODO (Joanne): look at the pixels of the image in addition to the bounding box
# to calculate the likely foot coordinate (x, y)
if bb_msg.Class == "2":
# --- simple version: use the bottom portion of the bounding box to estimate the feet position ---
# only look at the lower part of the bounding box (assumption: the bounding box is of a standing robot)
# Calculate ymin value to start checking for black pixels
if bb_msg.ymax < self.camera.resolution_y - 5:
temp_ymin = round(bb_msg.ymax * 0.85 + bb_msg.ymin * 0.15)
midpoint = [(bb_msg.xmax + bb_msg.xmin) / 2, (bb_msg.ymax + temp_ymin) / 2]
bb_msg.ybase = round(midpoint[1])
bb_msg.xbase = round(midpoint[0])
bb_msg.obstacle_detected = True
bbs_msg.bounding_boxes.append(bb_msg)
id += 1
bbs_msg.header = msg.header
try:
if self.pub_detection.get_num_connections() > 0:
detection_image = np.squeeze(results.render())
detection_image = np.concatenate((np.zeros((h + 1, msg.width, 3), detection_image.dtype), detection_image))
detection_image = detection_image[..., ::-1] # convert rgb to bgr
self.pub_detection.publish(self.br.cv2_to_imgmsg(detection_image, encoding="bgr8"))
if self.pub_boundingbox.get_num_connections() > 0 and len(bbs_msg.bounding_boxes) > 0:
self.pub_boundingbox.publish(bbs_msg)
except ROSException as re:
print(re)
exit(0)
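# Illustrative sketch (added for clarity; not part of the node). The callback above
# crops the camera image below the horizon row h and shifts detections back into
# full-image coordinates by h + 1; the numbers here are hypothetical.
def _example_horizon_crop_bookkeeping():
    import numpy as np
    image = np.zeros((480, 640, 3), dtype=np.uint8)
    h = 100                              # hypothetical horizon row
    cropped = image[max(0, h + 1):, :]   # image region fed to the detector
    y1_cropped, y2_cropped = 10.0, 50.0  # detector output in cropped coordinates
    y1_full = y1_cropped + h + 1         # back to full-image coordinates
    y2_full = y2_cropped + h + 1
    return cropped.shape, (y1_full, y2_full)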
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--model", dest="model_path", default="../../models/July14.pt", help="pytorch model")
parser.add_argument("--num-feat", dest="num_feat", default=10, help="specify model size of the neural network")
args, unknown = parser.parse_known_args()
rospy.init_node("object_detector")
my_node = ObjectDetectionNode(args.model_path)
try:
rospy.spin()
except ROSException as rx:
exit(0)
|
2,884 |
test check post table version
|
import pytest
from fontTools.ttLib import TTFont
from fontbakery.status import WARN, FAIL, PASS
from fontbakery.codetesting import (
assert_PASS,
assert_results_contain,
CheckTester,
TEST_FILE,
)
from fontbakery.profiles import opentype as opentype_profile
mada_fonts = [
TEST_FILE("mada/Mada-Black.ttf"),
TEST_FILE("mada/Mada-ExtraLight.ttf"),
TEST_FILE("mada/Mada-Medium.ttf"),
TEST_FILE("mada/Mada-SemiBold.ttf"),
TEST_FILE("mada/Mada-Bold.ttf"),
TEST_FILE("mada/Mada-Light.ttf"),
TEST_FILE("mada/Mada-Regular.ttf"),
]
@pytest.fixture
def mada_ttFonts():
return [TTFont(path) for path in mada_fonts]
def test_check_family_underline_thickness(mada_ttFonts):
"""Fonts have consistent underline thickness?"""
check = CheckTester(
opentype_profile, "com.google.fonts/check/family/underline_thickness"
)
# We start with our reference Mada font family,
# which we know has the same value of post.underlineThickness
# across all of its font files, based on our inspection
# of the file contents using TTX.
#
# So the check should PASS in this case:
assert_PASS(check(mada_ttFonts), "with a good family.")
# Then we introduce the issue by setting a
# different underlineThickness value in just
# one of the font files:
value = mada_ttFonts[0]["post"].underlineThickness
incorrect_value = value + 1
mada_ttFonts[0]["post"].underlineThickness = incorrect_value
# And now re-running the check on the modified
# family should result in a FAIL:
assert_results_contain(
check(mada_ttFonts),
FAIL,
"inconsistent-underline-thickness",
"with an inconsistent family.",
)
def METHOD_NAME():
"""Font has an acceptable post table format version?"""
check = CheckTester(opentype_profile, "com.google.fonts/check/post_table_version")
# create mock fonts for post format testing
base_tt_font = TTFont(TEST_FILE("mada/Mada-Regular.ttf"))
#
# post format 2 mock font test
#
mock_post_2 = base_tt_font
mock_post_2["post"].formatType = 2
mock_post_2.reader.file.name = "post 2 mock font"
assert_PASS(check(mock_post_2), reason="with a post 2 mock font")
#
# post format 2.5 mock font test
#
mock_post_2_5 = base_tt_font
mock_post_2_5["post"].formatType = 2.5
mock_post_2_5.reader.file.name = "post 2.5 mock font"
assert_results_contain(
check(mock_post_2_5),
FAIL,
"post-table-version",
"with a font that has post format 2.5 table",
)
#
# post format 3 mock font test
#
mock_post_3 = base_tt_font
mock_post_3["post"].formatType = 3
mock_post_3.reader.file.name = "post 3 mock font"
assert_results_contain(
check(mock_post_3),
WARN,
"post-table-version",
"with a font that has post format 3 table",
)
#
# post format 4 mock font test
#
mock_post_4 = base_tt_font
mock_post_4["post"].formatType = 4
mock_post_4.reader.file.name = "post 4 mock font"
assert_results_contain(
check(mock_post_4),
FAIL,
"post-table-version",
"with a font that has post format 4 table",
)
#
# post format 2/3 OTF CFF mock font test
#
mock_cff_post_2 = TTFont(TEST_FILE("source-sans-pro/OTF/SourceSansPro-Regular.otf"))
mock_cff_post_2["post"].formatType = 2
assert "CFF " in mock_cff_post_2
assert "CFF2" not in mock_cff_post_2
mock_cff_post_2.reader.file.name = "post 2 CFF mock font"
assert_results_contain(
check(mock_cff_post_2),
FAIL,
"post-table-version",
"with a CFF font that has post format 2 table",
)
mock_cff_post_3 = mock_cff_post_2
mock_cff_post_3["post"].formatType = 3
assert_PASS(check(mock_cff_post_3), reason="with a post 3 CFF mock font.")
def test_check_italic_angle():
"""Checking post.italicAngle value."""
check = CheckTester(opentype_profile, "com.google.fonts/check/italic_angle")
ttFont = TTFont(TEST_FILE("cabin/Cabin-Regular.ttf"))
# italic-angle, style, expected result, expected message
test_cases = [
[1, "Italic", WARN, "positive"],
[0, "Regular", PASS, None], # This must PASS as it is a non-italic
[-21, "ThinItalic", WARN, "over-20-degrees"],
[-30, "ThinItalic", WARN, "over-20-degrees"],
[-31, "ThinItalic", WARN, "over-30-degrees"],
[-91, "ThinItalic", FAIL, "over-90-degrees"],
[0, "Italic", FAIL, "zero-italic"],
[-1, "ExtraBold", FAIL, "non-zero-upright"],
]
for value, style, expected_result, expected_msg in test_cases:
ttFont["post"].italicAngle = value
if expected_result != PASS:
assert_results_contain(
check(ttFont, {"style": style}),
expected_result,
expected_msg,
f"with italic-angle:{value} style:{style}...",
)
else:
assert_PASS(
check(ttFont, {"style": style}),
f"with italic-angle:{value} style:{style}...",
)
# Cairo, check left and right-leaning explicitly
ttFont = TTFont(TEST_FILE("cairo/CairoPlay-Italic.rightslanted.ttf"))
assert_PASS(check(ttFont, {"style": "Italic"}))
ttFont["post"].italicAngle *= -1
assert_results_contain(check(ttFont, {"style": "Italic"}), WARN, "positive")
ttFont = TTFont(TEST_FILE("cairo/CairoPlay-Italic.leftslanted.ttf"))
assert_PASS(check(ttFont, {"style": "Italic"}))
ttFont["post"].italicAngle *= -1
assert_results_contain(check(ttFont, {"style": "Italic"}), WARN, "negative")
ttFont = TTFont(TEST_FILE("cairo/CairoPlay-Italic.rightslanted.ttf"))
assert_PASS(check(ttFont, {"style": "Italic"}))
ttFont["glyf"]["I"].endPtsOfContours = []
ttFont["glyf"]["I"].coordinates = []
ttFont["glyf"]["I"].flags = []
ttFont["glyf"]["I"].numberOfContours = 0
assert_results_contain(check(ttFont, {"style": "Italic"}), WARN, "empty-glyphs")
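# Illustrative sketch (added for clarity; not part of the test module). The mocks
# above work by rewriting the in-memory 'post' table, e.g.:
def _example_set_post_format(path="SomeFont.ttf"):  # hypothetical font path
    font = TTFont(path)
    font["post"].formatType = 3.0  # format 3 drops glyph names from the table
    return font["post"].formatType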
|
2,885 |
pending
|
import sys
import traceback
import types
import warnings
from eventlet.support import greenlets as greenlet
import six
from eventlet.hubs.hub import BaseHub, READ, WRITE
try:
import event
except ImportError:
event = None
def is_available():
return event is not None
class event_wrapper(object):
def __init__(self, impl=None, seconds=None):
self.impl = impl
self.seconds = seconds
def __repr__(self):
if self.impl is not None:
return repr(self.impl)
else:
return object.__repr__(self)
def __str__(self):
if self.impl is not None:
return str(self.impl)
else:
return object.__str__(self)
def cancel(self):
if self.impl is not None:
self.impl.delete()
self.impl = None
@property
def METHOD_NAME(self):
return bool(self.impl and self.impl.METHOD_NAME())
class Hub(BaseHub):
SYSTEM_EXCEPTIONS = (KeyboardInterrupt, SystemExit)
def __init__(self):
super(Hub, self).__init__()
event.init()
self.signal_exc_info = None
self.signal(
2,
lambda signalnum, frame: self.greenlet.parent.throw(KeyboardInterrupt))
self.events_to_add = []
warnings.warn(
"ACTION REQUIRED eventlet pyevent hub is deprecated and will be removed soon",
DeprecationWarning,
)
def dispatch(self):
loop = event.loop
while True:
for e in self.events_to_add:
if e is not None and e.impl is not None and e.seconds is not None:
e.impl.add(e.seconds)
e.seconds = None
self.events_to_add = []
result = loop()
if getattr(event, '__event_exc', None) is not None:
# only have to do this because of bug in event.loop
t = getattr(event, '__event_exc')
setattr(event, '__event_exc', None)
assert getattr(event, '__event_exc') is None
six.reraise(t[0], t[1], t[2])
if result != 0:
return result
def run(self):
while True:
try:
self.dispatch()
except greenlet.GreenletExit:
break
except self.SYSTEM_EXCEPTIONS:
raise
except:
if self.signal_exc_info is not None:
self.schedule_call_global(
0, greenlet.getcurrent().parent.throw, *self.signal_exc_info)
self.signal_exc_info = None
else:
self.squelch_timer_exception(None, sys.exc_info())
def abort(self, wait=True):
self.schedule_call_global(0, self.greenlet.throw, greenlet.GreenletExit)
if wait:
assert self.greenlet is not greenlet.getcurrent(
), "Can't abort with wait from inside the hub's greenlet."
self.switch()
def _getrunning(self):
return bool(self.greenlet)
def _setrunning(self, value):
pass # exists for compatibility with BaseHub
running = property(_getrunning, _setrunning)
def add(self, evtype, fileno, real_cb, real_tb, mac):
# this is stupid: pyevent won't call a callback unless it's a function,
# so we have to force it to be one here
if isinstance(real_cb, types.BuiltinMethodType):
def cb(_d):
real_cb(_d)
else:
cb = real_cb
if evtype is READ:
evt = event.read(fileno, cb, fileno)
elif evtype is WRITE:
evt = event.write(fileno, cb, fileno)
return super(Hub, self).add(evtype, fileno, evt, real_tb, mac)
def signal(self, signalnum, handler):
def wrapper():
try:
handler(signalnum, None)
except:
self.signal_exc_info = sys.exc_info()
event.abort()
return event_wrapper(event.signal(signalnum, wrapper))
def remove(self, listener):
super(Hub, self).remove(listener)
listener.cb.delete()
def remove_descriptor(self, fileno):
for lcontainer in six.itervalues(self.listeners):
listener = lcontainer.pop(fileno, None)
if listener:
try:
listener.cb.delete()
except self.SYSTEM_EXCEPTIONS:
raise
except:
traceback.print_exc()
def schedule_call_local(self, seconds, cb, *args, **kwargs):
current = greenlet.getcurrent()
if current is self.greenlet:
return self.schedule_call_global(seconds, cb, *args, **kwargs)
event_impl = event.event(_scheduled_call_local, (cb, args, kwargs, current))
wrapper = event_wrapper(event_impl, seconds=seconds)
self.events_to_add.append(wrapper)
return wrapper
schedule_call = schedule_call_local
def schedule_call_global(self, seconds, cb, *args, **kwargs):
event_impl = event.event(_scheduled_call, (cb, args, kwargs))
wrapper = event_wrapper(event_impl, seconds=seconds)
self.events_to_add.append(wrapper)
return wrapper
def _version_info(self):
baseversion = event.__version__
return baseversion
def _scheduled_call(event_impl, handle, evtype, arg):
cb, args, kwargs = arg
try:
cb(*args, **kwargs)
finally:
event_impl.delete()
def _scheduled_call_local(event_impl, handle, evtype, arg):
cb, args, kwargs, caller_greenlet = arg
try:
if not caller_greenlet.dead:
cb(*args, **kwargs)
finally:
event_impl.delete()
|
2,886 |
test create invited user without auth type
|
import json
import pytest
from app.models import EMAIL_AUTH_TYPE, Notification
from tests import create_authorization_header
from tests.app.conftest import create_sample_invited_user
@pytest.mark.parametrize(
"extra_args, expected_start_of_invite_url",
[
({}, "http://localhost:6012/invitation/"),
(
{"invite_link_host": "https://www.example.com"},
"https://www.example.com/invitation/",
),
],
)
def test_create_invited_user(
admin_request, sample_service, mocker, invitation_email_template, extra_args, expected_start_of_invite_url
):
mocked = mocker.patch("app.celery.provider_tasks.deliver_email.apply_async")
email_address = "[email protected]"
invite_from = sample_service.users[0]
data = dict(
service=str(sample_service.id),
email_address=email_address,
from_user=str(invite_from.id),
permissions="send_messages,manage_service,manage_api_keys",
auth_type=EMAIL_AUTH_TYPE,
folder_permissions=["folder_1", "folder_2", "folder_3"],
**extra_args,
)
json_resp = admin_request.post(
"invite.create_invited_user",
service_id=sample_service.id,
_data=data,
_expected_status=201,
)
assert json_resp["data"]["service"] == str(sample_service.id)
assert json_resp["data"]["email_address"] == email_address
assert json_resp["data"]["from_user"] == str(invite_from.id)
assert json_resp["data"]["permissions"] == "send_messages,manage_service,manage_api_keys"
assert json_resp["data"]["auth_type"] == EMAIL_AUTH_TYPE
assert json_resp["data"]["id"]
assert json_resp["data"]["folder_permissions"] == [
"folder_1",
"folder_2",
"folder_3",
]
notification = Notification.query.first()
assert notification.reply_to_text == invite_from.email_address
assert len(notification.personalisation.keys()) == 3
assert notification.personalisation["service_name"] == "Sample service"
assert notification.personalisation["user_name"] == "Test User"
assert notification.personalisation["url"].startswith(expected_start_of_invite_url)
assert len(notification.personalisation["url"]) > len(expected_start_of_invite_url)
mocked.assert_called_once_with([(str(notification.id))], queue="notify-internal-tasks")
def METHOD_NAME(admin_request, sample_service, mocker, invitation_email_template):
mocker.patch("app.celery.provider_tasks.deliver_email.apply_async")
email_address = "[email protected]"
invite_from = sample_service.users[0]
data = {
"service": str(sample_service.id),
"email_address": email_address,
"from_user": str(invite_from.id),
"permissions": "send_messages,manage_service,manage_api_keys",
"folder_permissions": [],
}
json_resp = admin_request.post(
"invite.create_invited_user",
service_id=sample_service.id,
_data=data,
_expected_status=201,
)
assert json_resp["data"]["auth_type"] == EMAIL_AUTH_TYPE
def test_create_invited_user_invalid_email(client, sample_service, mocker, fake_uuid):
mocked = mocker.patch("app.celery.provider_tasks.deliver_email.apply_async")
email_address = "notanemail"
invite_from = sample_service.users[0]
data = {
"service": str(sample_service.id),
"email_address": email_address,
"from_user": str(invite_from.id),
"permissions": "send_messages,manage_service,manage_api_keys",
"folder_permissions": [fake_uuid, fake_uuid],
}
data = json.dumps(data)
auth_header = create_authorization_header()
response = client.post(
"/service/{}/invite".format(sample_service.id),
headers=[("Content-Type", "application/json"), auth_header],
data=data,
)
assert response.status_code == 400
json_resp = json.loads(response.get_data(as_text=True))
assert json_resp["result"] == "error"
assert json_resp["message"] == {"email_address": ["Not a valid email address"]}
assert mocked.call_count == 0
def test_get_all_invited_users_by_service(client, notify_db, notify_db_session, sample_service):
invites = []
for i in range(0, 5):
email = "invited_user_{}@service.gov.uk".format(i)
invited_user = create_sample_invited_user(notify_db, notify_db_session, sample_service, email)
invites.append(invited_user)
url = "/service/{}/invite".format(sample_service.id)
auth_header = create_authorization_header()
response = client.get(url, headers=[("Content-Type", "application/json"), auth_header])
assert response.status_code == 200
json_resp = json.loads(response.get_data(as_text=True))
invite_from = sample_service.users[0]
for invite in json_resp["data"]:
assert invite["service"] == str(sample_service.id)
assert invite["from_user"] == str(invite_from.id)
assert invite["auth_type"] == EMAIL_AUTH_TYPE
assert invite["id"]
def test_get_invited_users_by_service_with_no_invites(client, notify_db, notify_db_session, sample_service):
url = "/service/{}/invite".format(sample_service.id)
auth_header = create_authorization_header()
response = client.get(url, headers=[("Content-Type", "application/json"), auth_header])
assert response.status_code == 200
json_resp = json.loads(response.get_data(as_text=True))
assert len(json_resp["data"]) == 0
def test_update_invited_user_set_status_to_cancelled(client, sample_invited_user):
data = {"status": "cancelled"}
url = "/service/{0}/invite/{1}".format(sample_invited_user.service_id, sample_invited_user.id)
auth_header = create_authorization_header()
response = client.post(
url,
data=json.dumps(data),
headers=[("Content-Type", "application/json"), auth_header],
)
assert response.status_code == 200
json_resp = json.loads(response.get_data(as_text=True))["data"]
assert json_resp["status"] == "cancelled"
def test_update_invited_user_for_wrong_service_returns_404(client, sample_invited_user, fake_uuid):
data = {"status": "cancelled"}
url = "/service/{0}/invite/{1}".format(fake_uuid, sample_invited_user.id)
auth_header = create_authorization_header()
response = client.post(
url,
data=json.dumps(data),
headers=[("Content-Type", "application/json"), auth_header],
)
assert response.status_code == 404
json_response = json.loads(response.get_data(as_text=True))["message"]
assert json_response == "No result found"
def test_update_invited_user_for_invalid_data_returns_400(client, sample_invited_user):
data = {"status": "garbage"}
url = "/service/{0}/invite/{1}".format(sample_invited_user.service_id, sample_invited_user.id)
auth_header = create_authorization_header()
response = client.post(
url,
data=json.dumps(data),
headers=[("Content-Type", "application/json"), auth_header],
)
assert response.status_code == 400
|
2,887 |
loss fn
|
"""Training algorithm track submission functions for LibriSpeech."""
import functools
from typing import Dict, Iterator, List, Tuple
from absl import logging
from flax import jax_utils
import jax
from jax import lax
import jax.numpy as jnp
import numpy as np
import optax
from algorithmic_efficiency import spec
_GRAD_CLIP_EPS = 1e-6
def get_batch_size(workload_name):
# Return the global batch size.
del workload_name
return 256
def get_learning_rate(step, hyperparams):
warmup_steps = hyperparams.warmup_steps
if step < warmup_steps:
current_lr = (step * hyperparams.base_lr) / warmup_steps
else:
decay_factor = (1 + np.cos(step / hyperparams.training_steps * np.pi)) * 0.5
current_lr = hyperparams.base_lr * decay_factor
return current_lr
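# Worked example (added for clarity, with hypothetical hyperparameters):
# for warmup_steps=2, base_lr=1.0, training_steps=10 the schedule above gives
#   step 0 -> 0.0 and step 1 -> 0.5 (linear warmup),
#   step 5 -> 1.0 * (1 + cos(0.5 * pi)) * 0.5 = 0.5 (cosine decay).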
def optimizer(hyperparameters: spec.Hyperparameters, num_train_examples: int):
opt_init_fn, opt_update_fn = optax.inject_hyperparams(optax.adamw)(
b1=hyperparameters.beta1,
b2=hyperparameters.beta2,
eps=hyperparameters.epsilon,
weight_decay=hyperparameters.weight_decay,
learning_rate=0.0)
return opt_init_fn, opt_update_fn
def init_optimizer_state(workload: spec.Workload,
model_params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
rng: spec.RandomState) -> spec.OptimizerState:
del model_state
del rng
params_zeros_like = jax.tree_map(lambda s: jnp.zeros(s.shape_tuple),
workload.param_shapes)
opt_init_fn, opt_update_fn = optimizer(hyperparameters,
workload.num_train_examples)
optimizer_state = opt_init_fn(params_zeros_like)
return jax_utils.replicate(optimizer_state), opt_update_fn
def l2_regularization(params, l2_decay_rank_threshold):
"""Computes the squared l2 norm of the given parameters.
This function will only filter for parameters with
rank >= l2_decay_rank_threshold. So if this threshold is set to 2, then all
1d (and lower) parameter arrays, including all bias and batch norm params,
will be ignored in this computation.
Args:
params: Pytree containing parameters.
l2_decay_rank_threshold: The calculation will only include parameters with
param.ndim >= l2_decay_rank_threshold. Set to 2 to ignore all bias and
batch_norm params in the model.
Returns:
weight_l2: the squared l2 norm of all params matching the threshold.
"""
weight_penalty_params = jax.tree_util.tree_leaves(params)
weight_l2 = sum(
jnp.sum(x**2)
for x in weight_penalty_params
if x.ndim >= l2_decay_rank_threshold)
return weight_l2
@functools.partial(
jax.pmap,
axis_name='batch',
in_axes=(None, None, 0, 0, 0, None, 0, 0, None),
static_broadcasted_argnums=(0, 1))
def pmapped_train_step(workload,
opt_update_fn,
model_state,
optimizer_state,
current_param_container,
hyperparameters,
batch,
rng,
lr):
optimizer_state.hyperparams['learning_rate'] = lr
def METHOD_NAME(params):
"""loss function used for training."""
(logits, logit_paddings), new_model_state = workload.model_fn(
params,
batch,
model_state,
mode=spec.ForwardPassMode.TRAIN,
rng=rng,
update_batch_norm=True)
loss_dict = workload.loss_fn(batch['targets'], (logits, logit_paddings))
summed_loss = loss_dict['summed']
n_valid_examples = loss_dict['n_valid_examples']
return summed_loss, (n_valid_examples, new_model_state)
grad_fn = jax.value_and_grad(METHOD_NAME, has_aux=True)
(summed_loss, (n_valid_examples, new_model_state)), grad = grad_fn(
current_param_container)
# Get correct global mean loss and grad.
(summed_loss, n_valid_examples, grad) = lax.psum(
(summed_loss, n_valid_examples, grad), axis_name='batch')
loss = summed_loss / n_valid_examples
grad = jax.tree_map(lambda x: x / n_valid_examples, grad)
grad_clip = hyperparameters.grad_clip
grad_norm = jnp.sqrt(l2_regularization(grad, 0))
scaled_grad = jax.tree_map(
lambda x: x / (grad_norm + _GRAD_CLIP_EPS) * grad_clip, grad)
grad = jax.lax.cond(grad_norm > grad_clip,
lambda _: scaled_grad,
lambda _: grad,
None)
updates, new_optimizer_state = opt_update_fn(grad, optimizer_state,
current_param_container)
updated_params = optax.apply_updates(current_param_container, updates)
return new_model_state, new_optimizer_state, updated_params, loss, grad_norm
def update_params(workload: spec.Workload,
current_param_container: spec.ParameterContainer,
current_params_types: spec.ParameterTypeTree,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
batch: Dict[str, spec.Tensor],
loss_type: spec.LossType,
optimizer_state: spec.OptimizerState,
eval_results: List[Tuple[int, float]],
global_step: int,
rng: spec.RandomState) -> spec.UpdateReturn:
"""Return (updated_optimizer_state, updated_params)."""
del current_params_types
del eval_results
del loss_type
lr = get_learning_rate(global_step, hyperparameters)
optimizer_state, opt_update_fn = optimizer_state
per_device_rngs = jax.random.split(rng, jax.local_device_count())
outputs = pmapped_train_step(workload,
opt_update_fn,
model_state,
optimizer_state,
current_param_container,
hyperparameters,
batch,
per_device_rngs,
lr)
new_model_state, new_optimizer_state, new_params, loss, grad_norm = outputs
if global_step <= 1000 or global_step % 100 == 0:
logging.info('%d) loss = %0.3f, grad_norm = %0.3f lr = %0.6f',
global_step,
loss.mean(),
grad_norm.mean(),
lr)
if workload.metrics_logger is not None:
workload.metrics_logger.append_scalar_metrics(
{
'train_step_ctc_loss': loss.mean(),
'grad_norm': grad_norm.mean(),
'learning_rate': lr,
},
global_step)
return (new_optimizer_state, opt_update_fn), new_params, new_model_state
# Not allowed to update the model parameters, hyperparameters, global step, or
# optimizer state.
def data_selection(workload: spec.Workload,
input_queue: Iterator[Dict[str, spec.Tensor]],
optimizer_state: spec.OptimizerState,
current_param_container: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
global_step: int,
rng: spec.RandomState) -> Dict[str, spec.Tensor]:
"""Select data from the infinitely repeating, pre-shuffled input queue.
Each element of the queue is a batch of training examples and labels.
"""
del workload
del optimizer_state
del current_param_container
del model_state
del hyperparameters
del global_step
del rng
return next(input_queue)
|
2,888 |
handle message receive model from client
|
import logging
from .message_define import MyMessage
from .utils import transform_tensor_to_list, post_complete_message_to_sweep_process
from ....core.distributed.fedml_comm_manager import FedMLCommManager
from ....core.distributed.communication.message import Message
class FedProxServerManager(FedMLCommManager):
def __init__(
self,
args,
aggregator,
comm=None,
rank=0,
size=0,
backend="MPI",
is_preprocessed=False,
preprocessed_client_lists=None,
):
super().__init__(args, comm, rank, size, backend)
self.args = args
self.aggregator = aggregator
self.round_num = args.comm_round
self.args.round_idx = 0
self.is_preprocessed = is_preprocessed
self.preprocessed_client_lists = preprocessed_client_lists
def run(self):
super().run()
def send_init_msg(self):
# sampling clients
client_indexes = self.aggregator.client_sampling(
self.args.round_idx,
self.args.client_num_in_total,
self.args.client_num_per_round,
)
global_model_params = self.aggregator.get_global_model_params()
global_model_params = transform_tensor_to_list(global_model_params)
for process_id in range(1, self.size):
self.send_message_init_config(
process_id, global_model_params, client_indexes[process_id - 1]
)
def register_message_receive_handlers(self):
self.register_message_receive_handler(
MyMessage.MSG_TYPE_C2S_SEND_MODEL_TO_SERVER,
self.METHOD_NAME,
)
def METHOD_NAME(self, msg_params):
sender_id = msg_params.get(MyMessage.MSG_ARG_KEY_SENDER)
model_params = msg_params.get(MyMessage.MSG_ARG_KEY_MODEL_PARAMS)
local_sample_number = msg_params.get(MyMessage.MSG_ARG_KEY_NUM_SAMPLES)
self.aggregator.add_local_trained_result(
sender_id - 1, model_params, local_sample_number
)
b_all_received = self.aggregator.check_whether_all_receive()
logging.info("b_all_received = " + str(b_all_received))
if b_all_received:
global_model_params = self.aggregator.aggregate()
self.aggregator.test_on_server_for_all_clients(self.args.round_idx)
# start the next round
self.args.round_idx += 1
if self.args.round_idx == self.round_num:
post_complete_message_to_sweep_process(self.args)
self.finish()
print("here")
return
if self.is_preprocessed:
if self.preprocessed_client_lists is None:
# sampling has already been done in data preprocessor
client_indexes = [self.args.round_idx] * self.args.client_num_per_round
else:
client_indexes = self.preprocessed_client_lists[self.args.round_idx]
else:
# sampling clients
client_indexes = self.aggregator.client_sampling(
self.args.round_idx,
self.args.client_num_in_total,
self.args.client_num_per_round,
)
print("indexes of clients: " + str(client_indexes))
print("size = %d" % self.size)
for receiver_id in range(1, self.size):
self.send_message_sync_model_to_client(
receiver_id, global_model_params, client_indexes[receiver_id - 1]
)
def send_message_init_config(self, receive_id, global_model_params, client_index):
message = Message(
MyMessage.MSG_TYPE_S2C_INIT_CONFIG, self.get_sender_id(), receive_id
)
message.add_params(MyMessage.MSG_ARG_KEY_MODEL_PARAMS, global_model_params)
message.add_params(MyMessage.MSG_ARG_KEY_CLIENT_INDEX, str(client_index))
self.send_message(message)
def send_message_sync_model_to_client(
self, receive_id, global_model_params, client_index
):
logging.info("send_message_sync_model_to_client. receive_id = %d" % receive_id)
message = Message(
MyMessage.MSG_TYPE_S2C_SYNC_MODEL_TO_CLIENT,
self.get_sender_id(),
receive_id,
)
message.add_params(MyMessage.MSG_ARG_KEY_MODEL_PARAMS, global_model_params)
message.add_params(MyMessage.MSG_ARG_KEY_CLIENT_INDEX, str(client_index))
self.send_message(message)
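# Summary sketch (added for clarity; not part of the module): one round as driven
# by this manager, in comment form:
#   1. send_init_msg() sends MSG_TYPE_S2C_INIT_CONFIG with the global model to every client
#   2. each client replies with MSG_TYPE_C2S_SEND_MODEL_TO_SERVER, handled by the masked handler
#   3. once all replies arrive: aggregate(), evaluate on the server, round_idx += 1
#   4. finish() after comm_round rounds, otherwise MSG_TYPE_S2C_SYNC_MODEL_TO_CLIENT starts the next round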
|
2,889 |
resolve schema reference
|
import json
from urllib.parse import urlsplit
import requests
import yaml
from loguru import logger
from oasst_shared.schemas import inference
def fetch_openapi_spec(url):
response = requests.get(url)
if response.status_code != 200:
raise Exception(f"Failed to fetch data from URL: {url}. Status code: {response.status_code}")
content_type = response.headers.get("Content-Type")
if "application/json" in content_type or url.endswith(".json"):
return json.loads(response.text)
elif (
"application/yaml" in content_type
or "application/x-yaml" in content_type
or url.endswith(".yaml")
or url.endswith(".yml")
):
return yaml.safe_load(response.text)
else:
raise Exception(f"Unsupported content type: {content_type}. Only JSON and YAML are supported.")
def get_plugin_config(url: str) -> inference.PluginConfig | None:
try:
response = requests.get(url)
response.raise_for_status()
plugin_dict = response.json()
logger.info(f"Plugin config downloaded {plugin_dict}")
plugin_config = inference.PluginConfig.parse_obj(plugin_dict)
return plugin_config
except (requests.RequestException, ValueError) as e:
logger.warning(f"Error downloading or parsing Plugin config: {e}")
return None
def METHOD_NAME(ref: str, openapi_dict: dict):
if not ref.startswith("#/"):
raise ValueError(f"Invalid reference format: {ref}")
components = ref.split("/")
schema = openapi_dict
for component in components[1:]:
if component not in schema:
raise ValueError(f"Reference component not found: {component}")
schema = schema[component]
return schema
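# Illustrative example (added for clarity) of the reference resolution above,
# using a made-up minimal OpenAPI dict:
def _example_resolve_reference():
    spec = {"components": {"schemas": {"Query": {"type": "object"}}}}
    # "#/components/schemas/Query" walks spec["components"]["schemas"]["Query"]
    return METHOD_NAME("#/components/schemas/Query", spec)  # -> {"type": "object"}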
def parse_plugin_endpoint(
api_url: str,
method: str,
details: dict,
base_url: str,
path: str,
openapi_dict: dict,
) -> inference.PluginOpenAPIEndpoint:
"""
Parse details of a single plugin endpoint from OpenAPI spec.
Args:
api_url: URL of the plugin API.
method: HTTP method of the endpoint.
details: Details of the endpoint from OpenAPI spec.
base_url: Base URL of the plugin.
path: Path of the endpoint.
openapi_dict: Full OpenAPI spec of the plugin.
"""
split_result = urlsplit(api_url)
backup_url = f"{split_result.scheme}://{split_result.netloc}"
params_list = []
parameters = details.get("parameters", [])
if parameters is not None:
for param in parameters:
schema = None
if "$ref" in param["schema"]:
schema = METHOD_NAME(param["schema"]["$ref"], openapi_dict)
params_list.append(
inference.PluginOpenAPIParameter(
name=param.get("name", ""),
in_=param.get("in", "query"),
description=param.get("description", ""),
required=param.get("required", False),
schema_=schema,
)
)
# Extract the request body schema, if the endpoint defines one
payload = None
if "requestBody" in details:
content = details["requestBody"].get("content", {})
for media_type, media_schema in content.items():
if media_type == "application/json":
if "$ref" in media_schema["schema"]:
payload = METHOD_NAME(media_schema["schema"]["$ref"], openapi_dict)
else:
payload = media_schema["schema"]
endpoint_data = {
"type": method,
"summary": details.get("summary", ""),
"operation_id": details.get("operationId", ""),
"url": f"{base_url}{path}" if base_url is not None else f"{backup_url}{path}",
"path": path,
"params": params_list,
"payload": payload,
}
if "tags" in details:
tag_name = details["tags"][0]
endpoint_data["tag"] = tag_name
endpoint = inference.PluginOpenAPIEndpoint(**endpoint_data)
return endpoint
def get_plugin_endpoints(api_url: str, openapi_dict: dict) -> list[inference.PluginOpenAPIEndpoint]:
endpoints = []
base_url = openapi_dict.get("servers", [{}])[0].get("url")
if base_url is not None:
parsed_link = urlsplit(api_url)
base_url = (
f"{parsed_link.scheme}://{parsed_link.netloc}{base_url}" if not urlsplit(base_url).scheme else base_url
)
for path, methods in openapi_dict.get("paths", {}).items():
for method, details in methods.items():
endpoints.append(parse_plugin_endpoint(api_url, method, details, base_url, path, openapi_dict))
return endpoints
def prepare_plugin_for_llm(plugin_url: str) -> inference.PluginConfig | None:
plugin_config = get_plugin_config(plugin_url)
if not plugin_config:
return None
try:
parsed_url = urlsplit(plugin_config.api.url)
if parsed_url.scheme == "":
api_url = urlsplit(plugin_url)._replace(path=parsed_url.path).geturl()
else:
api_url = plugin_config.api.url
openapi_dict = fetch_openapi_spec(api_url)
plugin_config.endpoints = get_plugin_endpoints(api_url, openapi_dict)
return plugin_config
except Exception:
logger.debug(f"Plugin preparation error: {plugin_url}")
return None
|
2,890 |
test bilinear cell integral
|
import pytest
import numpy as np
from firedrake import *
@pytest.fixture(params=[False, True])
def f(request):
quadrilateral = request.param
m = UnitSquareMesh(1, 1, quadrilateral=quadrilateral)
fs = FunctionSpace(m, "CG", 1)
f = Function(fs)
x = SpatialCoordinate(m)
f.interpolate(x[0])
return f
@pytest.fixture(scope='module')
def dg_trial_test():
# Interior facet tests hard code order in which cells were
# numbered, so don't reorder this mesh.
m = UnitSquareMesh(1, 1, reorder=False)
V = FunctionSpace(m, "DG", 0)
u = TrialFunction(V)
v = TestFunction(V)
return u, v
def test_external_integral(f):
assert abs(assemble(f * ds) - 2.0) < 1.0e-14
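# Worked check (added for clarity): with f interpolating x on the unit square,
# the boundary integral is 0.5 (bottom) + 0.5 (top) + 0 (left, x=0) + 1 (right, x=1) = 2.0.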
def test_bottom_external_integral(f):
assert abs(assemble(f * ds(3)) - 0.5) < 1.0e-14
def test_top_external_integral(f):
assert abs(assemble(f * ds(4)) - 0.5) < 1.0e-14
def test_left_external_integral(f):
assert abs(assemble(f * ds(1))) < 1.0e-14
def test_right_external_integral(f):
assert abs(assemble(f * ds(2)) - 1.0) < 1.0e-14
def test_internal_integral(f):
if f.function_space().mesh().num_cells() == 1:
# Quadrilateral case, no internal facet
assert abs(assemble(f('+') * dS)) < 1.0e-14
else:
# Triangle case, one internal facet
assert abs(assemble(f('+') * dS) - 1.0 / (2.0 ** 0.5)) < 1.0e-14
def test_facet_integral_with_argument(f):
v = TestFunction(f.function_space())
assert np.allclose(assemble(inner(f, v) * ds).dat.data_ro.sum(), 2.0)
def METHOD_NAME(dg_trial_test):
u, v = dg_trial_test
cell = assemble(inner(u, v) * dx).M.values
# each diagonal entry should be volume of cell
assert np.allclose(np.diag(cell), 0.5)
# all off-diagonals should be zero
cell[range(2), range(2)] = 0.0
assert np.allclose(cell, 0.0)
def test_bilinear_exterior_facet_integral(dg_trial_test):
u, v = dg_trial_test
outer_facet = assemble(inner(u, v) * ds).M.values
# each diagonal entry should be the total length of the exterior facets in
# this cell (2)
assert np.allclose(np.diag(outer_facet), 2.0)
# all off-diagonals should be zero
outer_facet[range(2), range(2)] = 0.0
assert np.allclose(outer_facet, 0.0)
def test_vector_bilinear_exterior_facet_integral():
mesh = IntervalMesh(5, 5)
V = VectorFunctionSpace(mesh, "CG", 1, dim=2)
u = TrialFunction(V)
v = TestFunction(V)
a = inner(u, v) * ds
A = assemble(a)
values = A.M.values
# Only the first and last vertices should contain nonzeros. Since these are
# blocked that means that the first two entries and the last two entries
# should be nonzero.
nonzeros = [[0, 0], [1, 1], [-2, -2], [-1, -1]]
assert all(np.allclose(values[row, col], 1.0) for row, col in nonzeros)
# the remaining entries should all be zero
for row, col in nonzeros:
values[row, col] = 0.0
assert np.allclose(values, 0.0)
@pytest.mark.parametrize('restrictions',
# ((trial space restrictions), (test space restrictions))
[(('+', ), ('+', )),
(('+', ), ('-', )),
(('-', ), ('+', )),
(('-', '+'), ('+', '+')),
(('-', '+'), ('-', '+')),
(('-', '+'), ('+', '-')),
(('-', '+'), ('-', '-')),
(('+', '+'), ('+', '+')),
(('+', '+'), ('-', '+')),
(('+', '+'), ('+', '-')),
(('+', '+'), ('-', '-')),
(('-', '-'), ('+', '+')),
(('-', '-'), ('-', '+')),
(('-', '-'), ('+', '-')),
(('-', '-'), ('-', '-')),
(('+', '-'), ('+', '+')),
(('+', '-'), ('-', '+')),
(('+', '-'), ('+', '-')),
(('+', '-'), ('-', '-')),
(('+', '+', '-', '-'), ('+', '-', '+', '-'))])
def test_bilinear_interior_facet_integral(dg_trial_test, restrictions):
u, v = dg_trial_test
trial_r, test_r = restrictions
idx = {'+': 0, '-': 1}
exact = np.zeros((2, 2), dtype=float)
form = 0
for u_r, v_r in zip(trial_r, test_r):
form = form + inner(u(u_r), v(v_r)) * dS
exact[idx[v_r], idx[u_r]] += sqrt(2)
interior_facet = assemble(form).M.values
assert np.allclose(interior_facet - exact, 0.0)
@pytest.mark.parametrize('space', ["RT", "BDM"])
def test_contravariant_piola_facet_integral(space):
m = UnitSquareMesh(1, 1)
V = FunctionSpace(m, space, 1)
u = project(Constant((0.0, 1.0)), V)
assert abs(assemble(inner(u('+'), u('+'))*dS) - sqrt(2)) < 1.0e-13
assert abs(assemble(inner(u('-'), u('-'))*dS) - sqrt(2)) < 1.0e-13
assert abs(assemble(inner(u('+'), u('-'))*dS) - sqrt(2)) < 1.0e-13
@pytest.mark.parametrize('space', ["N1curl", "N2curl"])
def test_covariant_piola_facet_integral(space):
m = UnitSquareMesh(1, 1)
V = FunctionSpace(m, space, 1)
u = project(Constant((0.0, 1.0)), V)
assert abs(assemble(inner(u('+'), u('+'))*dS) - sqrt(2)) < 1.0e-13
assert abs(assemble(inner(u('-'), u('-'))*dS) - sqrt(2)) < 1.0e-13
assert abs(assemble(inner(u('+'), u('-'))*dS) - sqrt(2)) < 1.0e-13
def test_internal_integral_unit_tri():
t = UnitTriangleMesh()
V = FunctionSpace(t, 'CG', 1)
u = Function(V)
x = SpatialCoordinate(t)
u.interpolate(x[0])
assert abs(assemble(u('+') * dS)) < 1.0e-14
def test_internal_integral_unit_tet():
t = UnitTetrahedronMesh()
V = FunctionSpace(t, 'CG', 1)
u = Function(V)
x = SpatialCoordinate(t)
u.interpolate(x[0])
assert abs(assemble(u('+') * dS)) < 1.0e-14
def test_facet_map_no_reshape():
m = UnitSquareMesh(1, 1)
V = FunctionSpace(m, "DG", 0)
efnm = V.exterior_facet_node_map()
assert efnm.values_with_halo.shape == (4, 1)
def test_mesh_with_no_facet_markers():
mesh = UnitTriangleMesh()
mesh.init()
with pytest.raises(LookupError):
mesh.exterior_facets.subset((10,))
|
2,891 |
test not ii
|
#!/usr/bin/env python
#
# Copyright 2004,2007,2008,2010,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from gnuradio import gr, gr_unittest, blocks
class test_boolean_operators (gr_unittest.TestCase):
def setUp(self):
self.tb = gr.top_block()
def tearDown(self):
self.tb = None
def help_ss(self, src_data, exp_data, op):
for s in zip(list(range(len(src_data))), src_data):
src = blocks.vector_source_s(s[1])
self.tb.connect(src, (op, s[0]))
dst = blocks.vector_sink_s()
self.tb.connect(op, dst)
self.tb.run()
result_data = dst.data()
self.assertEqual(exp_data, result_data)
def help_bb(self, src_data, exp_data, op):
for s in zip(list(range(len(src_data))), src_data):
src = blocks.vector_source_b(s[1])
self.tb.connect(src, (op, s[0]))
dst = blocks.vector_sink_b()
self.tb.connect(op, dst)
self.tb.run()
result_data = dst.data()
self.assertEqual(exp_data, result_data)
def help_ii(self, src_data, exp_data, op):
for s in zip(list(range(len(src_data))), src_data):
src = blocks.vector_source_i(s[1])
self.tb.connect(src, (op, s[0]))
dst = blocks.vector_sink_i()
self.tb.connect(op, dst)
self.tb.run()
result_data = dst.data()
self.assertEqual(exp_data, result_data)
def test_xor_ss(self):
src1_data = [1, 2, 3, 0x5004, 0x1150]
src2_data = [8, 2, 1, 0x0508, 0x1105]
expected_result = [9, 0, 2, 0x550C, 0x0055]
op = blocks.xor_ss()
self.help_ss((src1_data, src2_data),
expected_result, op)
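# Worked example (added for clarity): element-wise XOR of the inputs above:
# 1 ^ 8 = 9, 2 ^ 2 = 0, 3 ^ 1 = 2, 0x5004 ^ 0x0508 = 0x550C,
# 0x1150 ^ 0x1105 = 0x0055, matching expected_result.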
def test_xor_bb(self):
src1_data = [1, 2, 3, 4, 0x50]
src2_data = [8, 2, 1, 8, 0x05]
expected_result = [9, 0, 2, 0xC, 0x55]
op = blocks.xor_bb()
self.help_bb((src1_data, src2_data),
expected_result, op)
def test_xor_ii(self):
src1_data = [1, 2, 3, 0x5000004, 0x11000050]
src2_data = [8, 2, 1, 0x0500008, 0x11000005]
expected_result = [9, 0, 2, 0x550000C, 0x00000055]
op = blocks.xor_ii()
self.help_ii((src1_data, src2_data),
expected_result, op)
def test_and_ss(self):
src1_data = [1, 2, 3, 0x5004, 0x1150]
src2_data = [8, 2, 1, 0x0508, 0x1105]
expected_result = [0, 2, 1, 0x0000, 0x1100]
op = blocks.and_ss()
self.help_ss((src1_data, src2_data),
expected_result, op)
def test_and_bb(self):
src1_data = [1, 2, 2, 3, 0x04, 0x50]
src2_data = [8, 2, 2, 1, 0x08, 0x05]
src3_data = [8, 2, 1, 1, 0x08, 0x05]
expected_result = [0, 2, 0, 1, 0x00, 0x00]
op = blocks.and_bb()
self.help_bb((src1_data, src2_data, src3_data),
expected_result, op)
def test_and_ii(self):
src1_data = [1, 2, 3, 0x50005004, 0x11001150]
src2_data = [8, 2, 1, 0x05000508, 0x11001105]
expected_result = [0, 2, 1, 0x00000000, 0x11001100]
op = blocks.and_ii()
self.help_ii((src1_data, src2_data),
expected_result, op)
def test_and_const_ss(self):
src_data = [1, 2, 3, 0x5004, 0x1150]
expected_result = [0, 2, 2, 0x5000, 0x1100]
src = blocks.vector_source_s(src_data)
op = blocks.and_const_ss(0x55AA)
dst = blocks.vector_sink_s()
self.tb.connect(src, op, dst)
self.tb.run()
self.assertEqual(dst.data(), expected_result)
def test_and_const_bb(self):
src_data = [1, 2, 3, 0x50, 0x11]
expected_result = [0, 2, 2, 0x00, 0x00]
src = blocks.vector_source_b(src_data)
op = blocks.and_const_bb(0xAA)
dst = blocks.vector_sink_b()
self.tb.connect(src, op, dst)
self.tb.run()
self.assertEqual(dst.data(), expected_result)
def test_and_const_ii(self):
src_data = [1, 2, 3, 0x5004, 0x1150]
expected_result = [0, 2, 2, 0x5000, 0x1100]
src = blocks.vector_source_i(src_data)
op = blocks.and_const_ii(0x55AA)
dst = blocks.vector_sink_i()
self.tb.connect(src, op, dst)
self.tb.run()
self.assertEqual(dst.data(), expected_result)
def test_or_ss(self):
src1_data = [1, 2, 3, 0x5004, 0x1150]
src2_data = [8, 2, 1, 0x0508, 0x1105]
expected_result = [9, 2, 3, 0x550C, 0x1155]
op = blocks.or_ss()
self.help_ss((src1_data, src2_data),
expected_result, op)
def test_or_bb(self):
src1_data = [1, 2, 2, 3, 0x04, 0x50]
src2_data = [8, 2, 2, 1, 0x08, 0x05]
src3_data = [8, 2, 1, 1, 0x08, 0x05]
expected_result = [9, 2, 3, 3, 0x0C, 0x55]
op = blocks.or_bb()
self.help_bb((src1_data, src2_data, src3_data),
expected_result, op)
def test_or_ii(self):
src1_data = [1, 2, 3, 0x50005004, 0x11001150]
src2_data = [8, 2, 1, 0x05000508, 0x11001105]
expected_result = [9, 2, 3, 0x5500550C, 0x11001155]
op = blocks.or_ii()
self.help_ii((src1_data, src2_data),
expected_result, op)
def test_not_ss(self):
src1_data = [1, 2, 3, 0x5004, 0x1150]
expected_result = [~1, ~2, ~3, ~0x5004, ~0x1150]
op = blocks.not_ss()
self.help_ss((((src1_data),)),
expected_result, op)
def test_not_bb(self):
src1_data = [1, 2, 2, 3, 0x04, 0x50]
expected_result = [0xFE, 0xFD, 0xFD, 0xFC, 0xFB, 0xAF]
op = blocks.not_bb()
self.help_bb(((src1_data), ),
expected_result, op)
def METHOD_NAME(self):
src1_data = [1, 2, 3, 0x50005004, 0x11001150]
expected_result = [~1, ~2, ~3, ~0x50005004, ~0x11001150]
op = blocks.not_ii()
self.help_ii(((src1_data),),
expected_result, op)
if __name__ == '__main__':
gr_unittest.run(test_boolean_operators)
|
2,892 |
product
|
"""
JBoss version
=============
Provide information about the versions of all running JBoss instances on a system.
"""
import json
from collections import namedtuple
from insights import Parser, parser
from insights.specs import Specs
# define namedtuple to store the property of version
_VersionNameTuple = namedtuple("_VersionNameTuple", ["file_path", "product", "version", "code_name", "major", "minor", "release"])
def _get_version_tuple(version_line, i_file_path):
"""
Parse the version line and return a namedtuple with the values of one JBoss version
"""
METHOD_NAME, _, version_name = [v.strip() for v in version_line.partition("- Version")]
if " GA" in version_name:
# handle Red Hat JBoss Web Server - Version 5.6 GA
version = version_name.split(' GA')[0]
code_name = "GA"
updated_version = version + ".0"
major, minor, release = updated_version.split('.')[0:3]
else:
# add empty code name for Red Hat Data Grid - Version 7.3.0
version_name = version_name.strip() + "."
major, minor, release, code_name = version_name.split(".")[0:4]
version = '.'.join([major, minor, release])
return _VersionNameTuple(i_file_path, METHOD_NAME, version, code_name, int(major), int(minor), int(release))
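# Worked examples (added for clarity; file paths are placeholders):
# _get_version_tuple("Red Hat JBoss Web Server - Version 5.6 GA", "/opt/jws/version.txt")
# -> product "Red Hat JBoss Web Server", version "5.6", code_name "GA", major 5, minor 6, release 0
# _get_version_tuple("Red Hat Data Grid - Version 7.3.0", "/opt/datagrid/version.txt")
# -> product "Red Hat Data Grid", version "7.3.0", code_name "", major 7, minor 3, release 0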
@parser(Specs.jboss_version)
class JbossVersion(Parser):
"""
This class provides access to the file ``$JBOSS_HOME/version.txt``
Typical content of file ``$JBOSS_HOME/version.txt`` is::
Red Hat JBoss Enterprise Application Platform - Version 6.4.3.GA
This class parses the file content and stores the parsed data in ``self._parsed``.
The version info can also be accessed via ``obj.major``, ``obj.minor``, etc.
Examples:
>>> jboss_version.file_path
'/home/test/jboss/jboss-eap-6.4/version.txt'
>>> jboss_version.raw
'Red Hat JBoss Enterprise Application Platform - Version 6.4.3.GA'
>>> jboss_version.major
6
>>> jboss_version.minor
4
>>> jboss_version.release
3
>>> jboss_version.version
'6.4.3'
>>> jboss_version.code_name
'GA'
"""
def parse_content(self, content):
self.raw = content[0]
self._parsed = _get_version_tuple(content[0], self.file_path)
@property
def METHOD_NAME(self):
"""string: the product name of this running JBoss process."""
return self._parsed.METHOD_NAME
@property
def version(self):
"""string: the version of this running JBoss process."""
return self._parsed.version
@property
def major(self):
"""int: the major version of this running JBoss process."""
return self._parsed.major
@property
def minor(self):
"""int: the minor version of this running JBoss process."""
return self._parsed.minor
@property
def release(self):
"""int: the release of this running JBoss process."""
return self._parsed.release
@property
def code_name(self):
"""string: the code name of this running JBoss process."""
return self._parsed.code_name
@parser(Specs.jboss_runtime_versions)
class JbossRuntimeVersions(Parser, list):
"""
This class provides access to the file ``data/insights_commands/jboss_versions``
Typical content of file ``data/insights_commands/jboss_versions`` is::
{"/opt/jboss-datagrid-7.3.0-server": "Red Hat Data Grid - Version 7.3.0"}
This class parses the file content and stores data in the list.
Examples:
>>> len(all_jboss_versions)
1
>>> all_jboss_versions[0].major
7
>>> all_jboss_versions[0].minor
3
>>> all_jboss_versions[0].release
0
"""
def parse_content(self, content):
jboss_version_dict = json.loads(' '.join(content))
for j_path, version_content in jboss_version_dict.items():
lines = version_content.strip().splitlines()
self.append(_get_version_tuple(lines[0], j_path))
|
2,893 |
test ds length is ceil num epochs
|
# Copyright 2019, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from absl.testing import parameterized
import tensorflow as tf
from tensorflow_federated.python.core.backends.native import execution_contexts
from tensorflow_federated.python.simulation.baselines import client_spec
from tensorflow_federated.python.simulation.baselines.cifar100 import image_classification_preprocessing
TEST_DATA = collections.OrderedDict(
coarse_label=([tf.constant(1, dtype=tf.int64)]),
image=([tf.zeros((32, 32, 3), dtype=tf.uint8)]),
label=([tf.constant(1, dtype=tf.int64)]),
)
def _compute_length_of_dataset(ds):
return ds.reduce(0, lambda x, _: x + 1)
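# A minimal standalone sketch (not used by the tests below; the values are
# illustrative): with a single-example client dataset, repeating it num_epochs
# times and batching by batch_size yields ceil(num_epochs / batch_size) batches,
# which is the length the first parameterized test asserts.
def _sketch_expected_num_batches(num_epochs=7, batch_size=2):
    ds = tf.data.Dataset.from_tensor_slices([0]).repeat(num_epochs).batch(batch_size)
    num_batches = int(_compute_length_of_dataset(ds))
    assert num_batches == -(-num_epochs // batch_size)  # ceil division
    return num_batches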
class PreprocessFnTest(tf.test.TestCase, parameterized.TestCase):
def test_raises_non_iterable_crop(self):
preprocess_spec = client_spec.ClientSpec(num_epochs=1, batch_size=1)
with self.assertRaisesRegex(TypeError, 'crop_shape must be an iterable'):
image_classification_preprocessing.create_preprocess_fn(
preprocess_spec, crop_shape=32
)
def test_raises_iterable_length_2_crop(self):
preprocess_spec = client_spec.ClientSpec(num_epochs=1, batch_size=1)
with self.assertRaisesRegex(
ValueError, 'The crop_shape must have length 3'
):
image_classification_preprocessing.create_preprocess_fn(
preprocess_spec, crop_shape=(32, 32)
)
@parameterized.named_parameters(
('num_epochs_1_batch_size_1', 1, 1),
('num_epochs_4_batch_size_2', 4, 2),
('num_epochs_9_batch_size_3', 9, 3),
('num_epochs_12_batch_size_1', 12, 1),
('num_epochs_3_batch_size_5', 3, 5),
('num_epochs_7_batch_size_2', 7, 2),
)
def METHOD_NAME(
self, num_epochs, batch_size
):
ds = tf.data.Dataset.from_tensor_slices(TEST_DATA)
preprocess_spec = client_spec.ClientSpec(
num_epochs=num_epochs, batch_size=batch_size
)
preprocess_fn = image_classification_preprocessing.create_preprocess_fn(
preprocess_spec
)
preprocessed_ds = preprocess_fn(ds)
self.assertEqual(
_compute_length_of_dataset(preprocessed_ds),
tf.cast(tf.math.ceil(num_epochs / batch_size), tf.int32),
)
@parameterized.named_parameters(
('crop_shape_1_no_distort', (32, 32, 3), False),
('crop_shape_2_no_distort', (28, 28, 3), False),
('crop_shape_3_no_distort', (24, 26, 3), False),
('crop_shape_1_distort', (32, 32, 3), True),
('crop_shape_2_distort', (28, 28, 3), True),
('crop_shape_3_distort', (24, 26, 3), True),
)
def test_preprocess_fn_returns_correct_element(
self, crop_shape, distort_image
):
ds = tf.data.Dataset.from_tensor_slices(TEST_DATA)
preprocess_spec = client_spec.ClientSpec(
num_epochs=1, batch_size=1, shuffle_buffer_size=1
)
preprocess_fn = image_classification_preprocessing.create_preprocess_fn(
preprocess_spec, crop_shape=crop_shape, distort_image=distort_image
)
preprocessed_ds = preprocess_fn(ds)
expected_element_spec_shape = (None,) + crop_shape
self.assertEqual(
preprocessed_ds.element_spec,
(
tf.TensorSpec(shape=expected_element_spec_shape, dtype=tf.float32),
tf.TensorSpec(shape=(None,), dtype=tf.int64),
),
)
expected_element_shape = (1,) + crop_shape
element = next(iter(preprocessed_ds))
expected_element = (
tf.zeros(shape=expected_element_shape, dtype=tf.float32),
tf.ones(shape=(1,), dtype=tf.int32),
)
self.assertAllClose(self.evaluate(element), expected_element)
def test_preprocess_is_no_op_for_normalized_image(self):
crop_shape = (1, 1, 3)
x = tf.constant([[[1.0, -1.0, 0.0]]]) # Has shape (1, 1, 3), mean 0
x = x / tf.math.reduce_std(x) # x now has variance 1
simple_example = collections.OrderedDict(image=x, label=0)
image_map = image_classification_preprocessing.build_image_map(
crop_shape, distort=False
)
cropped_example = image_map(simple_example)
self.assertEqual(cropped_example[0].shape, crop_shape)
self.assertAllClose(x, cropped_example[0], rtol=1e-03)
self.assertEqual(cropped_example[1], 0)
@parameterized.named_parameters(
('max_elements1', 1),
('max_elements3', 3),
('max_elements7', 7),
('max_elements11', 11),
('max_elements18', 18),
)
def test_ds_length_with_max_elements(self, max_elements):
repeat_size = 10
ds = tf.data.Dataset.from_tensor_slices(TEST_DATA).repeat(repeat_size)
preprocess_spec = client_spec.ClientSpec(
num_epochs=1, batch_size=1, max_elements=max_elements
)
preprocess_fn = image_classification_preprocessing.create_preprocess_fn(
preprocess_spec
)
preprocessed_ds = preprocess_fn(ds)
self.assertEqual(
_compute_length_of_dataset(preprocessed_ds),
min(repeat_size, max_elements),
)
if __name__ == '__main__':
execution_contexts.set_sync_local_cpp_execution_context()
tf.test.main()
|
2,894 |
parse args
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import math
import os
import os.path as osp
import mmcv
import mmengine
from mmocr.utils import crop_img, dump_ocr_data
def collect_files(img_dir, gt_dir, ratio):
"""Collect all images and their corresponding groundtruth files.
Args:
img_dir (str): The image directory
gt_dir (str): The groundtruth directory
ratio (float): Split ratio for val set
Returns:
files (list): The list of tuples (img_file, groundtruth_file)
"""
assert isinstance(img_dir, str)
assert img_dir
assert isinstance(gt_dir, str)
assert gt_dir
assert isinstance(ratio, float)
    assert ratio < 1.0, 'val_ratio should be a float between 0.0 and 1.0'
ann_list, imgs_list = [], []
for ann_file in os.listdir(gt_dir):
ann_list.append(osp.join(gt_dir, ann_file))
imgs_list.append(osp.join(img_dir, ann_file.replace('txt', 'jpg')))
all_files = list(zip(imgs_list, ann_list))
assert len(all_files), f'No images found in {img_dir}'
print(f'Loaded {len(all_files)} images from {img_dir}')
trn_files, val_files = [], []
if ratio > 0:
for i, file in enumerate(all_files):
if i % math.floor(1 / ratio):
trn_files.append(file)
else:
val_files.append(file)
else:
trn_files, val_files = all_files, []
print(f'training #{len(trn_files)}, val #{len(val_files)}')
return trn_files, val_files
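# A minimal sketch of the split rule used above (the numbers are assumed, not
# taken from RCTW): with ratio=0.25, math.floor(1 / ratio) == 4, so indices
# 0, 4, 8, ... go to the val set and the other three of every four files go to
# training. This helper is illustrative only and is not called elsewhere.
def _sketch_split(num_files=8, ratio=0.25):
    trn, val = [], []
    for i in range(num_files):
        if i % math.floor(1 / ratio):
            trn.append(i)
        else:
            val.append(i)
    return trn, val  # ([1, 2, 3, 5, 6, 7], [0, 4]) for the defaults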
def collect_annotations(files, nproc=1):
"""Collect the annotation information.
Args:
files (list): The list of tuples (image_file, groundtruth_file)
nproc (int): The number of process to collect annotations
Returns:
images (list): The list of image information dicts
"""
assert isinstance(files, list)
assert isinstance(nproc, int)
if nproc > 1:
images = mmengine.track_parallel_progress(
load_img_info, files, nproc=nproc)
else:
images = mmengine.track_progress(load_img_info, files)
return images
def load_img_info(files):
"""Load the information of one image.
Args:
files (tuple): The tuple of (img_file, groundtruth_file)
Returns:
img_info (dict): The dict of the img and annotation information
"""
assert isinstance(files, tuple)
img_file, gt_file = files
assert osp.basename(gt_file).split('.')[0] == osp.basename(img_file).split(
'.')[0]
# read imgs while ignoring orientations
img = mmcv.imread(img_file)
img_info = dict(
file_name=osp.join(osp.basename(img_file)),
height=img.shape[0],
width=img.shape[1],
segm_file=osp.join(osp.basename(gt_file)))
if osp.splitext(gt_file)[1] == '.txt':
img_info = load_txt_info(gt_file, img_info)
else:
raise NotImplementedError
return img_info
def load_txt_info(gt_file, img_info):
"""Collect the annotation information.
The annotation format is as the following:
x1, y1, x2, y2, x3, y3, x4, y4, difficult, text
390,902,1856,902,1856,1225,390,1225,0,"金氏眼镜"
1875,1170,2149,1170,2149,1245,1875,1245,0,"创于1989"
2054,1277,2190,1277,2190,1323,2054,1323,0,"城建店"
Args:
gt_file (str): The path to ground-truth
img_info (dict): The dict of the img and annotation information
Returns:
img_info (dict): The dict of the img and annotation information
"""
anno_info = []
with open(gt_file, encoding='utf-8-sig') as f:
lines = f.readlines()
for line in lines:
points = line.split(',')[0:8]
word = line.split(',')[9].rstrip('\n').strip('"')
difficult = 1 if line.split(',')[8] != '0' else 0
bbox = [int(pt) for pt in points]
if word == '###' or difficult == 1:
continue
anno = dict(bbox=bbox, word=word)
anno_info.append(anno)
img_info.update(anno_info=anno_info)
return img_info
def generate_ann(root_path, split, image_infos, preserve_vertical):
"""Generate cropped annotations and label txt file.
Args:
root_path (str): The root path of the dataset
split (str): The split of dataset. Namely: training or val
image_infos (list[dict]): A list of dicts of the img and
annotation information
preserve_vertical (bool): Whether to preserve vertical texts
"""
dst_image_root = osp.join(root_path, 'crops', split)
ignore_image_root = osp.join(root_path, 'ignores', split)
if split == 'training':
dst_label_file = osp.join(root_path, 'train_label.json')
elif split == 'val':
dst_label_file = osp.join(root_path, 'val_label.json')
mmengine.mkdir_or_exist(dst_image_root)
mmengine.mkdir_or_exist(ignore_image_root)
img_info = []
for image_info in image_infos:
index = 1
src_img_path = osp.join(root_path, 'imgs', image_info['file_name'])
image = mmcv.imread(src_img_path)
src_img_root = image_info['file_name'].split('.')[0]
for anno in image_info['anno_info']:
word = anno['word']
dst_img = crop_img(image, anno['bbox'], 0, 0)
h, w, _ = dst_img.shape
dst_img_name = f'{src_img_root}_{index}.png'
index += 1
# Skip invalid annotations
if min(dst_img.shape) == 0:
continue
# Filter out vertical texts
if not preserve_vertical and h / w > 2:
dst_img_path = osp.join(ignore_image_root, dst_img_name)
mmcv.imwrite(dst_img, dst_img_path)
continue
dst_img_path = osp.join(dst_image_root, dst_img_name)
mmcv.imwrite(dst_img, dst_img_path)
img_info.append({
'file_name': dst_img_name,
'anno_info': [{
'text': word
}]
})
dump_ocr_data(img_info, dst_label_file, 'textrecog')
def METHOD_NAME():
parser = argparse.ArgumentParser(
description='Generate training and val set of RCTW.')
parser.add_argument('root_path', help='Root dir path of RCTW')
parser.add_argument(
'--val-ratio', help='Split ratio for val set', default=0.0, type=float)
parser.add_argument(
'--nproc', default=1, type=int, help='Number of process')
parser.add_argument(
'--preserve-vertical',
help='Preserve samples containing vertical texts',
action='store_true')
args = parser.METHOD_NAME()
return args
def main():
args = METHOD_NAME()
root_path = args.root_path
ratio = args.val_ratio
trn_files, val_files = collect_files(
osp.join(root_path, 'imgs'), osp.join(root_path, 'annotations'), ratio)
# Train set
with mmengine.Timer(
print_tmpl='It takes {}s to convert RCTW Training annotation'):
trn_infos = collect_annotations(trn_files, nproc=args.nproc)
generate_ann(root_path, 'training', trn_infos, args.preserve_vertical)
# Val set
if len(val_files) > 0:
with mmengine.Timer(
print_tmpl='It takes {}s to convert RCTW Val annotation'):
val_infos = collect_annotations(val_files, nproc=args.nproc)
generate_ann(root_path, 'val', val_infos, args.preserve_vertical)
if __name__ == '__main__':
main()
|
2,895 |
get base submission message address
|
# Copyright The IETF Trust 2015-2023, All Rights Reserved
from collections import namedtuple
import debug # pyflakes:ignore
from ietf.mailtrigger.models import MailTrigger, Recipient
from ietf.submit.models import Submission
from ietf.utils.mail import excludeaddrs
class AddrLists(namedtuple('AddrLists',['to','cc'])):
__slots__ = ()
def as_strings(self,compact=True):
separator = ", " if compact else ",\n "
to_string = separator.join(self.to)
cc_string = separator.join(self.cc)
return namedtuple('AddrListsAsStrings',['to','cc'])(to=to_string,cc=cc_string)
def gather_address_lists(slug, skipped_recipients=None, create_from_slug_if_not_exists=None,
desc_if_not_exists=None, **kwargs):
mailtrigger = get_mailtrigger(slug, create_from_slug_if_not_exists, desc_if_not_exists)
to = set()
for recipient in mailtrigger.to.all():
to.update(recipient.gather(**kwargs))
to.discard('')
if skipped_recipients:
to = excludeaddrs(to, skipped_recipients)
cc = set()
for recipient in mailtrigger.cc.all():
cc.update(recipient.gather(**kwargs))
cc.discard('')
if skipped_recipients:
cc = excludeaddrs(cc, skipped_recipients)
return AddrLists(to=sorted(list(to)),cc=sorted(list(cc)))
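# A hedged usage sketch: 'doc_state_edited' is one of the slugs referenced
# further down in this module, while the `doc` argument is a placeholder for an
# ietf Document instance; this helper is illustrative and not called anywhere.
def _sketch_addresses_for_doc_edit(doc):
    addrs = gather_address_lists('doc_state_edited', doc=doc)
    # AddrLists.as_strings() joins the sorted address sets into header-ready strings.
    return addrs.as_strings(compact=False)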
def get_mailtrigger(slug, create_from_slug_if_not_exists, desc_if_not_exists):
try:
mailtrigger = MailTrigger.objects.get(slug=slug)
except MailTrigger.DoesNotExist:
if create_from_slug_if_not_exists and desc_if_not_exists:
template = MailTrigger.objects.get(slug=create_from_slug_if_not_exists)
mailtrigger = MailTrigger.objects.create(slug=slug, desc=desc_if_not_exists)
mailtrigger.to.set(template.to.all())
mailtrigger.cc.set(template.cc.all())
if slug.startswith('review_completed') and slug.endswith('early'):
mailtrigger.cc.remove('ietf_last_call')
else:
raise
return mailtrigger
def gather_relevant_expansions(**kwargs):
def starts_with(prefix):
return MailTrigger.objects.filter(slug__startswith=prefix).values_list('slug',flat=True)
relevant = set()
if 'doc' in kwargs:
doc = kwargs['doc']
relevant.add('doc_state_edited')
        if doc.type_id not in ['bofreq', 'statement']:
relevant.update(['doc_telechat_details_changed','ballot_deferred','iesg_ballot_saved'])
if doc.type_id in ['draft','statchg']:
relevant.update(starts_with('last_call_'))
if doc.type_id == 'draft':
relevant.update(starts_with('doc_'))
relevant.update(starts_with('resurrection_'))
relevant.update(['ipr_posted_on_doc',])
if doc.stream_id == 'ietf':
relevant.update(['ballot_approved_ietf_stream','pubreq_iesg'])
else:
relevant.update(['pubreq_rfced'])
last_submission = Submission.objects.filter(name=doc.name,state='posted').order_by('-rev').first()
if last_submission and 'submission' not in kwargs:
kwargs['submission'] = last_submission
if doc.type_id == 'conflrev':
relevant.update(['conflrev_requested','ballot_approved_conflrev'])
if doc.type_id == 'charter':
relevant.update(['charter_external_review','ballot_approved_charter'])
if doc.type_id == 'bofreq':
relevant.update(starts_with('bofreq'))
if 'group' in kwargs:
relevant.update(starts_with('group_'))
relevant.update(starts_with('milestones_'))
group = kwargs['group']
if group.features.acts_like_wg:
relevant.update(starts_with('session_'))
if group.features.has_chartering_process:
relevant.update(['charter_external_review',])
if 'submission' in kwargs:
relevant.update(starts_with('sub_'))
rule_list = []
for mailtrigger in MailTrigger.objects.filter(slug__in=relevant):
addrs = gather_address_lists(mailtrigger.slug,**kwargs)
if addrs.to or addrs.cc:
rule_list.append((mailtrigger.slug,mailtrigger.desc,addrs.to,addrs.cc))
return sorted(rule_list)
def METHOD_NAME():
return Recipient.objects.get(slug='submission_manualpost_handling').gather()[0]
def get_base_ipr_request_address():
return Recipient.objects.get(slug='ipr_requests').gather()[0]
|
2,896 |
playback url
|
import os
from io import StringIO
try:
import re2 as re
except ImportError:
import re
from collections import OrderedDict
from uuid import NAMESPACE_DNS, uuid3
from django.template.defaultfilters import register
from django.utils.html import escape
from django.utils.safestring import mark_safe
@register.filter("is_string")
def is_string(value):
return isinstance(value, str)
@register.filter("comma_join")
def comma_join(value):
return ",".join(str(task) for task in value)
@register.filter("network_rn")
def network_rn_func(value):
"""get basename from path"""
if isinstance(value, bytes):
value = value.decode()
return list(filter(None, value.split("\r\n")))
@register.filter("filename")
def filename(value):
"""get basename from path"""
return os.path.basename(value)
@register.filter("mongo_id")
def mongo_id(value):
"""Retrieve _id value.
@todo: it will be removed in future.
"""
if isinstance(value, dict):
if "_id" in value:
value = value["_id"]
# Return value
return str(value)
@register.filter("is_dict")
def is_dict(value):
"""Checks if value is an instance of dict"""
return isinstance(value, dict)
@register.filter
def get_item(dictionary, key):
return dictionary.get(key, "")
malware_name_url_pattern = """<a href="/analysis/search/detections:{malware_name}"><span style="color:#EE1B2F;font-weight: bold;">{malware_name}</span></a>"""
@register.filter("get_detection_by_pid")
def get_detection_by_pid(dictionary, key):
if not dictionary:
return
detections = dictionary.get(str(key), "")
if detections:
if len(detections) > 1:
output = " -> ".join([malware_name_url_pattern.format(malware_name=name) for name in detections])
else:
output = malware_name_url_pattern.format(malware_name=detections[0])
return mark_safe(output)
@register.filter(name="dehex")
def dehex(value):
return re.sub(r"\\x[0-9a-f]{2}", "", value)
@register.filter(name="sort")
def sort(value):
if isinstance(value, dict):
sorteddict = OrderedDict()
sortedkeys = sorted(value.keys())
for key in sortedkeys:
sorteddict[key] = value[key]
return sorteddict
return value
@register.filter(name="format_cli")
def format_cli(cli, length):
if cli.startswith('"'):
ret = " ".join(cli[cli[1:].index('"') + 2 :].split()).strip()
else:
ret = " ".join(cli.split()[1:]).strip()
if len(ret) >= length + 15:
ret = ret[:length] + " ...(truncated)"
# Return blank string instead of 'None'
if not ret:
return ""
return ret
@register.filter(name="flare_capa_capability")
def flare_capa_capabilities(obj, *args, **kwargs):
result = StringIO()
def _print(lvl, s):
result.write((lvl * " ") + s)
_print(1, '<table class="table table-striped table-hover table-bordered">\n')
_print(1, "<thead>\n")
_print(1, "<tr>\n")
_print(1, '<th scope="col">Namespace</th>\n')
_print(1, '<th scope="col">Capability</th>\n')
_print(2, "</tr>\n")
_print(3, "</thead>\n")
_print(3, "<tbody>\n")
for namespaces, capabilities in obj.get("CAPABILITY", {}).items():
_print(4, "<tr>\n")
_print(4, '<th width="25%" scope="row">' + namespaces + "</th>\n")
_print(4, "<td>\n")
for capability in capabilities:
_print(5, "<li>" + capability + "</li>\n")
_print(4, "</td>\n")
_print(3, "</tr>\n")
_print(2, "</tbody>\n")
_print(1, "</table>\n")
ret_result = result.getvalue()
result.close()
return mark_safe(ret_result)
@register.filter(name="flare_capa_attck")
def flare_capa_attck(obj, *args, **kwargs):
result = StringIO()
def _print(lvl, s):
result.write((lvl * " ") + s)
_print(1, '<table class="table table-striped table-hover table-bordered">\n')
_print(1, "<thead>\n")
_print(1, "<tr>\n")
_print(1, '<th scope="col">ATT&CK Tactic</th>\n')
_print(1, '<th scope="col">ATT&CK Technique</th>\n')
_print(2, "</tr>\n")
_print(3, "</thead>\n")
_print(3, "<tbody>\n")
for tactic, techniques in obj.get("ATTCK", {}).items():
_print(4, "<tr>\n")
_print(4, '<th scope="row">' + tactic + "</th>\n")
_print(4, "<td>\n")
for technique in techniques:
_print(5, "<li>" + technique + "</li>\n")
_print(4, "</td>\n")
_print(3, "</tr>\n")
_print(2, "</tbody>\n")
_print(1, "</table>\n")
ret_result = result.getvalue()
result.close()
return mark_safe(ret_result)
@register.filter(name="flare_capa_mbc")
def flare_capa_mbc(obj, *args, **kwargs):
result = StringIO()
def _print(lvl, s):
result.write((lvl * " ") + s)
_print(1, '<table class="table table-striped table-hover table-bordered">\n')
_print(1, "<thead>\n")
_print(1, "<tr>\n")
_print(1, '<th scope="col">MBC Objective</th>\n')
_print(1, '<th scope="col">MBC Behavior</th>\n')
_print(2, "</tr>\n")
_print(3, "</thead>\n")
_print(3, "<tbody>\n")
for objective, behaviors in obj.get("MBC", {}).items():
_print(4, "<tr>\n")
_print(4, '<th scope="row">' + objective + "</th>\n")
_print(4, "<td>\n")
for behavior in behaviors:
_print(5, "<li>" + behavior + "</li>\n")
_print(4, "</td>\n")
_print(3, "</tr>\n")
_print(2, "</tbody>\n")
_print(1, "</table>\n")
ret_result = result.getvalue()
result.close()
return mark_safe(ret_result)
# Thanks Sandor
@register.simple_tag
def malware_config(obj, *args, **kwargs):
"""Custom Django tag for improved malware config rendering.
This tag will render Python dicts as tables, and Python lists as
unordered lists. Empty dicts and lists are rendered as empty fields.
Single element lists are expanded and rendered as regular values.
"""
level = kwargs.get("level") or 0
result = StringIO()
def _print(lvl, s):
result.write((lvl * " ") + str(s))
if isinstance(obj, dict):
if obj:
_print(0, "\n")
_print(level + 0, "<table>\n")
for key, val in obj.items():
_print(level + 1, "<tr>\n")
_print(level + 2, "<td>" + malware_config(key, level=level + 3) + "</td>\n")
_print(level + 2, "<td>" + malware_config(val, level=level + 3) + "</td>\n")
_print(level + 1, "</tr>\n")
_print(level + 0, "</table>\n")
_print(level - 1, "")
elif isinstance(obj, list):
if obj:
if len(obj) > 1:
_print(0, "\n")
_print(level + 0, '<ul style="margin: 0;columns: 4;">\n')
for item in obj:
_print(level + 1, "<li>" + malware_config(item, level=level + 2) + "</li>\n")
_print(level + 0, "</ul>\n")
_print(level - 1, "")
else:
result.write(malware_config(obj[0]))
else:
result.write('<pre style="margin: 0">' + escape(str(obj)) + "</pre>")
ret_result = result.getvalue()
result.close()
return mark_safe(ret_result)
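# A minimal sketch with made-up values: malware_config renders dicts as nested
# tables and multi-element lists as unordered lists, so the small config below
# comes back as HTML markup rather than a plain repr string. Not used by any view.
def _sketch_render_config():
    sample = {"family": "examplebot", "c2": ["10.0.0.1", "10.0.0.2"]}
    return malware_config(sample)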
@register.filter(name="playback_url")
def METHOD_NAME(task_id):
session_id = uuid3(NAMESPACE_DNS, str(task_id)).hex[:16]
return f"{task_id}_{session_id}"
|
2,897 |
location
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetMoveCollectionResult',
'AwaitableGetMoveCollectionResult',
'get_move_collection',
'get_move_collection_output',
]
@pulumi.output_type
class GetMoveCollectionResult:
"""
Define the move collection.
"""
def __init__(__self__, etag=None, id=None, identity=None, METHOD_NAME=None, name=None, properties=None, system_data=None, tags=None, type=None):
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if identity and not isinstance(identity, dict):
raise TypeError("Expected argument 'identity' to be a dict")
pulumi.set(__self__, "identity", identity)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", METHOD_NAME)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def etag(self) -> str:
"""
The etag of the resource.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource Id for the resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def identity(self) -> Optional['outputs.IdentityResponse']:
"""
Defines the MSI properties of the Move Collection.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter
def METHOD_NAME(self) -> Optional[str]:
"""
The geo-location where the resource lives.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> 'outputs.MoveCollectionPropertiesResponse':
"""
Defines the move collection properties.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Metadata pertaining to creation and last modification of the resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
class AwaitableGetMoveCollectionResult(GetMoveCollectionResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetMoveCollectionResult(
etag=self.etag,
id=self.id,
identity=self.identity,
METHOD_NAME=self.METHOD_NAME,
name=self.name,
properties=self.properties,
system_data=self.system_data,
tags=self.tags,
type=self.type)
def get_move_collection(move_collection_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetMoveCollectionResult:
"""
Gets the move collection.
Azure REST API version: 2022-08-01.
:param str move_collection_name: The Move Collection Name.
:param str resource_group_name: The Resource Group Name.
"""
__args__ = dict()
__args__['moveCollectionName'] = move_collection_name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:migrate:getMoveCollection', __args__, opts=opts, typ=GetMoveCollectionResult).value
return AwaitableGetMoveCollectionResult(
etag=pulumi.get(__ret__, 'etag'),
id=pulumi.get(__ret__, 'id'),
identity=pulumi.get(__ret__, 'identity'),
METHOD_NAME=pulumi.get(__ret__, 'location'),
name=pulumi.get(__ret__, 'name'),
properties=pulumi.get(__ret__, 'properties'),
system_data=pulumi.get(__ret__, 'system_data'),
tags=pulumi.get(__ret__, 'tags'),
type=pulumi.get(__ret__, 'type'))
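# A hedged usage sketch (the resource names are placeholders and the call only
# resolves inside a running Pulumi program): the lookup above exposes the move
# collection's fields as attributes on the returned result object.
def _sketch_lookup(resource_group_name='example-rg', move_collection_name='example-mc'):
    result = get_move_collection(
        move_collection_name=move_collection_name,
        resource_group_name=resource_group_name,
    )
    return result.name, result.properties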
@_utilities.lift_output_func(get_move_collection)
def get_move_collection_output(move_collection_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetMoveCollectionResult]:
"""
Gets the move collection.
Azure REST API version: 2022-08-01.
:param str move_collection_name: The Move Collection Name.
:param str resource_group_name: The Resource Group Name.
"""
...
|
2,898 |
time array 1
|
from .common import Benchmark
import numpy as np
class Core(Benchmark):
def setup(self):
self.l100 = range(100)
self.l50 = range(50)
self.float_l1000 = [float(i) for i in range(1000)]
self.float64_l1000 = [np.float64(i) for i in range(1000)]
self.int_l1000 = list(range(1000))
self.l = [np.arange(1000), np.arange(1000)]
self.l_view = [memoryview(a) for a in self.l]
self.l10x10 = np.ones((10, 10))
self.float64_dtype = np.dtype(np.float64)
def METHOD_NAME(self):
np.array(1)
def time_array_empty(self):
np.array([])
def time_array_l1(self):
np.array([1])
def time_array_l100(self):
np.array(self.l100)
def time_array_float_l1000(self):
np.array(self.float_l1000)
def time_array_float_l1000_dtype(self):
np.array(self.float_l1000, dtype=self.float64_dtype)
def time_array_float64_l1000(self):
np.array(self.float64_l1000)
def time_array_int_l1000(self):
np.array(self.int_l1000)
def time_array_l(self):
np.array(self.l)
def time_array_l_view(self):
np.array(self.l_view)
def time_can_cast(self):
np.can_cast(self.l10x10, self.float64_dtype)
def time_can_cast_same_kind(self):
np.can_cast(self.l10x10, self.float64_dtype, casting="same_kind")
def time_vstack_l(self):
np.vstack(self.l)
def time_hstack_l(self):
np.hstack(self.l)
def time_dstack_l(self):
np.dstack(self.l)
def time_arange_100(self):
np.arange(100)
def time_zeros_100(self):
np.zeros(100)
def time_ones_100(self):
np.ones(100)
def time_empty_100(self):
np.empty(100)
def time_empty_like(self):
np.empty_like(self.l10x10)
def time_eye_100(self):
np.eye(100)
def time_identity_100(self):
np.identity(100)
def time_eye_3000(self):
np.eye(3000)
def time_identity_3000(self):
np.identity(3000)
def time_diag_l100(self):
np.diag(self.l100)
def time_diagflat_l100(self):
np.diagflat(self.l100)
def time_diagflat_l50_l50(self):
np.diagflat([self.l50, self.l50])
def time_triu_l10x10(self):
np.triu(self.l10x10)
def time_tril_l10x10(self):
np.tril(self.l10x10)
def time_triu_indices_500(self):
np.triu_indices(500)
def time_tril_indices_500(self):
np.tril_indices(500)
class Temporaries(Benchmark):
def setup(self):
self.amid = np.ones(50000)
self.bmid = np.ones(50000)
self.alarge = np.ones(1000000)
self.blarge = np.ones(1000000)
def time_mid(self):
(self.amid * 2) + self.bmid
def time_mid2(self):
(self.amid + self.bmid) - 2
def time_large(self):
(self.alarge * 2) + self.blarge
def time_large2(self):
(self.alarge + self.blarge) - 2
class CorrConv(Benchmark):
params = [[50, 1000, int(1e5)],
[10, 100, 1000, int(1e4)],
['valid', 'same', 'full']]
param_names = ['size1', 'size2', 'mode']
def setup(self, size1, size2, mode):
self.x1 = np.linspace(0, 1, num=size1)
self.x2 = np.cos(np.linspace(0, 2*np.pi, num=size2))
def time_correlate(self, size1, size2, mode):
np.correlate(self.x1, self.x2, mode=mode)
def time_convolve(self, size1, size2, mode):
np.convolve(self.x1, self.x2, mode=mode)
class CountNonzero(Benchmark):
param_names = ['numaxes', 'size', 'dtype']
params = [
[1, 2, 3],
[100, 10000, 1000000],
[bool, np.int8, np.int16, np.int32, np.int64, str, object]
]
def setup(self, numaxes, size, dtype):
self.x = np.arange(numaxes * size).reshape(numaxes, size)
self.x = (self.x % 3).astype(dtype)
def time_count_nonzero(self, numaxes, size, dtype):
np.count_nonzero(self.x)
def time_count_nonzero_axis(self, numaxes, size, dtype):
np.count_nonzero(self.x, axis=self.x.ndim - 1)
def time_count_nonzero_multi_axis(self, numaxes, size, dtype):
if self.x.ndim >= 2:
np.count_nonzero(self.x, axis=(
self.x.ndim - 1, self.x.ndim - 2))
class PackBits(Benchmark):
param_names = ['dtype']
params = [[bool, np.uintp]]
def setup(self, dtype):
self.d = np.ones(10000, dtype=dtype)
self.d2 = np.ones((200, 1000), dtype=dtype)
def time_packbits(self, dtype):
np.packbits(self.d)
def time_packbits_little(self, dtype):
np.packbits(self.d, bitorder="little")
def time_packbits_axis0(self, dtype):
np.packbits(self.d2, axis=0)
def time_packbits_axis1(self, dtype):
np.packbits(self.d2, axis=1)
class UnpackBits(Benchmark):
def setup(self):
self.d = np.ones(10000, dtype=np.uint8)
self.d2 = np.ones((200, 1000), dtype=np.uint8)
def time_unpackbits(self):
np.unpackbits(self.d)
def time_unpackbits_little(self):
np.unpackbits(self.d, bitorder="little")
def time_unpackbits_axis0(self):
np.unpackbits(self.d2, axis=0)
def time_unpackbits_axis1(self):
np.unpackbits(self.d2, axis=1)
def time_unpackbits_axis1_little(self):
np.unpackbits(self.d2, bitorder="little", axis=1)
class Indices(Benchmark):
def time_indices(self):
np.indices((1000, 500))
class StatsMethods(Benchmark):
params = [['int64', 'uint64', 'float32', 'float64',
'complex64', 'bool_'],
[100, 10000]]
param_names = ['dtype', 'size']
def setup(self, dtype, size):
self.data = np.ones(size, dtype=dtype)
if dtype.startswith('complex'):
self.data = np.random.randn(size) + 1j * np.random.randn(size)
def time_min(self, dtype, size):
self.data.min()
def time_max(self, dtype, size):
self.data.max()
def time_mean(self, dtype, size):
self.data.mean()
def time_std(self, dtype, size):
self.data.std()
def time_prod(self, dtype, size):
self.data.prod()
def time_var(self, dtype, size):
self.data.var()
def time_sum(self, dtype, size):
self.data.sum()
|
2,899 |
get next
|
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._log_files_operations import build_list_by_server_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class LogFilesOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.rdbms.mysql_flexibleservers.aio.MySQLManagementClient`'s
:attr:`log_files` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list_by_server(
self, resource_group_name: str, server_name: str, **kwargs: Any
) -> AsyncIterable["_models.LogFile"]:
"""List all the server log files in a given server.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param server_name: The name of the server. Required.
:type server_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either LogFile or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.rdbms.mysql_flexibleservers.models.LogFile]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2021-12-01-preview"))
cls: ClsType[_models.LogFileListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_server_request(
resource_group_name=resource_group_name,
server_name=server_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_by_server.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = HttpRequest("GET", next_link)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("LogFileListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)
async def METHOD_NAME(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(METHOD_NAME, extract_data)
list_by_server.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforMySQL/flexibleServers/{serverName}/logFiles"
}
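# A hedged usage sketch (client construction is omitted and the LogFile
# attribute names are assumed): the pager returned by list_by_server() is
# consumed with `async for`, and later pages are fetched lazily through the
# internal page callback shown above.
async def _sketch_iterate_log_files(client, resource_group_name, server_name):
    names = []
    async for log_file in client.log_files.list_by_server(resource_group_name, server_name):
        names.append(log_file.name)
    return names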
|