id (int64, 0–300k) | label (string, 1–74 chars, nullable ⌀) | text (string, 4k–8k chars) |
---|---|---|
2,500 | capped |
# This file is part of Hypothesis, which may be found at
# https://github.com/HypothesisWorks/hypothesis/
#
# Copyright the Hypothesis Authors.
# Individual contributors are listed in AUTHORS.rst and the git log.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
import threading
from contextlib import contextmanager
from hypothesis.errors import InvalidArgument
from hypothesis.internal.lazyformat import lazyformat
from hypothesis.internal.reflection import get_pretty_function_description
from hypothesis.internal.validation import check_type
from hypothesis.strategies._internal.strategies import (
OneOfStrategy,
SearchStrategy,
check_strategy,
)
class LimitReached(BaseException):
pass
class LimitedStrategy(SearchStrategy):
def __init__(self, strategy):
super().__init__()
self.base_strategy = strategy
self._threadlocal = threading.local()
@property
def marker(self):
return getattr(self._threadlocal, "marker", 0)
@marker.setter
def marker(self, value):
self._threadlocal.marker = value
@property
def currently_capped(self):
return getattr(self._threadlocal, "currently_capped", False)
@currently_capped.setter
def currently_capped(self, value):
self._threadlocal.currently_capped = value
def __repr__(self):
return f"LimitedStrategy({self.base_strategy!r})"
def do_validate(self):
self.base_strategy.validate()
def do_draw(self, data):
assert self.currently_capped
if self.marker <= 0:
raise LimitReached
self.marker -= 1
return data.draw(self.base_strategy)
@contextmanager
def METHOD_NAME(self, max_templates):
try:
was_capped = self.currently_capped
self.currently_capped = True
self.marker = max_templates
yield
finally:
self.currently_capped = was_capped
class RecursiveStrategy(SearchStrategy):
def __init__(self, base, extend, max_leaves):
self.max_leaves = max_leaves
self.base = base
self.limited_base = LimitedStrategy(base)
self.extend = extend
strategies = [self.limited_base, self.extend(self.limited_base)]
while 2 ** (len(strategies) - 1) <= max_leaves:
strategies.append(extend(OneOfStrategy(tuple(strategies))))
self.strategy = OneOfStrategy(strategies)
def __repr__(self):
if not hasattr(self, "_cached_repr"):
self._cached_repr = "recursive(%r, %s, max_leaves=%d)" % (
self.base,
get_pretty_function_description(self.extend),
self.max_leaves,
)
return self._cached_repr
def do_validate(self):
check_strategy(self.base, "base")
extended = self.extend(self.limited_base)
check_strategy(extended, f"extend({self.limited_base!r})")
self.limited_base.validate()
extended.validate()
check_type(int, self.max_leaves, "max_leaves")
if self.max_leaves <= 0:
raise InvalidArgument(
f"max_leaves={self.max_leaves!r} must be greater than zero"
)
def do_draw(self, data):
count = 0
while True:
try:
with self.limited_base.METHOD_NAME(self.max_leaves):
return data.draw(self.strategy)
except LimitReached:
# Workaround for possible coverage bug - this branch is definitely
# covered but for some reason is showing up as not covered.
if count == 0: # pragma: no branch
data.note_event(
lazyformat(
"Draw for %r exceeded max_leaves and had to be retried",
self,
)
)
count += 1
|
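The classes above are the internal machinery behind `hypothesis.strategies.recursive`. A minimal sketch of that public entry point, with illustrative strategy choices (the json-like shape and `max_leaves=10` are assumptions for the example, not part of the snippet above):

```python
from hypothesis import given, strategies as st

# Leaves come from the base strategy; extend wraps children in small lists; max_leaves
# is the per-example cap that LimitedStrategy enforces during drawing.
json_like = st.recursive(
    st.none() | st.booleans() | st.integers(),
    lambda children: st.lists(children, max_size=3),
    max_leaves=10,
)

@given(json_like)
def test_generates_nested_values(value):
    assert value is None or isinstance(value, (bool, int, list))
```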
2,501 | cell flowsheet model |
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES).
#
# Copyright (c) 2018-2023 by the software owners: The Regents of the
# University of California, through Lawrence Berkeley National Laboratory,
# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon
# University, West Virginia University Research Corporation, et al.
# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md
# for full copyright and license information.
#################################################################################
# TODO: Missing doc strings
# pylint: disable=missing-module-docstring
__author__ = "Douglas Allan"
import pyomo.environ as pyo
from idaes.core import FlowsheetBlock
def METHOD_NAME(dynamic, time_set, zfaces):
# function that creates a unit model with cell-level variables for testing
# subcomponents that require them
m = pyo.ConcreteModel()
m.fs = FlowsheetBlock(dynamic=False, time_set=time_set, time_units=pyo.units.s)
tset = m.fs.config.time
znodes = m.fs.znodes = pyo.Set(
initialize=[(zfaces[i] + zfaces[i + 1]) / 2.0 for i in range(len(zfaces) - 1)]
)
iznodes = m.fs.iznodes = pyo.Set(initialize=range(1, len(znodes) + 1))
m.fs.length_z = pyo.Var(initialize=0.25, units=pyo.units.m)
m.fs.length_y = pyo.Var(initialize=0.25, units=pyo.units.m)
m.fs.current_density = pyo.Var(
tset, iznodes, initialize=0, units=pyo.units.A / pyo.units.m**2
)
m.fs.temperature_z = pyo.Var(tset, iznodes, initialize=1000, units=pyo.units.K)
m.fs.length_y.fix(0.08)
m.fs.length_z.fix(0.08)
m.fs.temperature_z.fix(1000)
m.fs.current_density.fix(0)
return m
def _build_test_utility(block, comp_dict, references=None):
# Takes a unit model and four dictionaries: references (variables),
# not_references (variables), constraints, and expressions. They should
# have the attribute name as a key and the length of the attribute as
# the value. This function goes through and ensures that all these
    # components exist and are the right length, and furthermore that there
    # are no unlisted components of these types
if references is not None:
for attr in references:
try:
comp = getattr(block, attr)
except AttributeError:
raise AttributeError(
f"Reference {attr} missing from block {block.name}."
)
if not comp.is_reference():
raise AssertionError(
f"Attribute {attr} found on block {block.name}, but "
"was not Reference."
)
for comp in block.component_data_objects(descend_into=False):
if comp.is_reference():
if not comp in references:
raise AssertionError(
f"Unexpected Reference {comp.name} encountered "
f"in block {block.name}."
)
for ctype, sub_dict in comp_dict.items():
for attr, length in sub_dict.items():
try:
comp = getattr(block, attr)
except AttributeError:
raise AttributeError(f"{ctype} {attr} missing from block {block.name}.")
if not len(comp) == length:
raise AssertionError(
f"{ctype} {comp.name} was not expected length in block "
f"{block.name}."
)
for comp in block.component_data_objects(ctype=ctype, descend_into=False):
short_name = comp.local_name.split("[")[0]
if not short_name in sub_dict.keys():
raise AssertionError(
f"Unexpected {ctype} {comp.name} encountered in block "
f"{block.name}."
)
|
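A hedged sketch of combining the two helpers above in a test, where METHOD_NAME stands for the masked builder (labelled "cell flowsheet model"); the expected lengths follow from one time point and two z-nodes, and assume the flowsheet adds no other variables:

```python
import pyomo.environ as pyo

# One time point ([0]) and three z-faces give two z-nodes, so the indexed variables
# each hold 1 x 2 = 2 entries, while the scalar lengths hold one apiece.
m = METHOD_NAME(dynamic=False, time_set=[0], zfaces=[0.0, 0.5, 1.0])
_build_test_utility(
    m.fs,
    comp_dict={
        pyo.Var: {
            "length_z": 1,
            "length_y": 1,
            "current_density": 2,
            "temperature_z": 2,
        }
    },
)
```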
2,502 | about |
"""
@generated by mypy-protobuf. Do not edit manually!
isort:skip_file
Copyright 2016-2023, Pulumi Corporation.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import abc
import collections.abc
import google.protobuf.empty_pb2
import grpc
import grpc.aio
import typing
import pulumi.language_pb2
import pulumi.plugin_pb2
class LanguageRuntimeStub:
"""LanguageRuntime is the interface that the planning monitor uses to drive execution of an interpreter responsible
    for configuring and creating resource objects.
"""
def __init__(self, channel: grpc.Channel) -> None: ...
GetRequiredPlugins: grpc.UnaryUnaryMultiCallable[
pulumi.language_pb2.GetRequiredPluginsRequest,
pulumi.language_pb2.GetRequiredPluginsResponse,
]
"""GetRequiredPlugins computes the complete set of anticipated plugins required by a program."""
Run: grpc.UnaryUnaryMultiCallable[
pulumi.language_pb2.RunRequest,
pulumi.language_pb2.RunResponse,
]
"""Run executes a program and returns its result."""
GetPluginInfo: grpc.UnaryUnaryMultiCallable[
google.protobuf.empty_pb2.Empty,
pulumi.plugin_pb2.PluginInfo,
]
"""GetPluginInfo returns generic information about this plugin, like its version."""
InstallDependencies: grpc.UnaryStreamMultiCallable[
pulumi.language_pb2.InstallDependenciesRequest,
pulumi.language_pb2.InstallDependenciesResponse,
]
"""InstallDependencies will install dependencies for the project, e.g. by running `npm install` for nodejs projects."""
METHOD_NAME: grpc.UnaryUnaryMultiCallable[
google.protobuf.empty_pb2.Empty,
pulumi.language_pb2.AboutResponse,
]
"""About returns information about the runtime for this language."""
GetProgramDependencies: grpc.UnaryUnaryMultiCallable[
pulumi.language_pb2.GetProgramDependenciesRequest,
pulumi.language_pb2.GetProgramDependenciesResponse,
]
"""GetProgramDependencies returns the set of dependencies required by the program."""
RunPlugin: grpc.UnaryStreamMultiCallable[
pulumi.language_pb2.RunPluginRequest,
pulumi.language_pb2.RunPluginResponse,
]
"""RunPlugin executes a plugin program and returns its result asynchronously."""
GenerateProgram: grpc.UnaryUnaryMultiCallable[
pulumi.language_pb2.GenerateProgramRequest,
pulumi.language_pb2.GenerateProgramResponse,
]
"""GenerateProgram generates a given PCL program into a program for this language."""
GenerateProject: grpc.UnaryUnaryMultiCallable[
pulumi.language_pb2.GenerateProjectRequest,
pulumi.language_pb2.GenerateProjectResponse,
]
"""GenerateProject generates a given PCL program into a project for this language."""
GeneratePackage: grpc.UnaryUnaryMultiCallable[
pulumi.language_pb2.GeneratePackageRequest,
pulumi.language_pb2.GeneratePackageResponse,
]
"""GeneratePackage generates a given pulumi package into a package for this language."""
Pack: grpc.UnaryUnaryMultiCallable[
pulumi.language_pb2.PackRequest,
pulumi.language_pb2.PackResponse,
]
"""Pack packs a package into a language specific artifact."""
class LanguageRuntimeServicer(metaclass=abc.ABCMeta):
"""LanguageRuntime is the interface that the planning monitor uses to drive execution of an interpreter responsible
    for configuring and creating resource objects.
"""
def GetRequiredPlugins(
self,
request: pulumi.language_pb2.GetRequiredPluginsRequest,
context: grpc.ServicerContext,
) -> pulumi.language_pb2.GetRequiredPluginsResponse:
"""GetRequiredPlugins computes the complete set of anticipated plugins required by a program."""
def Run(
self,
request: pulumi.language_pb2.RunRequest,
context: grpc.ServicerContext,
) -> pulumi.language_pb2.RunResponse:
"""Run executes a program and returns its result."""
def GetPluginInfo(
self,
request: google.protobuf.empty_pb2.Empty,
context: grpc.ServicerContext,
) -> pulumi.plugin_pb2.PluginInfo:
"""GetPluginInfo returns generic information about this plugin, like its version."""
def InstallDependencies(
self,
request: pulumi.language_pb2.InstallDependenciesRequest,
context: grpc.ServicerContext,
) -> collections.abc.Iterator[pulumi.language_pb2.InstallDependenciesResponse]:
"""InstallDependencies will install dependencies for the project, e.g. by running `npm install` for nodejs projects."""
def METHOD_NAME(
self,
request: google.protobuf.empty_pb2.Empty,
context: grpc.ServicerContext,
) -> pulumi.language_pb2.AboutResponse:
"""About returns information about the runtime for this language."""
def GetProgramDependencies(
self,
request: pulumi.language_pb2.GetProgramDependenciesRequest,
context: grpc.ServicerContext,
) -> pulumi.language_pb2.GetProgramDependenciesResponse:
"""GetProgramDependencies returns the set of dependencies required by the program."""
def RunPlugin(
self,
request: pulumi.language_pb2.RunPluginRequest,
context: grpc.ServicerContext,
) -> collections.abc.Iterator[pulumi.language_pb2.RunPluginResponse]:
"""RunPlugin executes a plugin program and returns its result asynchronously."""
def GenerateProgram(
self,
request: pulumi.language_pb2.GenerateProgramRequest,
context: grpc.ServicerContext,
) -> pulumi.language_pb2.GenerateProgramResponse:
"""GenerateProgram generates a given PCL program into a program for this language."""
def GenerateProject(
self,
request: pulumi.language_pb2.GenerateProjectRequest,
context: grpc.ServicerContext,
) -> pulumi.language_pb2.GenerateProjectResponse:
"""GenerateProject generates a given PCL program into a project for this language."""
def GeneratePackage(
self,
request: pulumi.language_pb2.GeneratePackageRequest,
context: grpc.ServicerContext,
) -> pulumi.language_pb2.GeneratePackageResponse:
"""GeneratePackage generates a given pulumi package into a package for this language."""
def Pack(
self,
request: pulumi.language_pb2.PackRequest,
context: grpc.ServicerContext,
) -> pulumi.language_pb2.PackResponse:
"""Pack packs a package into a language specific artifact."""
def add_LanguageRuntimeServicer_to_server(servicer: LanguageRuntimeServicer, server: typing.Union[grpc.Server, grpc.aio.Server]) -> None: ...
|
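The stub above only declares types. A hedged sketch of serving an implementation of the masked `About` call, assuming the runtime registration helper lives in the generated `pulumi.language_pb2_grpc` module and leaving the `AboutResponse` fields unset rather than guessing them:

```python
from concurrent import futures

import grpc
import pulumi.language_pb2 as language_pb2
import pulumi.language_pb2_grpc as language_pb2_grpc  # assumed runtime module for this stub


class MyRuntime(language_pb2_grpc.LanguageRuntimeServicer):
    def About(self, request, context):
        # A real runtime would populate version/metadata here.
        return language_pb2.AboutResponse()


server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
language_pb2_grpc.add_LanguageRuntimeServicer_to_server(MyRuntime(), server)
server.add_insecure_port("127.0.0.1:0")
server.start()
```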
2,503 | get service name |
import random
from pathlib import Path
from typing import Callable, Dict, Tuple
import opentelemetry.sdk.metrics.export
import opentelemetry.sdk.metrics.view
import pytest
from opentelemetry.sdk.metrics.export import (
AggregationTemporality,
MetricExporter,
MetricExportResult,
MetricsData,
PeriodicExportingMetricReader,
)
class DirMetricExporter(MetricExporter):
"""Implementation of :class:`MetricExporter` that prints metrics to a file in a given directory.
This class can be used for diagnostic or testing purposes.
"""
def __init__(
self,
metric_dir: str,
preferred_temporality: Dict[type, AggregationTemporality] = None,
preferred_aggregation: Dict[
type, "opentelemetry.sdk.metrics.view.Aggregation"
] = None,
):
super().__init__(
preferred_temporality=preferred_temporality,
preferred_aggregation=preferred_aggregation,
)
self.metric_filename: Path = Path(metric_dir) / str(random.randint(0, 1048575))
self.f = open(self.metric_filename, 'a')
def export(
self,
metrics_data: MetricsData,
timeout_millis: float = 10_000,
**kwargs,
) -> MetricExportResult:
self.f.write(metrics_data.to_json())
self.f.write('\n')
self.f.flush()
return MetricExportResult.SUCCESS
def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None:
pass
def force_flush(self, timeout_millis: float = 10_000) -> bool:
return True
def __del__(self):
self.f.close()
@pytest.fixture(scope='function')
def monkeypatch_metric_exporter(
tmpdir_factory: pytest.TempdirFactory,
) -> Tuple[Callable, Callable]:
import json
import os
import time
from pathlib import Path
import opentelemetry.sdk.metrics.export
collect_path = Path(tmpdir_factory.mktemp('otel-collector'))
metrics_path = collect_path / 'metrics'
os.mkdir(metrics_path)
tick_counter_filename = collect_path / 'tick_counter'
with open(tick_counter_filename, 'w', encoding='utf-8') as f:
f.write('0')
def collect_metrics():
with open(tick_counter_filename, 'r', encoding='utf-8') as f:
tick_counter = int(f.read())
with open(tick_counter_filename, 'w', encoding='utf-8') as f:
f.write(str(tick_counter + 1))
time.sleep(2)
def METHOD_NAME(otel_measurement):
return otel_measurement['resource_metrics'][0]['resource']['attributes'][
'service.name'
]
def read_metrics():
def read_metric_file(filename):
with open(filename, 'r', encoding='utf-8') as f:
return json.loads(f.read())
return {
METHOD_NAME(i): i
for i in map(read_metric_file, metrics_path.glob('*'))
}
class PatchedTextReader(PeriodicExportingMetricReader):
def __init__(self, *args, **kwargs) -> None:
self.exporter = DirMetricExporter(metrics_path)
self.tick_counter = 0
super().__init__(
exporter=self.exporter,
export_interval_millis=500,
)
def _ticker(self) -> None:
interval_secs = self._export_interval_millis / 1e3
while not self._shutdown_event.wait(interval_secs):
with open(tick_counter_filename, 'r', encoding='utf-8') as f:
tick_counter = int(f.read())
if tick_counter != self.tick_counter:
self.tick_counter = tick_counter
self.collect(timeout_millis=self._export_timeout_millis)
self.collect(timeout_millis=self._export_interval_millis)
real_reader = opentelemetry.sdk.metrics.export.PeriodicExportingMetricReader
opentelemetry.sdk.metrics.export.PeriodicExportingMetricReader = PatchedTextReader
yield collect_metrics, read_metrics
opentelemetry.sdk.metrics.export.PeriodicExportingMetricReader = real_reader
|
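A hedged sketch of a test consuming the fixture above; `my-service` is a placeholder for whatever service name the application under test configures on its resource:

```python
def test_service_reports_metrics(monkeypatch_metric_exporter):
    collect_metrics, read_metrics = monkeypatch_metric_exporter

    # ... exercise the instrumented application here so it records measurements ...

    collect_metrics()             # bump the tick counter so the patched reader exports
    metrics = read_metrics()      # {service.name: decoded metrics JSON}
    assert 'my-service' in metrics
```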
2,504 | method |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"vm extension delete",
)
class Delete(AAZCommand):
"""Delete operation to delete the extension.
:example: Use a VM name and extension to delete an extension from a VM.
az vm extension delete -g MyResourceGroup --vm-name MyVm -n MyExtensionName
:example: Delete extensions with IDs containing the string "MyExtension" from a VM.
az vm extension delete --ids $(az resource list --query "[?contains(name, 'MyExtension')].id" -o tsv)
"""
_aaz_info = {
"version": "2020-06-01",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.compute/virtualmachines/{}/extensions/{}", "2020-06-01"],
]
}
AZ_SUPPORT_NO_WAIT = True
def _handler(self, command_args):
super()._handler(command_args)
return self.build_lro_poller(self._execute_operations, None)
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
_args_schema.vm_extension_name = AAZStrArg(
options=["-n", "--name", "--vm-extension-name"],
help="The name of the virtual machine extension.",
required=True,
id_part="child_name_1",
)
_args_schema.vm_name = AAZStrArg(
options=["--vm-name"],
help="The name of the Virtual Machine. You can configure the default using `az configure --defaults vm=<name>`.",
required=True,
id_part="name",
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
yield self.VirtualMachineExtensionsDelete(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
class VirtualMachineExtensionsDelete(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [202]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_200,
self.on_error,
lro_options={"final-state-via": "azure-async-operation"},
path_format_arguments=self.url_parameters,
)
if session.http_response.status_code in [200]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_200,
self.on_error,
lro_options={"final-state-via": "azure-async-operation"},
path_format_arguments=self.url_parameters,
)
if session.http_response.status_code in [204]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_204,
self.on_error,
lro_options={"final-state-via": "azure-async-operation"},
path_format_arguments=self.url_parameters,
)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}",
**self.url_parameters
)
@property
def METHOD_NAME(self):
return "DELETE"
@property
def error_format(self):
return "MgmtErrorFormat"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
**self.serialize_url_param(
"vmExtensionName", self.ctx.args.vm_extension_name,
required=True,
),
**self.serialize_url_param(
"vmName", self.ctx.args.vm_name,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2020-06-01",
required=True,
),
}
return parameters
def on_200(self, session):
pass
def on_204(self, session):
pass
class _DeleteHelper:
"""Helper class for Delete"""
__all__ = ["Delete"]
|
2,505 | set |
import logging
from pyvo.utils.http import create_session
from . import securitymethods
__all__ = ["CredentialStore"]
class CredentialStore(object):
"""
The credential store takes user credentials, and uses them
to create appropriate requests sessions for dispatching
requests using those credentials.
Different types of credentials can be passed in, such as
cookies, a jar of cookies, certificates, and basic auth.
A session can also be associated with a security method
URI by calling the set function.
Before a request is to be dispatched, the AuthSession
calls the get method to retrieve the appropriate
requests.Session for making that HTTP request.
"""
def __init__(self):
self.credentials = {}
self.METHOD_NAME(securitymethods.ANONYMOUS, create_session())
def negotiate_method(self, allowed_methods):
"""
Compare the credentials provided by the user against the
security methods passed in, and determine which method is
to be used for making this request.
Parameters
----------
allowed_methods : list(str)
list of allowed security methods to return
Raises
------
Raises an exception if a common method could not be negotiated.
"""
available_methods = METHOD_NAME(self.credentials.keys())
methods = available_methods.intersection(allowed_methods)
logging.debug('Available methods: %s', methods)
# If we have no common auth mechanism, then fail.
if not methods:
msg = 'Negotiation failed. Server supports %s, client supports %s' % \
(allowed_methods, available_methods)
raise Exception(msg)
# If there are more than 1 method to pick, don't pick
# anonymous over an actual method.
if len(methods) > 1:
methods.discard(securitymethods.ANONYMOUS)
# Pick a random method.
return methods.pop()
def METHOD_NAME(self, method_uri, session):
"""
Associate a security method URI with a requests.Session like object.
Parameters
----------
method_uri : str
URI representing the security method
session : object
the requests.Session like object that will dispatch requests
for the authentication method provided by method_uri
"""
self.credentials[method_uri] = session
def get(self, method_uri):
"""
Retrieve the requests.Session like object associated with a security
method URI.
Parameters
----------
method_uri : str
URI representing the security method
"""
return self.credentials[method_uri]
def set_cookie(self, cookie_name, cookie_value, domain='', path='/'):
"""
Add a cookie to use as authentication.
More than one call to set_cookie will add multiple cookies into
the same cookie jar used for the request.
Parameters
----------
cookie_name : str
name of the cookie
cookie_value : str
value of the cookie
domain : str
restrict usage of this cookie to this domain
path : str
restrict usage of this cookie to this path
"""
cookie_session = self.credentials.setdefault(securitymethods.COOKIE, create_session())
cookie_session.cookies.METHOD_NAME(cookie_name, cookie_value, domain=domain, path=path)
def set_cookie_jar(self, cookie_jar):
"""
Set the cookie jar to use for authentication.
Any previous cookies or cookie jars set will be removed.
Parameters
----------
cookie_jar : obj
the cookie jar to use.
"""
cookie_session = self.credentials.setdefault(securitymethods.COOKIE, create_session())
cookie_session.cookies = cookie_jar
def set_client_certificate(self, certificate_path):
"""
Add a client certificate to use for authentication.
Parameters
----------
certificate_path : str
path to the file of the client certificate
"""
cert_session = create_session()
cert_session.cert = certificate_path
self.METHOD_NAME(securitymethods.CLIENT_CERTIFICATE, cert_session)
def set_password(self, username, password):
"""
Add a username / password for basic authentication.
Parameters
----------
username : str
username to use
password : str
password to use
"""
basic_session = create_session()
basic_session.auth = (username, password)
self.METHOD_NAME(securitymethods.BASIC, basic_session)
def __repr__(self):
return 'Support for %s' % self.credentials.keys()
|
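A short usage sketch built from the methods shown above; the `pyvo.auth` import paths and the example URL are assumptions:

```python
from pyvo.auth import securitymethods
from pyvo.auth.credentialstore import CredentialStore

creds = CredentialStore()
creds.set_password("alice", "s3cret")   # registers a session under the BASIC method

# Negotiate against what a service advertises, then dispatch with the matching session.
method = creds.negotiate_method([securitymethods.BASIC, securitymethods.ANONYMOUS])
session = creds.get(method)
response = session.get("https://example.org/tap/capabilities")
```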
2,506 | id |
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetClusterResult',
'AwaitableGetClusterResult',
'get_cluster',
'get_cluster_output',
]
@pulumi.output_type
class GetClusterResult:
"""
The HDInsight cluster.
"""
def __init__(__self__, etag=None, METHOD_NAME=None, identity=None, location=None, name=None, properties=None, system_data=None, tags=None, type=None, zones=None):
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", METHOD_NAME)
if identity and not isinstance(identity, dict):
raise TypeError("Expected argument 'identity' to be a dict")
pulumi.set(__self__, "identity", identity)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if zones and not isinstance(zones, list):
raise TypeError("Expected argument 'zones' to be a list")
pulumi.set(__self__, "zones", zones)
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
The ETag for the resource
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def identity(self) -> Optional['outputs.ClusterIdentityResponse']:
"""
The identity of the cluster, if configured.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter
def location(self) -> str:
"""
The geo-location where the resource lives
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> 'outputs.ClusterGetPropertiesResponse':
"""
The properties of the cluster.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Metadata pertaining to creation and last modification of the resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def zones(self) -> Optional[Sequence[str]]:
"""
The availability zones.
"""
return pulumi.get(self, "zones")
class AwaitableGetClusterResult(GetClusterResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetClusterResult(
etag=self.etag,
METHOD_NAME=self.METHOD_NAME,
identity=self.identity,
location=self.location,
name=self.name,
properties=self.properties,
system_data=self.system_data,
tags=self.tags,
type=self.type,
zones=self.zones)
def get_cluster(cluster_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetClusterResult:
"""
Gets the specified cluster.
:param str cluster_name: The name of the cluster.
:param str resource_group_name: The name of the resource group.
"""
__args__ = dict()
__args__['clusterName'] = cluster_name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:hdinsight/v20230815preview:getCluster', __args__, opts=opts, typ=GetClusterResult).value
return AwaitableGetClusterResult(
etag=pulumi.get(__ret__, 'etag'),
METHOD_NAME=pulumi.get(__ret__, 'id'),
identity=pulumi.get(__ret__, 'identity'),
location=pulumi.get(__ret__, 'location'),
name=pulumi.get(__ret__, 'name'),
properties=pulumi.get(__ret__, 'properties'),
system_data=pulumi.get(__ret__, 'system_data'),
tags=pulumi.get(__ret__, 'tags'),
type=pulumi.get(__ret__, 'type'),
zones=pulumi.get(__ret__, 'zones'))
@_utilities.lift_output_func(get_cluster)
def get_cluster_output(cluster_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetClusterResult]:
"""
Gets the specified cluster.
:param str cluster_name: The name of the cluster.
:param str resource_group_name: The name of the resource group.
"""
...
|
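A hedged sketch of calling the data-source functions above from a Pulumi program; the versioned module path is an assumption based on the invoke token, and the cluster/resource-group names are placeholders:

```python
import pulumi
import pulumi_azure_native.hdinsight.v20230815preview as hdinsight  # assumed module path

cluster = hdinsight.get_cluster_output(
    cluster_name="my-cluster",
    resource_group_name="my-rg",
)

# The lifted function returns an Output[GetClusterResult]; apply() unwraps its fields.
pulumi.export("clusterLocation", cluster.apply(lambda c: c.location))
pulumi.export("clusterType", cluster.apply(lambda c: c.type))
```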
2,507 | create dyn state matrices |
#!/usr/bin/env python3
"""
Dynamic bicycle model from "The Science of Vehicle Dynamics (2014), M. Guiggiani"
The state is x = [v, r]^T
with v lateral speed [m/s], and r rotational speed [rad/s]
The input u is the steering angle [rad], and roll [rad]
The system is defined by
x_dot = A*x + B*u
A depends on longitudinal speed, u [m/s], and vehicle parameters CP
"""
from typing import Tuple
import numpy as np
from numpy.linalg import solve
from cereal import car
ACCELERATION_DUE_TO_GRAVITY = 9.8
class VehicleModel:
def __init__(self, CP: car.CarParams):
"""
Args:
CP: Car Parameters
"""
# for math readability, convert long names car params into short names
self.m: float = CP.mass
self.j: float = CP.rotationalInertia
self.l: float = CP.wheelbase
self.aF: float = CP.centerToFront
self.aR: float = CP.wheelbase - CP.centerToFront
self.chi: float = CP.steerRatioRear
self.cF_orig: float = CP.tireStiffnessFront
self.cR_orig: float = CP.tireStiffnessRear
self.update_params(1.0, CP.steerRatio)
def update_params(self, stiffness_factor: float, steer_ratio: float) -> None:
"""Update the vehicle model with a new stiffness factor and steer ratio"""
self.cF: float = stiffness_factor * self.cF_orig
self.cR: float = stiffness_factor * self.cR_orig
self.sR: float = steer_ratio
def steady_state_sol(self, sa: float, u: float, roll: float) -> np.ndarray:
"""Returns the steady state solution.
If the speed is too low we can't use the dynamic model (tire slip is undefined),
we then have to use the kinematic model
Args:
sa: Steering wheel angle [rad]
u: Speed [m/s]
roll: Road Roll [rad]
Returns:
2x1 matrix with steady state solution (lateral speed, rotational speed)
"""
if u > 0.1:
return dyn_ss_sol(sa, u, roll, self)
else:
return kin_ss_sol(sa, u, self)
def calc_curvature(self, sa: float, u: float, roll: float) -> float:
"""Returns the curvature. Multiplied by the speed this will give the yaw rate.
Args:
sa: Steering wheel angle [rad]
u: Speed [m/s]
roll: Road Roll [rad]
Returns:
Curvature factor [1/m]
"""
return (self.curvature_factor(u) * sa / self.sR) + self.roll_compensation(roll, u)
def curvature_factor(self, u: float) -> float:
"""Returns the curvature factor.
Multiplied by wheel angle (not steering wheel angle) this will give the curvature.
Args:
u: Speed [m/s]
Returns:
Curvature factor [1/m]
"""
sf = calc_slip_factor(self)
return (1. - self.chi) / (1. - sf * u**2) / self.l
def get_steer_from_curvature(self, curv: float, u: float, roll: float) -> float:
"""Calculates the required steering wheel angle for a given curvature
Args:
curv: Desired curvature [1/m]
u: Speed [m/s]
roll: Road Roll [rad]
Returns:
Steering wheel angle [rad]
"""
return (curv - self.roll_compensation(roll, u)) * self.sR * 1.0 / self.curvature_factor(u)
def roll_compensation(self, roll: float, u: float) -> float:
"""Calculates the roll-compensation to curvature
Args:
roll: Road Roll [rad]
u: Speed [m/s]
Returns:
Roll compensation curvature [rad]
"""
sf = calc_slip_factor(self)
if abs(sf) < 1e-6:
return 0
else:
return (ACCELERATION_DUE_TO_GRAVITY * roll) / ((1 / sf) - u**2)
def get_steer_from_yaw_rate(self, yaw_rate: float, u: float, roll: float) -> float:
"""Calculates the required steering wheel angle for a given yaw_rate
Args:
yaw_rate: Desired yaw rate [rad/s]
u: Speed [m/s]
roll: Road Roll [rad]
Returns:
Steering wheel angle [rad]
"""
curv = yaw_rate / u
return self.get_steer_from_curvature(curv, u, roll)
def yaw_rate(self, sa: float, u: float, roll: float) -> float:
"""Calculate yaw rate
Args:
sa: Steering wheel angle [rad]
u: Speed [m/s]
roll: Road Roll [rad]
Returns:
Yaw rate [rad/s]
"""
return self.calc_curvature(sa, u, roll) * u
def kin_ss_sol(sa: float, u: float, VM: VehicleModel) -> np.ndarray:
"""Calculate the steady state solution at low speeds
At low speeds the tire slip is undefined, so a kinematic
model is used.
Args:
sa: Steering angle [rad]
u: Speed [m/s]
VM: Vehicle model
Returns:
2x1 matrix with steady state solution
"""
K = np.zeros((2, 1))
K[0, 0] = VM.aR / VM.sR / VM.l * u
K[1, 0] = 1. / VM.sR / VM.l * u
return K * sa
def METHOD_NAME(u: float, VM: VehicleModel) -> Tuple[np.ndarray, np.ndarray]:
"""Returns the A and B matrix for the dynamics system
Args:
u: Vehicle speed [m/s]
VM: Vehicle model
Returns:
A tuple with the 2x2 A matrix, and 2x2 B matrix
Parameters in the vehicle model:
cF: Tire stiffness Front [N/rad]
    cR: Tire stiffness Rear [N/rad]
aF: Distance from CG to front wheels [m]
aR: Distance from CG to rear wheels [m]
m: Mass [kg]
j: Rotational inertia [kg m^2]
sR: Steering ratio [-]
chi: Steer ratio rear [-]
"""
A = np.zeros((2, 2))
B = np.zeros((2, 2))
A[0, 0] = - (VM.cF + VM.cR) / (VM.m * u)
A[0, 1] = - (VM.cF * VM.aF - VM.cR * VM.aR) / (VM.m * u) - u
A[1, 0] = - (VM.cF * VM.aF - VM.cR * VM.aR) / (VM.j * u)
A[1, 1] = - (VM.cF * VM.aF**2 + VM.cR * VM.aR**2) / (VM.j * u)
# Steering input
B[0, 0] = (VM.cF + VM.chi * VM.cR) / VM.m / VM.sR
B[1, 0] = (VM.cF * VM.aF - VM.chi * VM.cR * VM.aR) / VM.j / VM.sR
# Roll input
B[0, 1] = -ACCELERATION_DUE_TO_GRAVITY
return A, B
def dyn_ss_sol(sa: float, u: float, roll: float, VM: VehicleModel) -> np.ndarray:
"""Calculate the steady state solution when x_dot = 0,
Ax + Bu = 0 => x = -A^{-1} B u
Args:
sa: Steering angle [rad]
u: Speed [m/s]
roll: Road Roll [rad]
VM: Vehicle model
Returns:
2x1 matrix with steady state solution
"""
A, B = METHOD_NAME(u, VM)
inp = np.array([[sa], [roll]])
return -solve(A, B) @ inp # type: ignore
def calc_slip_factor(VM: VehicleModel) -> float:
"""The slip factor is a measure of how the curvature changes with speed
it's positive for Oversteering vehicle, negative (usual case) otherwise.
"""
return VM.m * (VM.cF * VM.aF - VM.cR * VM.aR) / (VM.l**2 * VM.cF * VM.cR)
|
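A hedged numerical sketch of the module-level helpers above, where METHOD_NAME stands for the masked function (labelled "create dyn state matrices"); a plain namespace stands in for CarParams, and the parameter values are illustrative rather than any real vehicle:

```python
from types import SimpleNamespace

import numpy as np

# Rough mid-size-car numbers, purely illustrative.
VM = SimpleNamespace(m=1500.0, j=2500.0, l=2.7, aF=1.2, aR=1.5,
                     cF=80_000.0, cR=90_000.0, sR=15.0, chi=0.0)

A, B = METHOD_NAME(u=20.0, VM=VM)                     # dynamics matrices at 20 m/s
x_ss = -np.linalg.solve(A, B) @ np.array([[0.05],     # steering angle [rad]
                                          [0.0]])     # road roll [rad]
print(x_ss)  # [[lateral speed m/s], [yaw rate rad/s]] at steady state
```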
2,508 | test filename |
from unittest import mock
import pytest
from sentry import eventstore
from sentry.event_manager import EventManager
from sentry.interfaces.stacktrace import get_context, is_url
def test_is_url():
assert is_url("http://example.org/") is True
assert is_url("https://example.org/") is True
assert is_url("file:///tmp/filename") is True
assert is_url("applewebdata://00000000-0000-1000-8080-808080808080") is True
assert is_url("app:///index.bundle") is False # react native
assert is_url("webpack:///./app/index.jsx") is False # webpack bundle
assert is_url("data:,") is False
assert is_url("blob:\x00") is False
def test_works_with_empty_filename():
result = get_context(0, "hello world")
assert result == [(0, "hello world")]
@pytest.fixture
def make_stacktrace_snapshot(insta_snapshot):
def inner(data):
mgr = EventManager(data={"stacktrace": data})
mgr.normalize()
evt = eventstore.backend.create_event(data=mgr.get_data())
interface = evt.interfaces.get("stacktrace")
insta_snapshot(
{
"errors": evt.data.get("errors"),
"to_json": interface and interface.to_json(),
"get_stacktrace": interface and interface.get_stacktrace(evt),
"to_string": interface and interface.to_string(evt),
}
)
return inner
def test_basic(make_stacktrace_snapshot):
make_stacktrace_snapshot(
dict(
frames=[
{"filename": "foo/bar.py"},
{"filename": "foo/baz.py", "lineno": 1, "in_app": True},
]
)
)
@pytest.mark.parametrize("input", [{"frames": [{}]}, {"frames": [{"abs_path": None}]}])
def test_null_values_in_frames(make_stacktrace_snapshot, input):
make_stacktrace_snapshot(input)
def METHOD_NAME(make_stacktrace_snapshot):
make_stacktrace_snapshot(dict(frames=[{"filename": "foo.py"}]))
def test_filename2(make_stacktrace_snapshot):
make_stacktrace_snapshot(dict(frames=[{"lineno": 1, "filename": "foo.py"}]))
def test_allows_abs_path_without_filename(make_stacktrace_snapshot):
make_stacktrace_snapshot(dict(frames=[{"lineno": 1, "abs_path": "foo/bar/baz.py"}]))
def test_coerces_url_filenames(make_stacktrace_snapshot):
make_stacktrace_snapshot(dict(frames=[{"lineno": 1, "filename": "http://foo.com/foo.js"}]))
def test_does_not_overwrite_filename(make_stacktrace_snapshot):
make_stacktrace_snapshot(
dict(frames=[{"lineno": 1, "filename": "foo.js", "abs_path": "http://foo.com/foo.js"}])
)
def test_ignores_results_with_empty_path(make_stacktrace_snapshot):
make_stacktrace_snapshot(dict(frames=[{"lineno": 1, "filename": "http://foo.com"}]))
def test_serialize_returns_frames(make_stacktrace_snapshot):
make_stacktrace_snapshot(dict(frames=[{"lineno": 1, "filename": "foo.py"}]))
@mock.patch("sentry.interfaces.stacktrace.Stacktrace.get_stacktrace", mock.Mock(return_value="foo"))
def test_to_string_returns_stacktrace(make_stacktrace_snapshot):
make_stacktrace_snapshot(dict(frames=[]))
@mock.patch("sentry.interfaces.stacktrace.is_newest_frame_first", mock.Mock(return_value=False))
def test_get_stacktrace_with_only_filename(make_stacktrace_snapshot):
make_stacktrace_snapshot(dict(frames=[{"filename": "foo"}, {"filename": "bar"}]))
@mock.patch("sentry.interfaces.stacktrace.is_newest_frame_first", mock.Mock(return_value=False))
def test_get_stacktrace_with_module(make_stacktrace_snapshot):
make_stacktrace_snapshot(dict(frames=[{"module": "foo"}, {"module": "bar"}]))
@mock.patch("sentry.interfaces.stacktrace.is_newest_frame_first", mock.Mock(return_value=False))
def test_get_stacktrace_with_filename_and_function(make_stacktrace_snapshot):
make_stacktrace_snapshot(
dict(
frames=[{"filename": "foo", "function": "biz"}, {"filename": "bar", "function": "baz"}]
)
)
@mock.patch("sentry.interfaces.stacktrace.is_newest_frame_first", mock.Mock(return_value=False))
def test_get_stacktrace_with_filename_function_lineno_and_context(make_stacktrace_snapshot):
make_stacktrace_snapshot(
dict(
frames=[
{
"filename": "foo",
"function": "biz",
"lineno": 3,
"context_line": " def foo(r):",
},
{
"filename": "bar",
"function": "baz",
"lineno": 5,
"context_line": " return None",
},
]
)
)
|
2,509 | acr scope map show |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from enum import Enum
from azure.cli.core.util import CLIError
from ._utils import (
get_resource_group_name_by_registry_name,
parse_scope_map_actions
)
class RepoScopeMapActions(Enum):
CONTENT_DELETE = 'content/delete'
CONTENT_READ = 'content/read'
CONTENT_WRITE = 'content/write'
METADATA_READ = 'metadata/read'
METADATA_WRITE = 'metadata/write'
class GatewayScopeMapActions(Enum):
CONFIG_READ = 'config/read'
CONFIG_WRITE = 'config/write'
MESSAGES_READ = 'message/read'
MESSAGES_WRITE = 'message/write'
def acr_scope_map_create(cmd,
client,
registry_name,
scope_map_name,
repository_actions_list=None,
gateway_actions_list=None,
resource_group_name=None,
description=None):
resource_group_name = get_resource_group_name_by_registry_name(cmd.cli_ctx, registry_name, resource_group_name)
actions = parse_scope_map_actions(repository_actions_list, gateway_actions_list)
ScopeMap = cmd.get_models('ScopeMap')
scope_map = ScopeMap(
actions=actions,
description=description
)
return client.begin_create(
resource_group_name,
registry_name,
scope_map_name,
scope_map
)
def acr_scope_map_delete(cmd,
client,
registry_name,
scope_map_name,
yes=None,
resource_group_name=None):
if not yes:
from knack.prompting import prompt_y_n
confirmation = prompt_y_n("Deleting the scope map '{}' will remove its permissions with associated tokens. "
"Proceed?".format(scope_map_name))
if not confirmation:
return None
resource_group_name = get_resource_group_name_by_registry_name(cmd.cli_ctx, registry_name, resource_group_name)
return client.begin_delete(resource_group_name, registry_name, scope_map_name)
def acr_scope_map_update(cmd,
client,
registry_name,
scope_map_name,
add_repository=None,
remove_repository=None,
add_gateway=None,
remove_gateway=None,
resource_group_name=None,
description=None):
if not (add_repository or remove_repository or add_gateway or remove_gateway or description):
raise CLIError('No scope map properties to update.')
resource_group_name = get_resource_group_name_by_registry_name(cmd.cli_ctx, registry_name, resource_group_name)
current_scope_map = METHOD_NAME(cmd, client, registry_name, scope_map_name, resource_group_name)
current_actions = current_scope_map.actions
if add_repository or remove_repository or add_gateway or remove_gateway:
add_actions_set = set(parse_scope_map_actions(add_repository, add_gateway))
remove_actions_set = set(parse_scope_map_actions(remove_repository, remove_gateway))
# Duplicate actions can lead to inconsistency based on order of operations (set subtraction isn't associative).
# Eg: ({A, B} - {B}) U {B, C} = {A, B, C}, ({A, B} U {B, C}) - {B} = {A, C}
duplicate_actions = set.intersection(add_actions_set, remove_actions_set)
if duplicate_actions:
# Display these actions to users: remove 'repositories/' prefix from 'repositories/<repo>/<action>'
errors = sorted(map(lambda action: action[action.find('/') + 1:], duplicate_actions))
raise CLIError(
'Update ambiguity. Duplicate actions were provided with --add and --remove arguments.\n{}'
.format(errors))
final_actions_set = set(current_scope_map.actions).union(add_actions_set).difference(remove_actions_set)
current_actions = list(final_actions_set)
ScopeMapUpdateParameters = cmd.get_models('ScopeMapUpdateParameters')
scope_map_update_parameters = ScopeMapUpdateParameters(
description=description,
actions=current_actions
)
return client.begin_update(resource_group_name,
registry_name,
scope_map_name,
scope_map_update_parameters)
def METHOD_NAME(cmd,
client,
registry_name,
scope_map_name,
resource_group_name=None):
resource_group_name = get_resource_group_name_by_registry_name(cmd.cli_ctx, registry_name, resource_group_name)
return client.get(
resource_group_name,
registry_name,
scope_map_name
)
def acr_scope_map_list(cmd,
client,
registry_name,
resource_group_name=None):
resource_group_name = get_resource_group_name_by_registry_name(cmd.cli_ctx, registry_name, resource_group_name)
return client.list(
resource_group_name,
registry_name
)
|
2,510 | test encoding flat map |
import json
from datetime import datetime, timedelta, timezone
from bytewax._encoder import encode_dataflow
from bytewax.dataflow import Dataflow
from bytewax.inputs import PartitionedInput
from bytewax.window import EventClockConfig, TumblingWindow
# Helper functions for some steps
def acc_values(acc, event):
acc.append((event["value"], event["time"]))
return acc
# Example class to be encoded
class OrderBook:
def __init__(self):
self.data = []
def update(self, data):
self.data.append(data)
def test_encoding_custom_object():
flow = Dataflow()
flow.stateful_map("avg", OrderBook, OrderBook.update)
assert encode_dataflow(flow) == json.dumps(
{
"type": "Dataflow",
"steps": [
{
"type": "StatefulMap",
"builder": "OrderBook",
"mapper": "update",
"step_id": "avg",
}
],
},
sort_keys=True,
)
def test_encoding_custom_input():
flow = Dataflow()
class MyCustomInput(PartitionedInput):
def list_parts(self):
return ["one"]
def build_part(self, for_key, resume_state):
...
flow.input("inp", MyCustomInput())
assert encode_dataflow(flow) == json.dumps(
{
"type": "Dataflow",
"steps": [
{
"input": {
"type": "MyCustomInput",
},
"step_id": "inp",
"type": "Input",
},
],
},
sort_keys=True,
)
def test_encoding_map():
flow = Dataflow()
flow.map(lambda x: x + 1)
assert encode_dataflow(flow) == json.dumps(
{"type": "Dataflow", "steps": [{"type": "Map", "mapper": "<lambda>"}]},
sort_keys=True,
)
def test_encoding_filter():
flow = Dataflow()
flow.filter(lambda x: x == 1)
assert encode_dataflow(flow) == json.dumps(
{"type": "Dataflow", "steps": [{"type": "Filter", "predicate": "<lambda>"}]},
sort_keys=True,
)
def test_encoding_reduce():
flow = Dataflow()
flow.reduce("sessionizer", lambda x, y: x + y, lambda x, y: x == y)
assert encode_dataflow(flow) == json.dumps(
{
"type": "Dataflow",
"steps": [
{
"type": "Reduce",
"step_id": "sessionizer",
"reducer": "<lambda>",
"is_complete": "<lambda>",
}
],
},
sort_keys=True,
)
def METHOD_NAME():
flow = Dataflow()
flow.flat_map(lambda x: x + 1)
assert encode_dataflow(flow) == json.dumps(
{"type": "Dataflow", "steps": [{"type": "FlatMap", "mapper": "<lambda>"}]},
sort_keys=True,
)
def test_encoding_stateful_map():
flow = Dataflow()
flow.stateful_map("order_book", lambda key: OrderBook(), OrderBook.update)
assert encode_dataflow(flow) == json.dumps(
{
"type": "Dataflow",
"steps": [
{
"builder": "<lambda>",
"mapper": "update",
"step_id": "order_book",
"type": "StatefulMap",
}
],
},
sort_keys=True,
)
def test_encoding_fold_window():
flow = Dataflow()
align_to = datetime(2005, 7, 14, 12, 30, tzinfo=timezone.utc)
wc = TumblingWindow(align_to=align_to, length=timedelta(seconds=5))
cc = EventClockConfig(
lambda x: datetime.fromisoformat(x["time"]),
wait_for_system_duration=timedelta(seconds=10),
)
flow.fold_window("running_average", cc, wc, lambda x: list(x), acc_values)
assert encode_dataflow(flow) == json.dumps(
{
"type": "Dataflow",
"steps": [
{
"builder": "<lambda>",
"clock_config": {
"dt_getter": "<lambda>",
"type": "EventClockConfig",
"wait_for_system_duration": "0:00:10",
},
"folder": "acc_values",
"step_id": "running_average",
"type": "FoldWindow",
"window_config": {
"length": "0:00:05",
"align_to": "2005-07-14T12:30:00+00:00",
"type": "TumblingWindow",
},
}
],
},
sort_keys=True,
)
def test_encoding_method_descriptor():
flow = Dataflow()
flow.flat_map(str.split)
assert encode_dataflow(flow) == json.dumps(
{"type": "Dataflow", "steps": [{"type": "FlatMap", "mapper": "split"}]},
sort_keys=True,
)
|
2,511 | test variable positive matches |
"""
    Typographic Number Theory tests
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import pytest
from pygments.lexers.tnt import TNTLexer
from pygments.token import Text, Operator, Keyword, Name, Number, \
Punctuation, Error
@pytest.fixture(autouse=True)
def lexer():
yield TNTLexer()
# whitespace
@pytest.mark.parametrize('text', (' a', ' \t0', '\n\n 3'))
def test_whitespace_positive_matches(lexer, text):
"""Test fragments that should be tokenized as whitespace text."""
assert lexer.whitespace(0, text) == len(text) - 1
assert lexer.whitespace(0, text, True) == len(text) - 1
assert lexer.cur[-1] == (0, Text, text[:-1])
@pytest.mark.parametrize('text', ('0 a=b premise', 'b=a symmetry'))
def test_whitespace_negative_matches(lexer, text):
"""Test statements that do not start with whitespace text."""
assert lexer.whitespace(0, text) == 0
with pytest.raises(AssertionError):
lexer.whitespace(0, text, True)
assert not lexer.cur
# terms that can go on either side of an = sign
@pytest.mark.parametrize('text', ('a ', "a' ", 'b ', "c' "))
def METHOD_NAME(lexer, text):
"""Test fragments that should be tokenized as variables."""
assert lexer.variable(0, text) == len(text) - 1
assert lexer.cur[-1] == (0, Name.Variable, text[:-1])
@pytest.mark.parametrize('text', ("' ", 'f ', "f' "))
def test_variable_negative_matches(lexer, text):
"""Test fragments that should **not** be tokenized as variables."""
with pytest.raises(AssertionError):
lexer.variable(0, text)
assert not lexer.cur
@pytest.mark.parametrize('text', ('0', 'S0', 'SSSSS0'))
def test_numeral_positive_matches(lexer, text):
"""Test fragments that should be tokenized as (unary) numerals."""
assert lexer.term(0, text) == len(text)
assert lexer.cur[-1] == (len(text) - 1, Number.Integer, text[-1])
if text != '0':
assert lexer.cur[-2] == (0, Number.Integer, text[:-1])
@pytest.mark.parametrize('text', (
'(a+b)', '(b.a)', '(c+d)'
))
def test_multiterm_positive_matches(lexer, text):
"""Test fragments that should be tokenized as a compound term."""
assert lexer.term(0, text) == len(text)
assert [t[1] for t in lexer.cur] == [
Punctuation, Name.Variable, Operator,
Name.Variable, Punctuation
]
@pytest.mark.parametrize('text', ('1', '=', 'A'))
def test_term_negative_matches(lexer, text):
"""Test fragments that should not be tokenized as terms at all."""
with pytest.raises(AssertionError):
lexer.term(0, text)
assert not lexer.cur
# full statements, minus rule
@pytest.mark.parametrize('text', ('~a=b ', '~~~~a=b '))
def test_negator_positive_matches(lexer, text):
"""Test statements that start with a negation."""
assert lexer.formula(0, text) == len(text) - 1
assert lexer.cur[0] == (0, Operator, text[:-4])
@pytest.mark.parametrize('text', ('Aa:a=b ', 'Eb:a=b '))
def test_quantifier_positive_matches(lexer, text):
"""Test statements that start with a quantifier."""
assert lexer.formula(0, text) == len(text) - 1
assert lexer.cur[0][1] == Keyword.Declaration
assert lexer.cur[1][1] == Name.Variable
assert lexer.cur[2] == (2, Punctuation, ':')
@pytest.mark.parametrize('text', ('Aaa=b', 'Eba=b'))
def test_quantifier_negative_matches(lexer, text):
"""Test quantifiers that are only partially valid."""
with pytest.raises(AssertionError):
lexer.formula(0, text)
# leftovers should still be valid
assert lexer.cur[0][1] == Keyword.Declaration
assert lexer.cur[1][1] == Name.Variable
@pytest.mark.parametrize('text', ('<a=b&b=a>', '<a=b|b=a>', '<a=b]b=a>'))
def test_compound_positive_matches(lexer, text):
"""Test statements that consist of multiple formulas compounded."""
assert lexer.formula(0, text) == len(text)
assert lexer.cur[0] == (0, Punctuation, '<')
assert lexer.cur[4][1] == Operator
assert lexer.cur[-1] == (len(text)-1, Punctuation, '>')
@pytest.mark.parametrize('text', ('<a=b/b=a>', '<a=b&b=a '))
def test_compound_negative_matches(lexer, text):
"""Test statements that look like compounds but are invalid."""
with pytest.raises(AssertionError):
lexer.formula(0, text)
assert lexer.cur[0] == (0, Punctuation, '<')
@pytest.mark.parametrize('text', ('a=b ', 'a=0 ', '0=b '))
def test_formula_postive_matches(lexer, text):
"""Test the normal singular formula."""
assert lexer.formula(0, text) == len(text) - 1
assert lexer.cur[0][2] == text[0]
assert lexer.cur[1] == (1, Operator, '=')
assert lexer.cur[2][2] == text[2]
@pytest.mark.parametrize('text', ('a/b', '0+0 '))
def test_formula_negative_matches(lexer, text):
"""Test anything but an equals sign."""
with pytest.raises(AssertionError):
lexer.formula(0, text)
# rules themselves
@pytest.mark.parametrize('text', (
'fantasy rule', 'carry over line 5', 'premise', 'joining',
'double-tilde', 'switcheroo', 'De Morgan', 'specification'
))
def test_rule_positive_matches(lexer, text):
"""Test some valid rules of TNT."""
assert lexer.rule(0, text) == len(text)
assert lexer.cur[0][:2] == (0, Keyword)
if text[-1].isdigit():
assert lexer.cur[1][1] == Number.Integer
@pytest.mark.parametrize('text', (
'fantasy', 'carry over', 'premse', 'unjoining',
'triple-tilde', 'switcheru', 'De-Morgan', 'despecification'
))
def test_rule_negative_matches(lexer, text):
"""Test some invalid rules of TNT."""
with pytest.raises(AssertionError):
lexer.rule(0, text)
# referrals
@pytest.mark.parametrize('text', ('(lines 1, 2, and 4)', '(line 3,5,6)',
'(lines 1, 6 and 0)'))
def test_lineno_positive_matches(lexer, text):
"""Test line referrals."""
assert lexer.lineno(0, text) == len(text)
assert lexer.cur[0] == (0, Punctuation, '(')
assert lexer.cur[1][:2] == (1, Text)
assert lexer.cur[2][1] == Number.Integer
assert lexer.cur[3] == (len(text)-1, Punctuation, ')')
@pytest.mark.parametrize('text', (
'(lines one, two, and four)1 ', # to avoid IndexError
'(lines 1 2 and 3)', '(lines 1 2 3)'
))
def test_lineno_negative_matches(lexer, text):
"""Test invalid line referrals."""
with pytest.raises(AssertionError):
lexer.lineno(0, text)
# worst-case: error text
@pytest.mark.parametrize('text', ('asdf', 'fdsa\nasdf', 'asdf\n '))
def test_error_till_line_end(lexer, text):
try:
nl = text.index('\n')
except ValueError:
nl = len(text)
try:
end = text.find(text.split(None, 2)[1])
except IndexError: # split failed
end = len(text)
assert lexer.error_till_line_end(0, text) == end
assert lexer.cur[0] == (0, Error, text[:nl])
# full statement, including rule (because this can't be tested any other way)
@pytest.mark.parametrize('text', ('[ push', '] pop'))
def test_fantasy_positive_matches(lexer, text):
"""Test statements that should be tokenized as push/pop statements."""
assert lexer.get_tokens_unprocessed(text)[0] == (0, Keyword, text[0])
# full text is already done by examplefiles, but here's some exceptions
@pytest.mark.parametrize('text', (
'0', 'a=b', 'premise',
'0 a=b premise', '1 b=a symmetry (line 0)'
))
def test_no_crashing(lexer, text):
"""Test incomplete text fragments that shouldn't crash the whole lexer."""
assert lexer.get_tokens(text)
|
2,512 | has ipv6 |
from __future__ import absolute_import
import socket
from .wait import wait_for_read
from .selectors import HAS_SELECT, SelectorError
def is_connection_dropped(conn): # Platform-specific
"""
Returns True if the connection is dropped and should be closed.
:param conn:
:class:`httplib.HTTPConnection` object.
Note: For platforms like AppEngine, this will always return ``False`` to
let the platform handle connection recycling transparently for us.
"""
sock = getattr(conn, 'sock', False)
if sock is False: # Platform-specific: AppEngine
return False
if sock is None: # Connection already closed (such as by httplib).
return True
if not HAS_SELECT:
return False
try:
return bool(wait_for_read(sock, timeout=0.0))
except SelectorError:
return True
# This function is copied from socket.py in the Python 2.7 standard
# library test suite. Added to its signature is only `socket_options`.
# One additional modification is that we avoid binding to IPv6 servers
# discovered in DNS if the system doesn't have IPv6 functionality.
def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
source_address=None, socket_options=None):
"""Connect to *address* and return the socket object.
Convenience function. Connect to *address* (a 2-tuple ``(host,
port)``) and return the socket object. Passing the optional
*timeout* parameter will set the timeout on the socket instance
before attempting to connect. If no *timeout* is supplied, the
global default timeout setting returned by :func:`getdefaulttimeout`
is used. If *source_address* is set it must be a tuple of (host, port)
for the socket to bind as a source address before making the connection.
    A host of '' or port 0 tells the OS to use the default.
"""
host, port = address
if host.startswith('['):
host = host.strip('[]')
err = None
# Using the value from allowed_gai_family() in the context of getaddrinfo lets
# us select whether to work with IPv4 DNS records, IPv6 records, or both.
# The original create_connection function always returns all records.
family = allowed_gai_family()
for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket.socket(af, socktype, proto)
# If provided, set socket level options before connecting.
_set_socket_options(sock, socket_options)
if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
sock.settimeout(timeout)
if source_address:
sock.bind(source_address)
sock.connect(sa)
return sock
except socket.error as e:
err = e
if sock is not None:
sock.close()
sock = None
if err is not None:
raise err
raise socket.error("getaddrinfo returns an empty list")
def _set_socket_options(sock, options):
if options is None:
return
for opt in options:
sock.setsockopt(*opt)
def allowed_gai_family():
"""This function is designed to work in the context of
getaddrinfo, where family=socket.AF_UNSPEC is the default and
will perform a DNS search for both IPv6 and IPv4 records."""
family = socket.AF_INET
if HAS_IPV6:
family = socket.AF_UNSPEC
return family
def METHOD_NAME(host):
""" Returns True if the system can bind an IPv6 address. """
sock = None
has_ipv6 = False
if socket.has_ipv6:
# has_ipv6 returns true if cPython was compiled with IPv6 support.
# It does not tell us if the system has IPv6 support enabled. To
# determine that we must bind to an IPv6 address.
# https://github.com/shazow/urllib3/pull/611
# https://bugs.python.org/issue658327
try:
sock = socket.socket(socket.AF_INET6)
sock.bind((host, 0))
has_ipv6 = True
except Exception:
pass
if sock:
sock.close()
return has_ipv6
HAS_IPV6 = METHOD_NAME('::1')
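# A minimal usage sketch of create_connection() with socket_options; the host
# and port are placeholders, and calling this helper opens a real TCP socket.
def _example_create_connection():
    options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
    sock = create_connection(("example.com", 80), timeout=5.0,
                             socket_options=options)
    sock.close()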
|
2,513 |
echo
|
# -*- coding: utf-8 -*-
"""Format and display CLI output.
Classes:
CliTable: Pretty prints tabular data to stdout, via Rich's Console API
"""
import os
from typing import Any, List, Union
import rich
from rich import box, print
from rich.console import Console
from rich.style import Style
from rich.table import Column, Table
CHECKMARK = "[green]:heavy_check_mark:" if os.name == "posix" else "+"
CROSSMARK = "[red]:cross_mark:" if os.name == "posix" else "-"
class CliTable:
"""Format and print data to the command line in tabular form.
Attributes:
* PICTOGRAM_TRUE: True boolean values are replaced with this string.
    * PICTOGRAM_FALSE: False boolean values are replaced with this string.
Methods:
* echo: Print the table data to stdout using rich.Console.print
Class Methods:
* columnify_headers: Convert a list of strs to Columns
"""
PICTOGRAM_TRUE = CHECKMARK
PICTOGRAM_FALSE = " "
def __init__(
self,
data: List[List[Any]],
title: str = None,
dim_rows: List[int] = None,
**kwargs,
):
"""Constructor.
Args:
data: Required. List[List] of data to format, with the heading as 0-th member.
title: String to use as the table's title.
dim_rows: List[int] of row indices to dim.
"""
headers: List[Column] = self.columnify_headers(data[0])
rows = [self._stringify_row(r) for r in data[1:]]
self._table: Table = Table(*headers, title=title, box=box.SIMPLE, **kwargs)
for idx, row in enumerate(rows):
dim_row = idx + 1 in dim_rows if dim_rows else False
self._table.add_row(*row, style=Style(dim=dim_row))
def _stringify_row(self, row: list) -> List[str]:
return [self._stringify_cell(cell) for cell in row]
def _stringify_cell(self, cell: Any) -> str:
if isinstance(cell, bool):
cell = self.PICTOGRAM_TRUE if cell else self.PICTOGRAM_FALSE
elif cell is None:
cell = ""
else:
cell = str(cell)
return cell
def __rich__(self) -> str:
return self._table
def columnify_headers(self, headers: List[Union[str, Column]]) -> List[Column]:
"""
Given a List[str, Column], return a list of Columns.
Strings are instantiated with default styles, Columns are unchanged.
"""
cols: List[Column] = []
for header in headers:
if isinstance(header, Column):
cols.append(header)
else:
header: str = self._stringify_cell(header)
cols.append(Column(header=header, overflow="fold"))
return cols
def METHOD_NAME(self, plain=False, box_style: box.Box = None):
"""
Print this table to the global Console using console.print().
"""
orig_box = self._table.box
if plain:
self._table.box = box.ASCII2
else:
self._table.box = box_style or orig_box
console = rich.get_console()
console.print(self._table)
self._table.box = orig_box
@property
def table(self):
return self._table
def __str__(self):
from io import StringIO
console = Console(file=StringIO())
tab_width = console.size.width - 21
console.print(self._table, width=tab_width)
return console.file.getvalue()
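# A small illustrative helper showing how CliTable expects its data: heading
# row first, then data rows; booleans become pictograms and the second data
# row is dimmed. The values are made up for demonstration.
def _example_table() -> CliTable:
    return CliTable(
        [["Name", "Active"], ["alpha", True], ["beta", False]],
        title="Demo",
        dim_rows=[2],
    )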
def _soql_table(results, truncated):
if results:
if truncated:
assert results[-1] == truncated
first_row = results[0]
fake_row = {k: "" for k, v in first_row.items()}
first_column = list(first_row)[0]
fake_row[first_column] = truncated
results[-1] = fake_row
headings = list(results[0].keys())
return CliTable(
[headings] + [list(map(str, r.values())) for r in results],
).METHOD_NAME()
else:
return CliTable([["No results"]]).METHOD_NAME()
def _summarize(field):
if field["referenceTo"]:
allowed = field["referenceTo"]
elif field["picklistValues"]:
allowed = [value["value"] for value in field["picklistValues"]]
else:
allowed = field["type"]
return (field["name"], allowed)
class SimpleSalesforceUIHelpers:
def __init__(self, sf):
self._sf = sf
def query(self, query, format="table", include_deleted=False, max_rows=100):
"""Return the result of a Salesforce SOQL query.
Arguments:
* query -- the SOQL query to send to Salesforce, e.g.
SELECT Id FROM Lead WHERE Email = "[email protected]"
* include_deleted -- True if deleted records should be included
* format -- one of these values:
- "table" -- printable ASCII table (the default)
- "obj" -- ordinary Python objects
- "pprint" -- string in easily readable Python dict shape
- "json" -- JSON
* max_rows -- maximum rows to output, defaults to 100
For example:
query("select Name, Id from Account", format="pprint")
contact_ids = query("select count(Id) from Contact", format="obj")
"""
results = self._sf.query_all(query, include_deleted=include_deleted)["records"]
if len(results) > max_rows:
truncated = f"... truncated {len(results) - max_rows} rows"
results = results[0:max_rows]
else:
truncated = False
for result in results:
if result.get("attributes"):
del result["attributes"]
if truncated:
results.append(truncated)
if format == "table":
help_message = "Type help(query) to learn about other return formats or assigning the result."
print(_soql_table(results, truncated))
print()
print(help_message)
rc = None
elif format == "obj":
rc = results
elif format == "pprint":
from rich.pretty import pprint
pprint(results)
rc = None
elif format == "json":
from json import dumps
rc = dumps(results, indent=2)
else:
raise TypeError(f"Unknown format `{format}`")
return rc
def describe(self, sobj_name, detailed=False, format="pprint"):
"""Describe an sobject.
Arguments:
sobj_name - sobject name to describe. e.g. "Account", "Contact"
detailed - set to `True` to get detailed information about object
format -- one of these values:
- "pprint" -- string in easily readable Python dict shape (default)
- "obj" -- ordinary Python objects
For example:
>>> describe("Account")
        >>> data = describe("Account", detailed=True, format="obj")
"""
from pprint import pprint
data = getattr(self._sf, sobj_name).describe()
if detailed:
rc = data
else:
rc = dict(_summarize(field) for field in data["fields"])
if format == "pprint":
pprint(rc)
elif format == "obj":
return rc
else:
raise TypeError(f"Unknown format {format}")
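# An illustrative sketch of using SimpleSalesforceUIHelpers with an already
# authenticated simple_salesforce client; `sf`, the SOQL text, and the object
# name are placeholders.
def _example_usage(sf):
    helpers = SimpleSalesforceUIHelpers(sf)
    accounts = helpers.query("SELECT Id, Name FROM Account", format="obj")
    helpers.describe("Account")
    return accounts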
|
2,514 |
test cpu features with static template
|
# Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests for the CPU features for aarch64."""
import platform
import re
import pytest
import framework.utils_cpuid as cpuid_utils
from framework.utils_cpu_templates import nonci_on_arm
PLATFORM = platform.machine()
DEFAULT_G2_FEATURES = (
"fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics fphp "
"asimdhp cpuid asimdrdm lrcpc dcpop asimddp ssbs"
)
DEFAULT_G2_FEATURES_NO_SSBS = (
"fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics fphp "
"asimdhp cpuid asimdrdm lrcpc dcpop asimddp"
)
DEFAULT_G3_FEATURES_4_14 = (
"fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics fphp "
"asimdhp cpuid asimdrdm jscvt fcma lrcpc dcpop sha3 sm3 sm4 asimddp "
"sha512 asimdfhm dit uscat ilrcpc flagm ssbs"
)
DEFAULT_G3_FEATURES_5_10 = (
"fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics fphp "
"asimdhp cpuid asimdrdm jscvt fcma lrcpc dcpop sha3 sm3 sm4 asimddp "
"sha512 asimdfhm dit uscat ilrcpc flagm ssbs dcpodp i8mm bf16 dgh rng"
)
DEFAULT_G3_FEATURES_NO_SSBS_4_14 = (
"fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics fphp "
"asimdhp cpuid asimdrdm jscvt fcma lrcpc dcpop sha3 sm3 sm4 asimddp "
"sha512 asimdfhm dit uscat ilrcpc flagm dcpodp i8mm bf16 dgh rng"
)
DEFAULT_G3_FEATURES_WITH_SVE_AND_PAC_4_14 = (
"fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics fphp "
"asimdhp cpuid asimdrdm jscvt fcma lrcpc dcpop sha3 sm3 sm4 asimddp "
"sha512 asimdfhm dit uscat ilrcpc flagm ssbs"
)
DEFAULT_G3_FEATURES_NO_SSBS_4_14 = (
"fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics fphp "
"asimdhp cpuid asimdrdm jscvt fcma lrcpc dcpop sha3 sm3 sm4 asimddp "
"sha512 asimdfhm dit uscat ilrcpc flagm"
)
DEFAULT_G3_FEATURES_NO_SSBS_5_10 = (
"fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics fphp "
"asimdhp cpuid asimdrdm jscvt fcma lrcpc dcpop sha3 sm3 sm4 asimddp "
"sha512 asimdfhm dit uscat ilrcpc flagm dcpodp i8mm bf16 dgh rng"
)
DEFAULT_G3_FEATURES_WITH_SVE_AND_PAC_5_10 = (
"fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics fphp "
"asimdhp cpuid asimdrdm jscvt fcma lrcpc dcpop sha3 sm3 sm4 asimddp "
"sha512 sve asimdfhm dit uscat ilrcpc flagm ssbs paca pacg dcpodp svei8mm svebf16 i8mm bf16 dgh rng"
)
DEFAULT_G3_FEATURES_V1N1 = (
"fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics fphp "
"asimdhp cpuid asimdrdm lrcpc dcpop asimddp ssbs"
)
def _check_cpu_features_arm(test_microvm, guest_kv, template_name=None):
expected_cpu_features = {"Flags": []}
match (cpuid_utils.get_instance_type(), guest_kv, template_name):
case ("m6g.metal", _, "aarch64_remove_ssbs"):
expected_cpu_features["Flags"] = DEFAULT_G2_FEATURES_NO_SSBS
case ("m6g.metal", _, "aarch64_v1n1"):
expected_cpu_features["Flags"] = DEFAULT_G2_FEATURES
case ("m6g.metal", _, None):
expected_cpu_features["Flags"] = DEFAULT_G2_FEATURES
case ("c7g.metal", "4.14", "aarch64_remove_ssbs"):
expected_cpu_features["Flags"] = DEFAULT_G3_FEATURES_NO_SSBS_4_14
case ("c7g.metal", "5.10", "aarch64_remove_ssbs"):
expected_cpu_features["Flags"] = DEFAULT_G3_FEATURES_NO_SSBS_5_10
case ("c7g.metal", "4.14", "aarch64_with_sve_and_pac"):
expected_cpu_features["Flags"] = DEFAULT_G3_FEATURES_WITH_SVE_AND_PAC_4_14
case ("c7g.metal", "5.10", "aarch64_with_sve_and_pac"):
expected_cpu_features["Flags"] = DEFAULT_G3_FEATURES_WITH_SVE_AND_PAC_5_10
case ("c7g.metal", _, "aarch64_v1n1"):
expected_cpu_features["Flags"] = DEFAULT_G3_FEATURES_V1N1
case ("c7g.metal", "4.14", None):
expected_cpu_features["Flags"] = DEFAULT_G3_FEATURES_4_14
case ("c7g.metal", "5.10", None):
expected_cpu_features["Flags"] = DEFAULT_G3_FEATURES_5_10
cpuid_utils.check_guest_cpuid_output(
test_microvm, "lscpu", None, ":", expected_cpu_features
)
def get_cpu_template_dir(cpu_template):
"""
    Utility function to return a valid string which will be used as the
    name of the directory where snapshot artifacts are stored during the
    snapshot test and loaded from during the restore test.
"""
return cpu_template if cpu_template else "none"
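# A tiny illustration of the helper above: runs without a CPU template use a
# directory literally named "none"; otherwise the template name itself is used.
assert get_cpu_template_dir(None) == "none"
assert get_cpu_template_dir("aarch64_v1n1") == "aarch64_v1n1"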
@pytest.mark.skipif(
PLATFORM != "aarch64",
reason="This is aarch64 specific test.",
)
def test_default_cpu_features(microvm_factory, guest_kernel, rootfs_ubuntu_22):
"""
Check the CPU features for a microvm with the specified config.
"""
vm = microvm_factory.build(guest_kernel, rootfs_ubuntu_22, monitor_memory=False)
vm.spawn()
vm.basic_config()
vm.add_net_iface()
vm.start()
guest_kv = re.search(r"vmlinux-(\d+\.\d+)", guest_kernel.name).group(1)
_check_cpu_features_arm(vm, guest_kv)
@pytest.mark.skipif(
PLATFORM != "aarch64",
reason="This is aarch64 specific test.",
)
@nonci_on_arm
def METHOD_NAME(
microvm_factory, guest_kernel, rootfs_ubuntu_22, cpu_template
):
"""
Check the CPU features for a microvm with the specified config.
"""
vm = microvm_factory.build(guest_kernel, rootfs_ubuntu_22, monitor_memory=False)
vm.spawn()
vm.basic_config(cpu_template=cpu_template)
vm.add_net_iface()
vm.start()
guest_kv = re.search(r"vmlinux-(\d+\.\d+)", guest_kernel.name).group(1)
_check_cpu_features_arm(vm, guest_kv, "aarch64_v1n1")
@pytest.mark.skipif(
PLATFORM != "aarch64",
reason="This is aarch64 specific test.",
)
@nonci_on_arm
def test_cpu_features_with_custom_template(
microvm_factory, guest_kernel, rootfs_ubuntu_22, custom_cpu_template
):
"""
Check the CPU features for a microvm with the specified config.
"""
vm = microvm_factory.build(guest_kernel, rootfs_ubuntu_22, monitor_memory=False)
vm.spawn()
vm.basic_config()
vm.api.cpu_config.put(**custom_cpu_template["template"])
vm.add_net_iface()
vm.start()
guest_kv = re.search(r"vmlinux-(\d+\.\d+)", guest_kernel.name).group(1)
_check_cpu_features_arm(vm, guest_kv, custom_cpu_template["name"])
|
2,515 |
test call
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
import logging
import threading
import unittest
from typing import Any
from apache_beam.utils import multi_process_shared
class CallableCounter(object):
def __init__(self, start=0):
self.running = start
self.lock = threading.Lock()
def __call__(self):
return self.running
def increment(self, value=1):
with self.lock:
self.running += value
return self.running
def error(self, msg):
raise RuntimeError(msg)
class Counter(object):
def __init__(self, start=0):
self.running = start
self.lock = threading.Lock()
def get(self):
return self.running
def increment(self, value=1):
with self.lock:
self.running += value
return self.running
def error(self, msg):
raise RuntimeError(msg)
class CounterWithBadAttr(object):
def __init__(self, start=0):
self.running = start
self.lock = threading.Lock()
def get(self):
return self.running
def increment(self, value=1):
with self.lock:
self.running += value
return self.running
def error(self, msg):
raise RuntimeError(msg)
def __getattribute__(self, __name: str) -> Any:
if __name == 'error':
raise AttributeError('error is not actually supported on this platform')
else:
# Default behaviour
return object.__getattribute__(self, __name)
class MultiProcessSharedTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.shared = multi_process_shared.MultiProcessShared(
Counter, tag='basic', always_proxy=True).acquire()
cls.sharedCallable = multi_process_shared.MultiProcessShared(
CallableCounter, tag='callable', always_proxy=True).acquire()
def METHOD_NAME(self):
self.assertEqual(self.shared.get(), 0)
self.assertEqual(self.shared.increment(), 1)
self.assertEqual(self.shared.increment(10), 11)
self.assertEqual(self.shared.increment(value=10), 21)
self.assertEqual(self.shared.get(), 21)
def test_call_illegal_attr(self):
shared_handle = multi_process_shared.MultiProcessShared(
CounterWithBadAttr, tag='test_call_illegal_attr', always_proxy=True)
shared = shared_handle.acquire()
self.assertEqual(shared.get(), 0)
self.assertEqual(shared.increment(), 1)
self.assertEqual(shared.get(), 1)
def test_call_callable(self):
self.assertEqual(self.sharedCallable(), 0)
self.assertEqual(self.sharedCallable.increment(), 1)
self.assertEqual(self.sharedCallable.increment(10), 11)
self.assertEqual(self.sharedCallable.increment(value=10), 21)
self.assertEqual(self.sharedCallable(), 21)
def test_error(self):
with self.assertRaisesRegex(Exception, 'something bad'):
self.shared.error('something bad')
def test_no_method(self):
with self.assertRaisesRegex(Exception, 'no_such_method'):
self.shared.no_such_method()
def test_connect(self):
first = multi_process_shared.MultiProcessShared(
Counter, tag='counter').acquire()
second = multi_process_shared.MultiProcessShared(
Counter, tag='counter').acquire()
self.assertEqual(first.get(), 0)
self.assertEqual(first.increment(), 1)
self.assertEqual(second.get(), 1)
self.assertEqual(second.increment(), 2)
self.assertEqual(first.get(), 2)
self.assertEqual(first.increment(), 3)
def test_release(self):
shared1 = multi_process_shared.MultiProcessShared(
Counter, tag='test_release')
shared2 = multi_process_shared.MultiProcessShared(
Counter, tag='test_release')
counter1 = shared1.acquire()
counter2 = shared2.acquire()
self.assertEqual(counter1.increment(), 1)
self.assertEqual(counter2.increment(), 2)
counter1again = shared1.acquire()
self.assertEqual(counter1again.increment(), 3)
shared1.release(counter1)
shared2.release(counter2)
with self.assertRaisesRegex(Exception, 'released'):
counter1.get()
with self.assertRaisesRegex(Exception, 'released'):
counter2.get()
self.assertEqual(counter1again.get(), 3)
shared1.release(counter1again)
counter1New = shared1.acquire()
self.assertEqual(counter1New.get(), 0)
with self.assertRaisesRegex(Exception, 'released'):
counter1.get()
def test_release_always_proxy(self):
shared1 = multi_process_shared.MultiProcessShared(
Counter, tag='test_release_always_proxy', always_proxy=True)
shared2 = multi_process_shared.MultiProcessShared(
Counter, tag='test_release_always_proxy', always_proxy=True)
counter1 = shared1.acquire()
counter2 = shared2.acquire()
self.assertEqual(counter1.increment(), 1)
self.assertEqual(counter2.increment(), 2)
counter1again = shared1.acquire()
self.assertEqual(counter1again.increment(), 3)
shared1.release(counter1)
shared2.release(counter2)
with self.assertRaisesRegex(Exception, 'released'):
counter1.get()
with self.assertRaisesRegex(Exception, 'released'):
counter2.get()
self.assertEqual(counter1again.get(), 3)
shared1.release(counter1again)
counter1New = shared1.acquire()
self.assertEqual(counter1New.get(), 0)
with self.assertRaisesRegex(Exception, 'released'):
counter1.get()
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
|
2,516 |
token
|
from typing import Any, Dict, List, NamedTuple, Optional, Tuple
from neo4j import GraphDatabase
from pandas import DataFrame, Series
from pyarrow import flight
from pyarrow.flight import ClientMiddleware, ClientMiddlewareFactory
from .query_runner import QueryRunner
from graphdatascience.query_runner.graph_constructor import GraphConstructor
from graphdatascience.query_runner.neo4j_query_runner import Neo4jQueryRunner
class AuraDbConnectionInfo(NamedTuple):
uri: str
auth: Tuple[str, str]
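# A minimal sketch of the connection info a caller would build before wrapping
# a fallback QueryRunner in AuraDbArrowQueryRunner below; the URI and the
# credentials are placeholders.
def _example_aura_db_connection_info() -> AuraDbConnectionInfo:
    return AuraDbConnectionInfo(
        uri="neo4j+s://<dbid>.databases.neo4j.io",
        auth=("neo4j", "<password>"),
    )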
class AuraDbArrowQueryRunner(QueryRunner):
def __init__(self, fallback_query_runner: QueryRunner, aura_db_connection_info: AuraDbConnectionInfo):
self._fallback_query_runner = fallback_query_runner
aura_db_endpoint, auth = aura_db_connection_info
self._auth = auth
config: Dict[str, Any] = {"max_connection_lifetime": 60}
self._driver = GraphDatabase.driver(aura_db_endpoint, auth=auth, **config)
arrow_info: "Series[Any]" = (
Neo4jQueryRunner(self._driver, auto_close=True)
.run_query("CALL gds.debug.arrow.plugin()", custom_error=False)
.squeeze()
)
if not arrow_info.get("running"):
raise RuntimeError("The plugin arrow server for AuraDB is not running")
listen_address: Optional[str] = arrow_info.get("advertisedListenAddress") # type: ignore
if not listen_address:
raise ConnectionError("Did not retrieve connection info from database")
host, port_string = listen_address.split(":")
self._auth_pair_middleware = AuthPairInterceptingMiddleware()
client_options: Dict[str, Any] = {
"middleware": [AuthPairInterceptingMiddlewareFactory(self._auth_pair_middleware)],
"disable_server_verification": True,
}
location = (
flight.Location.for_grpc_tls(host, int(port_string))
if self._driver.encrypted
else flight.Location.for_grpc_tcp(host, int(port_string))
)
self._client = flight.FlightClient(location, **client_options)
def run_query(
self,
query: str,
params: Optional[Dict[str, Any]] = None,
database: Optional[str] = None,
custom_error: bool = True,
) -> DataFrame:
if params is None:
params = {}
if "gds.alpha.graph.project.remote" in query:
METHOD_NAME, aura_db_arrow_endpoint = self._get_or_request_auth_pair()
params["token"] = METHOD_NAME
params["host"] = aura_db_arrow_endpoint
params["config"] = {"useEncryption": False}
elif ".write" in query and "config" in params and "remote" in params["config"] and params["config"]["remote"]:
METHOD_NAME, aura_db_arrow_endpoint = self._get_or_request_auth_pair()
host, port_string = aura_db_arrow_endpoint.split(":")
del params["config"]["remote"]
params["config"]["arrowConnectionInfo"] = {
"hostname": host,
"port": int(port_string),
"bearerToken": METHOD_NAME,
"useEncryption": False,
}
return self._fallback_query_runner.run_query(query, params, database, custom_error)
def set_database(self, database: str) -> None:
self._fallback_query_runner.set_database(database)
def database(self) -> Optional[str]:
return self._fallback_query_runner.database()
def create_graph_constructor(
self, graph_name: str, concurrency: int, undirected_relationship_types: Optional[List[str]]
) -> GraphConstructor:
return self._fallback_query_runner.create_graph_constructor(
graph_name, concurrency, undirected_relationship_types
)
def close(self) -> None:
self._client.close()
self._driver.close()
self._fallback_query_runner.close()
def _get_or_request_auth_pair(self) -> Tuple[str, str]:
self._client.authenticate_basic_token(self._auth[0], self._auth[1])
return (self._auth_pair_middleware.METHOD_NAME(), self._auth_pair_middleware.endpoint())
class AuthPairInterceptingMiddlewareFactory(ClientMiddlewareFactory): # type: ignore
def __init__(self, middleware: "AuthPairInterceptingMiddleware", *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self._middleware = middleware
def start_call(self, info: Any) -> "AuthPairInterceptingMiddleware":
return self._middleware
class AuthPairInterceptingMiddleware(ClientMiddleware): # type: ignore
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
def received_headers(self, headers: Dict[str, Any]) -> None:
auth_header = headers.get("authorization")
auth_type, METHOD_NAME = self._read_auth_header(auth_header)
if auth_type == "Bearer":
self._token = METHOD_NAME
self._arrow_address = self._read_address_header(headers.get("arrowpluginaddress"))
def sending_headers(self) -> Dict[str, str]:
return {}
def METHOD_NAME(self) -> str:
return self._token
def endpoint(self) -> str:
return self._arrow_address
def _read_auth_header(self, auth_header: Any) -> Tuple[str, str]:
if isinstance(auth_header, List):
auth_header = auth_header[0]
elif not isinstance(auth_header, str):
            raise ValueError(f"Incompatible header format '{auth_header}'")
auth_type, METHOD_NAME = auth_header.split(" ", 1)
return (str(auth_type), str(METHOD_NAME))
def _read_address_header(self, address_header: Any) -> str:
if isinstance(address_header, List):
return str(address_header[0])
if isinstance(address_header, str):
return address_header
        raise ValueError(f"Incompatible header format '{address_header}'")
|
2,517 |
compressed image
|
import time
from typing import Tuple, List, Union
import depthai as dai
import numpy as np
import rclpy
import rclpy.node as node
# from geometry_msgs.msg import Vector3, Quaternion, Pose2D, Point, Transform, TransformStamped
# from std_msgs.msg import Header, ColorRGBA, String
# from visualization_msgs.msg import ImageMarker
from geometry_msgs.msg import Vector3, Quaternion
from sensor_msgs.msg import METHOD_NAME, Image, Imu # , PointCloud2, PointField, Imu # s, PointCloud
from std_msgs.msg import Header
from builtin_interfaces.msg import Time
from depthai_sdk.integrations.ros.imu_interpolation import ImuInterpolation
class DepthAi2Ros2:
xyz = dict()
def __init__(self, device: dai.Device) -> None:
self.start_time = dai.Clock.now()
self.device = device
self.imu_packets = []
self.imu_interpolation = ImuInterpolation()
def set_header(self, msg, dai_msg: Union[dai.ImgFrame, dai.IMUReport]) -> Header:
try:
msg.header.frame_id = str(dai_msg.getSequenceNum()) # ImgFrame
        except AttributeError:
msg.header.frame_id = str(dai_msg.sequence) # IMUReport
ts = dai_msg.getTimestampDevice() - self.start_time
# secs / nanosecs
msg.header.stamp = Time(sec=ts.seconds, nanosec=ts.microseconds * 1000)
return msg
def METHOD_NAME(self, imgFrame: dai.ImgFrame) -> METHOD_NAME:
msg = METHOD_NAME()
self.set_header(msg, imgFrame)
msg.format = "jpeg"
msg.data.frombytes(imgFrame.getData())
return msg
def Image(self, imgFrame: dai.ImgFrame) -> Image:
msg = Image()
self.set_header(msg, imgFrame)
msg.height = imgFrame.getHeight()
msg.width = imgFrame.getWidth()
msg.step = imgFrame.getWidth()
msg.is_bigendian = 0
type = imgFrame.getType()
TYPE = dai.ImgFrame.Type
if type == TYPE.RAW16: # Depth
msg.encoding = 'mono16'
msg.step *= 2 # 2 bytes per pixel
msg.data.frombytes(imgFrame.getData())
elif type in [TYPE.GRAY8, TYPE.RAW8]: # Mono frame
msg.encoding = 'mono8'
msg.data.frombytes(imgFrame.getData())
else:
msg.encoding = 'bgr8'
msg.data.frombytes(imgFrame.getCvFrame())
return msg
# def TfMessage(self,
# imgFrame: dai.ImgFrame,
# translation: Tuple[float, float, float] = (0., 0., 0.),
# rotation: Tuple[float, float, float, float] = (0., 0., 0., 0.)) -> tfMessage:
# msg = tfMessage()
# tf = TransformStamped()
# tf.header = self.header(imgFrame)
# tf.child_frame_id = str(imgFrame.getSequenceNum())
# tf.transform = Transform(
# translation=Vector3(x=translation[0], y=translation[1], z=translation[2]),
# rotation=Quaternion(x=rotation[0], y=rotation[1], z=rotation[2], w=rotation[3])
# )
# msg.transforms.append(tf)
# return msg
#
# def PointCloud2(self, imgFrame: dai.ImgFrame) -> PointCloud2:
# """
# Depth frame -> ROS1 PointCloud2 message
# """
# msg = PointCloud2()
# msg.header = self.header(imgFrame)
#
# heigth = str(imgFrame.getHeight())
# if heigth not in self.xyz:
# self._create_xyz(imgFrame.getWidth(), imgFrame.getHeight())
#
# frame = imgFrame.getCvFrame()
# frame = np.expand_dims(np.array(frame), axis=-1)
# pcl = self.xyz[heigth] * frame / 1000.0 # To meters
#
# msg.height = imgFrame.getHeight()
# msg.width = imgFrame.getWidth()
# msg.fields = [
# PointField(name="x", offset=0, datatype=PointField.FLOAT32, count=1),
# PointField(name="y", offset=4, datatype=PointField.FLOAT32, count=1),
# PointField(name="z", offset=8, datatype=PointField.FLOAT32, count=1)
# ]
# msg.is_bigendian = False
# msg.point_step = 12 # 3 * float32 (=4 bytes)
# msg.row_step = 12 * imgFrame.getWidth()
# msg.data = pcl.tobytes()
# msg.is_dense = True
# return msg
#
# def _create_xyz(self, width, height):
# calibData = self.device.readCalibration()
# M_right = calibData.getCameraIntrinsics(dai.CameraBoardSocket.RIGHT, dai.Size2f(width, height))
# camera_matrix = np.array(M_right).reshape(3, 3)
#
# xs = np.linspace(0, width - 1, width, dtype=np.float32)
# ys = np.linspace(0, height - 1, height, dtype=np.float32)
#
# # generate grid by stacking coordinates
# base_grid = np.stack(np.meshgrid(xs, ys)) # WxHx2
# points_2d = base_grid.transpose(1, 2, 0) # 1xHxWx2
#
# # unpack coordinates
# u_coord: np.array = points_2d[..., 0]
# v_coord: np.array = points_2d[..., 1]
#
# # unpack intrinsics
# fx: np.array = camera_matrix[0, 0]
# fy: np.array = camera_matrix[1, 1]
# cx: np.array = camera_matrix[0, 2]
# cy: np.array = camera_matrix[1, 2]
#
# # projective
# x_coord: np.array = (u_coord - cx) / fx
# y_coord: np.array = (v_coord - cy) / fy
#
# xyz = np.stack([x_coord, y_coord], axis=-1)
# self.xyz[str(height)] = np.pad(xyz, ((0, 0), (0, 0), (0, 1)), "constant", constant_values=1.0)
def Imu(self, dai_msg):
dai_msg: dai.IMUData
for packet in dai_msg.packets:
msg = Imu(
orientation=Quaternion(x=0.0, y=0.0, z=0.0, w=1.0),
orientation_covariance=np.array([-1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]),
angular_velocity=Vector3(x=0.0, y=0.0, z=0.0),
angular_velocity_covariance=np.zeros(9),
                linear_acceleration=Vector3(x=0.0, y=0.0, z=0.0),
linear_acceleration_covariance=np.zeros(9)
)
report = packet.acceleroMeter or packet.gyroscope or packet.magneticField or packet.rotationVector
self.set_header(msg, report)
self.imu_interpolation.Imu(msg, packet)
# TODO: publish from here directly, so single IMUData can result in more Imu packets?
return msg
|
2,518 |
get gw mac address
|
"""
Copyright 2022 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import binascii
import ipaddress
import logging
import socket
import subprocess
from typing import List, Optional, Tuple
import dpkt
import netifaces
from lte.protos.mobilityd_pb2 import IPAddress
from magma.pipelined.app.packet_parser import ParseSocketPacket
from magma.pipelined.ifaces import get_mac_address_from_iface
def METHOD_NAME(ip: IPAddress, vlan: str, non_nat_arp_egress_port: str) -> str:
gw_ip = str(ipaddress.ip_address(ip.address))
if ip.version == IPAddress.IPV4:
return _get_gw_mac_address_v4(gw_ip, vlan, non_nat_arp_egress_port)
elif ip.version == IPAddress.IPV6:
if vlan == "NO_VLAN":
try:
mac = get_mac_by_ip6(gw_ip)
logging.debug("Got mac %s for IP: %s", mac, gw_ip)
return mac
except ValueError:
logging.warning(
"Invalid GW Ip address: [%s]",
gw_ip,
)
else:
            logging.error("Not supported: GW IPv6: %s over vlan %s", gw_ip, vlan)
return ""
def get_mac_by_ip4(target_ip4: str) -> Optional[str]:
iface = get_iface_by_ip4(target_ip4)
if iface:
return _get_gw_mac_address_v4(
gw_ip=target_ip4,
vlan="NO_VLAN",
non_nat_arp_egress_port=iface,
)
return None
def get_iface_by_ip4(target_ip4: str) -> Optional[str]:
for iface in netifaces.interfaces():
iface_ip4 = netifaces.ifaddresses(iface)[netifaces.AF_INET][0]['addr']
netmask = netifaces.ifaddresses(iface)[netifaces.AF_INET][0]['netmask']
res_iface = ipaddress.ip_network(f'{iface_ip4}/{netmask}', strict=False)
res_target_ip4 = ipaddress.ip_network(f'{target_ip4}/{netmask}', strict=False)
if res_iface == res_target_ip4:
return iface
return None
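# A short self-contained sketch of the subnet test used above: two addresses
# "match" when, under the interface netmask, they collapse to the same network.
# The addresses below are illustrative RFC 5737 values.
def _example_same_subnet() -> bool:
    iface_net = ipaddress.ip_network("192.0.2.10/255.255.255.0", strict=False)
    target_net = ipaddress.ip_network("192.0.2.77/255.255.255.0", strict=False)
    return iface_net == target_net  # True: both collapse to 192.0.2.0/24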
def get_mac_by_ip6(gw_ip: str) -> str:
for iface, _ in get_ifaces_by_ip6(gw_ip):
# Refresh the ip neighbor table
if subprocess.run(["ping", "-c", "1", gw_ip], check=False).returncode != 0:
continue
res = subprocess.run(
["ip", "neigh", "get", gw_ip, "dev", iface],
capture_output=True,
check=False,
).stdout.decode("utf-8")
if "lladdr" in res:
res = res.split("lladdr ")[1].split(" ")[0]
return res
raise ValueError(f"No mac address found for ip6 {gw_ip}")
def get_ifaces_by_ip6(target_ip6: str) -> Tuple[List[str], List[str]]:
ifaces = []
ifaces_ip6 = []
for iface in netifaces.interfaces():
try:
for i in range(len(netifaces.ifaddresses(iface)[netifaces.AF_INET6])):
iface_ip6 = netifaces.ifaddresses(iface)[netifaces.AF_INET6][i]['addr']
netmask = netifaces.ifaddresses(iface)[netifaces.AF_INET6][i]['netmask']
res_prefix = ipaddress.IPv6Network(iface_ip6.split('%')[0] + '/' + netmask.split('/')[-1], strict=False)
target_prefix = ipaddress.IPv6Network(target_ip6.split('%')[0] + '/' + netmask.split('/')[-1], strict=False)
if res_prefix == target_prefix:
ifaces.append(iface)
ifaces_ip6.append(iface_ip6.split('%')[0])
except KeyError:
continue
return ifaces, ifaces_ip6
def _get_gw_mac_address_v4(gw_ip: str, vlan: str, non_nat_arp_egress_port: str) -> str:
try:
logging.debug(
"sending arp via egress: %s",
non_nat_arp_egress_port,
)
eth_mac_src, psrc = _get_addresses(non_nat_arp_egress_port)
pkt = _create_arp_packet(eth_mac_src, psrc, gw_ip, vlan)
logging.debug("ARP Req pkt:\n%s", pkt.pprint())
res = _send_packet_and_receive_response(pkt, vlan, non_nat_arp_egress_port)
if res is None:
logging.debug("Got Null response")
return ""
parsed = ParseSocketPacket(res)
logging.debug("ARP Res pkt %s", str(parsed))
if str(parsed.arp.psrc) != gw_ip:
            logging.warning(
                "Unexpected IP in ARP response. expected: %s pkt: %s",
                gw_ip,
                str(parsed),
            )
return ""
if vlan.isdigit():
if parsed.dot1q is not None and str(parsed.dot1q.vlan) == vlan:
mac = parsed.arp.hwsrc
else:
logging.warning(
"Unexpected vlan in ARP response. expected: %s pkt: %s",
vlan,
str(parsed),
)
return ""
else:
mac = parsed.arp.hwsrc
return mac.mac_address
except ValueError:
logging.warning(
"Invalid GW Ip address: [%s] or vlan %s",
gw_ip, vlan,
)
return ""
def _get_addresses(non_nat_arp_egress_port):
eth_mac_src = get_mac_address_from_iface(non_nat_arp_egress_port)
eth_mac_src = binascii.unhexlify(eth_mac_src.replace(':', ''))
psrc = "0.0.0.0"
egress_port_ip = netifaces.ifaddresses(non_nat_arp_egress_port)
if netifaces.AF_INET in egress_port_ip:
psrc = egress_port_ip[netifaces.AF_INET][0]['addr']
return eth_mac_src, psrc
def _create_arp_packet(eth_mac_src: bytes, psrc: str, gw_ip: str, vlan: str) -> dpkt.arp.ARP:
pkt = dpkt.arp.ARP(
sha=eth_mac_src,
spa=socket.inet_aton(psrc),
tha=b'\x00' * 6,
tpa=socket.inet_aton(gw_ip),
op=dpkt.arp.ARP_OP_REQUEST,
)
if vlan.isdigit():
pkt = dpkt.ethernet.VLANtag8021Q(
id=int(vlan), data=bytes(pkt), type=dpkt.ethernet.ETH_TYPE_ARP,
)
t = dpkt.ethernet.ETH_TYPE_8021Q
else:
t = dpkt.ethernet.ETH_TYPE_ARP
pkt = dpkt.ethernet.Ethernet(
dst=b'\xff' * 6, src=eth_mac_src, data=bytes(pkt), type=t,
)
return pkt
def _send_packet_and_receive_response(pkt: dpkt.arp.ARP, vlan: str, non_nat_arp_egress_port: str) -> Optional[bytes]:
buffsize = 2 ** 16
sol_packet = 263
packet_aux_data = 8
with socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.ntohs(0x0003)) as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, buffsize)
s.settimeout(50)
if vlan.isdigit():
s.setsockopt(sol_packet, packet_aux_data, 1)
s.setsockopt(socket.SOL_SOCKET, socket.SO_MARK, 1)
s.bind((non_nat_arp_egress_port, 0x0003))
s.send(bytes(pkt))
if vlan.isdigit():
res, aux, _, _ = s.recvmsg(0xffff, socket.CMSG_LEN(4096))
for cmsg_level, cmsg_type, cmsg_data in aux:
if cmsg_level == sol_packet and cmsg_type == packet_aux_data:
# add VLAN tag after ethernet header
res = res[:12] + cmsg_data[-1:-5:-1] + res[12:]
else:
res = s.recv(0xffff)
return res
|
2,519 |
do for each proc
|
# Copyright 2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from wa.framework import pluginloader
from wa.framework.exception import ConfigError
from wa.framework.instrument import is_installed
from wa.framework.plugin import Plugin
from wa.utils.log import log_error, indentcontext
from wa.utils.misc import isiterable
from wa.utils.types import identifier
class OutputProcessor(Plugin):
kind = 'output_processor'
requires = []
def __init__(self, **kwargs):
super(OutputProcessor, self).__init__(**kwargs)
self.is_enabled = True
def validate(self):
super(OutputProcessor, self).validate()
for instrument in self.requires:
if not is_installed(instrument):
msg = 'Instrument "{}" is required by {}, but is not installed.'
raise ConfigError(msg.format(instrument, self.name))
def initialize(self, context):
pass
def finalize(self, context):
pass
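# A hypothetical minimal subclass sketching the contract above: anything listed
# in `requires` must be an installed instrument, and the per-run/per-job hooks
# are looked up by name when the manager dispatches to enabled processors.
class ExampleOutputProcessor(OutputProcessor):
    name = 'example'
    def process_run_output(self, context):
        # A real processor would derive and save artifacts from `context` here.
        pass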
class ProcessorManager(object):
def __init__(self, loader=pluginloader):
self.loader = loader
self.logger = logging.getLogger('processor')
self.processors = []
def install(self, processor, context):
if not isinstance(processor, OutputProcessor):
processor = self.loader.get_output_processor(processor)
self.logger.debug('Installing {}'.format(processor.name))
processor.logger.context = context
self.processors.append(processor)
context.add_augmentation(processor)
def disable_all(self):
for output_processor in self.processors:
self._disable_output_processor(output_processor)
def enable_all(self):
for output_processor in self.processors:
self._enable_output_processor(output_processor)
def enable(self, to_enable):
if isiterable(to_enable):
for inst in to_enable:
self._enable_output_processor(inst)
else:
self._enable_output_processor(to_enable)
def disable(self, to_disable):
if isiterable(to_disable):
for inst in to_disable:
self._disable_output_processor(inst)
else:
self._disable_output_processor(to_disable)
def get_output_processor(self, processor):
if isinstance(processor, OutputProcessor):
return processor
processor = identifier(processor)
for p in self.processors:
if processor == p.name:
return p
raise ValueError('Output processor {} is not installed'.format(processor))
def get_enabled(self):
return [p for p in self.processors if p.is_enabled]
def get_disabled(self):
return [p for p in self.processors if not p.is_enabled]
def validate(self):
for proc in self.processors:
proc.validate()
def initialize(self, context):
for proc in self.processors:
proc.initialize(context)
def finalize(self, context):
for proc in self.processors:
proc.finalize(context)
def process_job_output(self, context):
self.METHOD_NAME('process_job_output', 'Processing using "{}"',
context.job_output, context.target_info,
context.run_output)
def export_job_output(self, context):
self.METHOD_NAME('export_job_output', 'Exporting using "{}"',
context.job_output, context.target_info,
context.run_output)
def process_run_output(self, context):
self.METHOD_NAME('process_run_output', 'Processing using "{}"',
context.run_output, context.target_info)
def export_run_output(self, context):
self.METHOD_NAME('export_run_output', 'Exporting using "{}"',
context.run_output, context.target_info)
def METHOD_NAME(self, method_name, message, *args):
with indentcontext():
for proc in self.processors:
if proc.is_enabled:
proc_func = getattr(proc, method_name, None)
if proc_func is None:
continue
try:
self.logger.info(message.format(proc.name))
proc_func(*args)
except Exception as e: # pylint: disable=broad-except
if isinstance(e, KeyboardInterrupt):
raise
log_error(e, self.logger)
def _enable_output_processor(self, inst):
inst = self.get_output_processor(inst)
self.logger.debug('Enabling output processor {}'.format(inst.name))
if not inst.is_enabled:
inst.is_enabled = True
def _disable_output_processor(self, inst):
inst = self.get_output_processor(inst)
self.logger.debug('Disabling output processor {}'.format(inst.name))
if inst.is_enabled:
inst.is_enabled = False
|
2,520 |
path
|
"""
.. _slice_example:
Slicing
~~~~~~~
Extract thin planar slices from a volume.
"""
import matplotlib.pyplot as plt
import numpy as np
# sphinx_gallery_thumbnail_number = 2
import pyvista as pv
from pyvista import examples
###############################################################################
# PyVista meshes have several slicing filters bound directly to all datasets.
# These filters allow you to slice through a volumetric dataset to extract and
# view sections through the volume of data.
#
# One of the most common slicing filters used in PyVista is the
# :func:`pyvista.DataSetFilters.slice_orthogonal` filter which creates three
# orthogonal slices through the dataset parallel to the three Cartesian planes.
# For example, let's slice through the sample geostatistical training image
# volume. First, load up the volume and preview it:
mesh = examples.load_channels()
# define a categorical colormap
cmap = plt.cm.get_cmap("viridis", 4)
mesh.plot(cmap=cmap)
###############################################################################
# Note that this dataset is a 3D volume and there might be regions within this
# volume that we would like to inspect. We can create slices through the mesh
# to gain further insight about the internals of the volume.
slices = mesh.slice_orthogonal()
slices.plot(cmap=cmap)
###############################################################################
# The orthogonal slices can be easily translated throughout the volume:
slices = mesh.slice_orthogonal(x=20, y=20, z=30)
slices.plot(cmap=cmap)
###############################################################################
# We can also add just a single slice of the volume by specifying the origin
# and normal of the slicing plane with the :func:`pyvista.DataSetFilters.slice`
# filter:
# Single slice - origin defaults to the center of the mesh
single_slice = mesh.slice(normal=[1, 1, 0])
p = pv.Plotter()
p.add_mesh(mesh.outline(), color="k")
p.add_mesh(single_slice, cmap=cmap)
p.show()
###############################################################################
# Adding slicing planes uniformly across an axial direction can also be
# automated with the :func:`pyvista.DataSetFilters.slice_along_axis` filter:
slices = mesh.slice_along_axis(n=7, axis="y")
slices.plot(cmap=cmap)
###############################################################################
# Slice Along Line
# ++++++++++++++++
#
# We can also slice a dataset along a :func:`pyvista.Spline` or
# :func:`pyvista.Line` using the :func:`pyvista.DataSetFilters.slice_along_line` filter.
#
# First, define a line source through the dataset of interest. Please note
# that this type of slicing is computationally expensive and might take a while
# if there are a lot of points in the line - try to keep the resolution of
# the line low.
model = examples.load_channels()
def METHOD_NAME(y):
"""Equation: x = a(y-h)^2 + k"""
a = 110.0 / 160.0**2
x = a * y**2 + 0.0
return x, y
x, y = METHOD_NAME(np.arange(model.bounds[2], model.bounds[3], 15.0))
zo = np.linspace(9.0, 11.0, num=len(y))
points = np.c_[x, y, zo]
spline = pv.Spline(points, 15)
spline
###############################################################################
# Then run the filter
slc = model.slice_along_line(spline)
slc
###############################################################################
p = pv.Plotter()
p.add_mesh(slc, cmap=cmap)
p.add_mesh(model.outline())
p.show(cpos=[1, -1, 1])
###############################################################################
# Multiple Slices in Vector Direction
# +++++++++++++++++++++++++++++++++++
#
# Slice a mesh along a vector direction perpendicularly.
mesh = examples.download_brain()
# Create vector
vec = np.array([1.0, 2.0, 1.0])
# Normalize the vector
normal = vec / np.linalg.norm(vec)
# Make points along that vector for the extent of your slices
a = mesh.center + normal * mesh.length / 3.0
b = mesh.center - normal * mesh.length / 3.0
# Define the line/points for the slices
n_slices = 5
line = pv.Line(a, b, n_slices)
# Generate all of the slices
slices = pv.MultiBlock()
for point in line.points:
slices.append(mesh.slice(normal=normal, origin=point))
###############################################################################
p = pv.Plotter()
p.add_mesh(mesh.outline(), color="k")
p.add_mesh(slices, opacity=0.75)
p.add_mesh(line, color="red", line_width=5)
p.show()
###############################################################################
# Slice At Different Bearings
# +++++++++++++++++++++++++++
#
# From `pyvista-support#23 <https://github.com/pyvista/pyvista-support/issues/23>`_
#
# An example of how to get many slices at different bearings all centered
# around a user-chosen location.
#
# Create a point to orient slices around
ranges = np.array(model.bounds).reshape(-1, 2).ptp(axis=1)
point = np.array(model.center) - ranges * 0.25
###############################################################################
# Now generate a few normal vectors to rotate a slice around the z-axis.
# Use the equation for a circle since it's about the Z-axis.
increment = np.pi / 6.0
# use a container to hold all the slices
slices = pv.MultiBlock() # treat like a dictionary/list
for theta in np.arange(0, np.pi, increment):
normal = np.array([np.cos(theta), np.sin(theta), 0.0]).dot(np.pi / 2.0)
name = f'Bearing: {np.rad2deg(theta):.2f}'
slices[name] = model.slice(origin=point, normal=normal)
slices
###############################################################################
# And now display it.
p = pv.Plotter()
p.add_mesh(slices, cmap=cmap)
p.add_mesh(model.outline())
p.show()
|
2,521 |
json seems like protocol
|
"""File identifier interface."""
import json
from dataclasses import dataclass
from typing import Any, Dict, Sequence, Union
import anyio
from opentrons_shared_data.robot.dev_types import RobotType
from opentrons.protocols.api_support.definitions import MAX_SUPPORTED_VERSION
from opentrons.protocols.api_support.types import APIVersion
from opentrons.protocols import parse
from opentrons.protocols.types import MalformedPythonProtocolError, PythonProtocol
from .file_reader_writer import BufferedFile
from .protocol_files_invalid_error import ProtocolFilesInvalidError
from .protocol_source import Metadata
JsonDict = Dict[str, Any]
@dataclass(frozen=True)
class IdentifiedJsonMain:
"""A file identified as a JSON protocol's main .json file."""
original_file: BufferedFile
"""The original file that this was identified from."""
unvalidated_json: JsonDict
"""The parsed JSON contents.
Believed, but not confirmed at this point, to conform to one of our JSON protocol
schemas.
"""
schema_version: int
"""The JSON protocol schema that this file is believed to conform to."""
robot_type: RobotType
"""The type of robot on which this protocol is meant to run."""
metadata: Metadata
"""The protocol metadata extracted from this file."""
@dataclass(frozen=True)
class IdentifiedPythonMain:
"""A file identified as a Python protocol's main .py file."""
original_file: BufferedFile
"""The original file that this was identified from."""
api_level: APIVersion
"""The Python Protocol API apiLevel declared by the Python source."""
robot_type: RobotType
"""The type of robot on which this protocol is meant to run."""
metadata: Metadata
"""The protocol metadata extracted from this file."""
@dataclass(frozen=True)
class IdentifiedLabwareDefinition:
"""A file identified as a labware definition."""
original_file: BufferedFile
"""The original file that this was identified from."""
unvalidated_json: JsonDict
"""The parsed JSON contents.
Believed, but not confirmed at this point, to conform to our labware definition
schema v2.
"""
@dataclass(frozen=True)
class IdentifiedData:
"""A file identified as a user-defined data file."""
original_file: BufferedFile
"""The original file that this was identified from."""
IdentifiedFile = Union[
IdentifiedJsonMain,
IdentifiedPythonMain,
IdentifiedLabwareDefinition,
IdentifiedData,
]
class FileIdentificationError(ProtocolFilesInvalidError):
"""Raised when FileIdentifier detects an invalid file."""
class FileIdentifier:
"""File identifier interface."""
@staticmethod
async def identify(
files: Sequence[BufferedFile], python_parse_mode: parse.PythonParseMode
) -> Sequence[IdentifiedFile]:
"""Identify the type and extract basic information from each file.
This is intended to take ≲1 second per protocol on an OT-2, so it can extract
basic information about all stored protocols relatively quickly. Fully parsing
        and validating protocols can take 10-100x longer, so that's left to other
        units, to be done only when it's really needed.
"""
return [await _identify(file, python_parse_mode) for file in files]
async def _identify(
file: BufferedFile, python_parse_mode: parse.PythonParseMode
) -> IdentifiedFile:
lower_file_name = file.name.lower()
if lower_file_name.endswith(".json"):
return await _analyze_json(json_file=file)
elif lower_file_name.endswith(".py"):
return _analyze_python_protocol(
py_file=file, python_parse_mode=python_parse_mode
)
elif lower_file_name.endswith(".csv") or lower_file_name.endswith(".txt"):
return IdentifiedData(original_file=file)
else:
raise FileIdentificationError(
f"{file.name} has an unrecognized file extension."
)
async def _analyze_json(
json_file: BufferedFile,
) -> Union[IdentifiedJsonMain, IdentifiedLabwareDefinition]:
try:
json_contents = await anyio.to_thread.run_sync(json.loads, json_file.contents)
except json.JSONDecodeError as e:
raise FileIdentificationError(
f"{json_file.name} is not valid JSON. {str(e)}"
) from e
if _json_seems_like_labware(json_contents):
return IdentifiedLabwareDefinition(
original_file=json_file,
unvalidated_json=json_contents,
)
elif METHOD_NAME(json_contents):
return _analyze_json_protocol(
original_file=json_file,
json_contents=json_contents,
)
else:
raise FileIdentificationError(
f"{json_file.name} is not a known Opentrons format."
)
def _json_seems_like_labware(json: JsonDict) -> bool:
# "ordering" and "wells" are required properties in our labware schema v2.
return "ordering" in json and "wells" in json
def METHOD_NAME(json: JsonDict) -> bool:
# "schemaVersion" and "commands" are required properties in all of our JSON
# protocol schemas since schema v3. (v7 is the latest at the time of writing.)
#
# When we stop supporting v3 files, we can look at "$otSharedSchema" instead,
# which is more precise.
return "schemaVersion" in json and "commands" in json
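# A quick illustration of the two heuristics above on hand-written stubs; these
# dicts are not real labware definitions or protocols, just the minimal keys.
def _example_heuristics() -> None:
    labware_stub = {"ordering": [], "wells": {}}
    protocol_stub = {"schemaVersion": 6, "commands": []}
    assert _json_seems_like_labware(labware_stub)
    assert METHOD_NAME(protocol_stub)
    assert not _json_seems_like_labware(protocol_stub)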
def _analyze_json_protocol(
original_file: BufferedFile, json_contents: JsonDict
) -> IdentifiedJsonMain:
error_message = f"{original_file.name} is not a valid JSON protocol."
try:
metadata = json_contents["metadata"]
schema_version = json_contents["schemaVersion"]
robot_type = json_contents["robot"]["model"]
except KeyError as e:
raise FileIdentificationError(error_message) from e
# todo(mm, 2022-12-22): A JSON protocol file's metadata is not quite just an
# arbitrary dict: its fields are supposed to follow a schema. Should we validate
# this metadata against that schema instead of doing this simple isinstance() check?
if not isinstance(metadata, dict):
raise FileIdentificationError(error_message)
if not isinstance(schema_version, int):
raise FileIdentificationError(error_message)
if robot_type not in ("OT-2 Standard", "OT-3 Standard"):
raise FileIdentificationError(error_message)
return IdentifiedJsonMain(
original_file=original_file,
unvalidated_json=json_contents,
schema_version=schema_version,
robot_type=robot_type,
metadata=metadata,
)
def _analyze_python_protocol(
py_file: BufferedFile,
python_parse_mode: parse.PythonParseMode,
) -> IdentifiedPythonMain:
try:
parsed = parse.parse(
protocol_file=py_file.contents,
filename=py_file.name,
python_parse_mode=python_parse_mode,
)
except MalformedPythonProtocolError as e:
raise FileIdentificationError(e.short_message) from e
# We know this should never be a JsonProtocol. Help out the type-checker.
assert isinstance(
parsed, PythonProtocol
), "Parsing a Python file returned something other than a Python protocol."
if parsed.api_level > MAX_SUPPORTED_VERSION:
raise FileIdentificationError(
f"API version {parsed.api_level} is not supported by this "
f"robot software. Please either reduce your requested API "
f"version or update your robot."
)
return IdentifiedPythonMain(
original_file=py_file,
metadata=parsed.metadata or {},
robot_type=parsed.robot_type,
api_level=parsed.api_level,
)
|
2,522 |
main
|
#!/usr/bin/env python3
# SPDX-License-Identifier: LGPL-2.1-only
#
# Advanced cgclassify functionality test - '-b' '-g' <controller> (cgroup v2)
#
# Copyright (c) 2023 Oracle and/or its affiliates.
# Author: Kamalesh Babulal <[email protected]>
#
from cgroup import Cgroup, CgroupVersion
from systemd import Systemd
from process import Process
from run import RunError
import consts
import ftests
import time
import sys
import os
CONTROLLER = 'cpu'
SYSTEMD_CGNAME = '066_cg_in_scope'
OTHER_CGNAME = '066_cg_not_in_scope'
SLICE = 'libcgtests.slice'
SCOPE = 'test066.scope'
CONFIG_FILE_NAME = os.path.join(os.getcwd(), '066cgconfig.conf')
SYSTEMD_PIDS = ''
OTHER_PIDS = ''
def prereqs(config):
result = consts.TEST_PASSED
cause = None
if CgroupVersion.get_version('cpu') != CgroupVersion.CGROUP_V2:
result = consts.TEST_SKIPPED
cause = 'This test requires the cgroup v2 cpu controller'
return result, cause
if config.args.container:
result = consts.TEST_SKIPPED
cause = 'This test cannot be run within a container'
return result, cause
def setup(config):
result = consts.TEST_PASSED
cause = None
pid = Systemd.write_config_with_pid(config, CONFIG_FILE_NAME, SLICE, SCOPE)
Cgroup.configparser(config, load_file=CONFIG_FILE_NAME)
# create and check if the cgroup was created under the systemd default path
if not Cgroup.create_and_validate(config, None, SYSTEMD_CGNAME):
result = consts.TEST_FAILED
cause = (
'Failed to create systemd delegated cgroup {} under '
'/sys/fs/cgroup/{}/{}/'.format(SYSTEMD_CGNAME, SLICE, SCOPE)
)
return result, cause
    # With cgroup v2, we can't enable a controller for the child cgroup while
    # a task is attached to test066.scope. Attach the task from test066.scope
    # to the child cgroup SYSTEMD_CGNAME and then enable the cpu controller in
    # the parent, so that cgroup.get() works.
Cgroup.set(config, cgname=SYSTEMD_CGNAME, setting='cgroup.procs', value=pid)
Cgroup.set(
config, cgname=(os.path.join(SLICE, SCOPE)), setting='cgroup.subtree_control',
value='+cpu', ignore_systemd=True
)
# create and check if the cgroup was created under the controller root
if not Cgroup.create_and_validate(config, CONTROLLER, OTHER_CGNAME, ignore_systemd=True):
result = consts.TEST_FAILED
cause = (
'Failed to create cgroup {} under '
'/sys/fs/cgroup/{}/'.format(OTHER_CGNAME, CONTROLLER)
)
return result, cause
def create_process_get_pid(config, CGNAME, SLICENAME='', ignore_systemd=False):
result = consts.TEST_PASSED
cause = None
config.process.create_process_in_cgroup(
config, CONTROLLER, CGNAME,
ignore_systemd=ignore_systemd
)
pids = Cgroup.get_pids_in_cgroup(config, os.path.join(SLICENAME, CGNAME), CONTROLLER)
if pids is None:
result = consts.TEST_FAILED
cause = 'No processes were found in cgroup {}'.format(CGNAME)
return pids, result, cause
def test(config):
global SYSTEMD_PIDS, OTHER_PIDS
result = consts.TEST_PASSED
cause = None
    # Test cgclassify, which creates a process and then uses cgclassify
    # to migrate the task to the cgroup.
SYSTEMD_PIDS, result, cause = create_process_get_pid(
config, SYSTEMD_CGNAME,
os.path.join(SLICE, SCOPE)
)
if result == consts.TEST_FAILED:
return result, cause
OTHER_PIDS, result, tmp_cause = create_process_get_pid(
config, OTHER_CGNAME,
ignore_systemd=True
)
if result == consts.TEST_FAILED:
return result, cause
# classify a task from the non-systemd scope cgroup (OTHER_CGNAME) to
# systemd scope cgroup (SYSTEMD_CGNAME). Migration should fail due to
# the incorrect destination cgroup path that gets constructed, without
    # the systemd slice/scope (when ignore_systemd=True)
try:
Cgroup.classify(config, CONTROLLER, SYSTEMD_CGNAME, OTHER_PIDS, ignore_systemd=True)
except RunError as re:
err_str = 'Error changing group of pid {}: Cgroup does not exist'.format(OTHER_PIDS[0])
if re.stderr != err_str:
raise re
else:
result = consts.TEST_FAILED
cause = 'Changing group of pid {} erroneously succeeded'.format(OTHER_PIDS[0])
# classify a task from the systemd scope cgroup (SYSTEMD_CGNAME) to
# non-systemd scope cgroup (OTHER_CGNAME). Migration should fail due
# to the incorrect destination cgroup path that gets constructed, with
    # the systemd slice/scope (when ignore_systemd=False)
try:
Cgroup.classify(config, CONTROLLER, OTHER_CGNAME, SYSTEMD_PIDS[1])
except RunError as re:
err_str = 'Error changing group of pid {}: Cgroup does not exist'.format(
SYSTEMD_PIDS[1])
if re.stderr != err_str:
raise re
else:
result = consts.TEST_FAILED
tmp_cause = 'Changing group of pid {} erroneously succeeded'.format(SYSTEMD_PIDS[1])
cause = '\n'.join(filter(None, [cause, tmp_cause]))
# classify the task from the non-systemd scope cgroup to systemd scope cgroup.
Cgroup.classify(config, CONTROLLER, SYSTEMD_CGNAME, OTHER_PIDS)
return result, cause
def teardown(config):
global SYSTEMD_PIDS, OTHER_PIDS
Process.kill(config, SYSTEMD_PIDS)
Process.kill(config, OTHER_PIDS)
# We need a pause, so that cgroup.procs gets updated.
time.sleep(1)
os.remove(CONFIG_FILE_NAME)
try:
Cgroup.delete(config, CONTROLLER, cgname=SLICE, ignore_systemd=True)
except RunError as re:
if 'No such file or directory' not in re.stderr:
raise re
    # In case the error occurs before the creation of OTHER_CGNAME,
# let's ignore the exception
try:
Cgroup.delete(config, CONTROLLER, OTHER_CGNAME, ignore_systemd=True)
except RunError as re:
if 'No such file or directory' not in re.stderr:
raise re
def METHOD_NAME(config):
[result, cause] = prereqs(config)
if result != consts.TEST_PASSED:
return [result, cause]
[result, cause] = setup(config)
if result != consts.TEST_PASSED:
return [result, cause]
try:
[result, cause] = test(config)
finally:
teardown(config)
return [result, cause]
if __name__ == '__main__':
config = ftests.parse_args()
# this test was invoked directly. run only it
config.args.num = int(os.path.basename(__file__).split('-')[0])
sys.exit(ftests.METHOD_NAME(config))
# vim: set et ts=4 sw=4:
|
2,523 |
test is cors
|
from datetime import datetime
from unittest.mock import Mock, patch
from actstream.actions import follow
from actstream.models import Action, Follow
from actstream.signals import action
from django.contrib.contenttypes.models import ContentType
from rest_framework.test import APIClient
from kitsune.notifications import api
from kitsune.notifications import tasks as notification_tasks
from kitsune.notifications.models import Notification, RealtimeRegistration
from kitsune.questions.tests import AnswerFactory, QuestionFactory
from kitsune.sumo.tests import TestCase
from kitsune.sumo.urlresolvers import reverse
from kitsune.users.templatetags.jinja_helpers import profile_avatar
from kitsune.users.tests import UserFactory
class TestPushNotificationRegistrationSerializer(TestCase):
def setUp(self):
self.user = UserFactory()
self.profile = self.user.profile
self.request = Mock()
self.request.user = self.user
self.context = {
"request": self.request,
}
self.data = {
"creator": self.user,
"push_url": "https://example.com/notifications/123123123",
}
def test_automatic_creator(self):
del self.data["creator"]
serializer = api.PushNotificationRegistrationSerializer(
context=self.context, data=self.data
)
assert serializer.is_valid()
self.assertEqual(serializer.errors, {})
obj = serializer.save()
self.assertEqual(obj.creator, self.user)
def test_cant_register_for_other_users(self):
wrong_user = UserFactory()
self.data["creator"] = wrong_user
serializer = api.PushNotificationRegistrationSerializer(
context=self.context, data=self.data
)
assert not serializer.is_valid()
self.assertEqual(
serializer.errors,
{
"creator": ["Can't register push notifications for another user."],
},
)
class TestNotificationSerializer(TestCase):
def test_correct_fields(self):
follower = UserFactory()
followed = UserFactory()
q = QuestionFactory(creator=followed)
# The above might make follows, which this test isn't about. Clear them out.
Follow.objects.all().delete()
follow(follower, followed)
# Make a new action for the above. This should trigger notifications
action.send(followed, verb="asked", action_object=q)
act = Action.objects.order_by("-id")[0]
notification = Notification.objects.get(action=act)
serializer = api.NotificationSerializer(instance=notification)
self.assertEqual(serializer.data["is_read"], False)
self.assertEqual(
serializer.data["actor"],
{
"type": "user",
"username": followed.username,
"display_name": followed.profile.name,
"avatar": profile_avatar(followed),
},
)
self.assertEqual(serializer.data["verb"], "asked")
self.assertEqual(serializer.data["action_object"]["type"], "question")
self.assertEqual(serializer.data["action_object"]["id"], q.id)
self.assertEqual(serializer.data["target"], None)
# Check that the serialized data is in the correct format. If it is
# not, this will throw an exception.
datetime.strptime(serializer.data["timestamp"], "%Y-%m-%dT%H:%M:%SZ")
class TestNotificationViewSet(TestCase):
def setUp(self):
self.client = APIClient()
self.follower = UserFactory()
self.followed = UserFactory()
self.question = QuestionFactory(creator=self.followed)
# The above might make follows, which this test isn't about. Clear them out.
Follow.objects.all().delete()
follow(self.follower, self.followed)
def _makeNotification(self, is_read=False):
# Make a new action. This should trigger notifications
action.send(self.followed, verb="asked", action_object=self.question)
act = Action.objects.order_by("-id")[0]
n = Notification.objects.get(action=act)
if is_read:
n.is_read = True
n.save()
return n
def test_mark_read(self):
n = self._makeNotification()
self.client.force_authenticate(user=self.follower)
res = self.client.post(reverse("notification-mark-read", args=[n.id]))
self.assertEqual(res.status_code, 204)
n = Notification.objects.get(id=n.id)
self.assertEqual(n.is_read, True)
def test_mark_unread(self):
n = self._makeNotification(is_read=True)
self.client.force_authenticate(user=self.follower)
res = self.client.post(reverse("notification-mark-unread", args=[n.id]))
self.assertEqual(res.status_code, 204)
n = Notification.objects.get(id=n.id)
self.assertEqual(n.is_read, False)
def test_filter_is_read_false(self):
n = self._makeNotification(is_read=False)
self._makeNotification(is_read=True)
self.client.force_authenticate(user=self.follower)
res = self.client.get(reverse("notification-list") + "?is_read=0")
self.assertEqual(res.status_code, 200)
self.assertEqual([d["id"] for d in res.data], [n.id])
def test_filter_is_read_true(self):
self._makeNotification(is_read=False)
n = self._makeNotification(is_read=True)
self.client.force_authenticate(user=self.follower)
res = self.client.get(reverse("notification-list") + "?is_read=1")
self.assertEqual(res.status_code, 200)
self.assertEqual([d["id"] for d in res.data], [n.id])
@patch.object(notification_tasks, "requests")
class RealtimeViewSet(TestCase):
def setUp(self):
self.client = APIClient()
def test_updates_subview(self, requests):
requests.put.return_value.status_code = 200
u = UserFactory()
q = QuestionFactory(content="asdf")
ct = ContentType.objects.get_for_model(q)
rt = RealtimeRegistration.objects.create(
creator=u, content_type=ct, object_id=q.id, endpoint="http://example.com/"
)
# Some of the above may have created actions, which we don't care about.
Action.objects.all().delete()
# This should create an action that will trigger the above.
a = AnswerFactory(question=q, content="asdf")
self.client.force_authenticate(user=u)
url = reverse("realtimeregistration-updates", args=[rt.id])
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
self.assertEqual(len(res.data), 1)
act = res.data[0]
self.assertEqual(act["actor"]["username"], a.creator.username)
self.assertEqual(act["target"]["content"], q.content_parsed)
self.assertEqual(act["action_object"]["content"], a.content_parsed)
def METHOD_NAME(self, requests):
u = UserFactory()
q = QuestionFactory()
self.client.force_authenticate(user=u)
url = reverse("realtimeregistration-list")
data = {
"content_type": "question",
"object_id": q.id,
"endpoint": "http://example.com",
}
res = self.client.post(url, data, HTTP_ORIGIN="http://example.com")
self.assertEqual(res.status_code, 201)
self.assertEqual(res["Access-Control-Allow-Origin"], "*")
|
2,524 |
remove data loading plan
|
import os
import re
from cache import cached
from db import node_database
from flask import request, g
from middlewares import middleware, medical_folder_dataset, common
from schemas import ValidateMedicalFolderReferenceCSV, \
ValidateMedicalFolderRoot, \
ValidateSubjectsHasAllModalities, \
ValidateMedicalFolderAddRequest, \
ValidateDataLoadingPlanAddRequest, \
ValidateDataLoadingPlanDeleteRequest, \
PreviewDatasetRequest
from utils import error, validate_request_data, response
from fedbiomed.common.data import MedicalFolderController, MapperBlock, MedicalFolderLoadingBlockTypes
from fedbiomed.common.exceptions import FedbiomedError
from fedbiomed.node.dataset_manager import DatasetManager
from . import api
from config import config
dataset_manager = DatasetManager()
# Medical Folder Controller
mf_controller = MedicalFolderController()
# Path to write and read the datafiles
DATA_PATH_RW = config['DATA_PATH_RW']
# Database table (default datasets table of TinyDB) and query object
table = node_database.table_datasets()
query = node_database.query()
@api.route('/datasets/medical-folder-dataset/validate-reference-column', methods=['POST'])
@validate_request_data(schema=ValidateMedicalFolderReferenceCSV)
@middleware(middlewares=[medical_folder_dataset.read_medical_folder_reference,
medical_folder_dataset.validate_available_subjects])
def validate_reference_csv_column():
""" Validate selected reference CSV and column shows folder names """
subjects = g.available_subjects
return response({"valid": True, "subjects": subjects}), 200
@api.route('/datasets/medical-folder-dataset/validate-root', methods=['POST'])
@validate_request_data(schema=ValidateMedicalFolderRoot)
@middleware(middlewares=[medical_folder_dataset.validate_medical_folder_root])
def validate_root_path():
"""Validates MedicalFolder Dataset root path"""
return response(data={"valid": True, "modalities": g.modalities}), 200
@api.route('/datasets/medical-folder-dataset/validate-all-modalities', methods=['POST'])
@validate_request_data(schema=ValidateSubjectsHasAllModalities)
@middleware(middlewares=[medical_folder_dataset.validate_all_modalities])
def validate_subjects_has_all_modalities():
"""Validates MedicalFolder Dataset has subjects with all modalities"""
return response(data={"valid": True, "subjects": g.subjects}), 200
@api.route('/datasets/medical-folder-dataset/add', methods=['POST'])
@validate_request_data(schema=ValidateMedicalFolderAddRequest)
@middleware(middlewares=[common.check_tags_already_registered,
medical_folder_dataset.load_dlp,
medical_folder_dataset.validate_medical_folder_root,
medical_folder_dataset.read_medical_folder_reference,
medical_folder_dataset.validate_available_subjects])
def add_medical_folder_dataset():
""" Adds MedicalFolder dataset into database of NODE """
# Request object as JSON
req = request.json
data_path_save = os.path.join(config['DATA_PATH_SAVE'], *req['medical_folder_root'])
if req["reference_csv_path"] is None:
dataset_parameters = {}
else:
reference_csv = os.path.join(config['DATA_PATH_SAVE'], *req["reference_csv_path"])
dataset_parameters = {"index_col": req["index_col"],
"tabular_file": reference_csv}
try:
dataset_id = dataset_manager.add_database(
name=req["name"],
data_type="medical-folder",
tags=req['tags'],
description=req['desc'],
path=data_path_save,
dataset_parameters=dataset_parameters,
data_loading_plan=g.dlp,
save_dlp=False)
except FedbiomedError as e:
return error(str(e)), 400
except Exception as e:
return error("Unexpected error: " + str(e)), 400
# Get saved dataset document
res = table.get(query.dataset_id == dataset_id)
if not res:
return error("Medical Folder Dataset is not properly deployed. "
"Please try again."), 400
return response(data=res), 200
@api.route('/datasets/medical-folder-dataset/add-dlp', methods=['POST'])
@validate_request_data(schema=ValidateDataLoadingPlanAddRequest)
@middleware(middlewares=[medical_folder_dataset.create_dlp])
def add_data_loading_plan():
"""Adds DataLoadingPlan into database of NODE """
try:
dlp_id = dataset_manager.save_data_loading_plan(g.dlp)
except FedbiomedError as e:
return error(f"Cannot save data loading plan for customizations: {e}"), 400
if dlp_id is None:
return error("Cannot save data loading plan for customizations: no DLP id"), 400
return response(data=dlp_id), 200
@api.route('/datasets/medical-folder-dataset/delete-dlp', methods=['POST'])
@validate_request_data(schema=ValidateDataLoadingPlanDeleteRequest)
def METHOD_NAME():
"""Remove DataLoadingPlan from database of NODE """
# Request object as JSON
req = request.json
try:
dataset_manager.remove_dlp_by_id(req['dlp_id'], True)
except FedbiomedError as e:
return error(f"Cannot remove data loading plan for customizations: {e}"), 400
return response(data=True), 200
@api.route('/datasets/medical-folder-dataset/preview', methods=['POST'])
@validate_request_data(schema=PreviewDatasetRequest)
@cached(key="dataset_id", prefix="medical_folder_dataset-preview", timeout=600)
def medical_folder_preview():
"""Gets preview of MedicalFolder dataset by providing a table of subject and available modalities"""
# Request object as JSON
req = request.json
dataset = table.get(query.dataset_id == req['dataset_id'])
# Extract data path where the files are saved in the local GUI repository
rexp = re.match('^' + config['DATA_PATH_SAVE'], dataset['path'])
data_path = dataset['path'].replace(rexp.group(0), config['DATA_PATH_RW'])
mf_controller.root = data_path
if "index_col" in dataset["dataset_parameters"]:
# Extract data path where the files are saved in the local GUI repository
rexp = re.match('^' + config['DATA_PATH_SAVE'], dataset['path'])
reference_path = dataset["dataset_parameters"]["tabular_file"].replace(rexp.group(0),
config['DATA_PATH_RW'])
reference_csv = mf_controller.read_demographics(
path=reference_path,
index_col=dataset["dataset_parameters"]["index_col"]
)
subject_table = mf_controller.subject_modality_status(index=reference_csv.index)
else:
subject_table = mf_controller.subject_modality_status()
modalities, _ = mf_controller.modalities()
data = {
"subject_table": subject_table,
"modalities": modalities,
}
return response(data=data), 200
@api.route('/datasets/medical-folder-dataset/default-modalities', methods=['GET'])
def get_default_modalities():
formatted_modalities = [{'value': name, 'label': name} for name in MedicalFolderController.default_modality_names]
return response(data={'default_modalities': formatted_modalities}), 200
|
2,525 |
output child post
|
import typing as t
from ast import literal_eval
from ast import parse
from itertools import chain
from itertools import islice
from types import GeneratorType
from . import nodes
from .compiler import CodeGenerator
from .compiler import Frame
from .compiler import has_safe_repr
from .environment import Environment
from .environment import Template
def native_concat(values: t.Iterable[t.Any]) -> t.Optional[t.Any]:
"""Return a native Python type from the list of compiled nodes. If
the result is a single node, its value is returned. Otherwise, the
nodes are concatenated as strings. If the result can be parsed with
:func:`ast.literal_eval`, the parsed value is returned. Otherwise,
the string is returned.
:param values: Iterable of outputs to concatenate.
"""
head = list(islice(values, 2))
if not head:
return None
if len(head) == 1:
raw = head[0]
if not isinstance(raw, str):
return raw
else:
if isinstance(values, GeneratorType):
values = chain(head, values)
raw = "".join([str(v) for v in values])
try:
return literal_eval(
# In Python 3.10+ ast.literal_eval removes leading spaces/tabs
# from the given string. For backwards compatibility we need to
# parse the string ourselves without removing leading spaces/tabs.
parse(raw, mode="eval")
)
except (ValueError, SyntaxError, MemoryError):
return raw
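# A minimal, illustrative sketch (not part of the original module) of how
# native_concat resolves different render outputs; the inputs below are
# made-up examples and assume only the function defined above.
def _native_concat_demo():
    assert native_concat(v for v in []) is None                  # no output nodes
    assert native_concat(v for v in [42]) == 42                  # single non-str node returned as-is
    assert native_concat(str(v) for v in [1, 2]) == 12           # joined, then parsed by literal_eval
    assert native_concat(s for s in ["foo", "bar"]) == "foobar"  # not parseable, stays a str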
class NativeCodeGenerator(CodeGenerator):
"""A code generator which renders Python types by not adding
``str()`` around output nodes.
"""
@staticmethod
def _default_finalize(value: t.Any) -> t.Any:
return value
def _output_const_repr(self, group: t.Iterable[t.Any]) -> str:
return repr("".join([str(v) for v in group]))
def _output_child_to_const(
self, node: nodes.Expr, frame: Frame, finalize: CodeGenerator._FinalizeInfo
) -> t.Any:
const = node.as_const(frame.eval_ctx)
if not has_safe_repr(const):
raise nodes.Impossible()
if isinstance(node, nodes.TemplateData):
return const
return finalize.const(const) # type: ignore
def _output_child_pre(
self, node: nodes.Expr, frame: Frame, finalize: CodeGenerator._FinalizeInfo
) -> None:
if finalize.src is not None:
self.write(finalize.src)
def METHOD_NAME(
self, node: nodes.Expr, frame: Frame, finalize: CodeGenerator._FinalizeInfo
) -> None:
if finalize.src is not None:
self.write(")")
class NativeEnvironment(Environment):
"""An environment that renders templates to native Python types."""
code_generator_class = NativeCodeGenerator
concat = staticmethod(native_concat) # type: ignore
class NativeTemplate(Template):
environment_class = NativeEnvironment
def render(self, *args: t.Any, **kwargs: t.Any) -> t.Any:
"""Render the template to produce a native Python type. If the
result is a single node, its value is returned. Otherwise, the
nodes are concatenated as strings. If the result can be parsed
with :func:`ast.literal_eval`, the parsed value is returned.
Otherwise, the string is returned.
"""
ctx = self.new_context(dict(*args, **kwargs))
try:
return self.environment_class.concat( # type: ignore
self.root_render_func(ctx) # type: ignore
)
except Exception:
return self.environment.handle_exception()
async def render_async(self, *args: t.Any, **kwargs: t.Any) -> t.Any:
if not self.environment.is_async:
raise RuntimeError(
"The environment was not created with async mode enabled."
)
ctx = self.new_context(dict(*args, **kwargs))
try:
return self.environment_class.concat( # type: ignore
[n async for n in self.root_render_func(ctx)] # type: ignore
)
except Exception:
return self.environment.handle_exception()
NativeEnvironment.template_class = NativeTemplate
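# Hedged usage sketch (not part of the original file): rendering through the
# native environment defined above yields Python objects rather than strings.
# The expression and variable names are arbitrary examples.
def _native_environment_demo():
    env = NativeEnvironment()
    result = env.from_string("{{ x + y }}").render(x=4, y=2)
    assert result == 6 and isinstance(result, int)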
|
2,526 |
rgb to hex
|
# -*- coding: utf-8 -*-
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
from __future__ import division # just to be safe...
import numpy as np
###############################################################################
# Utility functions
def _check_color_dim(val):
"""Ensure val is Nx(n_col), usually Nx3"""
val = np.atleast_2d(val)
if val.shape[1] not in (3, 4):
raise RuntimeError('Value must have second dimension of size 3 or 4')
return val, val.shape[1]
###############################################################################
# RGB<->HEX conversion
def _hex_to_rgba(hexs):
"""Convert hex to rgba, permitting alpha values in hex"""
hexs = np.atleast_1d(np.array(hexs, '|U9'))
out = np.ones((len(hexs), 4), np.float32)
for hi, h in enumerate(hexs):
assert isinstance(h, str)
off = 1 if h[0] == '#' else 0
assert len(h) in (6+off, 8+off)
e = (len(h)-off) // 2
out[hi, :e] = [int(h[i:i+2], 16) / 255.
for i in range(off, len(h), 2)]
return out
def METHOD_NAME(rgbs):
"""Convert rgb to hex triplet"""
rgbs, n_dim = _check_color_dim(rgbs)
return np.array(['#%02x%02x%02x' % tuple((255*rgb[:3]).astype(np.uint8))
for rgb in rgbs], '|U7')
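# Brief illustrative sketch (not part of the original module): round-tripping a
# colour through the two helpers above; the hex string is an arbitrary example.
def _hex_roundtrip_demo():
    rgba = _hex_to_rgba('#ff8000')   # -> array([[1., 0.502, 0., 1.]], dtype=float32)
    hexs = METHOD_NAME(rgba)         # -> array(['#ff8000'], dtype='<U7')
    return rgba, hexs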
###############################################################################
# RGB<->HSV conversion
def _rgb_to_hsv(rgbs):
"""Convert Nx3 or Nx4 rgb to hsv"""
rgbs, n_dim = _check_color_dim(rgbs)
hsvs = list()
for rgb in rgbs:
rgb = rgb[:3] # don't use alpha here
idx = np.argmax(rgb)
val = rgb[idx]
c = val - np.min(rgb)
if c == 0:
hue = 0
sat = 0
else:
if idx == 0: # R == max
hue = ((rgb[1] - rgb[2]) / c) % 6
elif idx == 1: # G == max
hue = (rgb[2] - rgb[0]) / c + 2
else: # B == max
hue = (rgb[0] - rgb[1]) / c + 4
hue *= 60
sat = c / val
hsv = [hue, sat, val]
hsvs.append(hsv)
hsvs = np.array(hsvs, dtype=np.float32)
if n_dim == 4:
hsvs = np.concatenate((hsvs, rgbs[:, 3]), axis=1)
return hsvs
def _hsv_to_rgb(hsvs):
"""Convert Nx3 or Nx4 hsv to rgb"""
hsvs, n_dim = _check_color_dim(hsvs)
# In principle, we *might* be able to vectorize this, but might as well
# wait until a compelling use case appears
rgbs = list()
for hsv in hsvs:
c = hsv[1] * hsv[2]
m = hsv[2] - c
hp = hsv[0] / 60
x = c * (1 - abs(hp % 2 - 1))
if 0 <= hp < 1:
r, g, b = c, x, 0
elif hp < 2:
r, g, b = x, c, 0
elif hp < 3:
r, g, b = 0, c, x
elif hp < 4:
r, g, b = 0, x, c
elif hp < 5:
r, g, b = x, 0, c
else:
r, g, b = c, 0, x
rgb = [r + m, g + m, b + m]
rgbs.append(rgb)
rgbs = np.array(rgbs, dtype=np.float32)
if n_dim == 4:
rgbs = np.concatenate((rgbs, hsvs[:, 3]), axis=1)
return rgbs
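# Small sanity sketch (illustrative only, not part of the original module):
# pure red maps to hue 0 with full saturation and value, and converts back.
def _hsv_roundtrip_demo():
    hsv = _rgb_to_hsv([[1.0, 0.0, 0.0]])   # -> [[0., 1., 1.]]
    rgb = _hsv_to_rgb(hsv)                 # -> [[1., 0., 0.]]
    return hsv, rgb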
###############################################################################
# RGB<->CIELab conversion
# These numbers are adapted from MIT-licensed MATLAB code for
# Lab<->RGB conversion. They provide an XYZ<->RGB conversion matrices,
# w/D65 white point normalization built in.
# _rgb2xyz = np.array([[0.412453, 0.357580, 0.180423],
# [0.212671, 0.715160, 0.072169],
# [0.019334, 0.119193, 0.950227]])
# _white_norm = np.array([0.950456, 1.0, 1.088754])
# _rgb2xyz /= _white_norm[:, np.newaxis]
# _rgb2xyz_norm = _rgb2xyz.T
_rgb2xyz_norm = np.array([[0.43395276, 0.212671, 0.01775791],
[0.37621941, 0.71516, 0.10947652],
[0.18982783, 0.072169, 0.87276557]])
# _xyz2rgb = np.array([[3.240479, -1.537150, -0.498535],
# [-0.969256, 1.875992, 0.041556],
# [0.055648, -0.204043, 1.057311]])
# _white_norm = np.array([0.950456, 1., 1.088754])
# _xyz2rgb *= _white_norm[np.newaxis, :]
_xyz2rgb_norm = np.array([[3.07993271, -1.53715, -0.54278198],
[-0.92123518, 1.875992, 0.04524426],
[0.05289098, -0.204043, 1.15115158]])
def _rgb_to_lab(rgbs):
rgbs, n_dim = _check_color_dim(rgbs)
# convert RGB->XYZ
xyz = rgbs[:, :3].copy() # a misnomer for now but will end up being XYZ
over = xyz > 0.04045
xyz[over] = ((xyz[over] + 0.055) / 1.055) ** 2.4
xyz[~over] /= 12.92
xyz = np.dot(xyz, _rgb2xyz_norm)
over = xyz > 0.008856
xyz[over] = xyz[over] ** (1. / 3.)
xyz[~over] = 7.787 * xyz[~over] + 0.13793103448275862
# Convert XYZ->LAB
L = (116. * xyz[:, 1]) - 16
a = 500 * (xyz[:, 0] - xyz[:, 1])
b = 200 * (xyz[:, 1] - xyz[:, 2])
labs = [L, a, b]
# Append alpha if necessary
if n_dim == 4:
        labs.append(np.atleast_1d(rgbs[:, 3]))
labs = np.array(labs, order='F').T # Becomes 'C' order b/c of .T
return labs
def _lab_to_rgb(labs):
"""Convert Nx3 or Nx4 lab to rgb"""
# adapted from BSD-licensed work in MATLAB by Mark Ruzon
# Based on ITU-R Recommendation BT.709 using the D65
labs, n_dim = _check_color_dim(labs)
# Convert Lab->XYZ (silly indexing used to preserve dimensionality)
y = (labs[:, 0] + 16.) / 116.
x = (labs[:, 1] / 500.) + y
z = y - (labs[:, 2] / 200.)
xyz = np.concatenate(([x], [y], [z])) # 3xN
over = xyz > 0.2068966
xyz[over] = xyz[over] ** 3.
xyz[~over] = (xyz[~over] - 0.13793103448275862) / 7.787
# Convert XYZ->LAB
rgbs = np.dot(_xyz2rgb_norm, xyz).T
over = rgbs > 0.0031308
rgbs[over] = 1.055 * (rgbs[over] ** (1. / 2.4)) - 0.055
rgbs[~over] *= 12.92
if n_dim == 4:
rgbs = np.concatenate((rgbs, labs[:, 3]), axis=1)
rgbs = np.clip(rgbs, 0., 1.)
return rgbs
|
2,527 |
whiteout section
|
# This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2013 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
Maintains an overview of the structure of a Document.
"""
import re
from PyQt5.QtCore import QSettings
import app
import plugin
import lydocument
import ly.document
# default outline patterns that are matched only outside comments
default_outline_patterns = [
r"(?P<title>\\(score|book|bookpart))\b",
r"^\\(paper|layout|header)\b",
r"\\(new|context)\s+[A-Z]\w+",
r"^[a-zA-Z]+\s*=",
r"^<<",
r"^\{",
r"^\\relative([ \t]+\w+[',]*)?",
]
# default outline patterns that are matched also in comments
default_outline_patterns_comments = [
r"(?P<title>BEGIN[^\n]*)[ \t]*$",
r"\b(?P<alert>(FIXME|HACK|XXX+)\b\W*\w+)",
]
# cache the outline regexp
_outline_re = None
_outline_re_comments = None
def outline_re(comments):
"""Return the expression to look for document outline items.
If comments is True it is used to search in the whole document,
if it is False comments are excluded."""
v = '_outline_re'+('_comments' if comments else '')
if globals()[v] is None:
globals()[v] = create_outline_re(comments)
return globals()[v]
def _reset_outline_re():
global _outline_re
global _outline_re_comments
_outline_re = None
_outline_re_comments = None
app.settingsChanged.connect(_reset_outline_re, -999)
def create_outline_re(comments):
"""Create and return the expression to look for document outline items.
If comments is True it is used to search in the whole document,
if it is False comments are excluded."""
try:
if comments:
rx = QSettings().value("documentstructure/outline_patterns_comments",
default_outline_patterns_comments, str)
else:
rx = QSettings().value("documentstructure/outline_patterns",
default_outline_patterns, str)
except TypeError:
rx = []
# suffix duplicate named groups with a number
groups = {}
new_rx = []
for e in rx:
try:
c = re.compile(e)
except re.error:
continue
if c.groupindex:
for name in c.groupindex:
if name in groups:
groups[name] += 1
new_name = name + format(groups[name])
e = e.replace(f"(?P<{name}>", f"(?P<{new_name}>")
else:
groups[name] = 0
new_rx.append(e)
rx = '|'.join(new_rx)
return re.compile(rx, re.MULTILINE | re.UNICODE)
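# Illustrative sketch (not part of the original module) of the duplicate-group
# handling above: when two user patterns both use the named group "title", the
# second is suffixed so the joined expression still compiles. The pattern
# strings below are made-up examples.
def _duplicate_group_demo():
    combined = r"(?P<title>\\score)|(?P<title1>\\book)"
    assert re.compile(combined).groupindex == {'title': 1, 'title1': 2}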
class DocumentStructure(plugin.DocumentPlugin):
def __init__(self, document):
self._outline = None
def invalidate(self):
"""Called when the document changes or the settings are changed."""
self._outline = None
app.settingsChanged.disconnect(self.invalidate)
self.document().contentsChanged.disconnect(self.invalidate)
def outline(self):
"""Return the document outline as a series of match objects."""
if self._outline is None:
# match patterns excluding comments
active_code = self.remove_comments()
outline_list = list(outline_re(False).finditer(active_code))
# match patterns including comments
outline_list_comments = list(outline_re(True).finditer(self.document().toPlainText()))
# merge lists and sort by start position
self._outline = outline_list + outline_list_comments
self._outline.sort(key=lambda match: match.start())
self.document().contentsChanged.connect(self.invalidate)
app.settingsChanged.connect(self.invalidate, -999)
return self._outline
def remove_comments(self):
"""Remove Lilypond comments from text"""
def METHOD_NAME(cursor, start, end):
spaces = ''.join(' ' for x in range(start, end))
with cursor.document as doc:
doc[start:end] = spaces
doc = ly.document.Document(self.document().toPlainText())
cursor = lydocument.Cursor(doc)
source = ly.document.Source(cursor, True, tokens_with_position=True)
start = 0
for token in source:
if isinstance(token, ly.lex.BlockCommentStart):
start = token.pos
elif isinstance(token, ly.lex.BlockCommentEnd):
if start:
METHOD_NAME(cursor, start, token.end)
start = 0
elif isinstance(token, ly.lex.Comment):
METHOD_NAME(cursor, token.pos, token.end)
return cursor.document.plaintext()
|
2,528 |
load partition data mnist
|
import json
import os
import numpy as np
import wget
from ...ml.engine import ml_engine_adapter
cwd = os.getcwd()
import zipfile
from ...constants import FEDML_DATA_MNIST_URL
import logging
def download_mnist(data_cache_dir):
if not os.path.exists(data_cache_dir):
os.makedirs(data_cache_dir, exist_ok=True)
file_path = os.path.join(data_cache_dir, "MNIST.zip")
logging.info(file_path)
# Download the file (if we haven't already)
if not os.path.exists(file_path):
wget.download(FEDML_DATA_MNIST_URL, out=file_path)
file_extracted_path = os.path.join(data_cache_dir, "MNIST")
if not os.path.exists(file_extracted_path):
with zipfile.ZipFile(file_path, "r") as zip_ref:
zip_ref.extractall(data_cache_dir)
def read_data(train_data_dir, test_data_dir):
"""parses data in given train and test data directories
assumes:
- the data in the input directories are .json files with
keys 'users' and 'user_data'
- the set of train set users is the same as the set of test set users
Return:
clients: list of non-unique client ids
groups: list of group ids; empty list if none found
train_data: dictionary of train data
test_data: dictionary of test data
"""
clients = []
groups = []
train_data = {}
test_data = {}
train_files = os.listdir(train_data_dir)
train_files = [f for f in train_files if f.endswith(".json")]
for f in train_files:
file_path = os.path.join(train_data_dir, f)
with open(file_path, "r") as inf:
cdata = json.load(inf)
clients.extend(cdata["users"])
if "hierarchies" in cdata:
groups.extend(cdata["hierarchies"])
train_data.update(cdata["user_data"])
test_files = os.listdir(test_data_dir)
test_files = [f for f in test_files if f.endswith(".json")]
for f in test_files:
file_path = os.path.join(test_data_dir, f)
with open(file_path, "r") as inf:
cdata = json.load(inf)
test_data.update(cdata["user_data"])
clients = sorted(cdata["users"])
return clients, groups, train_data, test_data
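# Illustrative note (not part of the original file): the minimal JSON shape that
# read_data() expects in each train/test file; the user ids and values below are
# made-up placeholders.
#
# {
#     "users": ["f_00000", "f_00001"],
#     "hierarchies": [],
#     "user_data": {
#         "f_00000": {"x": [[0.0, 0.1]], "y": [7]},
#         "f_00001": {"x": [[0.2, 0.3]], "y": [3]}
#     }
# }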
def batch_data(args, data, batch_size):
"""
data is a dict := {'x': [numpy array], 'y': [numpy array]} (on one client)
    returns a list of (batched_x, batched_y) tuples, each holding at most batch_size samples
"""
data_x = data["x"]
data_y = data["y"]
# randomly shuffle data
np.random.seed(100)
rng_state = np.random.get_state()
np.random.shuffle(data_x)
np.random.set_state(rng_state)
np.random.shuffle(data_y)
# loop through mini-batches
batch_data = list()
for i in range(0, len(data_x), batch_size):
batched_x = data_x[i : i + batch_size]
batched_y = data_y[i : i + batch_size]
batched_x, batched_y = ml_engine_adapter.convert_numpy_to_ml_engine_data_format(args, batched_x, batched_y)
batch_data.append((batched_x, batched_y))
return batch_data
def load_partition_data_mnist_by_device_id(batch_size, device_id, train_path="MNIST_mobile", test_path="MNIST_mobile"):
train_path += os.path.join("/", device_id, "train")
test_path += os.path.join("/", device_id, "test")
return METHOD_NAME(batch_size, train_path, test_path)
def METHOD_NAME(
args, batch_size, train_path=os.path.join(os.getcwd(), "MNIST", "train"),
test_path=os.path.join(os.getcwd(), "MNIST", "test")
):
users, groups, train_data, test_data = read_data(train_path, test_path)
if len(groups) == 0:
groups = [None for _ in users]
train_data_num = 0
test_data_num = 0
train_data_local_dict = dict()
test_data_local_dict = dict()
train_data_local_num_dict = dict()
train_data_global = list()
test_data_global = list()
client_idx = 0
logging.info("loading data...")
for u, g in zip(users, groups):
user_train_data_num = len(train_data[u]["x"])
user_test_data_num = len(test_data[u]["x"])
train_data_num += user_train_data_num
test_data_num += user_test_data_num
train_data_local_num_dict[client_idx] = user_train_data_num
# transform to batches
train_batch = batch_data(args, train_data[u], batch_size)
test_batch = batch_data(args, test_data[u], batch_size)
# index using client index
train_data_local_dict[client_idx] = train_batch
test_data_local_dict[client_idx] = test_batch
train_data_global += train_batch
test_data_global += test_batch
client_idx += 1
logging.info("finished the loading data")
client_num = client_idx
class_num = 10
return (
client_num,
train_data_num,
test_data_num,
train_data_global,
test_data_global,
train_data_local_num_dict,
train_data_local_dict,
test_data_local_dict,
class_num,
)
|
2,529 |
name op
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the arg_scope used for scoping layers arguments.
Allows one to define models much more compactly by eliminating boilerplate
code. This is accomplished through the use of argument scoping (arg_scope).
Example of how to use tf.contrib.framework.arg_scope:
```
from third_party.tensorflow.contrib.layers.python import layers
arg_scope = tf.contrib.framework.arg_scope
with arg_scope([layers.conv2d], padding='SAME',
initializer=layers.variance_scaling_initializer(),
regularizer=layers.l2_regularizer(0.05)):
net = layers.conv2d(inputs, 64, [11, 11], 4, padding='VALID', scope='conv1')
net = layers.conv2d(net, 256, [5, 5], scope='conv2')
```
The first call to conv2d will behave as follows:
layers.conv2d(inputs, 64, [11, 11], 4, padding='VALID',
initializer=layers.variance_scaling_initializer(),
regularizer=layers.l2_regularizer(0.05), scope='conv1')
The second call to conv2d will also use the arg_scope's default for padding:
layers.conv2d(inputs, 256, [5, 5], padding='SAME',
initializer=layers.variance_scaling_initializer(),
regularizer=layers.l2_regularizer(0.05), scope='conv2')
Example of how to reuse an arg_scope:
```
with arg_scope([layers.conv2d], padding='SAME',
initializer=layers.variance_scaling_initializer(),
regularizer=layers.l2_regularizer(0.05)) as sc:
net = layers.conv2d(net, 256, [5, 5], scope='conv1')
....
with arg_scope(sc):
net = layers.conv2d(net, 256, [5, 5], scope='conv2')
```
Example of how to use tf.contrib.framework.add_arg_scope to enable your
function to be called within an arg_scope later:
@tf.contrib.framework.add_arg_scope
def conv2d(*args, **kwargs)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_decorator
__all__ = [
'arg_scope', 'add_arg_scope', 'current_arg_scope', 'has_arg_scope',
'arg_scoped_arguments', 'arg_scope_func_key'
]
_ARGSTACK = [{}]
_DECORATED_OPS = {}
def _get_arg_stack():
if _ARGSTACK:
return _ARGSTACK
else:
_ARGSTACK.append({})
return _ARGSTACK
def current_arg_scope():
stack = _get_arg_stack()
return stack[-1]
def arg_scope_func_key(op):
return getattr(op, '_key_op', str(op))
def METHOD_NAME(op):
return (op.__module__, op.__name__)
def _kwarg_names(func):
kwargs_length = len(func.__defaults__) if func.__defaults__ else 0
return func.__code__.co_varnames[-kwargs_length:func.__code__.co_argcount]
def _add_op(op):
key_op = arg_scope_func_key(op)
_DECORATED_OPS[key_op] = _kwarg_names(op)
@tf_contextlib.contextmanager
def arg_scope(list_ops_or_scope, **kwargs):
"""Stores the default arguments for the given set of list_ops.
For usage, please see examples at top of the file.
Args:
list_ops_or_scope: List or tuple of operations to set argument scope for or
a dictionary containing the current scope. When list_ops_or_scope is a
dict, kwargs must be empty. When list_ops_or_scope is a list or tuple,
then every op in it need to be decorated with @add_arg_scope to work.
**kwargs: keyword=value that will define the defaults for each op in
list_ops. All the ops need to accept the given set of arguments.
Yields:
the current_scope, which is a dictionary of {op: {arg: value}}
Raises:
TypeError: if list_ops is not a list or a tuple.
ValueError: if any op in list_ops has not be decorated with @add_arg_scope.
"""
if isinstance(list_ops_or_scope, dict):
# Assumes that list_ops_or_scope is a scope that is being reused.
if kwargs:
      raise ValueError('When attempting to re-use a scope by supplying a '
                       'dictionary, kwargs must be empty.')
current_scope = list_ops_or_scope.copy()
try:
_get_arg_stack().append(current_scope)
yield current_scope
finally:
_get_arg_stack().pop()
else:
# Assumes that list_ops_or_scope is a list/tuple of ops with kwargs.
if not isinstance(list_ops_or_scope, (list, tuple)):
raise TypeError('list_ops_or_scope must either be a list/tuple or reused '
'scope (i.e. dict)')
try:
current_scope = current_arg_scope().copy()
for op in list_ops_or_scope:
key = arg_scope_func_key(op)
if not has_arg_scope(op):
raise ValueError('%s is not decorated with @add_arg_scope',
METHOD_NAME(op))
if key in current_scope:
current_kwargs = current_scope[key].copy()
current_kwargs.update(kwargs)
current_scope[key] = current_kwargs
else:
current_scope[key] = kwargs.copy()
_get_arg_stack().append(current_scope)
yield current_scope
finally:
_get_arg_stack().pop()
def add_arg_scope(func):
"""Decorates a function with args so it can be used within an arg_scope.
Args:
func: function to decorate.
Returns:
    The decorated function func_with_args().
"""
def func_with_args(*args, **kwargs):
current_scope = current_arg_scope()
current_args = kwargs
key_func = arg_scope_func_key(func)
if key_func in current_scope:
current_args = current_scope[key_func].copy()
current_args.update(kwargs)
return func(*args, **current_args)
_add_op(func)
setattr(func_with_args, '_key_op', arg_scope_func_key(func))
return tf_decorator.make_decorator(func, func_with_args)
def has_arg_scope(func):
"""Checks whether a func has been decorated with @add_arg_scope or not.
Args:
func: function to check.
Returns:
a boolean.
"""
return arg_scope_func_key(func) in _DECORATED_OPS
def arg_scoped_arguments(func):
"""Returns the list kwargs that arg_scope can set for a func.
Args:
func: function which has been decorated with @add_arg_scope.
Returns:
a list of kwargs names.
"""
assert has_arg_scope(func)
return _DECORATED_OPS[arg_scope_func_key(func)]
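# Hedged usage sketch (not part of the original file): scoping a keyword
# argument of a toy function; `_toy_op` is a made-up example, not a real layer.
@add_arg_scope
def _toy_op(x, scale=1):
  return x * scale
def _arg_scope_demo():
  with arg_scope([_toy_op], scale=3):
    return _toy_op(2)  # -> 6, `scale` is picked up from the enclosing scope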
|
2,530 |
unwrap
|
from typing import Union
import torch
import torch.nn as nn
from torch import Tensor
from torch.optim import Optimizer
class OptimizerWrapper:
"""
A standard interface for optimizers wrapped by the Booster.
Args:
optim (Optimizer): The optimizer to be wrapped.
"""
def __init__(self, optim: Optimizer):
self.optim = optim
@property
def parameters(self):
params = []
for group in self.param_groups:
params += group['params']
return params
@property
def param_groups(self):
return self.optim.param_groups
@property
def defaults(self):
return self.optim.defaults
def add_param_group(self, *args, **kwargs):
return self.optim.add_param_group(*args, **kwargs)
def step(self, *args, **kwargs):
"""
Performs a single optimization step.
"""
return self.optim.step(*args, **kwargs)
def zero_grad(self, *args, **kwargs):
"""
Clears the gradients of all optimized `torch.Tensor`.
"""
self.optim.zero_grad(*args, **kwargs)
def backward(self, loss: Tensor, *args, **kwargs):
"""
Performs a backward pass on the loss.
"""
loss.backward(*args, **kwargs)
def backward_by_grad(self, tensor: Tensor, grad: Tensor):
torch.autograd.backward(tensor, grad)
def state_dict(self):
"""
Returns the optimizer state.
"""
return self.optim.state_dict()
def load_state_dict(self, *args, **kwargs):
"""
Loads the optimizer state.
"""
self.optim.load_state_dict(*args, **kwargs)
def clip_grad_by_value(self, clip_value: float, *args, **kwargs) -> None:
"""
Clips gradient of an iterable of parameters at specified min and max values.
Args:
            clip_value (float or int): maximum allowed value of the gradients. Gradients are clipped in the range [-clip_value, clip_value].
Note:
In PyTorch Torch 2.0 and above, you can pass in foreach=True as kwargs to clip_grad_value_ to use the
faster implementation. Please refer to the PyTorch documentation for more details.
"""
nn.utils.clip_grad_value_(self.parameters, clip_value, *args, **kwargs)
def clip_grad_by_norm(self,
max_norm: Union[float, int],
norm_type: Union[float, int] = 2.0,
error_if_nonfinite: bool = False,
*args,
**kwargs) -> Tensor:
"""
Clips gradient norm of an iterable of parameters.
Args:
max_norm (float or int): max norm of the gradients
norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for infinity norm.
error_if_nonfinite (bool): if True, an error is raised if the total norm is non-finite. Default: False
Note:
In PyTorch Torch 2.0 and above, you can pass in foreach=True as kwargs to clip_grad_norm_ to use the
faster implementation. Please refer to the PyTorch documentation for more details.
"""
norm = nn.utils.clip_grad_norm_(self.parameters, max_norm, norm_type, error_if_nonfinite, *args, **kwargs)
return norm
def scale_loss(self, loss: Tensor):
"""
Scales the loss for mixed precision training.
Note: Only available for optimizers with mixed precision training.
Args:
loss (Tensor): The loss to be scaled.
"""
raise NotImplementedError(
"The method scale_loss is only available for optimizers with mixed precision training")
def unscale_grad(self):
"""
Unscale the gradients for mixed precision training.
Note: Only available for optimizers with mixed precision training.
"""
raise NotImplementedError(
"The method unscale_grad is only available for optimizers with mixed precision training")
def METHOD_NAME(self):
"""
Unwrap the optimizer for checkpoint saving/loading.
"""
return self.optim
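# Hedged usage sketch (not part of the original file): wrapping a plain PyTorch
# optimizer; the model and data below are made-up placeholders.
def _optimizer_wrapper_demo():
    model = nn.Linear(4, 1)
    optim = OptimizerWrapper(torch.optim.SGD(model.parameters(), lr=0.1))
    loss = model(torch.randn(8, 4)).sum()
    optim.zero_grad()
    optim.backward(loss)
    optim.clip_grad_by_norm(max_norm=1.0)
    optim.step()
    return optim.state_dict()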
|
2,531 |
run validations
|
# This file is autogenerated.
# To change this file you should edit assets/configuration/spec.yaml and then run the following commands:
# ddev -x validate config -s <INTEGRATION_NAME>
# ddev -x validate models -s <INTEGRATION_NAME>
from __future__ import annotations
from typing import Any, Mapping, Optional, Sequence, Union
from pydantic import BaseModel, Extra, Field, root_validator, validator
from datadog_checks.base.utils.functions import identity
from datadog_checks.base.utils.models import validation
from . import defaults, validators
class AuthToken(BaseModel):
class Config:
allow_mutation = False
reader: Optional[Mapping[str, Any]]
writer: Optional[Mapping[str, Any]]
class ExtraMetric(BaseModel):
class Config:
extra = Extra.allow
allow_mutation = False
name: Optional[str]
type: Optional[str]
class MetricPatterns(BaseModel):
class Config:
allow_mutation = False
exclude: Optional[Sequence[str]]
include: Optional[Sequence[str]]
class Metric(BaseModel):
class Config:
extra = Extra.allow
allow_mutation = False
name: Optional[str]
type: Optional[str]
class Proxy(BaseModel):
class Config:
allow_mutation = False
http: Optional[str]
https: Optional[str]
no_proxy: Optional[Sequence[str]]
class ShareLabel(BaseModel):
class Config:
allow_mutation = False
labels: Optional[Sequence[str]]
match: Optional[Sequence[str]]
class InstanceConfig(BaseModel):
class Config:
allow_mutation = False
allow_redirects: Optional[bool]
auth_token: Optional[AuthToken]
auth_type: Optional[str]
aws_host: Optional[str]
aws_region: Optional[str]
aws_service: Optional[str]
cache_metric_wildcards: Optional[bool]
cache_shared_labels: Optional[bool]
collect_counters_with_distributions: Optional[bool]
collect_histogram_buckets: Optional[bool]
connect_timeout: Optional[float]
disable_generic_tags: Optional[bool]
empty_default_hostname: Optional[bool]
enable_health_service_check: Optional[bool]
exclude_labels: Optional[Sequence[str]]
exclude_metrics: Optional[Sequence[str]]
exclude_metrics_by_labels: Optional[Mapping[str, Union[bool, Sequence[str]]]]
extra_headers: Optional[Mapping[str, Any]]
extra_metrics: Optional[Sequence[Union[str, Mapping[str, Union[str, ExtraMetric]]]]]
headers: Optional[Mapping[str, Any]]
histogram_buckets_as_distributions: Optional[bool]
hostname_format: Optional[str]
hostname_label: Optional[str]
ignore_tags: Optional[Sequence[str]]
include_labels: Optional[Sequence[str]]
kerberos_auth: Optional[str]
kerberos_cache: Optional[str]
kerberos_delegate: Optional[bool]
kerberos_force_initiate: Optional[bool]
kerberos_hostname: Optional[str]
kerberos_keytab: Optional[str]
kerberos_principal: Optional[str]
log_requests: Optional[bool]
metric_patterns: Optional[MetricPatterns]
metrics: Optional[Sequence[Union[str, Mapping[str, Union[str, Metric]]]]]
min_collection_interval: Optional[float]
namespace: Optional[str] = Field(None, regex='\\w*')
non_cumulative_histogram_buckets: Optional[bool]
ntlm_domain: Optional[str]
openmetrics_endpoint: str
password: Optional[str]
persist_connections: Optional[bool]
proxy: Optional[Proxy]
raw_line_filters: Optional[Sequence[str]]
raw_metric_prefix: Optional[str]
read_timeout: Optional[float]
rename_labels: Optional[Mapping[str, Any]]
request_size: Optional[float]
service: Optional[str]
share_labels: Optional[Mapping[str, Union[bool, ShareLabel]]]
skip_proxy: Optional[bool]
tag_by_endpoint: Optional[bool]
tags: Optional[Sequence[str]]
telemetry: Optional[bool]
timeout: Optional[float]
tls_ca_cert: Optional[str]
tls_cert: Optional[str]
tls_ignore_warning: Optional[bool]
tls_private_key: Optional[str]
tls_protocols_allowed: Optional[Sequence[str]]
tls_use_host_header: Optional[bool]
tls_verify: Optional[bool]
use_latest_spec: Optional[bool]
use_legacy_auth_encoding: Optional[bool]
use_process_start_time: Optional[bool]
username: Optional[str]
@root_validator(pre=True)
def _initial_validation(cls, values):
return validation.core.initialize_config(getattr(validators, 'initialize_instance', identity)(values))
@validator('*', pre=True, always=True)
def _ensure_defaults(cls, v, field):
if v is not None or field.required:
return v
return getattr(defaults, f'instance_{field.name}')(field, v)
@validator('*')
def METHOD_NAME(cls, v, field):
if not v:
return v
return getattr(validators, f'instance_{field.name}', identity)(v, field=field)
@root_validator(pre=False)
def _final_validation(cls, values):
return validation.core.finalize_config(getattr(validators, 'finalize_instance', identity)(values))
|
2,532 |
remove embedded layers
|
import logging
import os
from pathlib import Path
from core.models import ProjectMapUrlAlias
from core.signals import (execute_search_on_models, load_layer_actions,
pre_delete_project, pre_update_project)
from django.conf import settings
from django.contrib.auth.signals import user_logged_out
from django.core.cache import caches
from django.db.models.signals import (post_delete, post_save, pre_delete,
pre_save)
from django.dispatch import receiver
from django.template import loader
from qgis.core import QgsProject
from .models import ColumnAcl, Layer, Project, SessionTokenFilter
from .searches import ProjectSearch
from .signals import post_save_qdjango_project_file
from .views import QdjangoProjectListView, QdjangoProjectUpdateView
logger = logging.getLogger('django.request')
@receiver(post_delete, sender=Project)
def delete_project_file(sender, **kwargs):
"""
    Delete the project file from the 'projects' media folder
"""
instance = kwargs['instance']
try:
os.remove(instance.qgis_file.path)
except Exception as e:
logger.error(e)
if 'qdjango' in settings.CACHES:
caches['qdjango'].delete(
settings.QDJANGO_PRJ_CACHE_KEY.format(instance.pk))
# delete ProjectMapUrlAlias related instance
ProjectMapUrlAlias.objects.filter(
app_name='qdjango', project_id=instance.pk).delete()
@receiver(post_save, sender=Project)
def delete_cache_project_settings(sender, **kwargs):
"""
    Delete the cached keys for the getprojectsettings response.
"""
if 'qdjango' in settings.CACHES:
caches['qdjango'].delete(
settings.QDJANGO_PRJ_CACHE_KEY.format(kwargs['instance'].pk))
instance = kwargs['instance']
@receiver(post_delete, sender=Layer)
def METHOD_NAME(sender, **kwargs):
"""
Checks for layers embedded from the deleted layer,
    deletes them accordingly and removes the whole project if empty
"""
layer = kwargs['instance']
# If it is embedded make sure it is removed from the project
# because it may be a cascade
if layer.parent_project is not None:
project = QgsProject()
assert project.read(layer.project.qgis_file.file.name)
project.removeMapLayers([layer.qgs_layer_id])
assert project.write()
@receiver(post_save, sender=Project)
def remove_parent_project_from_cache(sender, **kwargs):
"""Triggers a cache invalidation in parent projects from embedded layers"""
# only for update
if kwargs['created']:
return
project = kwargs['instance']
updated_parents = []
for l in Layer.objects.filter(parent_project=project):
path = l.project.qgis_file.file.name
if path in updated_parents:
continue
updated_parents.append(path)
p = Path(path)
p.touch()
logging.getLogger('g3wadmin.debug').debug(
'QGIS Server parent project touched to invalidate cache: %s' % path)
@receiver(post_save, sender=Layer)
def update_widget(sender, **kwargs):
"""
    Update widget data when the layer datasource changes
"""
# only for update
if kwargs['created']:
return
layer = kwargs['instance']
# search for widget
widgets = layer.widget_set.all()
for widget in widgets:
if widget.datasource != layer.datasource:
widget.datasource = layer.datasource
widget.save()
@receiver(user_logged_out)
def delete_session_token_filter(sender, **kwargs):
"""
Delete session token filter on user logout
"""
SessionTokenFilter.objects.filter(
sessionid=kwargs['request'].session.session_key).delete()
@receiver(execute_search_on_models)
def execute_search(sender, request, search_text, **kwargs):
"""
Execute searches on Group and MacroGroup models
:param request: django request instance
    :param search_text: str search string
:return: list object search result
"""
return [
ProjectSearch(search_text, request.user)
]
@receiver(load_layer_actions)
def filter_by_user_layer_action(sender, **kwargs):
"""
    Return html actions for editing the project layer.
"""
template = loader.get_template('qdjango/layer_actions/filter_by_user.html')
return template.render(kwargs)
@receiver(pre_delete_project)
def check_embedded_layer_on_delete(sender, **kwargs):
"""
Check project for embedded layers from other projects.
"""
if isinstance(sender, QdjangoProjectListView):
# get config data
projects = kwargs['projects']
messages = []
for project in projects:
for embedded_layer in Layer.objects.filter(parent_project=project):
msg = loader.get_template(
'qdjango/check_embedded_layer_on_delete.html')
messages.append(
{'project': project, 'message': msg.render({'project': project, 'embedded_layer': embedded_layer})})
if len(messages):
return messages
@receiver(pre_update_project)
def check_embedded_layer_on_update(sender, **kwargs):
"""
Check project for embedded layers from other projects.
"""
if isinstance(sender, QdjangoProjectUpdateView):
# get config data
project = kwargs['project']
embedded_layers = Layer.objects.filter(parent_project=project)
if embedded_layers.count() > 0:
msg = loader.get_template(
'qdjango/check_embedded_layer_on_update.html')
return msg.render({'embedded_layers': embedded_layers})
@receiver(pre_save, sender=ColumnAcl)
def set_layer_acl_flag_save(sender, **kwargs):
"""Updates has_column_acl flag in the layers"""
if not kwargs.get('raw', True):
column_acl = kwargs['instance']
try:
old_acl = ColumnAcl.objects.get(pk=column_acl.pk)
if old_acl.layer != column_acl.layer and ColumnAcl.objects.filter(layer=old_acl.layer).count() == 1:
old_acl.layer.has_column_acl = False
old_acl.layer.save()
except ColumnAcl.DoesNotExist:
pass
column_acl = kwargs['instance']
column_acl.layer.has_column_acl = True
column_acl.layer.save()
@receiver(post_delete, sender=ColumnAcl)
def set_layer_acl_flag_delete(sender, **kwargs):
"""Updates has_column_acl flag in the layers"""
column_acl = kwargs['instance']
try:
layer = column_acl.layer
layer.has_column_acl = ColumnAcl.objects.filter(
layer=layer).count() > 0
layer.save()
except ColumnAcl.DoesNotExist:
pass
|
2,533 |
test altconstraints
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the DataLad package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
'''Unit tests for basic constraints functionality.'''
from datalad.tests.utils_pytest import (
assert_equal,
assert_raises,
)
from ..support import constraints as ct
def test_int():
c = ct.EnsureInt()
# this should always work
assert_equal(c(7), 7)
assert_equal(c(7.0), 7)
assert_equal(c('7'), 7)
assert_equal(c([7, 3]), [7, 3])
# this should always fail
assert_raises(ValueError, lambda: c('fail'))
assert_raises(ValueError, lambda: c([3, 'fail']))
# this will also fail
assert_raises(ValueError, lambda: c('17.0'))
assert_equal(c.short_description(), 'int')
def test_float():
c = ct.EnsureFloat()
# this should always work
assert_equal(c(7.0), 7.0)
assert_equal(c(7), 7.0)
assert_equal(c('7'), 7.0)
assert_equal(c([7.0, '3.0']), [7.0, 3.0])
# this should always fail
assert_raises(ValueError, lambda: c('fail'))
assert_raises(ValueError, lambda: c([3.0, 'fail']))
def test_bool():
c = ct.EnsureBool()
# this should always work
assert_equal(c(True), True)
assert_equal(c(False), False)
    # all that results in True
assert_equal(c('True'), True)
assert_equal(c('true'), True)
assert_equal(c('1'), True)
assert_equal(c('yes'), True)
assert_equal(c('on'), True)
assert_equal(c('enable'), True)
    # all that results in False
assert_equal(c('false'), False)
assert_equal(c('False'), False)
assert_equal(c('0'), False)
assert_equal(c('no'), False)
assert_equal(c('off'), False)
assert_equal(c('disable'), False)
# this should always fail
assert_raises(ValueError, c, 0)
assert_raises(ValueError, c, 1)
def test_str():
c = ct.EnsureStr()
# this should always work
assert_equal(c('hello'), 'hello')
assert_equal(c('7.0'), '7.0')
# this should always fail
assert_raises(ValueError, lambda: c(['ab']))
assert_raises(ValueError, lambda: c(['a', 'b']))
assert_raises(ValueError, lambda: c(('a', 'b')))
# no automatic conversion attempted
assert_raises(ValueError, lambda: c(7.0))
assert_equal(c.short_description(), 'str')
def test_str_min_len():
c = ct.EnsureStr(min_len=1)
assert_equal(c('hello'), 'hello')
assert_equal(c('h'), 'h')
assert_raises(ValueError, c, '')
c = ct.EnsureStr(min_len=2)
assert_equal(c('hello'), 'hello')
assert_raises(ValueError, c, 'h')
def test_none():
c = ct.EnsureNone()
# this should always work
assert_equal(c(None), None)
# instance of NoneDeprecated is also None
assert_equal(c(ct.NoneDeprecated), None)
# this should always fail
assert_raises(ValueError, lambda: c('None'))
assert_raises(ValueError, lambda: c([]))
def test_callable():
c = ct.EnsureCallable()
# this should always work
assert_equal(c(range), range)
assert_raises(ValueError, c, 'range')
def test_choice():
c = ct.EnsureChoice('choice1', 'choice2', None)
# this should always work
assert_equal(c('choice1'), 'choice1')
assert_equal(c(None), None)
# this should always fail
assert_raises(ValueError, lambda: c('fail'))
assert_raises(ValueError, lambda: c('None'))
def test_keychoice():
c = ct.EnsureKeyChoice(key='some', values=('choice1', 'choice2', None))
assert_equal(c({'some': 'choice1'}), {'some': 'choice1'})
assert_equal(c({'some': None}), {'some': None})
assert_equal(c({'some': None, 'ign': 'ore'}), {'some': None, 'ign': 'ore'})
assert_raises(ValueError, c, 'fail')
assert_raises(ValueError, c, 'None')
assert_raises(ValueError, c, {'nope': 'None'})
assert_raises(ValueError, c, {'some': 'None'})
assert_raises(ValueError, c, {'some': ('a', 'b')})
def test_range():
c = ct.EnsureRange(min=3, max=7)
# this should always work
assert_equal(c(3.0), 3.0)
# this should always fail
assert_raises(ValueError, lambda: c(2.9999999))
assert_raises(ValueError, lambda: c(77))
assert_raises(TypeError, lambda: c('fail'))
assert_raises(TypeError, lambda: c((3, 4)))
# since no type checks are performed
assert_raises(TypeError, lambda: c('7'))
# Range doesn't have to be numeric
c = ct.EnsureRange(min="e", max="qqq")
assert_equal(c('e'), 'e')
assert_equal(c('fa'), 'fa')
assert_equal(c('qq'), 'qq')
assert_raises(ValueError, c, 'a')
assert_raises(ValueError, c, 'qqqa')
def test_listof():
c = ct.EnsureListOf(str)
assert_equal(c(['a', 'b']), ['a', 'b'])
assert_equal(c(['a1', 'b2']), ['a1', 'b2'])
assert_equal(c('a1 b2'), ['a1 b2'])
def test_tupleof():
c = ct.EnsureTupleOf(str)
assert_equal(c(('a', 'b')), ('a', 'b'))
assert_equal(c(('a1', 'b2')), ('a1', 'b2'))
assert_equal(c('a1 b2'), ('a1 b2',))
def test_constraints():
# this should always work
c = ct.Constraints(ct.EnsureFloat())
assert_equal(c(7.0), 7.0)
c = ct.Constraints(ct.EnsureFloat(), ct.EnsureRange(min=4.0))
assert_equal(c(7.0), 7.0)
# __and__ form
c = ct.EnsureFloat() & ct.EnsureRange(min=4.0)
assert_equal(c(7.0), 7.0)
assert_raises(ValueError, c, 3.9)
c = ct.Constraints(ct.EnsureFloat(), ct.EnsureRange(min=4), ct.EnsureRange(max=9))
assert_equal(c(7.0), 7.0)
assert_raises(ValueError, c, 3.9)
assert_raises(ValueError, c, 9.01)
# __and__ form
c = ct.EnsureFloat() & ct.EnsureRange(min=4) & ct.EnsureRange(max=9)
assert_equal(c(7.0), 7.0)
assert_raises(ValueError, c, 3.99)
assert_raises(ValueError, c, 9.01)
# and reordering should not have any effect
c = ct.Constraints(ct.EnsureRange(max=4), ct.EnsureRange(min=9), ct.EnsureFloat())
assert_raises(ValueError, c, 3.99)
assert_raises(ValueError, c, 9.01)
def METHOD_NAME():
# this should always work
c = ct.AltConstraints(ct.EnsureFloat())
assert_equal(c(7.0), 7.0)
c = ct.AltConstraints(ct.EnsureFloat(), ct.EnsureNone())
assert_equal(c.short_description(), '(float or None)')
assert_equal(c(7.0), 7.0)
assert_equal(c(None), None)
# __or__ form
c = ct.EnsureFloat() | ct.EnsureNone()
assert_equal(c(7.0), 7.0)
assert_equal(c(None), None)
# this should always fail
c = ct.Constraints(ct.EnsureRange(min=0, max=4), ct.EnsureRange(min=9, max=11))
assert_raises(ValueError, c, 7.0)
c = ct.EnsureRange(min=0, max=4) | ct.EnsureRange(min=9, max=11)
assert_equal(c(3.0), 3.0)
assert_equal(c(9.0), 9.0)
assert_raises(ValueError, c, 7.0)
assert_raises(ValueError, c, -1.0)
def test_both():
# this should always work
c = ct.AltConstraints(
ct.Constraints(
ct.EnsureFloat(),
ct.EnsureRange(min=7.0, max=44.0)),
ct.EnsureNone())
assert_equal(c(7.0), 7.0)
assert_equal(c(None), None)
# this should always fail
assert_raises(ValueError, lambda: c(77.0))
def test_type_str():
assert_equal(ct._type_str((str,)), 'str')
assert_equal(ct._type_str(str), 'str')
|
2,534 |
delete
|
'''
local.py - this file is part of S3QL.
Copyright © 2008 Nikolaus Rath <[email protected]>
This work can be distributed under the terms of the GNU GPLv3.
'''
import _thread
import logging
import os
import struct
from contextlib import ExitStack
from typing import Any, BinaryIO, Dict, Optional
from ..common import ThawError, copyfh, freeze_basic_mapping, thaw_basic_mapping
from .common import AbstractBackend, CorruptedObjectError, DanglingStorageURLError, NoSuchObject
log = logging.getLogger(__name__)
class Backend(AbstractBackend):
'''
A backend that stores data on the local hard disk
'''
needs_login = False
known_options = set()
def __init__(self, options):
'''Initialize local backend'''
# Unused argument
# pylint: disable=W0613
super().__init__()
self.prefix = options.storage_url[len('local://') :].rstrip('/')
if not os.path.exists(self.prefix):
raise DanglingStorageURLError(self.prefix)
@property
def has_delete_multi(self):
return True
def __str__(self):
return 'local directory %s' % self.prefix
def is_temp_failure(self, exc): # IGNORE:W0613
return False
def lookup(self, key):
path = self._key_to_path(key)
try:
with open(path, 'rb') as src:
return _read_meta(src)
except FileNotFoundError:
raise NoSuchObject(key)
def get_size(self, key):
return os.path.getsize(self._key_to_path(key))
def readinto_fh(self, key: str, ofh: BinaryIO):
'''Transfer data stored under *key* into *fh*, return metadata.
The data will be inserted at the current offset.
'''
path = self._key_to_path(key)
with ExitStack() as es:
try:
ifh = es.enter_context(open(path, 'rb', buffering=0))
except FileNotFoundError:
raise NoSuchObject(key)
try:
metadata = _read_meta(ifh)
except ThawError:
raise CorruptedObjectError('Invalid metadata')
copyfh(ifh, ofh)
return metadata
def write_fh(
self,
key: str,
fh: BinaryIO,
metadata: Optional[Dict[str, Any]] = None,
len_: Optional[int] = None,
):
'''Upload *len_* bytes from *fh* under *key*.
The data will be read at the current offset. If *len_* is None, reads until the
end of the file.
If a temporary error (as defined by `is_temp_failure`) occurs, the operation is
        retried. Returns the size of the resulting storage object.
'''
if metadata is None:
metadata = dict()
path = self._key_to_path(key)
buf = freeze_basic_mapping(metadata)
if len(buf).bit_length() > 16:
raise ValueError('Metadata too large')
        # Write to a unique temporary file and rename it into place, so that
        # parallel writers cannot conflict (the last rename wins) and readers
        # never observe a partially written object.
tmpname = '%s#%d-%d.tmp' % (path, os.getpid(), _thread.get_ident())
with ExitStack() as es:
try:
dest = es.enter_context(open(tmpname, 'wb', buffering=0))
except FileNotFoundError:
try:
os.makedirs(os.path.dirname(path))
except FileExistsError:
# Another thread may have created the directory already
pass
dest = es.enter_context(open(tmpname, 'wb', buffering=0))
dest.write(b's3ql_1\n')
dest.write(struct.pack('<H', len(buf)))
dest.write(buf)
copyfh(fh, dest, len_)
size = dest.tell()
os.rename(tmpname, path)
return size
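    # A round-trip usage sketch (the storage URL and file names below are
    # hypothetical placeholders, not part of this module):
    #
    #   backend = Backend(options)   # options.storage_url == 'local:///srv/s3ql'
    #   with open('payload.bin', 'rb') as src:
    #       backend.write_fh('mykey', src, metadata={'foo': 'bar'})
    #   with open('copy.bin', 'wb') as dst:
    #       meta = backend.readinto_fh('mykey', dst)   # meta == {'foo': 'bar'}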
def contains(self, key):
path = self._key_to_path(key)
try:
os.lstat(path)
except FileNotFoundError:
return False
return True
def delete_multi(self, keys):
for i, key in enumerate(keys):
try:
self.METHOD_NAME(key)
except:
del keys[:i]
raise
del keys[:]
def METHOD_NAME(self, key):
path = self._key_to_path(key)
try:
os.unlink(path)
except FileNotFoundError:
pass
def list(self, prefix=''):
if prefix:
base = os.path.dirname(self._key_to_path(prefix))
else:
base = self.prefix
for path, dirnames, filenames in os.walk(base, topdown=True):
# Do not look in wrong directories
if prefix:
rpath = path[len(self.prefix) :] # path relative to base
prefix_l = ''.join(rpath.split('/'))
dirs_to_walk = list()
for name in dirnames:
prefix_ll = unescape(prefix_l + name)
if prefix_ll.startswith(prefix[: len(prefix_ll)]):
dirs_to_walk.append(name)
dirnames[:] = dirs_to_walk
for name in filenames:
# Skip temporary files
if '#' in name:
continue
key = unescape(name)
if not prefix or key.startswith(prefix):
yield key
def _key_to_path(self, key):
'''Return path for given key'''
# NOTE: We must not split the path in the middle of an
# escape sequence, or list() will fail to work.
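        # For example (assuming prefix '/srv/s3ql'), the key 's3ql_data_123456'
        # escapes to itself and is nested as
        # '/srv/s3ql/s3ql_data_/123/s3ql_data_123456', which keeps the number
        # of entries per directory bounded.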
key = escape(key)
if not key.startswith('s3ql_data_'):
return os.path.join(self.prefix, key)
no = key[10:]
path = [self.prefix, 's3ql_data_']
for i in range(0, len(no), 3):
path.append(no[:i])
path.append(key)
return os.path.join(*path)
def _read_meta(fh):
buf = fh.read(9)
if not buf.startswith(b's3ql_1\n'):
raise CorruptedObjectError('Invalid object header: %r' % buf)
len_ = struct.unpack('<H', buf[-2:])[0]
try:
return thaw_basic_mapping(fh.read(len_))
except ThawError:
raise CorruptedObjectError('Invalid metadata')
def escape(s):
    '''Escape '/', '=' and '#' in s'''
s = s.replace('=', '=3D')
s = s.replace('/', '=2F')
s = s.replace('#', '=23')
return s
def unescape(s):
    '''Un-escape '/', '=' and '#' in s'''
s = s.replace('=2F', '/')
s = s.replace('=23', '#')
s = s.replace('=3D', '=')
return s
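# Round-trip sketch of escape()/unescape(), added for illustration only:
#
#   >>> escape('a/b=c#d')
#   'a=2Fb=3Dc=23d'
#   >>> unescape(escape('a/b=c#d'))
#   'a/b=c#d'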
|
2,535 |
test missing final newline
|
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Tests for coverage.py's improved tokenizer."""
from __future__ import annotations
import os.path
import re
import textwrap
import warnings
import pytest
from coverage import env
from coverage.phystokens import source_token_lines, source_encoding
from coverage.python import get_python_source
from tests.coveragetest import CoverageTest, TESTS_DIR
# A simple program and its token stream.
SIMPLE = """\
# yay!
def foo():
say('two = %d' % 2)
"""
SIMPLE_TOKENS = [
[('com', "# yay!")],
[('key', 'def'), ('ws', ' '), ('nam', 'foo'), ('op', '('), ('op', ')'), ('op', ':')],
[('ws', ' '), ('nam', 'say'), ('op', '('),
('str', "'two = %d'"), ('ws', ' '), ('op', '%'),
('ws', ' '), ('num', '2'), ('op', ')')],
]
# Mixed-white-space program, and its token stream.
MIXED_WS = """\
def hello():
a="Hello world!"
\tb="indented"
"""
MIXED_WS_TOKENS = [
[('key', 'def'), ('ws', ' '), ('nam', 'hello'), ('op', '('), ('op', ')'), ('op', ':')],
[('ws', ' '), ('nam', 'a'), ('op', '='), ('str', '"Hello world!"')],
[('ws', ' '), ('nam', 'b'), ('op', '='), ('str', '"indented"')],
]
# https://github.com/nedbat/coveragepy/issues/822
BUG_822 = """\
print( "Message 1" )
array = [ 1,2,3,4, # 4 numbers \\
5,6,7 ] # 3 numbers
print( "Message 2" )
"""
class PhysTokensTest(CoverageTest):
"""Tests for coverage.py's improved tokenizer."""
run_in_temp_dir = False
def check_tokenization(self, source: str) -> None:
"""Tokenize `source`, then put it back together, should be the same."""
tokenized = ""
for line in source_token_lines(source):
text = "".join(t for _, t in line)
tokenized += text + "\n"
# source_token_lines doesn't preserve trailing spaces, so trim all that
# before comparing.
source = source.replace('\r\n', '\n')
source = re.sub(r"(?m)[ \t]+$", "", source)
tokenized = re.sub(r"(?m)[ \t]+$", "", tokenized)
assert source == tokenized
def check_file_tokenization(self, fname: str) -> None:
"""Use the contents of `fname` for `check_tokenization`."""
self.check_tokenization(get_python_source(fname))
def test_simple(self) -> None:
assert list(source_token_lines(SIMPLE)) == SIMPLE_TOKENS
self.check_tokenization(SIMPLE)
def METHOD_NAME(self) -> None:
# We can tokenize source that is missing the final newline.
assert list(source_token_lines(SIMPLE.rstrip())) == SIMPLE_TOKENS
def test_tab_indentation(self) -> None:
# Mixed tabs and spaces...
assert list(source_token_lines(MIXED_WS)) == MIXED_WS_TOKENS
def test_bug_822(self) -> None:
self.check_tokenization(BUG_822)
def test_tokenize_real_file(self) -> None:
# Check the tokenization of a real file (large, btw).
real_file = os.path.join(TESTS_DIR, "test_coverage.py")
self.check_file_tokenization(real_file)
@pytest.mark.parametrize("fname", [
"stress_phystoken.tok",
"stress_phystoken_dos.tok",
])
def test_stress(self, fname: str) -> None:
# Check the tokenization of the stress-test files.
# And check that those files haven't been incorrectly "fixed".
with warnings.catch_warnings():
warnings.filterwarnings("ignore", message=r".*invalid escape sequence")
stress = os.path.join(TESTS_DIR, fname)
self.check_file_tokenization(stress)
with open(stress) as fstress:
assert re.search(r"(?m) $", fstress.read()), f"{stress} needs a trailing space."
@pytest.mark.skipif(not env.PYBEHAVIOR.soft_keywords, reason="Soft keywords are new in Python 3.10")
class SoftKeywordTest(CoverageTest):
"""Tests the tokenizer handling soft keywords."""
run_in_temp_dir = False
def test_soft_keywords(self) -> None:
source = textwrap.dedent("""\
match re.match(something):
case ["what"]:
match = case("hello")
case [_]:
match("hello")
match another.thing:
case 1:
pass
class case(): pass
def match():
global case
""")
tokens = list(source_token_lines(source))
assert tokens[0][0] == ("key", "match")
assert tokens[0][4] == ("nam", "match")
assert tokens[1][1] == ("key", "case")
assert tokens[2][1] == ("nam", "match")
assert tokens[2][5] == ("nam", "case")
assert tokens[3][1] == ("key", "case")
assert tokens[4][1] == ("nam", "match")
assert tokens[5][1] == ("key", "match")
assert tokens[6][1] == ("key", "case")
assert tokens[9][2] == ("nam", "case")
assert tokens[10][2] == ("nam", "match")
assert tokens[11][3] == ("nam", "case")
# The default source file encoding.
DEF_ENCODING = "utf-8"
ENCODING_DECLARATION_SOURCES = [
# Various forms from http://www.python.org/dev/peps/pep-0263/
(1, b"# coding=cp850\n\n", "cp850"),
(1, b"# coding=latin-1\n", "iso-8859-1"),
(1, b"# coding=iso-latin-1\n", "iso-8859-1"),
(1, b"#!/usr/bin/python\n# -*- coding: cp850 -*-\n", "cp850"),
(1, b"#!/usr/bin/python\n# vim: set fileencoding=cp850:\n", "cp850"),
(1, b"# This Python file uses this encoding: cp850\n", "cp850"),
(1, b"# This file uses a different encoding:\n# coding: cp850\n", "cp850"),
(1, b"\n# coding=cp850\n\n", "cp850"),
(2, b"# -*- coding:cp850 -*-\n# vim: fileencoding=cp850\n", "cp850"),
]
class SourceEncodingTest(CoverageTest):
"""Tests of source_encoding() for detecting encodings."""
run_in_temp_dir = False
def test_detect_source_encoding(self) -> None:
for _, source, expected in ENCODING_DECLARATION_SOURCES:
assert source_encoding(source) == expected, f"Wrong encoding in {source!r}"
def test_detect_source_encoding_not_in_comment(self) -> None:
# Should not detect anything here
source = b'def parse(src, encoding=None):\n pass'
assert source_encoding(source) == DEF_ENCODING
def test_dont_detect_source_encoding_on_third_line(self) -> None:
# A coding declaration doesn't count on the third line.
source = b"\n\n# coding=cp850\n\n"
assert source_encoding(source) == DEF_ENCODING
def test_detect_source_encoding_of_empty_file(self) -> None:
# An important edge case.
assert source_encoding(b"") == DEF_ENCODING
def test_bom(self) -> None:
# A BOM means utf-8.
source = b"\xEF\xBB\xBFtext = 'hello'\n"
assert source_encoding(source) == 'utf-8-sig'
def test_bom_with_encoding(self) -> None:
source = b"\xEF\xBB\xBF# coding: utf-8\ntext = 'hello'\n"
assert source_encoding(source) == 'utf-8-sig'
def test_bom_is_wrong(self) -> None:
# A BOM with an explicit non-utf8 encoding is an error.
source = b"\xEF\xBB\xBF# coding: cp850\n"
with pytest.raises(SyntaxError, match="encoding problem: utf-8"):
source_encoding(source)
def test_unknown_encoding(self) -> None:
source = b"# coding: klingon\n"
with pytest.raises(SyntaxError, match="unknown encoding: klingon"):
source_encoding(source)
|
2,536 |
test index
|
import pytest
from typing import Optional
from mlserver.errors import ModelNotFound
from mlserver.registry import MultiModelRegistry
from mlserver.handlers import ModelRepositoryHandlers
from mlserver.settings import ModelSettings
from mlserver.types import RepositoryIndexRequest, State
async def METHOD_NAME(
model_repository_handlers: ModelRepositoryHandlers,
repository_index_request: RepositoryIndexRequest,
sum_model_settings: ModelSettings,
):
repo_index = list(await model_repository_handlers.index(repository_index_request))
assert len(repo_index) == 1
assert repo_index[0].name == sum_model_settings.name
assert (
repo_index[0].version == sum_model_settings.parameters.version # type: ignore
)
assert repo_index[0].state == State.READY
async def test_index_unavailable_model(
model_repository_handlers: ModelRepositoryHandlers,
repository_index_request: RepositoryIndexRequest,
sum_model_settings: ModelSettings,
):
await model_repository_handlers.unload(sum_model_settings.name)
repo_index = list(await model_repository_handlers.index(repository_index_request))
assert len(repo_index) == 1
assert repo_index[0].name == sum_model_settings.name
assert (
repo_index[0].version == sum_model_settings.parameters.version # type: ignore
)
assert repo_index[0].state == State.UNAVAILABLE
@pytest.mark.parametrize("ready,expected", [(None, 1), (True, 0), (False, 1)])
async def test_index_filter_ready(
model_repository_handlers: ModelRepositoryHandlers,
repository_index_request: RepositoryIndexRequest,
sum_model_settings: ModelSettings,
ready: Optional[bool],
expected: int,
):
await model_repository_handlers.unload(sum_model_settings.name)
repository_index_request.ready = ready
repo_index = list(await model_repository_handlers.index(repository_index_request))
assert len(repo_index) == expected
async def test_unload(
model_repository_handlers: ModelRepositoryHandlers,
model_registry: MultiModelRegistry,
sum_model_settings: ModelSettings,
):
await model_repository_handlers.unload(sum_model_settings.name)
with pytest.raises(ModelNotFound):
await model_registry.get_model(sum_model_settings.name)
async def test_unload_not_found(
model_repository_handlers: ModelRepositoryHandlers,
):
with pytest.raises(ModelNotFound):
await model_repository_handlers.unload("not-existing")
async def test_load_not_found(
model_repository_handlers: ModelRepositoryHandlers,
):
with pytest.raises(ModelNotFound):
await model_repository_handlers.load("not-existing")
async def test_load_removes_stale_models(
model_repository_handlers: ModelRepositoryHandlers,
repository_index_request: RepositoryIndexRequest,
model_registry: MultiModelRegistry,
sum_model_settings: ModelSettings,
):
# Load a few models which are not present on the repository (including a
# default one), therefore they will be stale
stale_settings = sum_model_settings.copy(deep=True)
stale_settings.parameters.version = None
await model_registry.load(stale_settings)
to_load = ["v0", "v1", "v2"]
for version in to_load:
stale_settings = sum_model_settings.copy(deep=True)
stale_settings.parameters.version = version
await model_registry.load(stale_settings)
# Validate that the stale test models have been loaded
registry_models = await model_registry.get_models(sum_model_settings.name)
stale_length = (
len(to_load)
+ 1 # Count the (stale) default model
+ 1 # Count the previous (non-stale) model
)
assert len(registry_models) == stale_length
# Reload our model and validate whether stale models have been removed
await model_repository_handlers.load(sum_model_settings.name)
# Assert that stale models have been removed from both the registry (and
# ensure they are not present on the repository either)
registry_models = await model_registry.get_models(sum_model_settings.name)
repo_models = list(await model_repository_handlers.index(repository_index_request))
expected_version = sum_model_settings.parameters.version
assert len(registry_models) == 1
assert registry_models[0].version == expected_version
assert len(repo_models) == 1
assert repo_models[0].version == expected_version
|
2,537 |
test intrinsicparamschema
|
#
# Copyright (C) 2019 ifm electronic, gmbh
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import time
from multiprocessing.dummy import Pool as ThreadPool
import ifm3dpy
pytest.skip("skipping as not ported to new naming convention", allow_module_level=True)
def test_factorydefaults():
cam = ifm3dpy.Camera()
cam.factory_reset()
time.sleep(6)
cam.device_type()
def test_waitforframe():
cam = ifm3dpy.Camera()
fg = ifm3dpy.FrameGrabber(cam)
buff = ifm3dpy.ImageBuffer()
count = 0
for i in range(10):
assert fg.wait_for_frame(buff, 1000)
count = count + 1
assert count == 10
def test_customschema():
mask = ifm3dpy.IMG_AMP | ifm3dpy.IMG_RDIS | ifm3dpy.IMG_UVEC
cam = ifm3dpy.Camera()
fg = ifm3dpy.FrameGrabber(cam, mask)
buff = ifm3dpy.ImageBuffer()
assert fg.wait_for_frame(buff, 1000)
def METHOD_NAME():
mask = ifm3dpy.IMG_AMP | ifm3dpy.IMG_RDIS | ifm3dpy.INTR_CAL
cam = ifm3dpy.Camera()
if cam.is_O3X():
with pytest.raises(RuntimeError):
fg = ifm3dpy.FrameGrabber(cam, mask)
elif (cam.is_O3D() and
cam.check_minimum_firmware_version(
ifm3dpy.O3D_INTRINSIC_PARAM_SUPPORT_MAJOR,
ifm3dpy.O3D_INTRINSIC_PARAM_SUPPORT_MINOR,
ifm3dpy.O3D_INTRINSIC_PARAM_SUPPORT_PATCH)):
fg = ifm3dpy.FrameGrabber(cam, mask)
buff = ifm3dpy.ImageBuffer()
assert fg.wait_for_frame(buff, 1000)
elif (cam.is_O3D()):
with pytest.raises(RuntimeError):
fg = ifm3dpy.FrameGrabber(cam, mask)
def test_inverseintrinsicparamschema():
mask = (ifm3dpy.IMG_AMP | ifm3dpy.IMG_RDIS |
ifm3dpy.INTR_CAL | ifm3dpy.INV_INTR_CAL)
cam = ifm3dpy.Camera()
if cam.is_O3X():
with pytest.raises(RuntimeError):
fg = ifm3dpy.FrameGrabber(cam, mask)
elif (cam.is_O3D() and
cam.check_minimum_firmware_version(
ifm3dpy.O3D_INVERSE_INTRINSIC_PARAM_SUPPORT_MAJOR,
ifm3dpy.O3D_INVERSE_INTRINSIC_PARAM_SUPPORT_MINOR,
ifm3dpy.O3D_INVERSE_INTRINSIC_PARAM_SUPPORT_PATCH)):
fg = ifm3dpy.FrameGrabber(cam, mask)
buff = ifm3dpy.ImageBuffer()
assert fg.wait_for_frame(buff, 1000)
elif (cam.is_O3D()):
with pytest.raises(RuntimeError):
fg = ifm3dpy.FrameGrabber(cam, mask)
def test_framegrabberrecycling():
cam = ifm3dpy.Camera()
fg = ifm3dpy.FrameGrabber(cam)
buff = ifm3dpy.ImageBuffer()
for i in range(5):
assert fg.wait_for_frame(buff, 1000)
fg.reset(cam)
for i in range(5):
assert fg.wait_for_frame(buff, 1000)
def test_softwaretrigger():
cam = ifm3dpy.Camera()
idx = cam.active_application()
config = cam.to_json()
config['ifm3d']['Apps'][idx-1]['TriggerMode'] = \
str(int(ifm3dpy.Camera.trigger_mode.SW))
cam.from_json(config)
fg = ifm3dpy.FrameGrabber(cam)
buff = ifm3dpy.ImageBuffer()
# waiting for an image should now timeout
assert not fg.wait_for_frame(buff, 1000)
# now, get image data by explicitly s/w triggering the device
for i in range(10):
fg.sw_trigger()
assert fg.wait_for_frame(buff, 1000)
# set the camera back into free-run mode
config['ifm3d']['Apps'][idx-1]['TriggerMode'] = \
str(int(ifm3dpy.Camera.trigger_mode.FREE_RUN))
cam.from_json(config)
def test_swtriggermultipleclients():
cam = ifm3dpy.Camera()
# O3X cannot handle multiple client connections to PCIC
# so this test does not apply
if cam.is_O3X():
return
# mark the current active application as sw triggered
idx = cam.active_application()
config = cam.to_json()
config['ifm3d']['Apps'][idx-1]['TriggerMode'] = \
str(int(ifm3dpy.Camera.trigger_mode.SW))
cam.from_json(config)
# create two framegrabbers with same camera
fg1 = ifm3dpy.FrameGrabber(cam)
fg2 = ifm3dpy.FrameGrabber(cam)
# launch two threads where each of the framegrabbers will
# wait for a new frame
def get_frame(fg):
buff = ifm3dpy.ImageBuffer()
if not fg.wait_for_frame(buff, 5000):
buff = None
return buff
pool = ThreadPool(2)
res = pool.map_async(get_frame, [fg1, fg2])
# Let's S/W trigger from the first -- this could have been a third
# framegrabber
fg1.sw_trigger()
pool.close()
pool.join()
# Did they both get a frame?
frames = res.get()
assert all(frames)
# Check that the data are the same
if all(frames):
assert (frames[0].distance_image() ==
frames[1].distance_image()).all()
assert (frames[0].unit_vectors() ==
frames[1].unit_vectors()).all()
assert (frames[0].gray_image() ==
frames[1].gray_image()).all()
assert (frames[0].amplitude_image() ==
frames[1].amplitude_image()).all()
assert (frames[0].raw_amplitude_image() ==
frames[1].raw_amplitude_image()).all()
assert (frames[0].confidence_image() ==
frames[1].confidence_image()).all()
assert (frames[0].xyz_image() ==
frames[1].xyz_image()).all()
# set the camera back into free-run mode
config['ifm3d']['Apps'][idx-1]['TriggerMode'] = \
str(int(ifm3dpy.Camera.trigger_mode.FREE_RUN))
cam.from_json(config)
def test_json_model():
cam = ifm3dpy.Camera()
mask = ifm3dpy.IMG_AMP | ifm3dpy.JSON_MODEL
if cam.is_O3X():
with pytest.raises(RuntimeError):
fg = ifm3dpy.FrameGrabber(cam, mask)
else:
fg = ifm3dpy.FrameGrabber(cam, mask)
buff = ifm3dpy.ImageBuffer()
assert fg.wait_for_frame(buff, 1000)
model = buff.json_model()
assert model
|
2,538 |
graph
|
# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" A structure used to describe the network of a model."""
import sys
import types
import paddle
from paddleslim.core import GraphWrapper
from .METHOD_NAME import Graph, Node
from paddleslim.core.dygraph import dygraph2program
__all__ = ["GraphTracer"]
def _apply(layer, func):
for name, child in layer.named_children():
func(child)
_apply(child, func)
def _add_call_hook(module,
function_new,
method_name='forward',
backup_name='__forward_orig__'):
def _call_hook_enable(op):
        # Do not patch the top-level module itself; this keeps it easy to invoke via self.module(x)
if op is not module:
assert not hasattr(
op, backup_name
), f'in {op.__class__.__name__} detected an existing function {backup_name} : please double check'
# backup the original forward of op into backup_name
method_orig = getattr(op, method_name)
setattr(op, backup_name, method_orig)
# set new method
method_new = types.MethodType(function_new, op)
setattr(op, method_name, method_new)
_apply(module, _call_hook_enable)
def _remove_call_hook(module,
method_name='forward',
backup_name='__forward_orig__'):
def _call_hook_disable(op):
if op is not module:
if hasattr(op, backup_name):
method_new = getattr(op, method_name)
method_orig = getattr(op, backup_name)
setattr(op, method_name, method_orig)
# delete the backup
setattr(op, backup_name, method_new)
delattr(op, backup_name)
_apply(module, _call_hook_disable)
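# A minimal sketch of a hook accepted by _add_call_hook (the name `traced_forward`
# is illustrative only). Because the hook is re-bound with types.MethodType, it
# receives the patched layer as its first argument and can delegate to the
# backed-up original forward:
#
#   def traced_forward(op, inputs, *args, **kwargs):
#       # inspect `op` and `inputs` here, then run the original forward
#       return op.__forward_orig__(inputs, *args, **kwargs)
#
#   _add_call_hook(model, traced_forward)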
class GraphTracer(paddle.nn.Layer):
""" A tool used to trace the execution of the model.
    Call the forward of the wrapped model through this tracer
    and it will record a graph of the execution.
Args:
model(paddle.nn.Layer): The model to be traced.
Examples:
.. code-block:: python
            from paddleslim.core.graph_tracer import GraphTracer
from paddle.vision.models import resnet18
model = resnet18()
x = paddle.rand([1, 3, 224, 224])
tracer = GraphTracer(model)
tracer(x)
print(tracer.graph)
"""
def __init__(self, model: paddle.nn.Layer):
super(GraphTracer, self).__init__()
self._model = model
self._graph = None
self._call_count = {}
self._tensor_previous = {}
@property
def METHOD_NAME(self) -> Graph:
assert self._graph is not None, "Please trace the graph by calling forward function of current tracer."
return self._graph
def forward(self, inputs, *args, **kwargs):
self._graph = Graph()
_add_call_hook(self._model, self._analyze_modules_op)
self._model(inputs, *args, **kwargs)
_remove_call_hook(self._model)
def _analyze_modules_op(self, op, inputs, *args, **kwargs):
node = self._trace_in(op, inputs)
#print(f"inputs: {inputs.name}")
outputs = op.__forward_orig__(inputs, *args, **kwargs)
#print(f"outputs: {outputs.name}")
self._trace_out(node, outputs)
return outputs
def _call_layer(self, layer):
layer_name = layer.full_name()
if layer_name not in self._call_count:
self._call_count[layer_name] = 0
self._call_count[layer_name] += 1
return self._call_count[layer_name]
def _trace_in(self, layer, inputs):
inputs = self._convert_to_list(inputs)
        call_count = self._call_layer(layer)
        current_node = Node(layer, call_count)
if current_node.name not in self._graph._name2node:
self._graph._name2node[current_node.name] = current_node
current_node = self._graph._name2node[current_node.name]
for inp in inputs:
last_node = self._tensor_previous.get(inp.name, None)
if last_node is not None:
assert isinstance(last_node, Node)
if last_node not in current_node.previous_nodes:
current_node.previous_nodes.append(last_node)
if current_node not in last_node.next_nodes:
last_node.next_nodes.append(current_node)
return current_node
def _trace_out(self, current_node, outputs):
assert current_node is not None, "The current node has not been visited."
if current_node.is_leaf():
outputs = self._convert_to_list(outputs)
for out in outputs:
self._tensor_previous[out.name] = current_node
def _convert_to_list(self, tensors):
""" Convert tensor to list.
It is important to convert the inputs to a list.
Because visiting the tensor by 'for ... in' will create new
temp variables and break the tracing process.
"""
if isinstance(tensors, paddle.Tensor):
return [tensors]
elif isinstance(tensors, (list, tuple)):
for _t in tensors:
assert isinstance(_t, paddle.Tensor)
return tensors
raise TypeError(
f"Unsopported type: {type(tensors)}; The inputs type should be paddle.Tensor' or list of paddle.Tensor."
)
|
2,539 |
test scalar param array
|
from __future__ import annotations
import datetime
from collections import OrderedDict
import numpy as np
import pandas as pd
import pandas.testing as tm
import pytest
import sqlalchemy as sa
from pytest import param
import ibis
import ibis.expr.datatypes as dt
from ibis import _
try:
from google.api_core.exceptions import BadRequest as GoogleBadRequest
except ImportError:
GoogleBadRequest = None
@pytest.mark.parametrize(
("column", "raw_value"),
[
("double_col", 0.0),
("double_col", 10.1),
("float_col", 1.1),
("float_col", 2.2),
],
)
@pytest.mark.notimpl(["datafusion"])
def test_floating_scalar_parameter(backend, alltypes, df, column, raw_value):
value = ibis.param(dt.double)
expr = (alltypes[column] + value).name("tmp")
expected = df[column] + raw_value
result = expr.execute(params={value: raw_value})
expected = backend.default_series_rename(expected)
backend.assert_series_equal(result, expected, check_dtype=False)
@pytest.mark.parametrize(
("start_string", "end_string"),
[("2009-03-01", "2010-07-03"), ("2014-12-01", "2017-01-05")],
)
@pytest.mark.notimpl(["datafusion", "mssql", "trino", "druid"])
@pytest.mark.broken(["oracle"], raises=sa.exc.DatabaseError)
def test_date_scalar_parameter(backend, alltypes, start_string, end_string):
start, end = ibis.param(dt.date), ibis.param(dt.date)
col = alltypes.timestamp_col.date()
expr = col.between(start, end).name("output")
expected_expr = col.between(start_string, end_string).name("output")
result = expr.execute(params={start: start_string, end: end_string})
expected = expected_expr.execute()
backend.assert_series_equal(result, expected)
@pytest.mark.notimpl(["datafusion"])
def test_timestamp_accepts_date_literals(alltypes):
date_string = "2009-03-01"
param = ibis.param(dt.timestamp)
expr = alltypes.mutate(param=param)
params = {param: date_string}
assert expr.compile(params=params) is not None
@pytest.mark.notimpl(
["dask", "datafusion", "impala", "pandas", "pyspark", "druid", "oracle"]
)
@pytest.mark.never(
["mysql", "sqlite", "mssql"], reason="backend will never implement array types"
)
def METHOD_NAME(con):
value = [1, 2, 3]
param = ibis.param(dt.Array(dt.int64))
result = con.execute(param.length().name("tmp"), params={param: value})
assert result == len(value)
@pytest.mark.notimpl(["datafusion", "impala", "postgres", "pyspark", "druid", "oracle"])
@pytest.mark.never(
["mysql", "sqlite", "mssql"],
reason="mysql and sqlite will never implement struct types",
)
def test_scalar_param_struct(con):
value = dict(a=1, b="abc", c=3.0)
param = ibis.param("struct<a: int64, b: string, c: float64>")
result = con.execute(param["a"], params={param: value})
assert result == value["a"]
@pytest.mark.notimpl(
["clickhouse", "datafusion", "impala", "pyspark", "polars", "druid", "oracle"]
)
@pytest.mark.never(
["mysql", "sqlite", "mssql"],
reason="mysql and sqlite will never implement map types",
)
@pytest.mark.notyet(["bigquery"])
def test_scalar_param_map(con):
value = {"a": "ghi", "b": "def", "c": "abc"}
param = ibis.param(dt.Map(dt.string, dt.string))
result = con.execute(param["b"], params={param: value})
assert result == value["b"]
@pytest.mark.parametrize(
("value", "dtype", "col"),
[
param("0", "string", "string_col", id="string"),
param(0, "int64", "int_col", id="int"),
param(0.0, "float64", "double_col", id="double"),
param(
True,
"bool",
"bool_col",
id="bool",
marks=[pytest.mark.notimpl(["druid"])],
),
param(
"2009-01-20 01:02:03",
"timestamp",
"timestamp_col",
id="string_timestamp",
marks=[
pytest.mark.notimpl(["druid"]),
pytest.mark.broken(
["bigquery"],
raises=GoogleBadRequest,
reason="No matching for operator = for argument types: DATETIME, TIMESTAMP",
),
],
),
param(
datetime.date(2009, 1, 20),
"timestamp",
"timestamp_col",
id="date_timestamp",
marks=[
pytest.mark.notimpl(["druid"]),
pytest.mark.broken(
["bigquery"],
raises=GoogleBadRequest,
reason="No matching for operator = for argument types: DATETIME, TIMESTAMP",
),
],
),
param(
datetime.datetime(2009, 1, 20, 1, 2, 3),
"timestamp",
"timestamp_col",
id="datetime_timestamp",
marks=[
pytest.mark.notimpl(["druid"]),
pytest.mark.broken(
["bigquery"],
raises=GoogleBadRequest,
reason="No matching for operator = for argument types: DATETIME, TIMESTAMP",
),
],
),
],
)
@pytest.mark.notimpl(["datafusion"])
def test_scalar_param(alltypes, df, value, dtype, col):
param = ibis.param(dtype)
expr = alltypes.filter([_[col] == param])
result = (
expr.execute(params={param: value}).sort_values("id").reset_index(drop=True)
)
expected = df.loc[df[col] == value].sort_values("id").reset_index(drop=True)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"value",
["2009-01-20", datetime.date(2009, 1, 20), datetime.datetime(2009, 1, 20)],
ids=["string", "date", "datetime"],
)
@pytest.mark.notimpl(["datafusion", "druid", "oracle"])
@pytest.mark.notyet(["impala"], reason="impala doesn't support dates")
def test_scalar_param_date(backend, alltypes, value):
param = ibis.param("date")
ds_col = alltypes.date_string_col
month = ds_col[:2]
day = ds_col[3:5]
year = "20" + ds_col[6:8]
date_col = (year + "-" + month + "-" + day).cast(param.type())
base = alltypes.mutate(date_col=date_col)
expr = (
alltypes.mutate(date_col=date_col)
.filter(lambda t: t.date_col == param)
.drop("date_col")
)
result = (
expr.execute(params={param: value}).sort_values("id").reset_index(drop=True)
)
df = base.execute()
expected = (
df.loc[df.date_col.dt.normalize() == pd.Timestamp(value).normalize()]
.sort_values("id")
.reset_index(drop=True)
.drop(columns=["date_col"])
)
backend.assert_frame_equal(result, expected)
@pytest.mark.notyet(["mysql"], reason="no struct support")
@pytest.mark.notimpl(
[
"postgres",
"datafusion",
"clickhouse",
"polars",
"duckdb",
"sqlite",
"snowflake",
"impala",
"oracle",
"pyspark",
"mssql",
"trino",
"druid",
]
)
def test_scalar_param_nested(con):
param = ibis.param("struct<x: array<struct<y: array<double>>>>")
value = OrderedDict([("x", [OrderedDict([("y", [1.0, 2.0, 3.0])])])])
result = con.execute(param, {param: value})
assert pytest.approx(result["x"][0]["y"]) == np.array([1.0, 2.0, 3.0])
|
2,540 |
test fopen with disallowed fds
|
"""
Unit Tests for functions located in salt/utils/files.py
"""
import copy
import io
import os
import pytest
import salt.utils.files
from tests.support.mock import MagicMock, patch
def test_safe_rm():
with patch("os.remove") as os_remove_mock:
salt.utils.files.safe_rm("dummy_tgt")
assert os_remove_mock.called is True
def test_safe_rm_exceptions(tmp_path):
assert (
salt.utils.files.safe_rm(str(tmp_path / "no_way_this_is_a_file_nope.sh"))
is None
)
def test_safe_walk_symlink_recursion(tmp_path):
if tmp_path.stat().st_ino == 0:
pytest.xfail(reason="inodes not supported in {}".format(tmp_path))
tmp_path = str(tmp_path)
os.mkdir(os.path.join(tmp_path, "fax"))
os.makedirs(os.path.join(tmp_path, "foo", "bar"))
os.symlink(os.path.join("..", ".."), os.path.join(tmp_path, "foo", "bar", "baz"))
os.symlink("foo", os.path.join(tmp_path, "root"))
expected = [
(os.path.join(tmp_path, "root"), ["bar"], []),
(os.path.join(tmp_path, "root", "bar"), ["baz"], []),
(os.path.join(tmp_path, "root", "bar", "baz"), ["fax", "foo", "root"], []),
(os.path.join(tmp_path, "root", "bar", "baz", "fax"), [], []),
]
paths = []
for root, dirs, names in salt.utils.files.safe_walk(os.path.join(tmp_path, "root")):
paths.append((root, sorted(dirs), names))
assert paths == expected
def METHOD_NAME():
"""
This is safe to have as a unit test since we aren't going to actually
try to read or write. We want to ensure that we are raising a
TypeError. Python 3's open() builtin will treat the booleans as file
descriptor numbers and try to open stdin/stdout. We also want to test
fd 2 which is stderr.
"""
for invalid_fn in (False, True, 0, 1, 2):
try:
with salt.utils.files.fopen(invalid_fn):
pass
except TypeError:
# This is expected. We aren't using an assertRaises here
# because we want to ensure that if we did somehow open the
# filehandle, that it doesn't remain open.
pass
else:
# We probably won't even get this far if we actually opened
# stdin/stdout as a file descriptor. It is likely to cause the
# integration suite to die since, news flash, closing
# stdin/stdout/stderr is usually not a wise thing to do in the
# middle of a program's execution.
pytest.fail(
"fopen() should have been prevented from opening a file "
"using {} as the filename".format(invalid_fn)
)
def test_fopen_binary_line_buffering(tmp_path):
tmp_file = os.path.join(tmp_path, "foobar")
with patch("builtins.open") as open_mock, patch(
"salt.utils.files.is_fcntl_available", MagicMock(return_value=False)
):
salt.utils.files.fopen(os.path.join(tmp_path, "foobar"), mode="b", buffering=1)
assert open_mock.called
assert open_mock.call_args[1]["buffering"] == io.DEFAULT_BUFFER_SIZE
def _create_temp_structure(temp_directory, structure):
for folder, files in structure.items():
current_directory = os.path.join(temp_directory, folder)
os.makedirs(current_directory)
for name, content in files.items():
path = os.path.join(temp_directory, folder, name)
with salt.utils.files.fopen(path, "w+") as fh:
fh.write(content)
def _validate_folder_structure_and_contents(target_directory, desired_structure):
for folder, files in desired_structure.items():
for name, content in files.items():
path = os.path.join(target_directory, folder, name)
with salt.utils.files.fopen(path) as fh:
assert fh.read().strip() == content
def test_recursive_copy(tmp_path):
src = str(tmp_path / "src")
dest = str(tmp_path / "dest")
src_structure = {
"foo": {"foofile.txt": "fooSTRUCTURE"},
"bar": {"barfile.txt": "barSTRUCTURE"},
}
dest_structure = {
"foo": {"foo.txt": "fooTARGET_STRUCTURE"},
"baz": {"baz.txt": "bazTARGET_STRUCTURE"},
}
# Create the file structures in both src and dest dirs
_create_temp_structure(src, src_structure)
_create_temp_structure(dest, dest_structure)
# Perform the recursive copy
salt.utils.files.recursive_copy(src, dest)
# Confirm results match expected results
desired_structure = copy.copy(dest_structure)
desired_structure.update(src_structure)
_validate_folder_structure_and_contents(dest, desired_structure)
@pytest.mark.skip_unless_on_windows
def test_case_sensitive_filesystem_win():
"""
Test case insensitivity on Windows.
"""
result = salt.utils.files.case_insensitive_filesystem()
assert result is True
@pytest.mark.skip_unless_on_linux
def test_case_sensitive_filesystem_lin():
"""
    Test that the filesystem is reported as case-sensitive on Linux.
"""
result = salt.utils.files.case_insensitive_filesystem()
assert result is False
@pytest.mark.skip_unless_on_darwin
def test_case_sensitive_filesystem_dar():
"""
    Test that the filesystem is reported as case-insensitive on Darwin.
"""
result = salt.utils.files.case_insensitive_filesystem()
assert result is True
|
2,541 |
drop user granted directly
|
from testflows.core import *
from testflows.asserts import error
from rbac.requirements import *
from rbac.helper.common import *
import rbac.helper.errors as errors
@TestSuite
def METHOD_NAME(self, node=None):
"""Check that a user is able to execute `DROP USER` with privileges are granted directly.
"""
user_name = f"user_{getuid()}"
if node is None:
node = self.context.node
with user(node, f"{user_name}"):
Suite(run=drop_user,
examples=Examples("privilege grant_target_name user_name", [
tuple(list(row)+[user_name,user_name]) for row in drop_user.examples
], args=Args(name="check privilege={privilege}", format_name=True)))
@TestSuite
def drop_user_granted_via_role(self, node=None):
"""Check that a user is able to execute `DROP USER` with privileges are granted through a role.
"""
user_name = f"user_{getuid()}"
role_name = f"role_{getuid()}"
if node is None:
node = self.context.node
with user(node, f"{user_name}"), role(node, f"{role_name}"):
with When("I grant the role to the user"):
node.query(f"GRANT {role_name} TO {user_name}")
Suite(run=drop_user,
examples=Examples("privilege grant_target_name user_name", [
tuple(list(row)+[role_name,user_name]) for row in drop_user.examples
], args=Args(name="check privilege={privilege}", format_name=True)))
@TestOutline(Suite)
@Examples("privilege",[
("ALL",),
("ACCESS MANAGEMENT",),
("DROP USER",),
])
def drop_user(self, privilege, grant_target_name, user_name, node=None):
"""Check that user is only able to execute `DROP USER` when they have the necessary privilege.
"""
exitcode, message = errors.not_enough_privileges(name=user_name)
if node is None:
node = self.context.node
with Scenario("DROP USER without privilege"):
drop_user_name = f"drop_user_{getuid()}"
with user(node, drop_user_name):
with When("I grant the user NONE privilege"):
node.query(f"GRANT NONE TO {grant_target_name}")
with And("I grant the user USAGE privilege"):
node.query(f"GRANT USAGE ON *.* TO {grant_target_name}")
with When("I check the user can't drop a user"):
node.query(f"DROP USER {drop_user_name}", settings=[("user",user_name)],
exitcode=exitcode, message=message)
with Scenario("DROP USER with privilege"):
drop_user_name = f"drop_user_{getuid()}"
with user(node, drop_user_name):
with When(f"I grant {privilege}"):
node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}")
with Then("I check the user can drop a user"):
node.query(f"DROP USER {drop_user_name}", settings = [("user", f"{user_name}")])
with Scenario("DROP USER on cluster"):
drop_user_name = f"drop_user_{getuid()}"
try:
with Given("I have a user on a cluster"):
node.query(f"CREATE USER {drop_user_name} ON CLUSTER sharded_cluster")
with When(f"I grant {privilege}"):
node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}")
with Then("I check the user can drop a user"):
node.query(f"DROP USER {drop_user_name} ON CLUSTER sharded_cluster",
settings = [("user", f"{user_name}")])
finally:
with Finally("I drop the user"):
node.query(f"DROP USER IF EXISTS {drop_user_name} ON CLUSTER sharded_cluster")
with Scenario("DROP USER with revoked privilege"):
drop_user_name = f"drop_user_{getuid()}"
with user(node, drop_user_name):
with When(f"I grant {privilege}"):
node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}")
with And(f"I revoke {privilege}"):
node.query(f"REVOKE {privilege} ON *.* FROM {grant_target_name}")
with Then("I check the user can't drop a user"):
node.query(f"DROP USER {drop_user_name}", settings=[("user",user_name)],
exitcode=exitcode, message=message)
@TestFeature
@Name("drop user")
@Requirements(
RQ_SRS_006_RBAC_Privileges_DropUser("1.0"),
RQ_SRS_006_RBAC_Privileges_All("1.0"),
RQ_SRS_006_RBAC_Privileges_None("1.0")
)
def feature(self, node="clickhouse1"):
"""Check the RBAC functionality of DROP USER.
"""
self.context.node = self.context.cluster.node(node)
Suite(run=METHOD_NAME, setup=instrument_clickhouse_server_log)
Suite(run=drop_user_granted_via_role, setup=instrument_clickhouse_server_log)
|
2,542 |
evaluate
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from functools import partial
import paddle
from utils import convert_example, reader, unify_prompt_name
from paddlenlp.datasets import MapDataset, load_dataset
from paddlenlp.metrics import SpanEvaluator
from paddlenlp.transformers import UIE, AutoTokenizer
from paddlenlp.utils.log import logger
@paddle.no_grad()
def METHOD_NAME(model, metric, data_loader):
"""
    Given a dataset, it evaluates the model and computes the metric.
Args:
model(obj:`paddle.nn.Layer`): A model to classify texts.
metric(obj:`paddle.metric.Metric`): The evaluation metric.
data_loader(obj:`paddle.io.DataLoader`): The dataset loader which generates batches.
"""
model.eval()
metric.reset()
for batch in data_loader:
input_ids, token_type_ids, att_mask, pos_ids, start_ids, end_ids = batch
start_prob, end_prob = model(input_ids, token_type_ids, att_mask, pos_ids)
start_ids = paddle.cast(start_ids, "float32")
end_ids = paddle.cast(end_ids, "float32")
num_correct, num_infer, num_label = metric.compute(start_prob, end_prob, start_ids, end_ids)
metric.update(num_correct, num_infer, num_label)
precision, recall, f1 = metric.accumulate()
model.train()
return precision, recall, f1
def do_eval():
tokenizer = AutoTokenizer.from_pretrained(args.model_path)
model = UIE.from_pretrained(args.model_path)
test_ds = load_dataset(reader, data_path=args.test_path, max_seq_len=args.max_seq_len, lazy=False)
class_dict = {}
if args.debug:
for data in test_ds:
class_name = unify_prompt_name(data["prompt"])
# Only positive examples are evaluated in debug mode
if len(data["result_list"]) != 0:
class_dict.setdefault(class_name, []).append(data)
else:
class_dict["all_classes"] = test_ds
for key in class_dict.keys():
if args.debug:
test_ds = MapDataset(class_dict[key])
else:
test_ds = class_dict[key]
test_ds = test_ds.map(partial(convert_example, tokenizer=tokenizer, max_seq_len=args.max_seq_len))
test_batch_sampler = paddle.io.BatchSampler(dataset=test_ds, batch_size=args.batch_size, shuffle=False)
test_data_loader = paddle.io.DataLoader(dataset=test_ds, batch_sampler=test_batch_sampler, return_list=True)
metric = SpanEvaluator(args.limit)
precision, recall, f1 = METHOD_NAME(model, metric, test_data_loader)
logger.info("-----------------------------")
logger.info("Class Name: %s" % key)
logger.info("Evaluation Precision: %.5f | Recall: %.5f | F1: %.5f" % (precision, recall, f1))
if __name__ == "__main__":
# yapf: disable
parser = argparse.ArgumentParser()
parser.add_argument("--model_path", type=str, default=None, help="The path of saved model that you want to load.")
parser.add_argument("--test_path", type=str, default=None, help="The path of test set.")
parser.add_argument("--batch_size", type=int, default=16, help="Batch size per GPU/CPU for training.")
parser.add_argument("--max_seq_len", type=int, default=512, help="The maximum total input sequence length after tokenization.")
parser.add_argument("--debug", action='store_true', help="Precision, recall and F1 score are calculated for each class separately if this option is enabled.")
parser.add_argument("--limit", type=float, default=0.5, help="The limit when using SpanEvaluator, when the last dimension in probability arrays is greater than the limit, the corresponding span will be returned.")
args = parser.parse_args()
# yapf: enable
do_eval()
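# Example invocation (a sketch only; the script name and the checkpoint/data
# paths below are hypothetical placeholders):
#
#   python evaluate.py \
#       --model_path ./checkpoint/model_best \
#       --test_path ./data/dev.txt \
#       --batch_size 16 \
#       --max_seq_len 512 \
#       --debug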
|
2,543 |
method
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"vm extension image list-versions",
)
class ListVersions(AAZCommand):
"""List the versions for available extensions.
:example: Find the available versions for the Docker extension.
az vm extension image list-versions --publisher Microsoft.Azure.Extensions -l westus -n DockerExtension -o table
"""
_aaz_info = {
"version": "2022-11-01",
"resources": [
["mgmt-plane", "/subscriptions/{}/providers/microsoft.compute/locations/{}/publishers/{}/artifacttypes/vmextension/types/{}/versions", "2022-11-01"],
]
}
def _handler(self, command_args):
super()._handler(command_args)
self._execute_operations()
return self._output()
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.location = AAZResourceLocationArg(
help="Location. Values from: `az account list-locations`. You can configure the default location using `az configure --defaults location=<location>`.",
required=True,
id_part="name",
)
_args_schema.publisher_name = AAZStrArg(
options=["-p", "--publisher", "--publisher-name"],
help="Image publisher name.",
required=True,
id_part="child_name_1",
)
_args_schema.name = AAZStrArg(
options=["-n", "--name", "--type"],
help="Name of the extension.",
required=True,
id_part="child_name_3",
)
_args_schema.filter = AAZStrArg(
options=["--filter"],
help="The filter to apply on the operation. Default value is None.",
)
_args_schema.orderby = AAZStrArg(
options=["--orderby"],
help="The $orderby odata query option.",
)
_args_schema.top = AAZIntArg(
options=["--top"],
help="The $top odata query option.",
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
self.VirtualMachineExtensionImagesListVersions(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
def _output(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
return result
class VirtualMachineExtensionImagesListVersions(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [200]:
return self.on_200(session)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types/{type}/versions",
**self.url_parameters
)
@property
def METHOD_NAME(self):
return "GET"
@property
def error_format(self):
return "ODataV4Format"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"location", self.ctx.args.location,
required=True,
),
**self.serialize_url_param(
"publisherName", self.ctx.args.publisher_name,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
**self.serialize_url_param(
"type", self.ctx.args.name,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"$filter", self.ctx.args.filter,
),
**self.serialize_query_param(
"$orderby", self.ctx.args.orderby,
),
**self.serialize_query_param(
"$top", self.ctx.args.top,
),
**self.serialize_query_param(
"api-version", "2022-11-01",
required=True,
),
}
return parameters
@property
def header_parameters(self):
parameters = {
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
def on_200(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
)
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZListType()
_schema_on_200 = cls._schema_on_200
_schema_on_200.Element = AAZObjectType()
_element = cls._schema_on_200.Element
_element.id = AAZStrType(
flags={"read_only": True},
)
_element.location = AAZStrType(
flags={"required": True},
)
_element.name = AAZStrType(
flags={"required": True, "read_only": True},
)
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.tags = AAZDictType()
_element.type = AAZStrType(
flags={"read_only": True},
)
properties = cls._schema_on_200.Element.properties
properties.compute_role = AAZStrType(
serialized_name="computeRole",
flags={"required": True},
)
properties.handler_schema = AAZStrType(
serialized_name="handlerSchema",
flags={"required": True},
)
properties.operating_system = AAZStrType(
serialized_name="operatingSystem",
flags={"required": True},
)
properties.supports_multiple_extensions = AAZBoolType(
serialized_name="supportsMultipleExtensions",
)
properties.vm_scale_set_enabled = AAZBoolType(
serialized_name="vmScaleSetEnabled",
)
tags = cls._schema_on_200.Element.tags
tags.Element = AAZStrType()
return cls._schema_on_200
class _ListVersionsHelper:
"""Helper class for ListVersions"""
__all__ = ["ListVersions"]
|
2,544 |
test distance
|
"""GridLAB-D Geodata Distance Package
The distance package computes the shortest distance between consecutive
positions.
INPUT
id - record id (see column_names configuration)
latitude - latitude of record (see column_names configuration)
longitude - longitude of record (see column_names configuration)
OUTPUT
distance - distance from previous record id or first record (see relative option and
column_names configuration)
OPTIONS
units - units in which distance is measured (default is "meters"). Valid
units are "meters", "m", "kilometers", "km", "miles", "mi", "yards", "yd",
"ft", or "feet". The default is "meters".
relative - boolean value to use last record as distance reference
(default is False)
precision - dictionary of precision options
distance - decimals with which distance values are delivered (default is 0)
CONFIGURATION
method - specifies the method used to calculate distances (default 'haversine').
Haversine is the only method currently supported. This method does not account
for the oblate spheroid shape of the earth.
column_names - dictionary of column names to use
LAT - latitude column name (default "latitude")
LON - longitude column name (default "longitude")
ID - record id column name (default "id")
DIST - distance column name (default "distance")
"""
version = 1 # specify API version
import sys
import json
import math, numpy
from pandas import DataFrame
import haversine
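# For reference, the haversine method models the earth as a sphere of radius r
# and computes the great-circle distance between (lat1,lon1) and (lat2,lon2) as
#
#   d = 2 r asin( sqrt( sin^2((lat2-lat1)/2)
#                       + cos(lat1) cos(lat2) sin^2((lon2-lon1)/2) ) )
#
# with all angles in radians; the haversine package performs the actual computation.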
#
# Defaults
#
default_options = {
"units" : "meters",
"relative" : False,
"precision" :
{
"distance" : 0,
}
}
default_config = {
"method" : "haversine",
"column_names" :
{
"LAT" : "latitude",
"LON" : "longitude",
"ID" : "id",
"DIST" : "distance",
},
}
valid_units = {
"m" : 1.0,
"meters" : 1.0,
"km" : 1e-3,
"kilometers" : 1e-3,
"mi" : 0.000621371,
"miles" : 0.000621371,
"yards" : 1.09361296,
"yd" : 1.09361296,
"ft" : 3.28083888,
"feet" : 3.28083888,
}
#
# Implementation of address package
#
def apply(data, options=default_options, config=default_config, warning=print):
"""Perform distance calculation at locations in data
ARGUMENTS:
data (pandas.DataFrame)
            The data frame must contain `id`, `latitude`, and `longitude` fields;
            distances are computed between consecutive records.
options (dict)
"units" specifies the units in which distances are measured. Valid units
are ["meters","m"], ["kilometers","km"], ["feet","ft"], ["yards","yd",
and ["miles","mi"].
config (dict)
There are no configuration options
RETURNS:
pandas.DataFrame
            The first (and only) return value is the `data` data frame with the
            `distance` field updated or added for consecutive records.
"""
# convert lat,lon to address
try:
path = list(zip(data[config["column_names"]["LAT"]],data[config["column_names"]["LON"]],data[config["column_names"]["ID"]]))
except Exception as err:
path = None
    if path is None:
raise Exception("distance calculation requires 'latitude', 'longitude', and 'id' fields")
if len(path) == 0:
dist = []
else:
dist = [0.0]
pos1 = path[0]
lat1 = pos1[0]
lon1 = pos1[1]
if config["method"] == "haversine":
for pos2 in path[1:]:
id = pos2[2]
lat2 = pos2[0]
lon2 = pos2[1]
d = haversine.haversine([lat1,lon1],[lat2,lon2],unit=haversine.Unit.METERS)
if options["relative"]:
if math.isnan(id):
dist.append(d)
else:
lat1 = lat2
lon1 = lon2
dist.append(0.0)
else:
lat1 = lat2
lon1 = lon2
dist.append(d+dist[-1])
else:
raise Exception(f"method '{config[method]}' is not recognized")
try:
global valid_units
data[config["column_names"]["DIST"]] = (numpy.array(dist) * valid_units[options["units"]]).round(options["precision"]["distance"])
except:
raise Exception(f"unit '{options['units']}' is not recognized")
return data
#
# Perform validation tests
#
if __name__ == '__main__':
import unittest
class TestDistance(unittest.TestCase):
def METHOD_NAME(self):
test = DataFrame({
"id" : [0,1],
"latitude" : [37.4205,37.5205],
"longitude" : [-122.2046,-122.3046],
})
result = apply(test)
self.assertEqual(result["distance"][1],14196.0)
unittest.main()
|
2,545 |
read mat ark
|
# To use this file, the dependency (https://github.com/vesis84/kaldi-io-for-python)
# needs to be installed. This is a light wrapper around kaldi_io that returns
# torch.Tensors.
from typing import Any, Callable, Iterable, Tuple
import torch
from torch import Tensor
from torchaudio._internal import module_utils as _mod_utils
if _mod_utils.is_module_available("numpy"):
import numpy as np
__all__ = [
"read_vec_int_ark",
"read_vec_flt_scp",
"read_vec_flt_ark",
"read_mat_scp",
"read_mat_ark",
]
def _convert_method_output_to_tensor(
file_or_fd: Any, fn: Callable, convert_contiguous: bool = False
) -> Iterable[Tuple[str, Tensor]]:
r"""Takes a method invokes it. The output is converted to a tensor.
Args:
file_or_fd (str/FileDescriptor): File name or file descriptor
fn (Callable): Function that has the signature (file name/descriptor) and converts it to
Iterable[Tuple[str, Tensor]].
convert_contiguous (bool, optional): Determines whether the array should be converted into a
contiguous layout. (Default: ``False``)
Returns:
Iterable[Tuple[str, Tensor]]: The string is the key and the tensor is vec/mat
"""
for key, np_arr in fn(file_or_fd):
if convert_contiguous:
np_arr = np.ascontiguousarray(np_arr)
yield key, torch.from_numpy(np_arr)
@_mod_utils.requires_module("kaldi_io", "numpy")
def read_vec_int_ark(file_or_fd: Any) -> Iterable[Tuple[str, Tensor]]:
r"""Create generator of (key,vector<int>) tuples, which reads from the ark file/stream.
Args:
file_or_fd (str/FileDescriptor): ark, gzipped ark, pipe or opened file descriptor
Returns:
Iterable[Tuple[str, Tensor]]: The string is the key and the tensor is the vector read from file
Example
>>> # read ark to a 'dictionary'
>>> d = { u:d for u,d in torchaudio.kaldi_io.read_vec_int_ark(file) }
"""
import kaldi_io
# Requires convert_contiguous to be True because elements from int32 vector are
# sorted in tuples: (sizeof(int32), value) so strides are (5,) instead of (4,) which will throw an error
# in from_numpy as it expects strides to be a multiple of 4 (int32).
return _convert_method_output_to_tensor(file_or_fd, kaldi_io.read_vec_int_ark, convert_contiguous=True)
@_mod_utils.requires_module("kaldi_io", "numpy")
def read_vec_flt_scp(file_or_fd: Any) -> Iterable[Tuple[str, Tensor]]:
r"""Create generator of (key,vector<float32/float64>) tuples, read according to Kaldi scp.
Args:
file_or_fd (str/FileDescriptor): scp, gzipped scp, pipe or opened file descriptor
Returns:
Iterable[Tuple[str, Tensor]]: The string is the key and the tensor is the vector read from file
Example
>>> # read scp to a 'dictionary'
>>> # d = { u:d for u,d in torchaudio.kaldi_io.read_vec_flt_scp(file) }
"""
import kaldi_io
return _convert_method_output_to_tensor(file_or_fd, kaldi_io.read_vec_flt_scp)
@_mod_utils.requires_module("kaldi_io", "numpy")
def read_vec_flt_ark(file_or_fd: Any) -> Iterable[Tuple[str, Tensor]]:
r"""Create generator of (key,vector<float32/float64>) tuples, which reads from the ark file/stream.
Args:
file_or_fd (str/FileDescriptor): ark, gzipped ark, pipe or opened file descriptor
Returns:
Iterable[Tuple[str, Tensor]]: The string is the key and the tensor is the vector read from file
Example
>>> # read ark to a 'dictionary'
>>> d = { u:d for u,d in torchaudio.kaldi_io.read_vec_flt_ark(file) }
"""
import kaldi_io
return _convert_method_output_to_tensor(file_or_fd, kaldi_io.read_vec_flt_ark)
@_mod_utils.requires_module("kaldi_io", "numpy")
def read_mat_scp(file_or_fd: Any) -> Iterable[Tuple[str, Tensor]]:
r"""Create generator of (key,matrix<float32/float64>) tuples, read according to Kaldi scp.
Args:
file_or_fd (str/FileDescriptor): scp, gzipped scp, pipe or opened file descriptor
Returns:
Iterable[Tuple[str, Tensor]]: The string is the key and the tensor is the matrix read from file
Example
>>> # read scp to a 'dictionary'
>>> d = { u:d for u,d in torchaudio.kaldi_io.read_mat_scp(file) }
"""
import kaldi_io
return _convert_method_output_to_tensor(file_or_fd, kaldi_io.read_mat_scp)
@_mod_utils.requires_module("kaldi_io", "numpy")
def METHOD_NAME(file_or_fd: Any) -> Iterable[Tuple[str, Tensor]]:
r"""Create generator of (key,matrix<float32/float64>) tuples, which reads from the ark file/stream.
Args:
file_or_fd (str/FileDescriptor): ark, gzipped ark, pipe or opened file descriptor
Returns:
Iterable[Tuple[str, Tensor]]: The string is the key and the tensor is the matrix read from file
Example
>>> # read ark to a 'dictionary'
>>> d = { u:d for u,d in torchaudio.kaldi_io.read_mat_ark(file) }
"""
import kaldi_io
return _convert_method_output_to_tensor(file_or_fd, kaldi_io.METHOD_NAME)
|
2,546 |
cf security adaptive network hardenings
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
def _cf_security(cli_ctx, **_):
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.mgmt.security import SecurityCenter
return get_mgmt_service_client(cli_ctx, SecurityCenter, asc_location="centralus")
def cf_security_tasks(cli_ctx, _):
# do not return cli_ctx.tasks for home region compatibility
return _cf_security(cli_ctx)
def cf_security_alerts(cli_ctx, _):
return _cf_security(cli_ctx).alerts
def cf_security_alerts_suppression_rule(cli_ctx, _):
return _cf_security(cli_ctx).alerts_suppression_rules
def cf_security_settings(cli_ctx, _):
return _cf_security(cli_ctx).settings
def cf_security_contacts(cli_ctx, _):
return _cf_security(cli_ctx).security_contacts
def cf_security_auto_provisioning_settings(cli_ctx, _):
return _cf_security(cli_ctx).auto_provisioning_settings
def cf_security_discovered_security_solutions(cli_ctx, _):
# do not return cli_ctx.discovered_security_solutions for home region compatibility
return _cf_security(cli_ctx)
def cf_security_external_security_solutions(cli_ctx, _):
# do not return cli_ctx.external_security_solutions for home region compatibility
return _cf_security(cli_ctx)
def cf_security_jit_network_access_policies(cli_ctx, _):
return _cf_security(cli_ctx).jit_network_access_policies
def cf_security_locations(cli_ctx, _):
return _cf_security(cli_ctx).locations
def cf_security_pricings(cli_ctx, _):
return _cf_security(cli_ctx).pricings
def cf_security_topology(cli_ctx, _):
# do not return cli_ctx.topology for home region compatibility
return _cf_security(cli_ctx)
def cf_security_workspace_settings(cli_ctx, _):
return _cf_security(cli_ctx).workspace_settings
def cf_security_advanced_threat_protection(cli_ctx, _):
return _cf_security(cli_ctx).advanced_threat_protection
def cf_sql_vulnerability_assessment_scans(cli_ctx, _):
return _cf_security(cli_ctx).sql_vulnerability_assessment_scans
def cf_sql_vulnerability_assessment_results(cli_ctx, _):
return _cf_security(cli_ctx).sql_vulnerability_assessment_scan_results
def cf_sql_vulnerability_assessment_baseline(cli_ctx, _):
return _cf_security(cli_ctx).sql_vulnerability_assessment_baseline_rules
def cf_security_assessment(cli_ctx, _):
return _cf_security(cli_ctx).assessments
def cf_security_assessment_metadata(cli_ctx, _):
return _cf_security(cli_ctx).assessments_metadata
def cf_security_sub_assessment(cli_ctx, _):
return _cf_security(cli_ctx).sub_assessments
def cf_security_iot_solution(cli_ctx, _):
return _cf_security(cli_ctx).iot_security_solution
def cf_security_iot_analytics(cli_ctx, _):
return _cf_security(cli_ctx).iot_security_solution_analytics
def cf_security_iot_alerts(cli_ctx, _):
return _cf_security(cli_ctx).iot_security_solutions_analytics_aggregated_alert
def cf_security_iot_recommendations(cli_ctx, _):
return _cf_security(cli_ctx).iot_security_solutions_analytics_recommendation
def cf_security_regulatory_compliance_standards(cli_ctx, _):
return _cf_security(cli_ctx).regulatory_compliance_standards
def cf_security_regulatory_compliance_control(cli_ctx, _):
return _cf_security(cli_ctx).regulatory_compliance_controls
def cf_security_regulatory_compliance_assessment(cli_ctx, _):
return _cf_security(cli_ctx).regulatory_compliance_assessments
def cf_security_adaptive_application_controls(cli_ctx, _):
    return _cf_security(cli_ctx).adaptive_application_controls
def METHOD_NAME(cli_ctx, _):
    return _cf_security(cli_ctx).adaptive_network_hardenings
def cf_security_allowed_connections(cli_ctx, _):
# do not return cli_ctx.allowed_connections for home region compatibility
return _cf_security(cli_ctx)
def cf_security_secure_scores(cli_ctx, _):
return _cf_security(cli_ctx).secure_scores
def cf_security_secure_score_controls(cli_ctx, _):
return _cf_security(cli_ctx).secure_score_controls
def cf_security_secure_score_control_definitions(cli_ctx, _):
return _cf_security(cli_ctx).secure_score_control_definitions
def cf_security_security_solutions_reference_data(cli_ctx, _):
return _cf_security(cli_ctx).security_solutions_reference_data
def cf_security_automations(cli_ctx, _):
return _cf_security(cli_ctx).automations
def cf_security_security_solutions(cli_ctx, _):
return _cf_security(cli_ctx)
|
2,547 |
double data type
|
from __future__ import annotations
from typing import Collection
from dbt_semantic_interfaces.enum_extension import assert_values_exhausted
from dbt_semantic_interfaces.type_enums.time_granularity import TimeGranularity
from typing_extensions import override
from metricflow.errors.errors import UnsupportedEngineFeatureError
from metricflow.sql.render.expr_renderer import (
DefaultSqlExpressionRenderer,
SqlExpressionRenderer,
SqlExpressionRenderResult,
)
from metricflow.sql.render.sql_plan_renderer import DefaultSqlQueryPlanRenderer
from metricflow.sql.sql_bind_parameters import SqlBindParameters
from metricflow.sql.sql_exprs import (
SqlGenerateUuidExpression,
SqlPercentileExpression,
SqlPercentileFunctionType,
SqlTimeDeltaExpression,
)
class PostgresSqlExpressionRenderer(DefaultSqlExpressionRenderer):
"""Expression renderer for the PostgreSQL engine."""
@property
@override
def METHOD_NAME(self) -> str:
"""Custom double data type for the PostgreSQL engine."""
return "DOUBLE PRECISION"
@property
@override
def supported_percentile_function_types(self) -> Collection[SqlPercentileFunctionType]:
return {SqlPercentileFunctionType.CONTINUOUS, SqlPercentileFunctionType.DISCRETE}
@override
def visit_time_delta_expr(self, node: SqlTimeDeltaExpression) -> SqlExpressionRenderResult:
"""Render time delta operations for PostgreSQL, which needs custom support for quarterly granularity."""
arg_rendered = node.arg.accept(self)
if node.grain_to_date:
return SqlExpressionRenderResult(
sql=f"DATE_TRUNC('{node.granularity.value}', {arg_rendered.sql}::timestamp)",
bind_parameters=arg_rendered.bind_parameters,
)
count = node.count
granularity = node.granularity
if granularity == TimeGranularity.QUARTER:
granularity = TimeGranularity.MONTH
count *= 3
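        # For example, a 2-quarter delta is rendered below as
        # "... - MAKE_INTERVAL(months => 6)", since MAKE_INTERVAL has no
        # quarter argument.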
return SqlExpressionRenderResult(
sql=f"{arg_rendered.sql} - MAKE_INTERVAL({granularity.value}s => {count})",
bind_parameters=arg_rendered.bind_parameters,
)
@override
def visit_generate_uuid_expr(self, node: SqlGenerateUuidExpression) -> SqlExpressionRenderResult:
return SqlExpressionRenderResult(
sql="GEN_RANDOM_UUID()",
bind_parameters=SqlBindParameters(),
)
@override
def visit_percentile_expr(self, node: SqlPercentileExpression) -> SqlExpressionRenderResult:
"""Render a percentile expression for Postgres."""
arg_rendered = self.render_sql_expr(node.order_by_arg)
params = arg_rendered.bind_parameters
percentile = node.percentile_args.percentile
if node.percentile_args.function_type is SqlPercentileFunctionType.CONTINUOUS:
function_str = "PERCENTILE_CONT"
elif node.percentile_args.function_type is SqlPercentileFunctionType.DISCRETE:
function_str = "PERCENTILE_DISC"
elif node.percentile_args.function_type is SqlPercentileFunctionType.APPROXIMATE_CONTINUOUS:
raise UnsupportedEngineFeatureError(
"Approximate continuous percentile aggregate not supported for Postgres. Set "
+ "use_approximate_percentile to false in all percentile measures."
)
elif node.percentile_args.function_type is SqlPercentileFunctionType.APPROXIMATE_DISCRETE:
raise UnsupportedEngineFeatureError(
"Approximate discrete percentile aggregate not supported for Postgres. Set "
+ "use_approximate_percentile to false in all percentile measures."
)
else:
assert_values_exhausted(node.percentile_args.function_type)
return SqlExpressionRenderResult(
sql=f"{function_str}({percentile}) WITHIN GROUP (ORDER BY ({arg_rendered.sql}))",
bind_parameters=params,
)
class PostgresSQLSqlQueryPlanRenderer(DefaultSqlQueryPlanRenderer):
"""Plan renderer for the PostgreSQL engine."""
EXPR_RENDERER = PostgresSqlExpressionRenderer()
@property
@override
def expr_renderer(self) -> SqlExpressionRenderer:
return self.EXPR_RENDERER
|
2,548 |
test transform skipped
|
import base64
import json
import unittest
import zlib
from unittest.mock import patch, ANY
import asyncio
from orangecontrib.text.vectorization.sbert import SBERT, EMB_DIM
from orangecontrib.text import Corpus
PATCH_METHOD = 'httpx.AsyncClient.post'
RESPONSES = {
t: [i] * EMB_DIM for i, t in enumerate(Corpus.from_file("deerwester").documents)
}
RESPONSE_NONE = RESPONSES.copy()
RESPONSE_NONE[list(RESPONSE_NONE.keys())[-1]] = None
IDEAL_RESPONSE = [[i] * EMB_DIM for i in range(9)]
class DummyResponse:
def __init__(self, content):
self.content = content
def _decompress_text(instance):
return zlib.decompress(base64.b64decode(instance.encode("utf-8"))).decode("utf-8")
def make_dummy_post(responses, sleep=0):
@staticmethod
async def dummy_post(url, headers, data=None, content=None):
assert data or content
await asyncio.sleep(sleep)
data = json.loads(content.decode("utf-8", "replace"))
data_ = data if isinstance(data, list) else [data]
texts = [_decompress_text(instance) for instance in data_]
responses_ = [responses[t] for t in texts]
r = {"embedding": responses_ if isinstance(data, list) else responses_[0]}
return DummyResponse(content=json.dumps(r).encode("utf-8"))
return dummy_post
class TestSBERT(unittest.TestCase):
def setUp(self):
self.sbert = SBERT()
self.sbert.clear_cache()
self.corpus = Corpus.from_file('deerwester')
def tearDown(self):
self.sbert.clear_cache()
@patch(PATCH_METHOD)
def test_empty_corpus(self, mock):
self.assertEqual(len(self.sbert(self.corpus.documents[:0])), 0)
mock.request.assert_not_called()
mock.get_response.assert_not_called()
self.assertEqual(
self.sbert._server_communicator._cache._cache_dict,
dict()
)
@patch(PATCH_METHOD, make_dummy_post(RESPONSES))
def test_success(self):
result = self.sbert(self.corpus.documents)
self.assertEqual(result, IDEAL_RESPONSE)
@patch(PATCH_METHOD, make_dummy_post(RESPONSE_NONE))
def test_none_result(self):
result = self.sbert(self.corpus.documents)
self.assertEqual(result, IDEAL_RESPONSE[:-1] + [None])
@patch(PATCH_METHOD, make_dummy_post(RESPONSES))
def test_transform(self):
res, skipped = self.sbert.transform(self.corpus)
self.assertIsNone(skipped)
self.assertEqual(len(self.corpus), len(res))
self.assertTupleEqual(self.corpus.domain.metas, res.domain.metas)
self.assertEqual(384, len(res.domain.attributes))
@patch(PATCH_METHOD, make_dummy_post(RESPONSE_NONE))
def METHOD_NAME(self):
res, skipped = self.sbert.transform(self.corpus)
self.assertEqual(len(self.corpus) - 1, len(res))
self.assertTupleEqual(self.corpus.domain.metas, res.domain.metas)
self.assertEqual(384, len(res.domain.attributes))
self.assertEqual(1, len(skipped))
self.assertTupleEqual(self.corpus.domain.metas, skipped.domain.metas)
self.assertEqual(0, len(skipped.domain.attributes))
@patch(PATCH_METHOD, make_dummy_post(RESPONSES))
def test_batches_success(self):
for i in range(1, 11): # try different batch sizes
result = self.sbert.embed_batches(self.corpus.documents, i)
self.assertEqual(result, IDEAL_RESPONSE)
@patch(PATCH_METHOD, make_dummy_post(RESPONSE_NONE))
def test_batches_none_result(self):
for i in range(1, 11): # try different batch sizes
result = self.sbert.embed_batches(self.corpus.documents, i)
self.assertEqual(result, IDEAL_RESPONSE[:-1] + [None])
@patch("orangecontrib.text.vectorization.sbert._ServerCommunicator.embedd_data")
def test_reordered(self, mock):
"""Test that texts are reordered according to their length"""
self.sbert(self.corpus.documents)
mock.assert_called_with(
tuple(sorted(self.corpus.documents, key=len, reverse=True)), callback=ANY
)
self.sbert([["1", "2"], ["4", "5", "6"], ["0"]])
mock.assert_called_with((["4", "5", "6"], ["1", "2"], ["0"]), callback=ANY)
if __name__ == "__main__":
unittest.main()
|
2,549 |
output size
|
import numpy as np
import pandas as pd
from torch import nn
from torch.utils.data import sampler
from clinicadl.utils.exceptions import ClinicaDLArgumentError
from clinicadl.utils.task_manager.task_manager import TaskManager
class RegressionManager(TaskManager):
def __init__(
self,
mode,
):
super().__init__(mode)
@property
def columns(self):
return [
"participant_id",
"session_id",
f"{self.mode}_id",
"true_label",
"predicted_label",
]
@property
def evaluation_metrics(self):
return ["MSE", "MAE"]
@property
def save_outputs(self):
return False
def generate_test_row(self, idx, data, outputs):
return [
[
data["participant_id"][idx],
data["session_id"][idx],
data[f"{self.mode}_id"][idx].item(),
data["label"][idx].item(),
outputs[idx].item(),
]
]
def compute_metrics(self, results_df):
return self.metrics_module.apply(
results_df.true_label.values,
results_df.predicted_label.values,
)
@staticmethod
def generate_label_code(df, label):
return None
@staticmethod
def METHOD_NAME(input_size, df, label):
return 1
@staticmethod
def generate_sampler(dataset, sampler_option="random", n_bins=5):
df = dataset.df
count = np.zeros(n_bins)
values = df[dataset.label].values.astype(float)
thresholds = [
min(values) + i * (max(values) - min(values)) / n_bins
for i in range(n_bins)
]
for idx in df.index:
label = df.loc[idx, dataset.label]
            key = max(np.where(label >= np.array(thresholds))[0])
            count[key] += 1
weight_per_class = 1 / np.array(count)
weights = []
for idx, label in enumerate(df[dataset.label].values):
key = max(np.where((label >= np.array(thresholds)))[0])
weights += [weight_per_class[key]] * dataset.elem_per_image
if sampler_option == "random":
return sampler.RandomSampler(weights)
elif sampler_option == "weighted":
return sampler.WeightedRandomSampler(weights, len(weights))
else:
raise NotImplementedError(
f"The option {sampler_option} for sampler on regression task is not implemented"
)
def ensemble_prediction(
self,
performance_df,
validation_df,
selection_threshold=None,
use_labels=True,
method="hard",
):
"""
Compute the results at the image-level by assembling the results on parts of the image.
Args:
performance_df (pd.DataFrame): results that need to be assembled.
validation_df (pd.DataFrame): results on the validation set used to compute the performance
of each separate part of the image.
selection_threshold (float): with soft-voting method, allows to exclude some parts of the image
if their associated performance is too low.
use_labels (bool): If True, metrics are computed and the label column values must be different
from None.
            method (str): method used to assemble the results. Only hard-voting is currently implemented.
Returns:
df_final (pd.DataFrame) the results on the image level
results (Dict[str, float]) the metrics on the image level
"""
if method != "hard":
raise NotImplementedError(
f"You asked for {method} ensemble method. "
f"The only method implemented for regression is hard-voting."
)
n_modes = validation_df[f"{self.mode}_id"].nunique()
weight_series = np.ones(n_modes)
# Sort to allow weighted average computation
performance_df.sort_values(
["participant_id", "session_id", f"{self.mode}_id"], inplace=True
)
# Soft majority vote
df_final = pd.DataFrame(columns=self.columns)
for (subject, session), subject_df in performance_df.groupby(
["participant_id", "session_id"]
):
label = subject_df["true_label"].unique().item()
prediction = np.average(
subject_df["predicted_label"], weights=weight_series
)
row = [[subject, session, 0, label, prediction]]
row_df = pd.DataFrame(row, columns=self.columns)
df_final = pd.concat([df_final, row_df])
if use_labels:
results = self.compute_metrics(df_final)
else:
results = None
return df_final, results
@staticmethod
def get_criterion(criterion=None):
compatible_losses = [
"L1Loss",
"MSELoss",
"KLDivLoss",
"BCEWithLogitsLoss",
"HuberLoss",
"SmoothL1Loss",
]
if criterion is None:
return nn.MSELoss()
if criterion not in compatible_losses:
raise ClinicaDLArgumentError(
f"Regression loss must be chosen in {compatible_losses}."
)
return getattr(nn, criterion)()
@staticmethod
def get_default_network():
return "Conv5_FC3"
|
2,550 |
width value changed
|
"""
@package rdigit.toolbars
@brief rdigit toolbars and icons.
Classes:
- toolbars::RDigitToolbar
(C) 2014 by the GRASS Development Team
This program is free software under the GNU General Public
License (>=v2). Read the file COPYING that comes with GRASS
for details.
@author Anna Petrasova <kratochanna gmail.com>
"""
import wx
from gui_core.toolbars import BaseToolbar
from icons.icon import MetaIcon
from gui_core.widgets import FloatValidator
import wx.lib.colourselect as csel
from gui_core.wrap import TextCtrl, StaticText, ColourSelect
from gui_core.toolbars import BaseIcons
rdigitIcons = {
"area": MetaIcon(img="polygon-create", label=_("Digitize area")),
"line": MetaIcon(img="line-create", label=_("Digitize line")),
"point": MetaIcon(img="point-create", label=_("Digitize point")),
"save": MetaIcon(img="save", label=_("Save raster map")),
"undo": MetaIcon(img="undo", label=_("Undo")),
}
class RDigitToolbar(BaseToolbar):
"""RDigit toolbar"""
def __init__(self, parent, giface, controller, toolSwitcher):
"""RDigit toolbar constructor"""
BaseToolbar.__init__(self, parent, toolSwitcher)
self._controller = controller
self._giface = giface
self.InitToolbar(self._toolbarData())
self._mapSelectionCombo = wx.ComboBox(
self, id=wx.ID_ANY, value=_("Select raster map"), choices=[], size=(120, -1)
)
self._mapSelectionCombo.Bind(wx.EVT_COMBOBOX, self.OnMapSelection)
self._mapSelectionCombo.SetEditable(False)
self.InsertControl(0, self._mapSelectionCombo)
self._previousMap = self._mapSelectionCombo.GetValue()
self._color = ColourSelect(parent=self, colour=wx.GREEN, size=(30, 30))
self._color.Bind(csel.EVT_COLOURSELECT, lambda evt: self._changeDrawColor())
self._color.SetToolTip(_("Set drawing color (not raster cell color)"))
self.InsertControl(4, self._color)
self._cellValues = set(["1"])
# validator does not work with combobox, SetBackgroundColor is not
# working
self._valueCombo = wx.ComboBox(
self,
id=wx.ID_ANY,
choices=list(self._cellValues),
size=(80, -1),
validator=FloatValidator(),
)
self._valueCombo.Bind(wx.EVT_COMBOBOX, lambda evt: self._cellValueChanged())
self._valueCombo.Bind(wx.EVT_TEXT, lambda evt: self._cellValueChanged())
self._valueCombo.SetSelection(0)
self._cellValueChanged()
labelValue = StaticText(self, label=" %s" % _("Cell value:"))
self.InsertControl(6, labelValue)
self.InsertControl(7, self._valueCombo)
# validator does not work with combobox, SetBackgroundColor is not
# working
self._widthValue = TextCtrl(
self, id=wx.ID_ANY, value="0", size=(80, -1), validator=FloatValidator()
)
self._widthValue.Bind(wx.EVT_TEXT, lambda evt: self.METHOD_NAME())
self.METHOD_NAME()
self._widthValue.SetToolTip(
_(
"Width of currently digitized line or diameter of a digitized point in map units."
)
)
labelWidth = StaticText(self, label=" %s" % _("Width:"))
self.InsertControl(8, labelWidth)
self.InsertControl(9, self._widthValue)
for tool in (self.area, self.line, self.point):
self.toolSwitcher.AddToolToGroup(group="mouseUse", toolbar=self, tool=tool)
self.toolSwitcher.toggleToolChanged.connect(self.CheckSelectedTool)
self._default = self.area
# realize the toolbar
self.Realize()
# workaround Mac bug
for t in (
self._mapSelectionCombo,
self._color,
self._valueCombo,
self._widthValue,
labelValue,
labelWidth,
):
t.Hide()
t.Show()
def _toolbarData(self):
"""Toolbar data"""
return self._getToolbarData(
(
(
("area", rdigitIcons["area"].label),
rdigitIcons["area"],
lambda event: self._controller.SelectType("area"),
wx.ITEM_CHECK,
),
(
("line", rdigitIcons["line"].label),
rdigitIcons["line"],
lambda event: self._controller.SelectType("line"),
wx.ITEM_CHECK,
),
(
("point", rdigitIcons["point"].label),
rdigitIcons["point"],
lambda event: self._controller.SelectType("point"),
wx.ITEM_CHECK,
),
(None,),
(None,),
(
("undo", rdigitIcons["undo"].label),
rdigitIcons["undo"],
lambda event: self._controller.Undo(),
),
(
("save", rdigitIcons["save"].label),
rdigitIcons["save"],
lambda event: self._controller.Save(),
),
(
("help", BaseIcons["help"].label),
BaseIcons["help"],
lambda event: self._giface.Help("wxGUI.rdigit"),
),
(
("quit", BaseIcons["quit"].label),
BaseIcons["quit"],
lambda event: self._controller.Stop(),
),
)
)
def CheckSelectedTool(self, id):
if self.toolSwitcher.IsToolInGroup(tool=id, group="mouseUse") and id not in (
self.area,
self.line,
self.point,
):
self._controller.SelectType(None)
def UpdateRasterLayers(self, rasters):
new = _("New raster map")
items = [raster.name for raster in rasters if raster.name is not None]
items.insert(0, new)
self._mapSelectionCombo.SetItems(items)
def OnMapSelection(self, event=None):
"""!Either map to edit or create new map selected."""
idx = self._mapSelectionCombo.GetSelection()
if idx == 0:
ret = self._controller.SelectNewMap()
else:
ret = self._controller.SelectOldMap(self._mapSelectionCombo.GetString(idx))
if not ret:
# in wxpython 3 we can't set value which is not in the items
# when not editable
self._mapSelectionCombo.SetEditable(True)
self._mapSelectionCombo.SetValue(self._previousMap)
self._mapSelectionCombo.SetEditable(False)
# we need to get back to previous
self._previousMap = self._mapSelectionCombo.GetValue()
def NewRasterAdded(self, name):
idx = self._mapSelectionCombo.Append(name)
self._mapSelectionCombo.SetSelection(idx)
def UpdateCellValues(self, values=None):
orig = self._valueCombo.GetValue()
if not values:
values = [orig]
for value in values:
self._cellValues.add(str(value))
valList = sorted(list(self._cellValues), key=float)
self._valueCombo.SetItems(valList)
self._valueCombo.SetStringSelection(orig)
def _cellValueChanged(self):
value = self._valueCombo.GetValue()
try:
value = float(value)
self._controller.SetCellValue(value)
except ValueError:
return
def METHOD_NAME(self):
value = self._widthValue.GetValue()
try:
value = float(value)
self._controller.SetWidthValue(value)
except ValueError:
self._controller.SetWidthValue(0)
return
def _changeDrawColor(self):
color = self._color.GetColour()
self._controller.ChangeDrawColor(color=color)
|
2,551 |
proc7
|
"""
"PYSTONE" Benchmark Program
Version: Python/1.1 (corresponds to C/1.1 plus 2 Pystone fixes)
Author: Reinhold P. Weicker, CACM Vol 27, No 10, 10/84 pg. 1013.
Translated from ADA to C by Rick Richardson.
Every method to preserve ADA-likeness has been used,
at the expense of C-ness.
Translated from C to Python by Guido van Rossum.
Version History:
Version 1.1 corrects two bugs in version 1.0:
First, it leaked memory: in Proc1(), NextRecord ends
up having a pointer to itself. I have corrected this
by zapping NextRecord.PtrComp at the end of Proc1().
Second, Proc3() used the operator != to compare a
record to None. This is rather inefficient and not
true to the intention of the original benchmark (where
a pointer comparison to None is intended; the !=
operator attempts to find a method __cmp__ to do value
comparison of the record). Version 1.1 runs 5-10
percent faster than version 1.0, so benchmark figures
of different versions can't be compared directly.
"""
LOOPS = 50000
from time import time as clock
__version__ = "1.1"
[Ident1, Ident2, Ident3, Ident4, Ident5] = range(1, 6)
class Record:
def __init__(self, PtrComp = None, Discr = 0, EnumComp = 0,
IntComp = 0, StringComp = 0):
self.PtrComp = PtrComp
self.Discr = Discr
self.EnumComp = EnumComp
self.IntComp = IntComp
self.StringComp = StringComp
def copy(self):
return Record(self.PtrComp, self.Discr, self.EnumComp,
self.IntComp, self.StringComp)
TRUE = 1
FALSE = 0
def main(loops=LOOPS):
benchtime, stones = pystones(loops)
print("Pystone(%s) time for %d passes = %g" % \
(__version__, loops, benchtime))
print("This machine benchmarks at %g pystones/second" % stones)
def pystones(loops=LOOPS):
return Proc0(loops)
IntGlob = 0
BoolGlob = FALSE
Char1Glob = '\0'
Char2Glob = '\0'
Array1Glob = [0]*51
Array2Glob = list(map(lambda x: x[:], [Array1Glob]*51))
PtrGlb = None
PtrGlbNext = None
def Proc0(loops=LOOPS):
global IntGlob
global BoolGlob
global Char1Glob
global Char2Glob
global Array1Glob
global Array2Glob
global PtrGlb
global PtrGlbNext
starttime = clock()
for i in range(loops):
pass
nulltime = clock() - starttime
PtrGlbNext = Record()
PtrGlb = Record()
PtrGlb.PtrComp = PtrGlbNext
PtrGlb.Discr = Ident1
PtrGlb.EnumComp = Ident3
PtrGlb.IntComp = 40
PtrGlb.StringComp = "DHRYSTONE PROGRAM, SOME STRING"
String1Loc = "DHRYSTONE PROGRAM, 1'ST STRING"
Array2Glob[8][7] = 10
starttime = clock()
for i in range(loops):
Proc5()
Proc4()
IntLoc1 = 2
IntLoc2 = 3
String2Loc = "DHRYSTONE PROGRAM, 2'ND STRING"
EnumLoc = Ident2
BoolGlob = not Func2(String1Loc, String2Loc)
while IntLoc1 < IntLoc2:
IntLoc3 = 5 * IntLoc1 - IntLoc2
IntLoc3 = METHOD_NAME(IntLoc1, IntLoc2)
IntLoc1 = IntLoc1 + 1
Proc8(Array1Glob, Array2Glob, IntLoc1, IntLoc3)
PtrGlb = Proc1(PtrGlb)
CharIndex = 'A'
while CharIndex <= Char2Glob:
if EnumLoc == Func1(CharIndex, 'C'):
EnumLoc = Proc6(Ident1)
CharIndex = chr(ord(CharIndex)+1)
IntLoc3 = IntLoc2 * IntLoc1
IntLoc2 = IntLoc3 / IntLoc1
IntLoc2 = 7 * (IntLoc3 - IntLoc2) - IntLoc1
IntLoc1 = Proc2(IntLoc1)
benchtime = clock() - starttime - nulltime
if benchtime == 0.0:
loopsPerBenchtime = 0.0
else:
loopsPerBenchtime = (loops / benchtime)
return benchtime, loopsPerBenchtime
def Proc1(PtrParIn):
PtrParIn.PtrComp = NextRecord = PtrGlb.copy()
PtrParIn.IntComp = 5
NextRecord.IntComp = PtrParIn.IntComp
NextRecord.PtrComp = PtrParIn.PtrComp
NextRecord.PtrComp = Proc3(NextRecord.PtrComp)
if NextRecord.Discr == Ident1:
NextRecord.IntComp = 6
NextRecord.EnumComp = Proc6(PtrParIn.EnumComp)
NextRecord.PtrComp = PtrGlb.PtrComp
NextRecord.IntComp = METHOD_NAME(NextRecord.IntComp, 10)
else:
PtrParIn = NextRecord.copy()
NextRecord.PtrComp = None
return PtrParIn
def Proc2(IntParIO):
IntLoc = IntParIO + 10
while 1:
if Char1Glob == 'A':
IntLoc = IntLoc - 1
IntParIO = IntLoc - IntGlob
EnumLoc = Ident1
if EnumLoc == Ident1:
break
return IntParIO
def Proc3(PtrParOut):
global IntGlob
if PtrGlb is not None:
PtrParOut = PtrGlb.PtrComp
else:
IntGlob = 100
PtrGlb.IntComp = METHOD_NAME(10, IntGlob)
return PtrParOut
def Proc4():
global Char2Glob
BoolLoc = Char1Glob == 'A'
BoolLoc = BoolLoc or BoolGlob
Char2Glob = 'B'
def Proc5():
global Char1Glob
global BoolGlob
Char1Glob = 'A'
BoolGlob = FALSE
def Proc6(EnumParIn):
EnumParOut = EnumParIn
if not Func3(EnumParIn):
EnumParOut = Ident4
if EnumParIn == Ident1:
EnumParOut = Ident1
elif EnumParIn == Ident2:
if IntGlob > 100:
EnumParOut = Ident1
else:
EnumParOut = Ident4
elif EnumParIn == Ident3:
EnumParOut = Ident2
elif EnumParIn == Ident4:
pass
elif EnumParIn == Ident5:
EnumParOut = Ident3
return EnumParOut
def METHOD_NAME(IntParI1, IntParI2):
IntLoc = IntParI1 + 2
IntParOut = IntParI2 + IntLoc
return IntParOut
def Proc8(Array1Par, Array2Par, IntParI1, IntParI2):
global IntGlob
IntLoc = IntParI1 + 5
Array1Par[IntLoc] = IntParI2
Array1Par[IntLoc+1] = Array1Par[IntLoc]
Array1Par[IntLoc+30] = IntLoc
for IntIndex in range(IntLoc, IntLoc+2):
Array2Par[IntLoc][IntIndex] = IntLoc
Array2Par[IntLoc][IntLoc-1] = Array2Par[IntLoc][IntLoc-1] + 1
Array2Par[IntLoc+20][IntLoc] = Array1Par[IntLoc]
IntGlob = 5
def Func1(CharPar1, CharPar2):
CharLoc1 = CharPar1
CharLoc2 = CharLoc1
if CharLoc2 != CharPar2:
return Ident1
else:
return Ident2
def Func2(StrParI1, StrParI2):
IntLoc = 1
while IntLoc <= 1:
if Func1(StrParI1[IntLoc], StrParI2[IntLoc+1]) == Ident1:
CharLoc = 'A'
IntLoc = IntLoc + 1
if CharLoc >= 'W' and CharLoc <= 'Z':
IntLoc = 7
if CharLoc == 'X':
return TRUE
else:
if StrParI1 > StrParI2:
IntLoc = IntLoc + 7
return TRUE
else:
return FALSE
def Func3(EnumParIn):
EnumLoc = EnumParIn
if EnumLoc == Ident3: return TRUE
return FALSE
main(LOOPS)
|
2,552 |
kickin cost
|
"""
When an attendee or group preregisters or changes their registration, we want a way to determine the potential costs and
credits to add to their receipt. These items are defined here. Each cost/credit should return None if there is no applicable
charge for that model or a tuple of the cost description, the cost price, and (optionally) the number of items. If the cost
price is 0, the item is printed as "Free" on the receipt. All cost prices should be in cents.
"""
from collections import defaultdict
from uber.config import c
from uber.decorators import cost_calculation, credit_calculation
from uber.models import Attendee, ArtShowApplication, Group
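# For example, ``donation_cost`` below returns ("Extra Donation", attendee.extra_donation * 100, 'extra_donation'):
# a $15 extra donation becomes a 1500-cent line item on the receipt, while returning None adds no line item at all.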
@cost_calculation.MarketplaceApplication
def app_cost(app):
if app.status == c.APPROVED:
return ("Marketplace Application Fee", app.overridden_price * 100 or c.MARKETPLACE_FEE * 100 or 0, None)
ArtShowApplication.cost_changes = {
'overridden_price': ('Custom App Price', "calc_app_price_change"),
'panels': ('General Panels', "calc_app_price_change"),
'panels_ad': ('Mature Panels', "calc_app_price_change"),
'tables': ('General Tables', "calc_app_price_change"),
'tables_ad': ('Mature Tables', "calc_app_price_change"),
}
@cost_calculation.ArtShowApplication
def overridden_app_cost(app):
    if app.status == c.APPROVED and app.overridden_price is not None:
return ("Art Show Application (Custom Price)", app.overridden_price * 100, 'overridden_price')
@cost_calculation.ArtShowApplication
def panel_cost(app):
return ("General Panel", c.COST_PER_PANEL * 100, app.panels, None) if app.panels else None
@cost_calculation.ArtShowApplication
def table_cost(app):
return ("General Table", c.COST_PER_TABLE * 100, app.tables, None) if app.tables else None
@cost_calculation.ArtShowApplication
def mature_panel_cost(app):
return ("Mature Panel", c.COST_PER_PANEL * 100, app.panels_ad, None) if app.panels_ad else None
@cost_calculation.ArtShowApplication
def mature_table_cost(app):
return ("Mature Table", c.COST_PER_TABLE * 100, app.tables_ad, None) if app.tables_ad else None
@cost_calculation.ArtShowApplication
def mailing_fee_cost(app):
return ("Mailing fee", c.ART_MAILING_FEE * 100, None) if app.delivery_method == c.BY_MAIL else None
Attendee.cost_changes = {
'overridden_price': ('Custom Badge Price', "calc_badge_cost_change"),
'badge_type': ('Badge ({})', "calc_badge_cost_change", c.BADGES),
'amount_extra': ('Preordered Merch ({})', None, c.DONATION_TIERS),
'extra_donation': ('Extra Donation', None),
}
Attendee.credit_changes = {
'paid': ('Badge Comp', "calc_badge_comp_change"),
'birthdate': ('Age Discount', "calc_age_discount_change"),
'promo_code': ('Promo Code', "calc_promo_discount_change"),
}
@cost_calculation.Attendee
def badge_cost(attendee):
if attendee.paid == c.PAID_BY_GROUP or attendee.promo_code_groups or getattr(attendee, 'badges', None):
cost = 0
else:
cost = attendee.calculate_badge_cost() * 100
if cost or attendee.badge_type in c.BADGE_TYPE_PRICES:
if attendee.badge_type in c.BADGE_TYPE_PRICES or not attendee.badge_type_label:
label = "Attendee badge for {}{}".format(attendee.full_name, "" if cost else " (paid by group)")
else:
label = "{} badge for {}".format(attendee.badge_type_label, attendee.full_name)
return (label, cost, None)
@cost_calculation.Attendee
def badge_upgrade_cost(attendee):
if attendee.badge_type in c.BADGE_TYPE_PRICES:
return ("{} badge upgrade for {}".format(attendee.badge_type_label, attendee.full_name),
attendee.calculate_badge_prices_cost() * 100, 'badge_type')
@cost_calculation.Attendee
def shipping_fee_cost(attendee):
if attendee.badge_status == c.DEFERRED_STATUS and attendee.amount_extra:
return ("Merch Shipping Fee", attendee.calculate_shipping_fee_cost() * 100, None)
@cost_calculation.Attendee
def donation_cost(attendee):
return ("Extra Donation", attendee.extra_donation * 100, 'extra_donation') if attendee.extra_donation else None
@cost_calculation.Attendee
def METHOD_NAME(attendee):
return ("Preordered Merch ({})".format(attendee.amount_extra_label),
attendee.amount_extra * 100, 'amount_extra') if attendee.amount_extra else None
@credit_calculation.Attendee
def age_discount(attendee):
if attendee.qualifies_for_discounts and attendee.age_discount:
if abs(attendee.age_discount) > attendee.calculate_badge_cost():
age_discount = attendee.calculate_badge_cost() * 100 * -1
else:
age_discount = attendee.age_discount * 100
return ("Age Discount", age_discount, None)
@credit_calculation.Attendee
def group_discount(attendee):
if c.GROUP_DISCOUNT and attendee.qualifies_for_discounts and not attendee.age_discount and (
attendee.promo_code_groups or attendee.group):
return ("Group Discount", c.GROUP_DISCOUNT * 100 * -1, None)
@credit_calculation.Attendee
def promo_code_discount(attendee):
if attendee.promo_code:
discount = attendee.calculate_badge_cost() - attendee.badge_cost_with_promo_code
return ("Promo Code", discount * 100 * -1, None)
Group.cost_changes = {
'cost': ('Custom Group Price', "calc_group_price_change"),
'tables': ('Tables', "calc_group_price_change"),
'badges': ('Badges', "calc_group_price_change"),
}
@cost_calculation.Group
def table_cost(group):
table_count = int(float(group.tables))
if table_count and group.auto_recalc:
return ("{} Tables".format(table_count), c.get_table_price(table_count) * 100, None)
@cost_calculation.Group
def badge_cost(group):
cost_table = defaultdict(int)
if not group.auto_recalc:
return
for attendee in group.attendees:
if attendee.paid == c.PAID_BY_GROUP and attendee.badge_cost:
cost_table[attendee.badge_cost * 100] += 1
return ("Group badge ({})".format(group.name), cost_table, None)
@cost_calculation.Group
def set_cost(group):
if not group.auto_recalc:
return ("Custom fee for group {}".format(group.name), group.cost * 100, None)
@cost_calculation.Attendee
def promo_code_group_cost(attendee):
cost_table = defaultdict(int)
if getattr(attendee, 'badges', None):
# During prereg we set the number of promo code badges on the attendee model
cost_table[c.get_group_price() * 100] = int(attendee.badges)
elif attendee.promo_code_groups:
for code in attendee.promo_code_groups[0].promo_codes:
cost_table[code.cost * 100] += 1
else:
return
return ("Group badge ({})".format(attendee.promo_code_groups[0].name if attendee.promo_code_groups
else getattr(attendee, 'name', 'Unknown')), cost_table, None)
|
2,553 |
fprime
|
"""
Holds files for l1 regularization of LikelihoodModel, using
scipy.optimize.slsqp
"""
import numpy as np
from scipy.optimize import fmin_slsqp
import statsmodels.base.l1_solvers_common as l1_solvers_common
def fit_l1_slsqp(
f, score, start_params, args, kwargs, disp=False, maxiter=1000,
callback=None, retall=False, full_output=False, hess=None):
"""
Solve the l1 regularized problem using scipy.optimize.fmin_slsqp().
Specifically: We convert the convex but non-smooth problem
.. math:: \\min_\\beta f(\\beta) + \\sum_k\\alpha_k |\\beta_k|
via the transformation to the smooth, convex, constrained problem in twice
as many variables (adding the "added variables" :math:`u_k`)
.. math:: \\min_{\\beta,u} f(\\beta) + \\sum_k\\alpha_k u_k,
subject to
.. math:: -u_k \\leq \\beta_k \\leq u_k.
Parameters
----------
All the usual parameters from LikelhoodModel.fit
alpha : non-negative scalar or numpy array (same size as parameters)
The weight multiplying the l1 penalty term
trim_mode : 'auto, 'size', or 'off'
If not 'off', trim (set to zero) parameters that would have been zero
if the solver reached the theoretical minimum.
If 'auto', trim params using the Theory above.
If 'size', trim params if they have very small absolute value
size_trim_tol : float or 'auto' (default = 'auto')
        For use when trim_mode == 'size'
auto_trim_tol : float
        For use when trim_mode == 'auto'.
qc_tol : float
Print warning and do not allow auto trim when (ii) in "Theory" (above)
is violated by this much.
qc_verbose : bool
If true, print out a full QC report upon failure
acc : float (default 1e-6)
Requested accuracy as used by slsqp
"""
start_params = np.array(start_params).ravel('F')
### Extract values
# k_params is total number of covariates,
# possibly including a leading constant.
k_params = len(start_params)
# The start point
x0 = np.append(start_params, np.fabs(start_params))
# alpha is the regularization parameter
alpha = np.array(kwargs['alpha_rescaled']).ravel('F')
# Make sure it's a vector
alpha = alpha * np.ones(k_params)
assert alpha.min() >= 0
# Convert display parameters to scipy.optimize form
disp_slsqp = _get_disp_slsqp(disp, retall)
# Set/retrieve the desired accuracy
acc = kwargs.setdefault('acc', 1e-10)
### Wrap up for use in fmin_slsqp
func = lambda x_full: _objective_func(f, x_full, k_params, alpha, *args)
f_ieqcons_wrap = lambda x_full: _f_ieqcons(x_full, k_params)
fprime_wrap = lambda x_full: METHOD_NAME(score, x_full, k_params, alpha)
fprime_ieqcons_wrap = lambda x_full: _fprime_ieqcons(x_full, k_params)
### Call the solver
results = fmin_slsqp(
func, x0, f_ieqcons=f_ieqcons_wrap, fprime=fprime_wrap, acc=acc,
iter=maxiter, disp=disp_slsqp, full_output=full_output,
fprime_ieqcons=fprime_ieqcons_wrap)
params = np.asarray(results[0][:k_params])
### Post-process
# QC
qc_tol = kwargs['qc_tol']
qc_verbose = kwargs['qc_verbose']
passed = l1_solvers_common.qc_results(
params, alpha, score, qc_tol, qc_verbose)
# Possibly trim
trim_mode = kwargs['trim_mode']
size_trim_tol = kwargs['size_trim_tol']
auto_trim_tol = kwargs['auto_trim_tol']
params, trimmed = l1_solvers_common.do_trim_params(
params, k_params, alpha, score, passed, trim_mode, size_trim_tol,
auto_trim_tol)
### Pack up return values for statsmodels optimizers
# TODO These retvals are returned as mle_retvals...but the fit was not ML.
# This could be confusing someday.
if full_output:
x_full, fx, its, imode, smode = results
fopt = func(np.asarray(x_full))
converged = (imode == 0)
warnflag = str(imode) + ' ' + smode
iterations = its
gopt = float('nan') # Objective is non-differentiable
hopt = float('nan')
retvals = {
'fopt': fopt, 'converged': converged, 'iterations': iterations,
'gopt': gopt, 'hopt': hopt, 'trimmed': trimmed,
'warnflag': warnflag}
### Return
if full_output:
return params, retvals
else:
return params
def _get_disp_slsqp(disp, retall):
if disp or retall:
if disp:
disp_slsqp = 1
if retall:
disp_slsqp = 2
else:
disp_slsqp = 0
return disp_slsqp
def _objective_func(f, x_full, k_params, alpha, *args):
"""
The regularized objective function
"""
x_params = x_full[:k_params]
x_added = x_full[k_params:]
## Return
return f(x_params, *args) + (alpha * x_added).sum()
def METHOD_NAME(score, x_full, k_params, alpha):
"""
The regularized derivative
"""
x_params = x_full[:k_params]
# The derivative just appends a vector of constants
return np.append(score(x_params), alpha)
def _f_ieqcons(x_full, k_params):
"""
The inequality constraints.
"""
x_params = x_full[:k_params]
x_added = x_full[k_params:]
# All entries in this vector must be \geq 0 in a feasible solution
return np.append(x_params + x_added, x_added - x_params)
def _fprime_ieqcons(x_full, k_params):
"""
Derivative of the inequality constraints
"""
I = np.eye(k_params) # noqa:E741
A = np.concatenate((I, I), axis=1)
B = np.concatenate((-I, I), axis=1)
C = np.concatenate((A, B), axis=0)
## Return
return C
|
2,554 |
sbc compare audio frames
|
#!/usr/bin/env python3
import numpy as np
import wave
import struct
import sys
from sbc import *
from sbc_encoder import *
from sbc_decoder import *
error = 0.99
max_error = -1
def sbc_compare_subband_samples(frame_count, actual_frame, expected_frame):
global error, max_error
for blk in range(actual_frame.nr_blocks):
for ch in range(actual_frame.nr_channels):
M = mse(actual_frame.sb_sample[blk][ch], expected_frame.sb_sample[blk][ch])
if M > max_error:
max_error = M
if max_error > error:
print ("Frame %d: sb_sample error %f (ch %d, blk %d)" % (frame_count, max_error, ch, blk))
print (actual_frame.sb_sample[blk])
print (expected_frame.sb_sample[blk])
return -1
return 0
def METHOD_NAME(frame_count, actual_frame, expected_frame):
global error, max_error
for blk in range(actual_frame.nr_blocks):
for ch in range(actual_frame.nr_channels):
M = mse(actual_frame.audio_sample[blk][ch], expected_frame.audio_sample[blk][ch])
if M > max_error:
max_error = M
if max_error > error:
print ("audio_sample error (%d, %f ) " % (frame_count, max_error))
print (actual_frame.audio_sample[blk])
print (expected_frame.audio_sample[blk])
return -1
return 0
def sbc_compare_headers(frame_count, actual_frame, expected_frame):
if actual_frame.syncword != expected_frame.syncword:
print ("syncword wrong ", actual_frame.syncword)
return -1
if actual_frame.sampling_frequency != expected_frame.sampling_frequency:
print ("sampling_frequency wrong ", actual_frame.sampling_frequency)
return -1
if actual_frame.nr_blocks != expected_frame.nr_blocks:
print ("nr_blocks wrong ", actual_frame.nr_blocks)
return -1
if actual_frame.channel_mode != expected_frame.channel_mode:
print ("channel_mode wrong ", actual_frame.channel_mode)
return -1
if actual_frame.nr_channels != expected_frame.nr_channels:
print ("nr_channels wrong ", actual_frame.nr_channels)
return -1
if actual_frame.allocation_method != expected_frame.allocation_method:
print ("allocation_method wrong ", actual_frame.allocation_method)
return -1
if actual_frame.nr_subbands != expected_frame.nr_subbands:
print ("nr_subbands wrong ", actual_frame.nr_subbands)
return -1
if actual_frame.bitpool != expected_frame.bitpool:
print ("bitpool wrong (E: %d, D: %d)" % (actual_frame.bitpool, expected_frame.bitpool))
return -1
if mse(actual_frame.join, expected_frame.join) > 0:
print ("join error \nE:\n %s \nD:\n %s" % (actual_frame.join, expected_frame.join))
return -1
if mse(actual_frame.scale_factor, expected_frame.scale_factor) > 0:
print ("scale_factor error %d \nE:\n %s \nD:\n %s" % (frame_count, actual_frame.scale_factor, expected_frame.scale_factor))
return -1
if mse(actual_frame.scalefactor, expected_frame.scalefactor) > 0:
print ("scalefactor error %d \nE:\n %s \nD:\n %s" % (frame_count, actual_frame.scalefactor, expected_frame.scalefactor))
return -1
if mse(actual_frame.bits, expected_frame.bits) > 0:
print ("bits error %d \nE:\n %s \nD:\n %s" % (frame_count, actual_frame.bits, expected_frame.bits))
return -1
if actual_frame.crc_check != expected_frame.crc_check:
print ("crc_check wrong (E: %d, D: %d)" % (actual_frame.crc_check, expected_frame.crc_check))
return -1
return 0
def get_actual_frame(fin, nr_blocks, nr_subbands, nr_channels, sampling_frequency, bitpool, allocation_method, force_channel_mode):
actual_frame = SBCFrame(nr_blocks, nr_subbands, nr_channels, sampling_frequency, bitpool, allocation_method)
fetch_samples_for_next_sbc_frame(fin, actual_frame)
sbc_encode(actual_frame, force_channel_mode)
return actual_frame
file_size = 0
def get_expected_frame(fin_expected):
global file_size
expected_frame = SBCFrame()
sbc_unpack_frame(fin_expected, file_size - fin_expected.tell(), expected_frame)
sbc_reconstruct_subband_samples(expected_frame)
return expected_frame
usage = '''
Usage: ./sbc_encoder_test.py encoder_input.wav blocks subbands bitpool allocation_method encoder_expected_output.sbc force_channel_mode
Example: ./sbc_encoder_test.py fanfare.wav 16 4 31 0 fanfare-4sb.sbc 2
'''
if (len(sys.argv) < 8):
print(usage)
sys.exit(1)
try:
encoder_input_wav = sys.argv[1]
nr_blocks = int(sys.argv[2])
nr_subbands = int(sys.argv[3])
bitpool = int(sys.argv[4])
allocation_method = int(sys.argv[5])
encoder_expected_sbc = sys.argv[6]
force_channel_mode = int(sys.argv[7])
sampling_frequency = 44100
if not encoder_input_wav.endswith('.wav'):
print(usage)
sys.exit(1)
if not encoder_expected_sbc.endswith('.sbc'):
print(usage)
sys.exit(1)
fin = wave.open(encoder_input_wav, 'rb')
nr_channels = fin.getnchannels()
sampling_frequency = fin.getframerate()
nr_audio_frames = fin.getnframes()
fin_expected = open(encoder_expected_sbc, 'rb')
fin_expected.seek(0,2)
file_size = fin_expected.tell()
fin_expected.seek(0,0)
subband_frame_count = 0
audio_frame_count = 0
nr_samples = nr_blocks * nr_subbands
while audio_frame_count < nr_audio_frames:
if subband_frame_count % 200 == 0:
print("== Frame %d ==" % (subband_frame_count))
        actual_frame = get_actual_frame(fin, nr_blocks, nr_subbands, nr_channels, sampling_frequency, bitpool, allocation_method, force_channel_mode)
expected_frame = get_expected_frame(fin_expected)
err = sbc_compare_headers(subband_frame_count, actual_frame, expected_frame)
if err < 0:
exit(1)
err = METHOD_NAME(subband_frame_count, actual_frame, expected_frame)
if err < 0:
exit(1)
audio_frame_count += nr_samples
subband_frame_count += 1
print ("Max MSE audio sample error %f" % max_error)
fin.close()
fin_expected.close()
except TypeError:
print ("Max MSE audio sample error %f" % max_error)
fin.close()
fin_expected.close()
except IOError:
print(usage)
sys.exit(1)
|
2,555 |
tags
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetBatchEndpointResult',
'AwaitableGetBatchEndpointResult',
'get_batch_endpoint',
'get_batch_endpoint_output',
]
@pulumi.output_type
class GetBatchEndpointResult:
def __init__(__self__, batch_endpoint_details=None, id=None, identity=None, kind=None, location=None, name=None, sku=None, system_data=None, METHOD_NAME=None, type=None):
if batch_endpoint_details and not isinstance(batch_endpoint_details, dict):
raise TypeError("Expected argument 'batch_endpoint_details' to be a dict")
pulumi.set(__self__, "batch_endpoint_details", batch_endpoint_details)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if identity and not isinstance(identity, dict):
raise TypeError("Expected argument 'identity' to be a dict")
pulumi.set(__self__, "identity", identity)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if METHOD_NAME and not isinstance(METHOD_NAME, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", METHOD_NAME)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="batchEndpointDetails")
def batch_endpoint_details(self) -> 'outputs.BatchEndpointResponse':
"""
[Required] Additional attributes of the entity.
"""
return pulumi.get(self, "batch_endpoint_details")
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def identity(self) -> Optional['outputs.ManagedServiceIdentityResponse']:
"""
Managed service identity (system assigned and/or user assigned identities)
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter
def kind(self) -> Optional[str]:
"""
Metadata used by portal/tooling/etc to render different UX experiences for resources of the same type.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def location(self) -> str:
"""
The geo-location where the resource lives
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def sku(self) -> Optional['outputs.SkuResponse']:
"""
Sku details required for ARM contract for Autoscaling.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Azure Resource Manager metadata containing createdBy and modifiedBy information.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def METHOD_NAME(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetBatchEndpointResult(GetBatchEndpointResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetBatchEndpointResult(
batch_endpoint_details=self.batch_endpoint_details,
id=self.id,
identity=self.identity,
kind=self.kind,
location=self.location,
name=self.name,
sku=self.sku,
system_data=self.system_data,
METHOD_NAME=self.METHOD_NAME,
type=self.type)
def get_batch_endpoint(endpoint_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
workspace_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetBatchEndpointResult:
"""
Use this data source to access information about an existing resource.
:param str endpoint_name: Name for the Batch Endpoint.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str workspace_name: Name of Azure Machine Learning workspace.
"""
__args__ = dict()
__args__['endpointName'] = endpoint_name
__args__['resourceGroupName'] = resource_group_name
__args__['workspaceName'] = workspace_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:machinelearningservices/v20220201preview:getBatchEndpoint', __args__, opts=opts, typ=GetBatchEndpointResult).value
return AwaitableGetBatchEndpointResult(
batch_endpoint_details=pulumi.get(__ret__, 'batch_endpoint_details'),
id=pulumi.get(__ret__, 'id'),
identity=pulumi.get(__ret__, 'identity'),
kind=pulumi.get(__ret__, 'kind'),
location=pulumi.get(__ret__, 'location'),
name=pulumi.get(__ret__, 'name'),
sku=pulumi.get(__ret__, 'sku'),
system_data=pulumi.get(__ret__, 'system_data'),
METHOD_NAME=pulumi.get(__ret__, 'tags'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_batch_endpoint)
def get_batch_endpoint_output(endpoint_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetBatchEndpointResult]:
"""
Use this data source to access information about an existing resource.
:param str endpoint_name: Name for the Batch Endpoint.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str workspace_name: Name of Azure Machine Learning workspace.
"""
...
|
2,556 |
test get document url with deeplinking url
|
from unittest.mock import create_autospec, sentinel
import pytest
from lms.product.canvas._plugin.misc import CanvasMiscPlugin
from lms.resources._js_config import JSConfig
from tests import factories
class TestCanvasMiscPlugin:
@pytest.mark.parametrize("is_gradable", [True, False])
@pytest.mark.parametrize("is_learner", [True, False])
@pytest.mark.parametrize("grading_id", [None, sentinel.grading_id])
@pytest.mark.parametrize("focused_user", [None, sentinel.focused_user])
def test_post_launch_assignment_hook(
self,
request,
plugin,
js_config,
pyramid_request,
is_gradable,
is_learner,
grading_id,
focused_user,
):
assignment = factories.Assignment(is_gradable=is_gradable)
pyramid_request.lti_params["lis_result_sourcedid"] = grading_id
pyramid_request.params["focused_user"] = focused_user
if is_learner:
request.getfixturevalue("user_is_learner")
plugin.post_launch_assignment_hook(pyramid_request, js_config, assignment)
if assignment.is_gradable and is_learner and grading_id:
js_config.add_canvas_speedgrader_settings.assert_called_once_with(
assignment.document_url
)
else:
js_config.add_canvas_speedgrader_settings.assert_not_called()
if focused_user:
js_config.set_focused_user.assert_called_once_with(focused_user)
else:
js_config.set_focused_user.assert_not_called()
def test_get_document_url(self, plugin, pyramid_request):
assert not plugin.get_document_url(
pyramid_request, sentinel.assignment, sentinel.historical_assignment
)
@pytest.mark.parametrize(
"url,expected",
(
(None, None),
# URL encoded paths
(
"https%3A%2F%2Fexample.com%2Fpath%3Fparam%3Dvalue",
"https://example.com/path?param=value",
),
(
"http%3A%2F%2Fexample.com%2Fpath%3Fparam%3Dvalue",
"http://example.com/path?param=value",
),
(
"HTTP%3a%2F%2Fexample.com%2Fpath%3Fparam%3Dvalue",
"HTTP://example.com/path?param=value",
),
(
"canvas%3A%2F%2Ffile%2Fcourse_id%2FCOURSE_ID%2Ffile_if%2FFILE_ID",
"canvas://file/course_id/COURSE_ID/file_if/FILE_ID",
),
(
"jstor%3A%2F%2FDOI",
"jstor://DOI",
),
(
"vitalsource%3A%2F%2Fbook%2FbookID%2FL-999-70469%2Fcfi%2F%2F6%2F8",
"vitalsource://book/bookID/L-999-70469/cfi//6/8",
),
# Non-URL encoded paths
(
"https://example.com/path?param=value",
"https://example.com/path?param=value",
),
(
"http://example.com/path?param=%25foo%25",
"http://example.com/path?param=%25foo%25",
),
(
"canvas://file/course_id/COURSE_ID/file_if/FILE_ID",
"canvas://file/course_id/COURSE_ID/file_if/FILE_ID",
),
("jstor://DOI", "jstor://DOI"),
(
"vitalsource://book/bookID/L-999-70469/cfi//6/8",
"vitalsource://book/bookID/L-999-70469/cfi//6/8",
),
# Unknown but valid (RFC3986) schemas get decoded
(
"j5-tor.r%3A%2F%2FDOI",
"j5-tor.r://DOI",
),
# Invalid schemas don't get decoded
(
"1stor%3A%2F%2FDOI",
"1stor%3A%2F%2FDOI",
),
),
)
def METHOD_NAME(
self, plugin, pyramid_request, url, expected
):
if url:
pyramid_request.params["url"] = url
assert (
plugin.get_document_url(
pyramid_request, sentinel.assignment, sentinel.historical_assignment
)
== expected
)
def test_get_document_url_with_canvas_files(self, plugin, pyramid_request):
pyramid_request.params["canvas_file"] = "any"
pyramid_request.params["file_id"] = "FILE_ID"
pyramid_request.lti_params["custom_canvas_course_id"] = "COURSE_ID"
assert (
plugin.get_document_url(
pyramid_request, sentinel.assignment, sentinel.historical_assignment
)
== "canvas://file/course/COURSE_ID/file_id/FILE_ID"
)
@pytest.mark.parametrize("cfi", (None, sentinel.cfi))
def test_get_document_url_with_legacy_vitalsource_book(
self, plugin, pyramid_request, VSBookLocation, cfi
):
pyramid_request.params["vitalsource_book"] = "any"
pyramid_request.params["book_id"] = sentinel.book_id
if cfi:
pyramid_request.params["cfi"] = cfi
result = plugin.get_document_url(
pyramid_request, sentinel.assignment, sentinel.historical_assignment
)
VSBookLocation.assert_called_once_with(book_id=sentinel.book_id, cfi=cfi)
assert result == VSBookLocation.return_value.document_url
def test_get_deeplinking_launch_url(self, plugin, pyramid_request):
config = {"param": "value"}
assert (
plugin.get_deeplinking_launch_url(pyramid_request, config)
== "http://example.com/lti_launches?param=value"
)
def test_factory(self, pyramid_request):
plugin = CanvasMiscPlugin.factory(sentinel.context, pyramid_request)
assert isinstance(plugin, CanvasMiscPlugin)
@pytest.fixture
def plugin(self):
return CanvasMiscPlugin()
@pytest.fixture
def js_config(self):
return create_autospec(JSConfig, spec_set=True, instance=True)
@pytest.fixture
def VSBookLocation(self, patch):
return patch("lms.product.canvas._plugin.misc.VSBookLocation")
|
2,557 |
test step1b
|
#
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.preprocessing.text.stem.porter_stemmer import PorterStemmer
from cuml.internals.safe_imports import gpu_only_import
cudf = gpu_only_import("cudf")
def test_step1a():
word_str_ser = cudf.Series(
["caresses", "ponies", "ties", "caress", "cats"]
)
st = PorterStemmer()
got = st._step1a(word_str_ser)
expect = ["caress", "poni", "tie", "caress", "cat"]
assert list(got.to_pandas().values) == expect
# mask test
mask = cudf.Series([True, False, True, True, False])
expect = ["caress", "ponies", "tie", "caress", "cats"]
got = st._step1a(word_str_ser, mask)
assert list(got.to_pandas().values) == expect
def METHOD_NAME():
word_str_ser_ls = [
"feed",
"agreed",
"plastered",
"bled",
"motoring",
"sing",
"conflated",
"troubled",
"sized",
"hopping",
"tanned",
"falling",
"hissing",
"fizzed",
"failing",
"filing",
]
expected = [
"feed",
"agree",
"plaster",
"bled",
"motor",
"sing",
"conflate",
"trouble",
"size",
"hop",
"tan",
"fall",
"hiss",
"fizz",
"fail",
"file",
]
word_str_ser = cudf.Series(word_str_ser_ls)
st = PorterStemmer()
got = st._step1b(word_str_ser)
assert list(got.to_pandas().values) == expected
# mask test
expected = expected[:-3] + ["fizzed", "failing", "filing"]
mask = cudf.Series([True] * (len(expected) - 3) + [False] * 3)
got = st._step1b(word_str_ser, mask)
assert list(got.to_pandas().values) == expected
def test_step1c():
word_str_ser_ls = ["happy", "sky", "enjoy", "boy", "toy", "y"]
word_str_ser = cudf.Series(word_str_ser_ls)
st = PorterStemmer()
got = st._step1c(word_str_ser)
expect = ["happi", "ski", "enjoy", "boy", "toy", "y"]
assert list(got.to_pandas().values) == expect
# mask test
expect = ["happi", "sky", "enjoy", "boy", "toy", "y"]
mask = cudf.Series([True, False, False, False, False, True])
got = st._step1c(word_str_ser, mask)
assert list(got.to_pandas().values) == expect
def test_step2():
word_str_ser_ls = [
"relational",
"conditional",
"rational",
"valenci",
"hesitanci",
"digitizer",
"conformabli",
"radicalli",
"differentli",
"vileli",
"analogousli",
"vietnamization",
"predication",
"operator",
"feudalism",
"decisiveness",
"hopefulness",
"callousness",
"formaliti",
"sensitiviti",
"sensibiliti",
]
expect = [
"relate",
"condition",
"rational",
"valence",
"hesitance",
"digitize",
"conformable",
"radical",
"different",
"vile",
"analogous",
"vietnamize",
"predicate",
"operate",
"feudal",
"decisive",
"hopeful",
"callous",
"formal",
"sensitive",
"sensible",
]
word_str_ser = cudf.Series(word_str_ser_ls)
st = PorterStemmer()
got = st._step2(word_str_ser)
assert list(got.to_pandas().values) == expect
# mask test
expect = expect[:-3] + ["formaliti", "sensitiviti", "sensibiliti"]
mask = cudf.Series([True] * (len(expect) - 3) + [False] * 3)
got = st._step2(word_str_ser, mask)
assert list(got.to_pandas().values) == expect
def test_step3():
word_str_ser_ls = [
"triplicate",
"formative",
"formalize",
"electriciti",
"electriciti",
"hopeful",
"goodness",
]
expect = [
"triplic",
"form",
"formal",
"electric",
"electric",
"hope",
"good",
]
word_str_ser = cudf.Series(word_str_ser_ls)
st = PorterStemmer()
got = st._step3(word_str_ser)
assert list(got.to_pandas().values) == expect
# mask test
expect = expect[:-2] + ["hopeful", "goodness"]
mask = cudf.Series([True] * (len(expect) - 2) + [False] * 2)
got = st._step3(word_str_ser, mask)
assert list(got.to_pandas().values) == expect
def test_step4():
word_str_ser_ls = [
"revival",
"allowance",
"inference",
"airliner",
"gyroscopic",
"adjustable",
"defensible",
"irritant",
"replacement",
"adjustment",
"dependent",
"adoption",
"homologou",
"communism",
"activate",
"angulariti",
"homologous",
"effective",
"bowdlerize",
]
expect = [
"reviv",
"allow",
"infer",
"airlin",
"gyroscop",
"adjust",
"defens",
"irrit",
"replac",
"adjust",
"depend",
"adopt",
"homolog",
"commun",
"activ",
"angular",
"homolog",
"effect",
"bowdler",
]
word_str_ser = cudf.Series(word_str_ser_ls)
st = PorterStemmer()
got = st._step4(word_str_ser)
assert list(got.to_pandas().values) == expect
# mask test
expect = expect[:-2] + ["effective", "bowdlerize"]
mask = cudf.Series([True] * (len(expect) - 2) + [False] * 2)
got = st._step4(word_str_ser, mask)
assert list(got.to_pandas().values) == expect
def test_step5a():
word_str_ser_ls = ["probate", "rate", "cease", "ones"]
word_str_ser = cudf.Series(word_str_ser_ls)
expect = ["probat", "rate", "ceas", "ones"]
st = PorterStemmer()
got = st._step5a(word_str_ser)
assert list(got.to_pandas().values) == expect
# mask test
expect = expect[:-2] + ["cease", "ones"]
mask = cudf.Series([True] * (len(expect) - 2) + [False] * 2)
got = st._step5a(word_str_ser, mask)
assert list(got.to_pandas().values) == expect
def test_step5b():
word_str_ser_ls = ["controll", "roll"]
word_str_ser = cudf.Series(word_str_ser_ls)
expect = ["control", "roll"]
st = PorterStemmer()
got = st._step5b(word_str_ser)
assert list(got.to_pandas().values) == expect
# mask test
expect = ["controll", "roll"]
mask = cudf.Series([False, True])
got = st._step5b(word_str_ser, mask)
assert list(got.to_pandas().values) == expect
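# Illustrative note (not part of the original test file): the checks above
# exercise the private _step* helpers directly. End-to-end stemming would go
# through the stemmer's public entry point; the call below is only a sketch and
# assumes a public stem() method mirroring the NLTK-style API.
#
#   st = PorterStemmer()
#   print(st.stem(cudf.Series(["running", "flies", "happily"])))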
|
2,558 |
create n user
|
#!/usr/bin/env python3
import subprocess
import random
import re
alphabet = 'azertyuiopqsdfghjklmwxcvbnAZERTYUIOPQSDFGHJKLMWXCVBN123456789-_'
a_length = len(alphabet)
""" ACL support attribute """
ACL4_SUPPORT_ALLOW_ACL = 0x00000001
ACL4_SUPPORT_DENY_ACL = 0x00000002
ACL4_SUPPORT_AUDIT_ACL = 0x00000004
ACL4_SUPPORT_ALARM_ACL = 0x00000008
class RandomGen(object):
""" List of ACE possible who fields """
ace_who=["OWNER@","GROUP@","EVERYONE@","ANONYMOUS@","AUTHENTICATED@"]
""" List of GID than can be used to do the tests """
gList=[]
gListSize = len(gList)
uList = []
uListSize = len(uList)
fList=[]
fListSize = len(fList)
""" Create a user in available groups to do the tests """
def createUser(self,username):
group = self.gList[random.randint(0,len(self.gList)-1)][0]
opts = "-g" + group + " -p" + "1pilot" + " -m " + username
u = subprocess.getoutput('/usr/sbin/useradd '+ opts)
if u != "":
print("create user " + username + "failed" + u)
def createFile(self,path,n):
for i in range(n):
fName = 'file' + str(i)
u = subprocess.getoutput('touch ' + path + '/'+ fName)
self.fList.append(fName)
def createGroup(self, grpname, gid):
u = subprocess.getoutput('/usr/sbin/groupadd -g' + gid + " " + grpname)
if u != "":
print(u)
def createNGroup(self, n):
for i in range(n):
gName = 'grp' + str(i)
gid = str(500+i)
self.createGroup(gName, gid)
""" Random creation of n user """
def METHOD_NAME(self,n):
for i in range(n):
userName= "user" + str(i)
self.createUser(userName)
""" clean all users created to do the tests """
def cleanUsers(self):
for name in self.uList:
u = subprocess.getoutput('/usr/sbin/userdel -r '+ name)
self.uList = []
""" clean all users created to do the tests """
def cleanGroups(self):
for name in self.gList:
u = subprocess.getoutput('/usr/sbin/groupdel '+ name[0])
self.gList = []
""" Retrieve the list of user from /etc/passwd file """
def getUserList(self):
f = open('/etc/passwd','r')
lines = f.readlines()
for line in lines:
splitedline = line.split(':')
userName = splitedline[0]
gid = splitedline[3]
# TO FIX: verify that the group is OK (in the right range)
NameOK = re.match("user",userName)
# We keep only usernames starting with "user"
if NameOK != None:
self.uList.append(userName)
f.close()
def getFileList(self,path):
u = subprocess.getoutput('ls ' + path)
tmp = u.split('\n')
        for i in range(len(tmp)):
NameOK = re.match("file",tmp[i])
if NameOK != None:
self.fList.append(tmp[i])
def getNUserList(self,nb):
f = open('/etc/passwd','r')
lines = f.readlines()
n = 0
for line in lines:
            splitedline = line.split(':')
userName = splitedline[0]
gid = splitedline[3]
# TO FIX: verify that the group is OK (in the right range)
NameOK = re.match("user",userName)
# We keep only usernames starting with "user"
if NameOK != None:
self.uList.append(userName)
n = n+1
if n==nb:
                    break
f.close()
""" Get group list """
def getGroupList(self):
f = open('/etc/group','r')
lines = f.readlines()
for line in lines:
            splitedline = line.split(':')
groupName = splitedline[0]
gid = splitedline[2]
NameOK = re.match("grp",groupName)
if NameOK != None:
self.gList.append([groupName,gid])
f.close()
""" Get a list of n group """
def getNGroupList(self,nb):
f = open('/etc/group','r')
lines = f.readlines()
n = 0
for line in lines:
            splitedline = line.split(':')
groupName = splitedline[0]
gid = splitedline[2]
NameOK = re.match("grp",groupName)
if NameOK != None:
self.gList.append([groupName,gid])
n = n+1
if n==nb:
                    break
f.close()
def printUserList(self):
print(self.uList)
def printGroupList(self):
print(self.gList)
""" Create a random name of random length """
def createOneNameRandomLength(self,maxlength):
outputString =""
l=random.randint(0,maxlength)
for i in range(l):
a = random.randint(0,a_length-1)
outputString =outputString + alphabet[a]
return outputString
""" Create a random name of fixed length """
    def createOneName(self,length):
        outputString =""
        for i in range(length):
a = random.randint(0,a_length-1)
outputString = outputString + alphabet[a]
return outputString
""" Create Random User List with fixed length user names """
def createRandomUserList(self,listlength,usernamelength):
userlist = []
for i in range(listlength):
            user = self.createOneName(usernamelength)
userlist.append(user)
return userlist
""" Create Random ACE for a file and a given usr """
def createRandomACE(self,user):
type = ace_type[random.randint(0,len(ace_type))]
flag = ace_flags[random.randint(0,len(ace_flags))]
mask = ace_mask[random.randint(0,len(ace_mask))]
who = ace_who[random.randint(0,len(ace_who))]
return nfsace4(type, flag, mask, who)
""" Create Random ACL for a file with a fixed number a entries """
def createRandomACL(self,acl_size):
acl = []
userList = uList
userListSize = uListSize
for i in range(acl_size):
n = random.randint(0,userListSize-1)
usr = userList.pop(n)
newace = createRandomACE(usr)
acl.append(newace)
return acl
""" Return a mode string like 'xwr' or 'x' """
def createRandomMode(self):
out_str = ""
while (out_str == ""):
if random.randint(0,1) == 1:
out_str += 'x'
if random.randint(0,1) == 1:
out_str += 'w'
if random.randint(0,1) == 1:
out_str += 'r'
return out_str
""" Create a random ACL operation (delete / remove / modify on user / group ) """
def randomOp(self,path):
a = random.randint(1,4)
mode = self.createRandomMode()
file = self.fList[random.randint(0,len(self.fList)-1)]
if a == 1: # creation/modification
user = self.uList[random.randint(0,len(self.uList)-1)]
u = subprocess.getoutput('setfacl -m u:' + user + ':' + mode + " " + path + "/" + file)
if a == 2: # with group
group = self.gList[random.randint(0,len(self.gList)-1)][0]
u = subprocess.getoutput('setfacl -m g:' + group + ':' + mode + " " + path + "/" + file)
        if a == 3: # deletion
user = self.uList[random.randint(0,len(self.uList)-1)]
u = subprocess.getoutput('setfacl -x u:' + user + " " + path + "/" + file)
if a == 4: # with group
group = self.gList[random.randint(0,len(self.gList)-1)][0]
u = subprocess.getoutput('setfacl -x g:' + group + " " + path + "/" + file)
# request on a unexisting group
'''if a == 5:
group = self.createOneNameRandomLength(16)
print 'setfacl -x g:' + group + " " + path + "/" + file
u = commands.getoutput('setfacl -x g:' + group + " " + path + "/" + file)
if a == 6:
user = self.createOneNameRandomLength(16)
u = commands.getoutput('setfacl -x u:' + user + " " + path + "/" + file)
if a == 7: # creation/modification
user = self.createOneNameRandomLength(16)
u = commands.getoutput('setfacl -m u:' + user + ':' + mode + " " + path + "/" + file)
if a == 8: # with group
group = self.createOneNameRandomLength(16)
u = commands.getoutput('setfacl -m g:' + group + ':' + mode + " " + path + "/" + file)
if a == 9: #Copying the ACL of one file to another
file2 = self.fList[random.randint(0,len(self.fList)-1)]
u = commands.getoutput('getfacl ' + path + "/" + file + "| setfacl --set-file=- " + path + "/" + file2)
if u!="":
print u'''
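# Illustrative usage sketch (not part of the original script): a rough driver
# for the RandomGen helper above. The path and counts are invented, the target
# directory is assumed to exist already, and creating users/groups requires
# root privileges. METHOD_NAME refers to the elided bulk user-creation method.
if __name__ == "__main__":
    gen = RandomGen()
    gen.createNGroup(3)      # create grp0..grp2
    gen.getGroupList()       # load them into gList
    gen.METHOD_NAME(5)       # create user0..user4 in random groups
    gen.getNUserList(5)      # load them into uList
    gen.createFile("/tmp/acl_test", 10)
    for _ in range(20):
        gen.randomOp("/tmp/acl_test")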
|
2,559 |
hash code
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from dataclasses import dataclass
from plc4py.api.messages.PlcMessage import PlcMessage
from plc4py.protocols.modbus.readwrite.ModbusPDU import ModbusPDU
from plc4py.protocols.modbus.readwrite.ModbusPDU import ModbusPDUBuilder
from plc4py.spi.generation.ReadBuffer import ReadBuffer
from plc4py.spi.generation.WriteBuffer import WriteBuffer
from typing import List
import math
@dataclass
class ModbusPDUReadWriteMultipleHoldingRegistersRequest(PlcMessage, ModbusPDU):
read_starting_address: int
read_quantity: int
write_starting_address: int
write_quantity: int
value: List[int]
# Accessors for discriminator values.
error_flag: bool = False
function_flag: int = 0x17
response: bool = False
def __post_init__(self):
super().__init__()
def serialize_modbus_pdu_child(self, write_buffer: WriteBuffer):
write_buffer.push_context("ModbusPDUReadWriteMultipleHoldingRegistersRequest")
# Simple Field (readStartingAddress)
write_buffer.write_unsigned_short(
self.read_starting_address, logical_name="readStartingAddress"
)
# Simple Field (readQuantity)
write_buffer.write_unsigned_short(
self.read_quantity, logical_name="readQuantity"
)
# Simple Field (writeStartingAddress)
write_buffer.write_unsigned_short(
self.write_starting_address, logical_name="writeStartingAddress"
)
# Simple Field (writeQuantity)
write_buffer.write_unsigned_short(
self.write_quantity, logical_name="writeQuantity"
)
        # Implicit Field (byte_count) (Used for parsing, but its value is not stored as it's implicitly given by the object's content)
byte_count: int = int(len(self.value))
write_buffer.write_unsigned_byte(byte_count, logical_name="byteCount")
# Array Field (value)
write_buffer.write_byte_array(self.value, logical_name="value")
write_buffer.pop_context("ModbusPDUReadWriteMultipleHoldingRegistersRequest")
def length_in_bytes(self) -> int:
return int(math.ceil(float(self.get_length_in_bits() / 8.0)))
def get_length_in_bits(self) -> int:
length_in_bits: int = super().get_length_in_bits()
_value: ModbusPDUReadWriteMultipleHoldingRegistersRequest = self
# Simple field (readStartingAddress)
length_in_bits += 16
# Simple field (readQuantity)
length_in_bits += 16
# Simple field (writeStartingAddress)
length_in_bits += 16
# Simple field (writeQuantity)
length_in_bits += 16
# Implicit Field (byteCount)
length_in_bits += 8
# Array field
if self.value != None:
length_in_bits += 8 * len(self.value)
return length_in_bits
@staticmethod
def static_parse_builder(read_buffer: ReadBuffer, response: bool):
read_buffer.push_context("ModbusPDUReadWriteMultipleHoldingRegistersRequest")
        read_starting_address = read_simple_field(
            "readStartingAddress", read_unsigned_int
        )
        read_quantity = read_simple_field("readQuantity", read_unsigned_int)
        write_starting_address = read_simple_field(
            "writeStartingAddress", read_unsigned_int
        )
        write_quantity = read_simple_field("writeQuantity", read_unsigned_int)
        byte_count: int = read_implicit_field("byteCount", read_unsigned_short)
        value = read_buffer.read_byte_array("value", int(byte_count))
read_buffer.pop_context("ModbusPDUReadWriteMultipleHoldingRegistersRequest")
# Create the instance
return ModbusPDUReadWriteMultipleHoldingRegistersRequestBuilder(
read_starting_address,
read_quantity,
write_starting_address,
write_quantity,
value,
)
def equals(self, o: object) -> bool:
if self == o:
return True
if not isinstance(o, ModbusPDUReadWriteMultipleHoldingRegistersRequest):
return False
        that: ModbusPDUReadWriteMultipleHoldingRegistersRequest = o
return (
(self.read_starting_address == that.read_starting_address)
and (self.read_quantity == that.read_quantity)
and (self.write_starting_address == that.write_starting_address)
and (self.write_quantity == that.write_quantity)
and (self.value == that.value)
and super().equals(that)
and True
)
def METHOD_NAME(self) -> int:
return hash(self)
def __str__(self) -> str:
write_buffer_box_based: WriteBufferBoxBased = WriteBufferBoxBased(True, True)
try:
write_buffer_box_based.writeSerializable(self)
except SerializationException as e:
raise RuntimeException(e)
return "\n" + str(write_buffer_box_based.get_box()) + "\n"
@dataclass
class ModbusPDUReadWriteMultipleHoldingRegistersRequestBuilder(ModbusPDUBuilder):
    read_starting_address: int
    read_quantity: int
    write_starting_address: int
    write_quantity: int
    value: List[int]
def __post_init__(self):
pass
def build(
self,
) -> ModbusPDUReadWriteMultipleHoldingRegistersRequest:
modbus_pdu_read_write_multiple_holding_registers_request: ModbusPDUReadWriteMultipleHoldingRegistersRequest = ModbusPDUReadWriteMultipleHoldingRegistersRequest(
self.read_starting_address,
self.read_quantity,
self.write_starting_address,
self.write_quantity,
self.value,
)
return modbus_pdu_read_write_multiple_holding_registers_request
|
2,560 |
test invalid json data
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from twisted.internet import defer
from twisted.trial import unittest
from buildbot.reporters.zulip import ZulipStatusPush
from buildbot.test.fake import fakemaster
from buildbot.test.fake import httpclientservice as fakehttpclientservice
from buildbot.test.reactor import TestReactorMixin
from buildbot.test.util.config import ConfigErrorsMixin
from buildbot.test.util.logging import LoggingMixin
from buildbot.test.util.reporter import ReporterTestMixin
class TestZulipStatusPush(unittest.TestCase, ReporterTestMixin, LoggingMixin, ConfigErrorsMixin,
TestReactorMixin):
def setUp(self):
self.setup_test_reactor()
self.setup_reporter_test()
self.master = fakemaster.make_master(
testcase=self, wantData=True, wantDb=True, wantMq=True)
@defer.inlineCallbacks
def tearDown(self):
if self.master.running:
yield self.master.stopService()
@defer.inlineCallbacks
def setupZulipStatusPush(self, endpoint="http://example.com", token="123", stream=None):
self.sp = ZulipStatusPush(
endpoint=endpoint, token=token, stream=stream)
self._http = yield fakehttpclientservice.HTTPClientService.getService(
self.master, self, endpoint, debug=None, verify=None)
yield self.sp.setServiceParent(self.master)
yield self.master.startService()
@defer.inlineCallbacks
def test_build_started(self):
yield self.setupZulipStatusPush(stream="xyz")
build = yield self.insert_build_new()
self._http.expect(
'post',
'/api/v1/external/buildbot?api_key=123&stream=xyz',
json={
"event": 'new',
"buildid": 20,
"buildername": "Builder0",
"url": "http://localhost:8080/#/builders/79/builds/0",
"project": "testProject",
"timestamp": 10000001
})
yield self.sp._got_event(('builds', 20, 'new'), build)
@defer.inlineCallbacks
def test_build_finished(self):
yield self.setupZulipStatusPush(stream="xyz")
build = yield self.insert_build_finished()
self._http.expect(
'post',
'/api/v1/external/buildbot?api_key=123&stream=xyz',
json={
"event": "finished",
"buildid": 20,
"buildername": "Builder0",
"url": "http://localhost:8080/#/builders/79/builds/0",
"project": "testProject",
"timestamp": 10000005,
"results": 0
})
yield self.sp._got_event(('builds', 20, 'finished'), build)
@defer.inlineCallbacks
def test_stream_none(self):
yield self.setupZulipStatusPush(stream=None)
build = yield self.insert_build_finished()
self._http.expect(
'post',
'/api/v1/external/buildbot?api_key=123',
json={
"event": "finished",
"buildid": 20,
"buildername": "Builder0",
"url": "http://localhost:8080/#/builders/79/builds/0",
"project": "testProject",
"timestamp": 10000005,
"results": 0
})
yield self.sp._got_event(('builds', 20, 'finished'), build)
def test_endpoint_string(self):
with self.assertRaisesConfigError(
"Endpoint must be a string"):
ZulipStatusPush(endpoint=1234, token="abcd")
def test_token_string(self):
with self.assertRaisesConfigError(
"Token must be a string"):
ZulipStatusPush(endpoint="http://example.com", token=1234)
@defer.inlineCallbacks
def METHOD_NAME(self):
yield self.setupZulipStatusPush(stream="xyz")
build = yield self.insert_build_new()
self._http.expect(
'post',
'/api/v1/external/buildbot?api_key=123&stream=xyz',
json={
"event": 'new',
"buildid": 20,
"buildername": "Builder0",
"url": "http://localhost:8080/#/builders/79/builds/0",
"project": "testProject",
"timestamp": 10000001
}, code=500)
self.setUpLogging()
yield self.sp._got_event(('builds', 20, 'new'), build)
self.assertLogged('500: Error pushing build status to Zulip')
@defer.inlineCallbacks
def test_invalid_url(self):
yield self.setupZulipStatusPush(stream="xyz")
build = yield self.insert_build_new()
self._http.expect(
'post',
'/api/v1/external/buildbot?api_key=123&stream=xyz',
json={
"event": 'new',
"buildid": 20,
"buildername": "Builder0",
"url": "http://localhost:8080/#/builders/79/builds/0",
"project": "testProject",
"timestamp": 10000001
}, code=404)
self.setUpLogging()
yield self.sp._got_event(('builds', 20, 'new'), build)
self.assertLogged('404: Error pushing build status to Zulip')
@defer.inlineCallbacks
def test_invalid_token(self):
yield self.setupZulipStatusPush(stream="xyz")
build = yield self.insert_build_new()
self._http.expect(
'post',
'/api/v1/external/buildbot?api_key=123&stream=xyz',
json={
"event": 'new',
"buildid": 20,
"buildername": "Builder0",
"url": "http://localhost:8080/#/builders/79/builds/0",
"project": "testProject",
"timestamp": 10000001
}, code=401, content_json={"result": "error", "msg": "Invalid API key",
"code": "INVALID_API_KEY"})
self.setUpLogging()
yield self.sp._got_event(('builds', 20, 'new'), build)
self.assertLogged('401: Error pushing build status to Zulip')
|
2,561 |
save model
|
#
# This sets up how models are displayed
# in the web admin interface.
#
from django import forms
from django.conf import settings
from django.contrib import admin
from evennia.scripts.models import ScriptDB
from . import utils as adminutils
from .attributes import AttributeInline
from .tags import TagInline
class ScriptForm(forms.ModelForm):
db_key = forms.CharField(
label="Name/Key", help_text="Script identifier, shown in listings etc."
)
db_typeclass_path = forms.ChoiceField(
label="Typeclass",
help_text="This is the Python-path to the class implementing the actual script functionality. "
"<BR>If your custom class is not found here, it may not be imported into Evennia yet.",
choices=lambda: adminutils.get_and_load_typeclasses(
parent=ScriptDB, excluded_parents=["evennia.prototypes.prototypes.DbPrototype"]
),
)
db_lock_storage = forms.CharField(
label="Locks",
required=False,
widget=forms.Textarea(attrs={"cols": "100", "rows": "2"}),
help_text="In-game lock definition string. If not given, defaults will be used. "
"This string should be on the form "
"<i>type:lockfunction(args);type2:lockfunction2(args);...",
)
db_interval = forms.IntegerField(
label="Repeat Interval",
help_text="Optional timer component.<BR>How often to call the Script's<BR>`at_repeat` hook, in seconds."
"<BR>Set to 0 to disable.",
)
db_repeats = forms.IntegerField(
help_text="Only repeat this many times." "<BR>Set to 0 to run indefinitely."
)
db_start_delay = forms.BooleanField(help_text="Wait <B>Interval</B> seconds before first call.")
db_persistent = forms.BooleanField(
label="Survives reboot", help_text="If unset, a server reboot will remove the timer."
)
class ScriptTagInline(TagInline):
"""
Inline script tags.
"""
model = ScriptDB.db_tags.through
related_field = "scriptdb"
class ScriptAttributeInline(AttributeInline):
"""
Inline attribute tags.
"""
model = ScriptDB.db_attributes.through
related_field = "scriptdb"
@admin.register(ScriptDB)
class ScriptAdmin(admin.ModelAdmin):
"""
Displaying the main Script page.
"""
list_display = (
"id",
"db_key",
"db_typeclass_path",
"db_obj",
"db_interval",
"db_repeats",
"db_persistent",
"db_date_created",
)
list_display_links = ("id", "db_key")
ordering = ["-db_date_created", "-id"]
search_fields = ["=id", "^db_key", "db_typeclass_path"]
readonly_fields = ["serialized_string"]
form = ScriptForm
save_as = True
save_on_top = True
list_select_related = True
view_on_site = False
raw_id_fields = ("db_obj",)
fieldsets = (
(
None,
{
"fields": (
("db_key", "db_typeclass_path"),
("db_interval", "db_repeats", "db_start_delay", "db_persistent"),
"db_obj",
"db_lock_storage",
"serialized_string",
)
},
),
)
inlines = [ScriptTagInline, ScriptAttributeInline]
def serialized_string(self, obj):
"""
Get the serialized version of the object.
"""
from evennia.utils import dbserialize
return str(dbserialize.pack_dbobj(obj))
serialized_string.help_text = (
"Copy & paste this string into an Attribute's `value` field to store this script there."
)
def get_form(self, request, obj=None, **kwargs):
"""
Overrides help texts.
"""
help_texts = kwargs.get("help_texts", {})
help_texts["serialized_string"] = self.serialized_string.help_text
kwargs["help_texts"] = help_texts
return super().get_form(request, obj, **kwargs)
def METHOD_NAME(self, request, obj, form, change):
"""
Model-save hook.
Args:
request (Request): Incoming request.
obj (Object): Database object.
form (Form): Form instance.
change (bool): If this is a change or a new object.
"""
obj.save()
if not change:
# adding a new object
# have to call init with typeclass passed to it
obj.set_class_from_typeclass(typeclass_path=obj.db_typeclass_path)
|
2,562 |
analyze python coverage
|
"""Analyze code coverage data to determine which integration test targets provide coverage for each arc or line."""
from __future__ import annotations
import os
import typing as t
from .....encoding import (
to_text,
)
from .....data import (
data_context,
)
from .....util_common import (
ResultType,
)
from .....executor import (
Delegate,
)
from .....provisioning import (
prepare_profiles,
HostState,
)
from ... import (
enumerate_powershell_lines,
enumerate_python_arcs,
get_collection_path_regexes,
get_powershell_coverage_files,
get_python_coverage_files,
get_python_modules,
initialize_coverage,
PathChecker,
)
from . import (
CoverageAnalyzeTargetsConfig,
get_target_index,
make_report,
write_report,
)
from . import (
Arcs,
Lines,
TargetIndexes,
)
class CoverageAnalyzeTargetsGenerateConfig(CoverageAnalyzeTargetsConfig):
"""Configuration for the `coverage analyze targets generate` command."""
def __init__(self, args: t.Any) -> None:
super().__init__(args)
self.input_dir: str = args.input_dir or ResultType.COVERAGE.path
self.output_file: str = args.output_file
def command_coverage_analyze_targets_generate(args: CoverageAnalyzeTargetsGenerateConfig) -> None:
"""Analyze code coverage data to determine which integration test targets provide coverage for each arc or line."""
host_state = prepare_profiles(args) # coverage analyze targets generate
if args.delegate:
raise Delegate(host_state)
root = data_context().content.root
target_indexes: TargetIndexes = {}
arcs = dict((os.path.relpath(path, root), data) for path, data in METHOD_NAME(args, host_state, args.input_dir, target_indexes).items())
lines = dict((os.path.relpath(path, root), data) for path, data in analyze_powershell_coverage(args, args.input_dir, target_indexes).items())
report = make_report(target_indexes, arcs, lines)
write_report(args, report, args.output_file)
def METHOD_NAME(
args: CoverageAnalyzeTargetsGenerateConfig,
host_state: HostState,
path: str,
target_indexes: TargetIndexes,
) -> Arcs:
"""Analyze Python code coverage."""
results: Arcs = {}
collection_search_re, collection_sub_re = get_collection_path_regexes()
modules = get_python_modules()
python_files = get_python_coverage_files(path)
coverage = initialize_coverage(args, host_state)
for python_file in python_files:
if not is_integration_coverage_file(python_file):
continue
target_name = get_target_name(python_file)
target_index = get_target_index(target_name, target_indexes)
for filename, covered_arcs in enumerate_python_arcs(python_file, coverage, modules, collection_search_re, collection_sub_re):
arcs = results.setdefault(filename, {})
for covered_arc in covered_arcs:
arc = arcs.setdefault(covered_arc, set())
arc.add(target_index)
prune_invalid_filenames(args, results, collection_search_re=collection_search_re)
return results
def analyze_powershell_coverage(
args: CoverageAnalyzeTargetsGenerateConfig,
path: str,
target_indexes: TargetIndexes,
) -> Lines:
"""Analyze PowerShell code coverage"""
results: Lines = {}
collection_search_re, collection_sub_re = get_collection_path_regexes()
powershell_files = get_powershell_coverage_files(path)
for powershell_file in powershell_files:
if not is_integration_coverage_file(powershell_file):
continue
target_name = get_target_name(powershell_file)
target_index = get_target_index(target_name, target_indexes)
for filename, hits in enumerate_powershell_lines(powershell_file, collection_search_re, collection_sub_re):
lines = results.setdefault(filename, {})
for covered_line in hits:
line = lines.setdefault(covered_line, set())
line.add(target_index)
prune_invalid_filenames(args, results)
return results
def prune_invalid_filenames(
args: CoverageAnalyzeTargetsGenerateConfig,
results: dict[str, t.Any],
collection_search_re: t.Optional[t.Pattern] = None,
) -> None:
"""Remove invalid filenames from the given result set."""
path_checker = PathChecker(args, collection_search_re)
for path in list(results.keys()):
if not path_checker.check_path(path):
del results[path]
def get_target_name(path: str) -> str:
"""Extract the test target name from the given coverage path."""
return to_text(os.path.basename(path).split('=')[1])
def is_integration_coverage_file(path: str) -> bool:
"""Returns True if the coverage file came from integration tests, otherwise False."""
return os.path.basename(path).split('=')[0] in ('integration', 'windows-integration', 'network-integration')
|
2,563 |
bad message
|
#!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test message sending before handshake completion.
A node should never send anything other than VERSION/VERACK/REJECT until it's
received a VERACK.
This test connects to a node and sends it a few messages, trying to entice it
into sending us something it shouldn't.
"""
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
banscore = 10
class CLazyNode(P2PInterface):
def __init__(self):
super().__init__()
self.unexpected_msg = False
self.ever_connected = False
def METHOD_NAME(self, message):
self.unexpected_msg = True
self.log.info("should not have received message: %s" % message.command)
def on_open(self):
self.ever_connected = True
def on_version(self, message): self.METHOD_NAME(message)
def on_verack(self, message): self.METHOD_NAME(message)
def on_reject(self, message): self.METHOD_NAME(message)
def on_inv(self, message): self.METHOD_NAME(message)
def on_addr(self, message): self.METHOD_NAME(message)
def on_getdata(self, message): self.METHOD_NAME(message)
def on_getblocks(self, message): self.METHOD_NAME(message)
def on_tx(self, message): self.METHOD_NAME(message)
def on_block(self, message): self.METHOD_NAME(message)
def on_getaddr(self, message): self.METHOD_NAME(message)
def on_headers(self, message): self.METHOD_NAME(message)
def on_getheaders(self, message): self.METHOD_NAME(message)
def on_ping(self, message): self.METHOD_NAME(message)
def on_mempool(self, message): self.METHOD_NAME(message)
def on_pong(self, message): self.METHOD_NAME(message)
def on_sendheaders(self, message): self.METHOD_NAME(message)
def on_sendcmpct(self, message): self.METHOD_NAME(message)
def on_cmpctblock(self, message): self.METHOD_NAME(message)
def on_getblocktxn(self, message): self.METHOD_NAME(message)
def on_blocktxn(self, message): self.METHOD_NAME(message)
# Node that never sends a version. We'll use this to send a bunch of messages
# anyway, and eventually get disconnected.
class CNodeNoVersionBan(CLazyNode):
# send a bunch of veracks without sending a message. This should get us disconnected.
# NOTE: implementation-specific check here. Remove if raptoreumd ban behavior changes
def on_open(self):
super().on_open()
for i in range(banscore):
self.send_message(msg_verack())
def on_reject(self, message): pass
# Node that never sends a version. This one just sits idle and hopes to receive
# any message (it shouldn't!)
class CNodeNoVersionIdle(CLazyNode):
def __init__(self):
super().__init__()
# Node that sends a version but not a verack.
class CNodeNoVerackIdle(CLazyNode):
def __init__(self):
self.version_received = False
super().__init__()
def on_reject(self, message): pass
def on_verack(self, message): pass
# When version is received, don't reply with a verack. Instead, see if the
# node will give us a message that it shouldn't. This is not an exhaustive
# list!
def on_version(self, message):
self.version_received = True
self.send_message(msg_ping())
self.send_message(msg_getaddr())
class P2PLeakTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [['-banscore='+str(banscore)]]
def run_test(self):
no_version_bannode = self.nodes[0].add_p2p_connection(CNodeNoVersionBan(), send_version=False)
no_version_idlenode = self.nodes[0].add_p2p_connection(CNodeNoVersionIdle(), send_version=False)
no_verack_idlenode = self.nodes[0].add_p2p_connection(CNodeNoVerackIdle())
network_thread_start()
wait_until(lambda: no_version_bannode.ever_connected, timeout=10, lock=mininode_lock)
wait_until(lambda: no_version_idlenode.ever_connected, timeout=10, lock=mininode_lock)
wait_until(lambda: no_verack_idlenode.version_received, timeout=10, lock=mininode_lock)
# Mine a block and make sure that it's not sent to the connected nodes
self.nodes[0].generate(1)
#Give the node enough time to possibly leak out a message
time.sleep(5)
#This node should have been banned
assert not no_version_bannode.is_connected
self.nodes[0].disconnect_p2ps()
# Wait until all connections are closed
wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 0)
# Make sure no unexpected messages came in
assert(no_version_bannode.unexpected_msg == False)
assert(no_version_idlenode.unexpected_msg == False)
assert(no_verack_idlenode.unexpected_msg == False)
if __name__ == '__main__':
P2PLeakTest().main()
|
2,564 |
cookies
|
"""Code to run before and after certain events during testing."""
import pymongo
import requests
from behave.model import Step
from behave.runner import Context
JSON_CONTENT_TYPE = "application/json"
def before_all(context: Context) -> None: # noqa: C901
"""Create shortcuts to send requests to the server API."""
timeout = 20 # Make timeout long enough to not time out when generating the PDF.
def METHOD_NAME() -> dict[str, str]:
"""Return the cookies."""
return {"session_id": context.session_id} if context.session_id else {}
def api_url(api: str) -> str:
"""Return the API URL."""
return f"{context.base_api_url}/{api}"
def get(api: str, headers: dict[str, str] | None = None) -> requests.Response | dict | list:
"""Get the resource."""
url = api_url(api)
for attribute in ("report_date", "min_report_date"):
if value := getattr(context, attribute):
sep = "&" if "?" in url else "?"
url += f"{sep}{attribute}={value}"
context.response = response = requests.get(url, headers=headers, METHOD_NAME=METHOD_NAME(), timeout=timeout)
return response.json() if response.headers.get("Content-Type") == JSON_CONTENT_TYPE else response
def post(api: str, json: dict | list | None = None) -> requests.Response | dict | list:
"""Post the resource."""
url = api_url(api)
response = requests.post(url, json=json, METHOD_NAME=METHOD_NAME(), timeout=timeout)
context.post_response = context.response = response
if not response.ok:
return response
if "session_id" in response.METHOD_NAME:
context.session_id = response.METHOD_NAME["session_id"]
return response.json() if response.headers.get("Content-Type") == JSON_CONTENT_TYPE else response
def put(api: str, json: dict | list | None = None) -> requests.Response | dict | list:
"""Post the resource."""
url = api_url(api)
response = requests.put(url, json=json, METHOD_NAME=METHOD_NAME(), timeout=timeout)
context.put_response = context.response = response
# Ignore non-ok responses for now since we don't have testcases where they apply
return response.json() if response.headers.get("Content-Type") == JSON_CONTENT_TYPE else response
def delete(api: str) -> requests.Response | dict | list:
"""Delete the resource."""
context.response = response = requests.delete(api_url(api), METHOD_NAME=METHOD_NAME(), timeout=timeout)
return response.json() if response.headers.get("Content-Type") == JSON_CONTENT_TYPE else response
context.base_api_url = "http://localhost:5001/api/v3"
context.database = pymongo.MongoClient("mongodb://root:root@localhost:27017")["quality_time_db"]
context.session_id = None
context.report_date = None
context.min_report_date = None
    context.response = None # Most recent response
context.post_response = None # Most recent post response
# Create a typed local variable to prevent mypy error: Type cannot be declared in assignment to non-self attribute
uuid: dict[str, str] = {}
context.uuid = uuid # Keep track of the most recent uuid per item type
context.get = get
context.post = post
context.put = put
context.delete = delete
context.public_key = """-----BEGIN PUBLIC KEY-----
MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEApLaktGOguW3bcC0xILmf
ToucM7eYx3oXKSKKg2aX8TNwX6qendovmUw0X6ooM+vcKEqL/h8F26RdmvIxoJLa
uK7BrqW4zDlYtLqmnsVE7rXLAFfgc+r8vxhlAvXGZIMqLd6KM/WTJu6+cxDwNJT7
TVr9Fxy6vP7CxqYrzPFcau/iNZQxvUSp8M7vHgRRsF4Ux8uQk2WqEjJ9gFYF6y/l
2MYGTjHSe2FzdzvpmPdwiSeZU42+zd9hqvjNdhc04rxNKu1xvpQthBY2d497Idkg
5380siuYrFMb46VtL3hdIoOH5934/nBVU35aXDirPcoZazMN2D3BaWULeEcvmKq1
pmUcidkMuTLeiOksl/d3GBT6dvdSVEsHG5rg9SB3XCrA3Fk3R1Dp/b9WHZko+tqx
nivGYzlaMI/gzLCiWSiL4FuJIttiqdZM2xWFTHIdpQXO3jmogV2ouYJ/IoDIyIR9
M9uddlTPkf3y6mSLwtl3tJ6eDk4EoWFKc8q8F0hza5PLQD5P8O7ddLZ5SAVEoeLP
oRo4ZewdU/XOhYKw3Jgpj1GFPwO/wxpKmYmjGR7lzG4uzae4o/3pEBi2KnSlUhC9
Fm+YDdqKwPSXu1L2DfJBISqpc2ua29O1WBQlsFo9QfSuESSRBnwvt4IbIwH5CVMJ
hv23LX3At2kFGKAPC0jM1YUCAwEAAQ==
-----END PUBLIC KEY-----
"""
def before_step(context: Context, step: Step) -> None:
"""Make the step available in the context."""
context.step = step
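# Illustrative sketch (not part of the original environment file): a step
# definition, normally placed in a steps/ module, that would use the request
# shortcuts attached to the context above. The endpoint name and step phrasing
# are invented for demonstration.
#
#   from behave import when, then
#
#   @when("the client requests the server information")
#   def request_server_info(context):
#       context.get("server")
#
#   @then("the response is successful")
#   def check_response(context):
#       assert context.response.ok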
|
2,565 |
on 204
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"network vnet-gateway delete",
)
class Delete(AAZCommand):
"""Delete a virtual network gateway.
In order to delete a Virtual Network Gateway, you must first delete ALL Connection objects in Azure that are connected to the Gateway. After deleting the Gateway, proceed to delete other resources now not in use. For more information, follow the order of instructions on this page: https://docs.microsoft.com/azure/vpn-gateway/vpn-gateway-delete-vnet-gateway-portal
:example: Delete a virtual network gateway.
az network vnet-gateway delete -g MyResourceGroup -n MyVnetGateway
"""
_aaz_info = {
"version": "2015-06-15",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.network/virtualnetworkgateways/{}", "2015-06-15"],
]
}
AZ_SUPPORT_NO_WAIT = True
def _handler(self, command_args):
super()._handler(command_args)
return self.build_lro_poller(self._execute_operations, None)
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
_args_schema.name = AAZStrArg(
options=["-n", "--name"],
help="Name of the VNet gateway.",
required=True,
id_part="name",
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
yield self.VirtualNetworkGatewaysDelete(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
class VirtualNetworkGatewaysDelete(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [202]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_200,
self.on_error,
lro_options={"final-state-via": "location"},
path_format_arguments=self.url_parameters,
)
if session.http_response.status_code in [200]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_200,
self.on_error,
lro_options={"final-state-via": "location"},
path_format_arguments=self.url_parameters,
)
if session.http_response.status_code in [204]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.METHOD_NAME,
self.on_error,
lro_options={"final-state-via": "location"},
path_format_arguments=self.url_parameters,
)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}",
**self.url_parameters
)
@property
def method(self):
return "DELETE"
@property
def error_format(self):
return "MgmtErrorFormat"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
**self.serialize_url_param(
"virtualNetworkGatewayName", self.ctx.args.name,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2015-06-15",
required=True,
),
}
return parameters
def on_200(self, session):
pass
def METHOD_NAME(self, session):
pass
class _DeleteHelper:
"""Helper class for Delete"""
__all__ = ["Delete"]
|
2,566 |
add f aliases
|
import inspect
import ast
import astunparse
from ast import NodeVisitor
from typing import Any, Dict, List
from dlt.common.reflection.utils import find_outer_func_def
import dlt.reflection.names as n
class PipelineScriptVisitor(NodeVisitor):
def __init__(self, source: str):
self.source = source
self.source_lines: List[str] = ast._splitlines_no_ff(source) # type: ignore
self.mod_aliases: Dict[str, str] = {}
self.func_aliases: Dict[str, str] = {}
# self.source_aliases: Dict[str, str] = {}
self.is_destination_imported: bool = False
self.known_calls: Dict[str, List[inspect.BoundArguments]] = {}
self.known_sources: Dict[str, ast.FunctionDef] = {}
self.known_source_calls: Dict[str, List[ast.Call]] = {}
self.known_resources: Dict[str, ast.FunctionDef] = {}
self.known_resource_calls: Dict[str, List[ast.Call]] = {}
# updated in post visit
self.known_sources_resources: Dict[str, ast.FunctionDef] = {}
self.known_sources_resources_calls: Dict[str, List[ast.Call]] = {}
def visit_passes(self, tree: ast.AST) -> None:
self._curr_pass = 1
self.visit(tree)
self._curr_pass = 2
self.visit(tree)
self._post_visit()
def visit_Import(self, node: ast.Import) -> Any:
if self._curr_pass == 1:
# reflect on imported modules
for alias in node.names:
# detect dlt import
if alias.name == n.DLT:
eff_name = alias.asname or alias.name
self.mod_aliases[eff_name] = alias.name
self.METHOD_NAME(eff_name)
if alias.name.startswith(f"{n.DLT}.") and alias.asname is None:
# this also imports dlt
self.mod_aliases[alias.name] = alias.name
self.METHOD_NAME(alias.name)
if alias.name.startswith(f"{n.DESTINATIONS}."):
self.is_destination_imported = True
super().generic_visit(node)
def visit_ImportFrom(self, node: ast.ImportFrom) -> Any:
if self._curr_pass == 1:
# reflect on pipeline functions and decorators
if node.module == n.DLT:
for alias in node.names:
if alias.name in n.DETECTED_FUNCTIONS:
self.func_aliases[alias.asname or alias.name] = alias.name
if node.module == n.DESTINATIONS:
self.is_destination_imported = True
super().generic_visit(node)
def visit_FunctionDef(self, node: ast.FunctionDef) -> Any:
if self._curr_pass == 1:
# find all sources and resources by inspecting decorators
for deco in node.decorator_list:
# decorators can be function calls, attributes or names
if isinstance(deco, (ast.Name, ast.Attribute)):
alias_name = astunparse.unparse(deco).strip()
elif isinstance(deco, ast.Call):
alias_name = astunparse.unparse(deco.func).strip()
else:
raise ValueError(self.source_segment(deco), type(deco), "Unknown decorator form")
fn = self.func_aliases.get(alias_name)
if fn == n.SOURCE:
self.known_sources[str(node.name)] = node
elif fn == n.RESOURCE:
self.known_resources[str(node.name)] = node
super().generic_visit(node)
def visit_Call(self, node: ast.Call) -> Any:
if self._curr_pass == 2:
# check if this is a call to any of known functions
alias_name = astunparse.unparse(node.func).strip()
fn = self.func_aliases.get(alias_name)
if not fn:
# try a fallback to "run" function that may be called on pipeline or source
if isinstance(node.func, ast.Attribute) and node.func.attr == n.RUN:
fn = n.RUN
if fn:
# set parent to the outer function
node.parent = find_outer_func_def(node) # type: ignore
sig = n.SIGNATURES[fn]
try:
# bind the signature where the argument values are the corresponding ast nodes
bound_args = sig.bind(*node.args, **{str(kwd.arg):kwd.value for kwd in node.keywords})
bound_args.apply_defaults()
# print(f"ALIAS: {alias_name} of {self.func_aliases.get(alias_name)} with {bound_args}")
fun_calls = self.known_calls.setdefault(fn, [])
fun_calls.append(bound_args)
except TypeError:
# skip the signature
pass
else:
# check if this is a call to any known source
if alias_name in self.known_sources or alias_name in self.known_resources:
# set parent to the outer function
node.parent = find_outer_func_def(node) # type: ignore
if alias_name in self.known_sources:
decorated_calls = self.known_source_calls.setdefault(alias_name, [])
else:
decorated_calls = self.known_resource_calls.setdefault(alias_name, [])
decorated_calls.append(node)
# visit the children
super().generic_visit(node)
def _post_visit(self) -> None:
self.known_sources_resources = self.known_sources.copy()
self.known_sources_resources.update(self.known_resources)
self.known_sources_resources_calls = self.known_source_calls.copy()
self.known_sources_resources_calls.update(self.known_resource_calls)
def source_segment(self, node: ast.AST) -> str:
# TODO: this must cache parsed source. right now the full source is tokenized on every call
return ast.get_source_segment(self.source, node)
def METHOD_NAME(self, module_name: str) -> None:
for fn in n.DETECTED_FUNCTIONS:
self.func_aliases[f"{module_name}.{fn}"] = fn
|
2,567 |
paypal
|
"""Provides data related to payment."""
import re
import string
import typing as t
from mimesis.data import CREDIT_CARD_NETWORKS
from mimesis.enums import CardType, Gender
from mimesis.exceptions import NonEnumerableError
from mimesis.locales import Locale
from mimesis.providers.base import BaseProvider
from mimesis.providers.person import Person
from mimesis.shortcuts import luhn_checksum
__all__ = ["Payment"]
class Payment(BaseProvider):
"""Class that provides data related to payments."""
def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
"""Initialize attributes.
:param args: Arguments.
:param kwargs: Keyword arguments.
"""
super().__init__(*args, **kwargs)
self._person = Person(
locale=Locale.EN,
seed=self.seed,
random=self.random,
)
class Meta:
name = "payment"
def cid(self) -> str:
"""Generate a random CID.
:return: CID code.
:Example:
7452
"""
return f"{self.random.randint(1, 9999):04d}"
def METHOD_NAME(self) -> str:
"""Generate a random PayPal account.
        :return: Email of PayPal user.
:Example:
[email protected]
"""
return self._person.email()
def bitcoin_address(self) -> str:
"""Generate a random bitcoin address.
Keep in mind that although it generates **valid-looking** addresses,
it does not mean that they are actually valid.
:return: Bitcoin address.
:Example:
3EktnHQD7RiAE6uzMj2ZifT9YgRrkSgzQX
"""
type_ = self.random.choice(["1", "3"])
characters = string.ascii_letters + string.digits
return type_ + "".join(self.random.choices(characters, k=33))
def ethereum_address(self) -> str:
"""Generate a random Ethereum address.
        .. note:: The address will look like an Ethereum address,
            but it is not guaranteed to be a valid one.
:return: Ethereum address.
:Example:
0xe8ece9e6ff7dba52d4c07d37418036a89af9698d
"""
bits = self.random.getrandbits(160)
address = bits.to_bytes(20, byteorder="big")
return "0x" + address.hex()
def credit_card_network(self) -> str:
"""Generate a random credit card network.
:return: Credit card network
:Example:
MasterCard
"""
return self.random.choice(CREDIT_CARD_NETWORKS)
def credit_card_number(self, card_type: t.Optional[CardType] = None) -> str:
"""Generate a random credit card number.
:param card_type: Issuing Network. Default is Visa.
:return: Credit card number.
:raises NotImplementedError: if card_type not supported.
:Example:
4455 5299 1152 2450
"""
length = 16
regex = re.compile(r"(\d{4})(\d{4})(\d{4})(\d{4})")
if card_type is None:
card_type = self.random.choice_enum_item(CardType)
if card_type == CardType.VISA:
number = self.random.randint(4000, 4999)
elif card_type == CardType.MASTER_CARD:
number = self.random.choice(
[
self.random.randint(2221, 2720),
self.random.randint(5100, 5599),
]
)
elif card_type == CardType.AMERICAN_EXPRESS:
number = self.random.choice([34, 37])
length = 15
regex = re.compile(r"(\d{4})(\d{6})(\d{5})")
else:
raise NonEnumerableError(CardType)
str_num = str(number)
while len(str_num) < length - 1:
str_num += self.random.choice(string.digits)
groups = regex.search( # type: ignore
str_num + luhn_checksum(str_num),
).groups()
card = " ".join(groups)
return card
def credit_card_expiration_date(self, minimum: int = 16, maximum: int = 25) -> str:
"""Generate a random expiration date for credit card.
:param minimum: Date of issue.
:param maximum: Maximum of expiration_date.
:return: Expiration date of credit card.
:Example:
03/19.
"""
month = self.random.randint(1, 12)
year = self.random.randint(minimum, maximum)
return f"{month:02d}/{year}"
def cvv(self) -> str:
"""Generate a random CVV.
:return: CVV code.
:Example:
069
"""
return f"{self.random.randint(1, 999):03d}"
def credit_card_owner(
self,
gender: t.Optional[Gender] = None,
) -> t.Dict[str, str]:
"""Generate credit card owner.
:param gender: Gender of credit card owner.
:type gender: Gender's enum object.
:return:
"""
owner = {
"credit_card": self.credit_card_number(),
"expiration_date": self.credit_card_expiration_date(),
"owner": self._person.full_name(gender=gender).upper(),
}
return owner
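# Illustrative usage sketch (not part of the original module): exercising the
# Payment provider defined above. Output values are random; the commented
# examples are only indicative.
if __name__ == "__main__":
    payment = Payment()
    print(payment.credit_card_number(card_type=CardType.VISA))  # e.g. "4455 5299 1152 2450"
    print(payment.credit_card_expiration_date())                # e.g. "03/19"
    print(payment.cvv())                                        # e.g. "069"
    print(payment.credit_card_owner(gender=Gender.FEMALE))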
|
2,568 |
splus rule1
|
# Leo colorizer control file for splus mode.
# This file is in the public domain.
# Properties for splus mode.
properties = {
"doubleBracketIndent": "false",
"indentCloseBrackets": "}",
"indentNextLine": "\\s*(((if|while)\\s*\\(|else\\s*|else\\s+if\\s*\\(|for\\s*\\(.*\\))[^{;]*)",
"indentOpenBrackets": "{",
"lineComment": "#",
"lineUpClosingBracket": "true",
"wordBreakChars": "_,+-=<>/?^&*",
}
# Attributes dict for splus_main ruleset.
splus_main_attributes_dict = {
"default": "null",
"digit_re": "(0x[[:xdigit:]]+[lL]?|[[:digit:]]+(e[[:digit:]]*)?[lLdDfF]?)",
"escape": "\\",
"highlight_digits": "true",
"ignore_case": "false",
"no_word_sep": "",
}
# Dictionary of attributes dictionaries for splus mode.
attributesDictDict = {
"splus_main": splus_main_attributes_dict,
}
# Keywords dict for splus_main ruleset.
splus_main_keywords_dict = {
"F": "literal2",
"T": "literal2",
"break": "keyword1",
"case": "keyword1",
"continue": "keyword1",
"default": "keyword1",
"do": "keyword1",
"else": "keyword1",
"for": "keyword1",
"function": "keyword1",
"goto": "keyword1",
"if": "keyword1",
"return": "keyword1",
"sizeof": "keyword1",
"switch": "keyword1",
"while": "keyword1",
}
# Dictionary of keywords dictionaries for splus mode.
keywordsDictDict = {
"splus_main": splus_main_keywords_dict,
}
# Rules for splus_main ruleset.
def splus_rule0(colorer, s, i):
return colorer.match_span(s, i, kind="literal1", begin="\"", end="\"",
no_line_break=True)
def METHOD_NAME(colorer, s, i):
return colorer.match_span(s, i, kind="literal1", begin="'", end="'",
no_line_break=True)
def splus_rule2(colorer, s, i):
return colorer.match_eol_span(s, i, kind="comment1", seq="#")
def splus_rule3(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="operator", seq="=")
def splus_rule4(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="operator", seq="!")
def splus_rule5(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="operator", seq="_")
def splus_rule6(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="operator", seq=">=")
def splus_rule7(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="operator", seq="<=")
def splus_rule8(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="operator", seq="<-")
def splus_rule9(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="operator", seq="+")
def splus_rule10(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="operator", seq="-")
def splus_rule11(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="operator", seq="/")
def splus_rule12(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="operator", seq="*")
def splus_rule13(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="operator", seq=">")
def splus_rule14(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="operator", seq="<")
def splus_rule15(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="operator", seq="%")
def splus_rule16(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="operator", seq="&")
def splus_rule17(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="operator", seq="|")
def splus_rule18(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="operator", seq="^")
def splus_rule19(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="operator", seq="~")
def splus_rule20(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="operator", seq="}")
def splus_rule21(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="operator", seq="{")
def splus_rule22(colorer, s, i):
return colorer.match_mark_previous(s, i, kind="label", pattern=":",
at_whitespace_end=True,
exclude_match=True)
def splus_rule23(colorer, s, i):
return colorer.match_mark_previous(s, i, kind="function", pattern="(",
exclude_match=True)
def splus_rule24(colorer, s, i):
return colorer.match_keywords(s, i)
# Rules dict for splus_main ruleset.
rulesDict1 = {
"!": [splus_rule4,],
"\"": [splus_rule0,],
"#": [splus_rule2,],
"%": [splus_rule15,],
"&": [splus_rule16,],
"'": [METHOD_NAME,],
"(": [splus_rule23,],
"*": [splus_rule12,],
"+": [splus_rule9,],
"-": [splus_rule10,],
"/": [splus_rule11,],
"0": [splus_rule24,],
"1": [splus_rule24,],
"2": [splus_rule24,],
"3": [splus_rule24,],
"4": [splus_rule24,],
"5": [splus_rule24,],
"6": [splus_rule24,],
"7": [splus_rule24,],
"8": [splus_rule24,],
"9": [splus_rule24,],
":": [splus_rule22,],
"<": [splus_rule7, splus_rule8, splus_rule14,],
"=": [splus_rule3,],
">": [splus_rule6, splus_rule13,],
"@": [splus_rule24,],
"A": [splus_rule24,],
"B": [splus_rule24,],
"C": [splus_rule24,],
"D": [splus_rule24,],
"E": [splus_rule24,],
"F": [splus_rule24,],
"G": [splus_rule24,],
"H": [splus_rule24,],
"I": [splus_rule24,],
"J": [splus_rule24,],
"K": [splus_rule24,],
"L": [splus_rule24,],
"M": [splus_rule24,],
"N": [splus_rule24,],
"O": [splus_rule24,],
"P": [splus_rule24,],
"Q": [splus_rule24,],
"R": [splus_rule24,],
"S": [splus_rule24,],
"T": [splus_rule24,],
"U": [splus_rule24,],
"V": [splus_rule24,],
"W": [splus_rule24,],
"X": [splus_rule24,],
"Y": [splus_rule24,],
"Z": [splus_rule24,],
"^": [splus_rule18,],
"_": [splus_rule5,],
"a": [splus_rule24,],
"b": [splus_rule24,],
"c": [splus_rule24,],
"d": [splus_rule24,],
"e": [splus_rule24,],
"f": [splus_rule24,],
"g": [splus_rule24,],
"h": [splus_rule24,],
"i": [splus_rule24,],
"j": [splus_rule24,],
"k": [splus_rule24,],
"l": [splus_rule24,],
"m": [splus_rule24,],
"n": [splus_rule24,],
"o": [splus_rule24,],
"p": [splus_rule24,],
"q": [splus_rule24,],
"r": [splus_rule24,],
"s": [splus_rule24,],
"t": [splus_rule24,],
"u": [splus_rule24,],
"v": [splus_rule24,],
"w": [splus_rule24,],
"x": [splus_rule24,],
"y": [splus_rule24,],
"z": [splus_rule24,],
"{": [splus_rule21,],
"|": [splus_rule17,],
"}": [splus_rule20,],
"~": [splus_rule19,],
}
# x.rulesDictDict for splus mode.
rulesDictDict = {
"splus_main": rulesDict1,
}
# Import dict for splus mode.
importDict = {}
|
2,569 |
remove current task
|
"""Undocumented Module"""
__all__ = ['TaskManagerPanel', 'TaskManagerWidget']
from direct.tkwidgets.AppShell import AppShell
from direct.showbase.DirectObject import DirectObject
import Pmw
import tkinter as tk
from tkinter.messagebox import askokcancel
class TaskManagerPanel(AppShell):
# Override class variables here
appname = 'TaskManager Panel'
frameWidth = 300
frameHeight = 400
usecommandarea = 0
usestatusarea = 0
def __init__(self, taskMgr, parent = None, **kw):
INITOPT = Pmw.INITOPT
optiondefs = (
('title', self.appname, None),
)
self.defineoptions(kw, optiondefs)
self.taskMgr = taskMgr
# Call superclass initialization function
AppShell.__init__(self, parent = parent)
self.initialiseoptions(TaskManagerPanel)
def createInterface(self):
# FILE MENU
# Get a handle on the file menu so commands can be inserted
# before quit item
self.taskMgrWidget = TaskManagerWidget(
self.interior(), self.taskMgr)
def onDestroy(self, event):
self.taskMgrWidget.onDestroy()
class TaskManagerWidget(DirectObject):
"""
TaskManagerWidget class: contains methods for creating a panel
used to view and control taskManager tasks.
"""
def __init__(self, parent, taskMgr):
"""
TaskManagerWidget class pops up a control panel to view/delete
tasks managed by the taskManager.
"""
# Record parent (used by ok cancel dialog boxes)
self.parent = parent
# Record taskManager
self.taskMgr = taskMgr
# Init current task
self.currentTask = None
self.__taskDict = {}
# Create widgets
# Create a listbox
self.taskListBox = Pmw.ScrolledListBox(
parent,
labelpos = tk.NW, label_text = 'Tasks:',
label_font=('MSSansSerif', 10, 'bold'),
listbox_takefocus = 1,
items = [],
selectioncommand = self.setCurrentTask)
self.taskListBox.pack(expand = 1, fill = tk.BOTH)
self._popupMenu = tk.Menu(self.taskListBox.component('listbox'),
tearoff = 0)
self._popupMenu.add_command(
label = 'Remove Task',
command = self.METHOD_NAME)
self._popupMenu.add_command(
label = 'Remove Matching Tasks',
command = self.removeMatchingTasks)
# Controls Frame
controlsFrame = tk.Frame(parent)
self.removeButton = tk.Button(controlsFrame, text = 'Remove Task',
command = self.METHOD_NAME)
#self.removeButton.pack(expand = 1, fill = tk.X, side = LEFT)
self.removeButton.grid(row = 0, column = 0, sticky = tk.EW)
self.removeMatchingButton = tk.Button(controlsFrame,
text = 'Remove Matching Tasks',
command = self.removeMatchingTasks)
#self.removeMatchingButton.pack(expand = 1, fill = tk.X, side = LEFT)
self.removeMatchingButton.grid(row = 0, column = 1, sticky = tk.EW)
self.taskMgrVerbose = tk.IntVar()
self.taskMgrVerbose.set(0)
self.update = tk.Button(
controlsFrame,
text = 'Update',
command = self.updateTaskListBox)
#self.update.pack(expand = 1, fill = tk.X, side = LEFT)
self.update.grid(row = 1, column = 0, sticky = tk.EW)
self.dynamicUpdate = tk.Checkbutton(
controlsFrame,
text = 'Dynamic Update',
variable = self.taskMgrVerbose,
command = self.toggleTaskMgrVerbose)
#self.dynamicUpdate.pack(expand = 1, fill = tk.X, side = LEFT)
self.dynamicUpdate.grid(row = 1, column = 1, sticky = tk.EW)
# Pack frames
controlsFrame.pack(fill = tk.X)
controlsFrame.grid_columnconfigure(0, weight = 1)
controlsFrame.grid_columnconfigure(1, weight = 1)
# Add hook to spawnTaskEvents
self.accept('TaskManager-spawnTask', self.spawnTaskHook)
self.accept('TaskManager-removeTask', self.removeTaskHook)
# Get listbox
listbox = self.taskListBox.component('listbox')
# Bind updates to arrow buttons
listbox.bind('<KeyRelease-Up>', self.setCurrentTask)
listbox.bind('<KeyRelease-Down>', self.setCurrentTask)
listbox.bind('<ButtonPress-3>', self.popupMenu)
# And grab focus (to allow keyboard navigation)
listbox.focus_set()
# Update listbox values
self.updateTaskListBox()
def popupMenu(self, event):
"""
listbox = self.taskListBox.component('listbox')
index = listbox.nearest(event.y)
listbox.selection_clear(0)
listbox.activate(index)
self.taskListBox.select_set(index)
self.setCurrentTask()
"""
self._popupMenu.post(event.widget.winfo_pointerx(),
event.widget.winfo_pointery())
return "break"
def setCurrentTask(self, event = None):
if len(self.taskListBox.curselection()) > 0: # [gjeon] to avoid crash when nothing is selected
index = int(self.taskListBox.curselection()[0])
self.currentTask = self.__taskDict[index]
else:
self.currentTask = None
def updateTaskListBox(self):
# Get a list of task names
taskNames = []
self.__taskDict = {}
count = 0
for task in sorted(self.taskMgr.getTasks(), key=lambda t: t.getName()):
taskNames.append(task.getName())
self.__taskDict[count] = task
count += 1
if taskNames:
self.taskListBox.setlist(taskNames)
# And set current index (so keypresses will start with index 0)
self.taskListBox.component('listbox').activate(0)
# Select first item
#self.taskListBox.select_set(0) # [gjeon] commented out to avoid focus problem with other lists
self.setCurrentTask()
def toggleTaskMgrVerbose(self):
if self.taskMgrVerbose.get():
self.updateTaskListBox()
def spawnTaskHook(self, task):
if self.taskMgrVerbose.get():
self.updateTaskListBox()
def removeTaskHook(self, task):
if self.taskMgrVerbose.get():
self.updateTaskListBox()
def METHOD_NAME(self):
if self.currentTask:
name = self.currentTask.name
ok = 1
if ((name == 'dataLoop') or
(name == 'resetPrevTransform') or
(name == 'tkLoop') or
(name == 'eventManager') or
(name == 'igLoop')):
ok = askokcancel('TaskManagerControls',
'Remove: %s?' % name,
parent = self.parent,
default = 'cancel')
if ok:
self.taskMgr.remove(self.currentTask)
self.updateTaskListBox()
def removeMatchingTasks(self):
name = self.taskListBox.getcurselection()[0]
ok = 1
if ((name == 'dataLoop') or
(name == 'resetPrevTransform') or
(name == 'tkLoop') or
(name == 'eventManager') or
(name == 'igLoop')):
ok = askokcancel('TaskManagerControls',
'Remove tasks named: %s?' % name,
parent = self.parent,
default = 'cancel')
if ok:
self.taskMgr.remove(name)
self.updateTaskListBox()
def onDestroy(self):
self.ignore('TaskManager-spawnTask')
self.ignore('TaskManager-removeTask')
|
2,570 |
execute
|
"""
Abstracts the various concerns of external table creation as much as possible
This operator originally ran using airflow's bigquery hooks. However, the
version we had to use (airflow v1.14) relied on an outdated form of authentication.
Now, the pipeline aims to use bigquery's sqlalchemy client where possible.
However, it's cumbersome to convert the http api style schema fields to SQL, so
we provide a fallback for these old-style tasks.
"""
import os
import re
from google.api_core.exceptions import NotFound
from google.cloud import bigquery
from utils import CALITP_BQ_LOCATION
from airflow.models import BaseOperator
# TODO: this should probably be an env var
def get_project_id():
return (
"cal-itp-data-infra-staging"
if os.environ["AIRFLOW_ENV"] == "development"
else "cal-itp-data-infra"
)
def format_table_name(name, is_staging=False, full_name=False):
dataset, table_name = name.split(".")
staging = "__staging" if is_staging else ""
# test_prefix = "zzz_test_" if is_development() else ""
test_prefix = ""
project_id = "cal-itp-data-infra" + "." if full_name else ""
# e.g. test_gtfs_schedule__staging.agency
return f"{project_id}{test_prefix}{dataset}.{table_name}{staging}"
def _bq_client_create_external_table(
table_name,
schema_fields,
source_objects,
source_format,
geojson=False,
hive_options=None,
bucket=None,
post_hook=None,
):
# TODO: must be fully qualified table name
ext = bigquery.ExternalConfig(source_format)
ext.source_uris = source_objects
ext.autodetect = schema_fields is None
ext.ignore_unknown_values = True
if geojson:
ext.json_extension = "GEOJSON"
if hive_options:
assert (
len(source_objects) == 1
), "cannot use hive partitioning with more than one URI"
opt = bigquery.external_config.HivePartitioningOptions()
# _Strongly_ recommend using CUSTOM mode and explicitly-defined
# key schema for more than a trivial number of files
opt.mode = hive_options.get("mode", "AUTO")
opt.require_partition_filter = hive_options.get(
"require_partition_filter", True
)
# TODO: this is very fragile, we should probably be calculating it from
# the source_objects and validating the format (prefix, trailing slashes)
prefix = hive_options["source_uri_prefix"]
if prefix and bucket:
opt.source_uri_prefix = bucket + "/" + prefix
else:
opt.source_uri_prefix = prefix
ext.hive_partitioning = opt
client = bigquery.Client(project=get_project_id(), location=CALITP_BQ_LOCATION)
dataset_name, _ = table_name.split(".")
full_dataset_name = ".".join((get_project_id(), dataset_name))
try:
client.get_dataset(full_dataset_name)
except NotFound:
print(f"Dataset {full_dataset_name} not found, creating.")
dataset = bigquery.Dataset(full_dataset_name)
dataset.location = "us-west2"
client.create_dataset(dataset, timeout=30)
# for some reason, you can set the project name in the bigquery client, and
# it doesn't need to be in the SQL code, but this bigquery API still requires
# the fully qualified table name when initializing a Table.
full_table_name = f"{get_project_id()}.{table_name}"
# First delete table if it exists
print(f"Deleting external table if exists: {full_table_name}")
client.delete_table(full_table_name, not_found_ok=True)
# (re)create table
tbl = bigquery.Table(full_table_name, schema_fields)
tbl.external_data_configuration = ext
print(
f"Creating external table: {full_table_name} {tbl} {source_objects} {hive_options}"
)
created_table = client.create_table(tbl, timeout=300, exists_ok=True)
if post_hook:
client.query(post_hook).result()
print(f"Successfully ran {post_hook}")
return created_table
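# --- Illustrative sketch, not part of the original module ---
# hive_options is a plain dict consumed above; the keys read from it are
# "mode", "require_partition_filter" and "source_uri_prefix". A hypothetical
# configuration for objects laid out as <bucket>/my_table/dt=2021-01-01/...,
# using the CUSTOM mode recommended in the comment above, might look like:
EXAMPLE_HIVE_OPTIONS = {
    "mode": "CUSTOM",
    "require_partition_filter": True,
    # Joined with the bucket at runtime; the {dt:DATE} segment declares the
    # partition key schema for CUSTOM mode (placeholder path, not a real one).
    "source_uri_prefix": "my_table/{dt:DATE}",
}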
class ExternalTable(BaseOperator):
template_fields = (
"bucket",
"post_hook",
)
def __init__(
self,
*args,
bucket=None,
prefix_bucket=False,
destination_project_dataset_table=None, # note that the project is optional here
skip_leading_rows=1,
schema_fields=None,
hive_options=None,
source_objects=[],
source_format="CSV",
geojson=False,
use_bq_client=False,
field_delimiter=",",
post_hook=None,
**kwargs,
):
assert bucket is not None
self.bucket = bucket
# This only exists because the prefix_bucket() template isn't working in the yml file for some reason
if self.bucket and prefix_bucket and os.environ["AIRFLOW_ENV"] == "development":
self.bucket = re.sub(r"gs://([\w-]+)", r"gs://test-\1", self.bucket)
self.destination_project_dataset_table = format_table_name(
destination_project_dataset_table
)
self.skip_leading_rows = skip_leading_rows
self.schema_fields = schema_fields
self.source_objects = source_objects
self.source_format = source_format
self.geojson = geojson
self.hive_options = hive_options
self.use_bq_client = use_bq_client
self.field_delimiter = field_delimiter
self.post_hook = post_hook
super().__init__(**kwargs)
def METHOD_NAME(self, context):
# we can't do this in the init because templating occurs in the super init call
self.source_objects = list(map(self.fix_prefix, self.source_objects))
# Basically for backwards support of tasks that have nested fields and
# were created when we were using airflow bigquery hooks.
# e.g. dags/gtfs_schedule_history/validation_report.yml
# These tables should be defined using SqlQueryOperator and raw SQL now.
if self.use_bq_client:
_bq_client_create_external_table(
self.destination_project_dataset_table,
self.schema_fields,
self.source_objects,
self.source_format,
self.geojson,
self.hive_options,
self.bucket,
self.post_hook,
)
else:
if self.hive_options:
raise RuntimeError(
"have to use the bigquery client when creating a hive partitioned table"
)
field_strings = [
f'{entry["name"]} {entry["type"]}' for entry in self.schema_fields
]
fields_spec = ",\n".join(field_strings)
options = [
f'format = "{self.source_format}"',
f"uris = {repr(self.source_objects)}",
]
if self.source_format == "CSV":
options.append(f"skip_leading_rows = {self.skip_leading_rows}")
options.append(f"field_delimiter = {repr(self.field_delimiter)}")
if self.geojson:
options.append("json_extension = 'GEOJSON'")
options_str = ",".join(options)
query = f"""
CREATE OR REPLACE EXTERNAL TABLE `{self.destination_project_dataset_table}` (
{fields_spec}
)
OPTIONS ({options_str})
"""
print(query)
client = bigquery.Client(
project=get_project_id(), location=CALITP_BQ_LOCATION
)
query_job = client.query(query)
query_job.result()
return self.schema_fields
def fix_prefix(self, entry):
entry = entry.replace("gs://", "") if entry.startswith("gs://") else entry
return f"{self.bucket}/{entry}"
|
2,571 |
test stringwrapper eq
|
import copy
import pytest
from astropy.time import Time
from sunpy.net import attr, attrs, hek
@pytest.fixture
def foostrwrap(request):
return hek.attrs._StringParamAttrWrapper("foo")
class HEKResult:
"""
Basic caching class to run the remote query once and return the result many times.
"""
def __init__(self):
self._result = None
def get_result(self):
if self._result is None:
startTime = '2011/08/09 07:23:56'
endTime = '2011/08/09 12:40:29'
eventType = 'FL'
hekTime = attrs.Time(startTime, endTime)
hekEvent = attrs.hek.EventType(eventType)
h = hek.HEKClient()
hek_query = h.search(hekTime, hekEvent)
self._result = hek_query
return copy.deepcopy(self._result)
_hek_result = HEKResult()
@pytest.fixture
def hek_result():
return _hek_result.get_result()
def test_eventtype_collide():
with pytest.raises(TypeError):
attrs.hek.AR & attrs.hek.CE
with pytest.raises(TypeError):
(attrs.hek.AR & attrs.Time((2011, 1, 1),
(2011, 1, 2))) & attrs.hek.CE
with pytest.raises(TypeError):
(attrs.hek.AR | attrs.Time((2011, 1, 1),
(2011, 1, 2))) & attrs.hek.CE
def test_eventtype_or():
assert (attrs.hek.AR | attrs.hek.CE).item == "ar,ce"
def test_HEKAttr():
res = hek.attrs.walker.create(hek.attrs.HEKAttr("foo", "=", "bar"), {})
assert len(res) == 1
assert res[0] == {'value0': 'bar', 'operator0': '=', 'param0': 'foo'}
def METHOD_NAME(foostrwrap):
res = hek.attrs.walker.create(foostrwrap == "bar", {})
assert len(res) == 1
assert res[0] == {'value0': 'bar', 'operator0': '=', 'param0': 'foo'}
def test_stringwrapper_lt(foostrwrap):
res = hek.attrs.walker.create(foostrwrap < "bar", {})
assert len(res) == 1
assert res[0] == {'value0': 'bar', 'operator0': '<', 'param0': 'foo'}
def test_stringwrapper_gt(foostrwrap):
res = hek.attrs.walker.create(foostrwrap > "bar", {})
assert len(res) == 1
assert res[0] == {'value0': 'bar', 'operator0': '>', 'param0': 'foo'}
def test_stringwrapper_le(foostrwrap):
res = hek.attrs.walker.create(foostrwrap <= "bar", {})
assert len(res) == 1
assert res[0] == {'value0': 'bar', 'operator0': '<=', 'param0': 'foo'}
def test_stringwrapper_ge(foostrwrap):
res = hek.attrs.walker.create(foostrwrap >= "bar", {})
assert len(res) == 1
assert res[0] == {'value0': 'bar', 'operator0': '>=', 'param0': 'foo'}
def test_stringwrapper_ne(foostrwrap):
res = hek.attrs.walker.create(foostrwrap != "bar", {})
assert len(res) == 1
assert res[0] == {'value0': 'bar', 'operator0': '!=', 'param0': 'foo'}
def test_stringwrapper_like(foostrwrap):
res = hek.attrs.walker.create(foostrwrap.like("bar"), {})
assert len(res) == 1
assert res[0] == {'value0': 'bar', 'operator0': 'like', 'param0': 'foo'}
def test_err_dummyattr_create():
with pytest.raises(TypeError):
hek.attrs.walker.create(attr.DummyAttr(), {})
def test_err_dummyattr_apply():
with pytest.raises(TypeError):
hek.attrs.walker.apply(attr.DummyAttr(), {})
@pytest.mark.remote_data
def test_hek_client(hek_result):
assert type(hek_result) == hek.hek.HEKTable
@pytest.mark.remote_data
def test_hek_empty_search_result():
startTime = '1985-05-04 00:00:00'
endTime = '1985-05-04 00:00:00'
eventType = 'FL'
hekTime = attrs.Time(startTime, endTime)
hekEvent = attrs.hek.EventType(eventType)
h = hek.HEKClient()
hek_query = h.search(hekTime, hekEvent)
assert type(hek_query) == hek.hek.HEKTable
assert len(hek_query) == 0
@pytest.mark.remote_data
def test_getitem(hek_result):
assert hek_result.__getitem__(0) == hek_result[0]
@pytest.mark.remote_data
def test_get_voevent(hek_result):
ve = hek_result[0].get_voevent()
assert len(ve['voe:VOEvent']) == 7
@pytest.mark.remote_data
def test_hek_time_col(hek_result):
assert isinstance(hek_result[0]['event_starttime'], Time)
assert isinstance(hek_result[0]['event_endtime'], Time)
@pytest.mark.remote_data
def test_vso_time(hek_result):
ve = hek_result[0].vso_time
assert type(ve) == attrs.Time
@pytest.mark.remote_data
def test_vso_instrument(hek_result):
vc = hek_result[1].vso_instrument
assert type(vc) == attrs.Instrument
@pytest.mark.remote_data
def test_HEKRow_get(hek_result):
assert hek_result[0]['event_peaktime'] == hek_result[0].get('event_peaktime')
assert hek_result[0].get('') is None
@pytest.mark.remote_data
def test_mixed_results_get():
# To check that the following bug is fixed:
# https://github.com/sunpy/sunpy/issues/3238
client = hek.HEKClient()
result = client.search(attrs.Time('2013/02/01 00:00:00', '2013/02/01 23:30:00'),
attrs.hek.FRM.Name == 'SPoCA')
assert isinstance(result, hek.hek.HEKTable)
assert len(result) == 89
assert result[0]["SOL_standard"] == 'SOL2013-01-31T20:13:31L199C128'
@pytest.mark.remote_data
def test_mixed_results_get_2():
# To check that the following bug is fixed:
# # https://github.com/sunpy/sunpy/issues/3898
client = hek.HEKClient()
result = client.search(attrs.Time('2011/08/09 07:23:56', '2011/08/09 12:40:29'),
attrs.hek.EventType("FL"))
assert isinstance(result, hek.hek.HEKTable)
assert len(result) == 19
assert result[0]["SOL_standard"] == 'SOL2011-08-08T01:30:04L247C075'
@pytest.mark.remote_data
def test_mixed_results_get_angstrom():
# To check that the following bug is fixed:
# https://github.com/sunpy/sunpy/issues/4087
client = hek.HEKClient()
tstart = '2014/10/24 20:50'
tend = '2014/10/25 00:14'
event_type = 'FL'
result = client.search(attrs.Time(tstart, tend), attrs.hek.EventType(event_type))
assert len(result) == 13
assert result[0]["SOL_standard"] == 'SOL2014-10-24T20:53:46L247C106'
@pytest.mark.remote_data
def test_query_multiple_operators():
event_type = "FL"
tstart = "2013/10/28"
tend = "2013/10/29"
client = hek.HEKClient()
results = client.search(attrs.Time(tstart, tend),
attrs.hek.EventType(event_type),
attrs.hek.FL.GOESCls > "M1.0",
attrs.hek.OBS.Observatory == "GOES")
assert len(results) == 7
|
2,572 |
get namespace authorization rule output
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetNamespaceAuthorizationRuleResult',
'AwaitableGetNamespaceAuthorizationRuleResult',
'get_namespace_authorization_rule',
'get_namespace_authorization_rule_output',
]
@pulumi.output_type
class GetNamespaceAuthorizationRuleResult:
"""
Description of a namespace authorization rule.
"""
def __init__(__self__, id=None, location=None, name=None, rights=None, system_data=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if rights and not isinstance(rights, list):
raise TypeError("Expected argument 'rights' to be a list")
pulumi.set(__self__, "rights", rights)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> str:
"""
The geo-location where the resource lives
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def rights(self) -> Sequence[str]:
"""
The rights associated with the rule.
"""
return pulumi.get(self, "rights")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
The system meta data relating to this resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.EventHub/Namespaces" or "Microsoft.EventHub/Namespaces/EventHubs"
"""
return pulumi.get(self, "type")
class AwaitableGetNamespaceAuthorizationRuleResult(GetNamespaceAuthorizationRuleResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetNamespaceAuthorizationRuleResult(
id=self.id,
location=self.location,
name=self.name,
rights=self.rights,
system_data=self.system_data,
type=self.type)
def get_namespace_authorization_rule(authorization_rule_name: Optional[str] = None,
namespace_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNamespaceAuthorizationRuleResult:
"""
Gets an authorization rule for a namespace by rule name.
:param str authorization_rule_name: The authorization rule name.
:param str namespace_name: The namespace name
:param str resource_group_name: Name of the Resource group within the Azure subscription.
"""
__args__ = dict()
__args__['authorizationRuleName'] = authorization_rule_name
__args__['namespaceName'] = namespace_name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:servicebus/v20220101preview:getNamespaceAuthorizationRule', __args__, opts=opts, typ=GetNamespaceAuthorizationRuleResult).value
return AwaitableGetNamespaceAuthorizationRuleResult(
id=pulumi.get(__ret__, 'id'),
location=pulumi.get(__ret__, 'location'),
name=pulumi.get(__ret__, 'name'),
rights=pulumi.get(__ret__, 'rights'),
system_data=pulumi.get(__ret__, 'system_data'),
type=pulumi.get(__ret__, 'type'))
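# --- Illustrative sketch, not part of the generated SDK ---
# Hypothetical usage of the plain (non-Output) lookup defined above; the
# resource names are placeholders, not real Azure resources:
def _example_lookup():
    rule = get_namespace_authorization_rule(
        authorization_rule_name="RootManageSharedAccessKey",
        namespace_name="my-servicebus-namespace",
        resource_group_name="my-resource-group",
    )
    # The result exposes the rule's rights, e.g. ["Listen", "Manage", "Send"].
    return rule.rights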
@_utilities.lift_output_func(get_namespace_authorization_rule)
def METHOD_NAME(authorization_rule_name: Optional[pulumi.Input[str]] = None,
namespace_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetNamespaceAuthorizationRuleResult]:
"""
Gets an authorization rule for a namespace by rule name.
:param str authorization_rule_name: The authorization rule name.
:param str namespace_name: The namespace name
:param str resource_group_name: Name of the Resource group within the Azure subscription.
"""
...
|
2,573 |
test interpolate
|
import lenstronomy.Util.util as util
import numpy as np
import numpy.testing as npt
import pytest
import unittest
from lenstronomy.LightModel.Profiles.shapelets import Shapelets, ShapeletSet
class TestShapeletSet(object):
"""Class to test Shapelets."""
def setup_method(self):
self.shapeletSet = ShapeletSet()
self.shapelets = Shapelets(precalc=False)
self.x, self.y = util.make_grid(10, 0.1, 1)
def test_shapelet_set(self):
"""
:return:
"""
n_max = 2
beta = 1.0
amp = [1, 0, 0, 0, 0, 0]
output = self.shapeletSet.function(
np.array(1), np.array(1), amp, n_max, beta, center_x=0, center_y=0
)
assert output == 0.20755374871029739
input = np.array(0.0)
input += output
output = self.shapeletSet.function(
self.x, self.y, amp, n_max, beta, center_x=0, center_y=0
)
assert output[10] == 0.47957022395315946
output = self.shapeletSet.function(
1, 1, amp, n_max, beta, center_x=0, center_y=0
)
assert output == 0.20755374871029739
n_max = -1
beta = 1.0
amp = [1, 0, 0, 0, 0, 0]
output = self.shapeletSet.function(
np.array(1), np.array(1), amp, n_max, beta, center_x=0, center_y=0
)
assert output == 0
beta = 1.0
amp = 1
shapelets = Shapelets(precalc=False, stable_cut=False)
output = shapelets.function(
np.array(1), np.array(1), amp, beta, 0, 0, center_x=0, center_y=0
)
npt.assert_almost_equal(0.2075537487102974, output, decimal=8)
def test_shapelet_basis(self):
num_order = 5
beta = 1
numPix = 10
kernel_list = self.shapeletSet.shapelet_basis_2d(num_order, beta, numPix)
npt.assert_almost_equal(kernel_list[0][4, 4], 0.4393912894677224, decimal=9)
def test_decomposition(self):
"""
:return:
"""
n_max = 2
beta = 10.0
deltaPix = 1
amp = np.array([1, 1, 1, 1, 1, 1])
x, y = util.make_grid(100, deltaPix, 1)
input = self.shapeletSet.function(
x, y, amp, n_max, beta, center_x=0, center_y=0
)
amp_out = self.shapeletSet.decomposition(
input, x, y, n_max, beta, deltaPix, center_x=0, center_y=0
)
for i in range(len(amp)):
npt.assert_almost_equal(amp_out[i], amp[i], decimal=4)
def test_function_split(self):
n_max = 2
beta = 10.0
deltaPix = 0.1
amp = np.array([1, 1, 1, 1, 1, 1])
x, y = util.make_grid(10, deltaPix, 1)
function_set = self.shapeletSet.function_split(
x, y, amp, n_max, beta, center_x=0, center_y=0
)
test_flux = self.shapelets.function(
x, y, amp=1.0, n1=0, n2=0, beta=beta, center_x=0, center_y=0
)
print(np.shape(function_set))
print(np.shape(test_flux))
assert function_set[0][10] == test_flux[10]
def METHOD_NAME(self):
shapeletsInterp = Shapelets(interpolation=True)
x, y = 0.99, 0
beta = 0.5
flux_full = self.shapelets.function(
x, y, amp=1.0, n1=0, n2=0, beta=beta, center_x=0, center_y=0
)
flux_interp = shapeletsInterp.function(
x, y, amp=1.0, n1=0, n2=0, beta=beta, center_x=0, center_y=0
)
npt.assert_almost_equal(flux_interp, flux_full, decimal=10)
def test_hermval(self):
x = np.linspace(0, 2000, 2001)
n_array = [1, 2, 3, 0, 1]
import numpy.polynomial.hermite as hermite
out_true = hermite.hermval(x, n_array)
out_approx = self.shapelets.hermval(x, n_array)
shape_true = out_true * np.exp(-(x**2) / 2.0)
shape_approx = out_approx * np.exp(-(x**2) / 2.0)
npt.assert_almost_equal(shape_approx, shape_true, decimal=6)
x = 2
n_array = [1, 2, 3, 0, 1]
out_true = hermite.hermval(x, n_array)
out_approx = self.shapelets.hermval(x, n_array)
npt.assert_almost_equal(out_approx, out_true, decimal=6)
x = 2001
n_array = [1, 2, 3, 0, 1]
out_true = hermite.hermval(x, n_array)
out_approx = self.shapelets.hermval(x, n_array)
shape_true = out_true * np.exp(-(x**2) / 2.0)
shape_approx = out_approx * np.exp(-(x**2) / 2.0)
npt.assert_almost_equal(shape_approx, shape_true, decimal=6)
class TestRaise(unittest.TestCase):
def test_raise(self):
with self.assertRaises(ValueError):
shapelets = Shapelets()
shapelets.pre_calc(1, 1, beta=1, n_order=200, center_x=0, center_y=0)
if __name__ == "__main__":
pytest.main()
|
2,574 |
test basicauth revoke
|
"""Client authentication tests across all endpoints.
Client authentication in OAuth2 serves two purposes: to authenticate
confidential clients and to ensure public clients are in fact public. The
latter is achieved with authenticate_client_id and the former with
authenticate_client.
We make sure authentication is done by requiring a client object to be set
on the request object with a client_id parameter. The client_id attribute
prevents this check from being circumvented with a client form parameter.
"""
import json
from unittest import mock
from oauthlib.oauth2 import (
BackendApplicationServer, LegacyApplicationServer, MobileApplicationServer,
RequestValidator, WebApplicationServer,
)
from tests.unittest import TestCase
from .test_utils import get_fragment_credentials
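# --- Illustrative sketch, not part of the original test suite ---
# The module docstring distinguishes authenticate_client (confidential clients,
# credentials verified) from authenticate_client_id (public clients, only the
# claimed client_id is checked). A minimal validator following that contract,
# assuming form-encoded client credentials, might look like this; the
# credential store and client ids below are hypothetical:
class _SketchValidator(RequestValidator):
    _secrets = {"confidential-app": "s3cr3t"}  # hypothetical confidential clients
    _public_ids = {"public-spa"}               # hypothetical public clients

    def client_authentication_required(self, request, *args, **kwargs):
        # Public clients fall through to authenticate_client_id instead.
        return request.client_id not in self._public_ids

    def authenticate_client(self, request, *args, **kwargs):
        secret = self._secrets.get(request.client_id)
        if secret is None or secret != request.client_secret:
            return False
        request.client = type("Client", (), {"client_id": request.client_id})()
        return True

    def authenticate_client_id(self, client_id, request, *args, **kwargs):
        if client_id not in self._public_ids:
            return False
        request.client = type("Client", (), {"client_id": client_id})()
        return True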
class ClientAuthenticationTest(TestCase):
def inspect_client(self, request, refresh_token=False):
if not request.client or not request.client.client_id:
raise ValueError()
return 'abc'
def setUp(self):
self.validator = mock.MagicMock(spec=RequestValidator)
self.validator.is_pkce_required.return_value = False
self.validator.get_code_challenge.return_value = None
self.validator.get_default_redirect_uri.return_value = 'http://i.b./path'
self.web = WebApplicationServer(self.validator,
token_generator=self.inspect_client)
self.mobile = MobileApplicationServer(self.validator,
token_generator=self.inspect_client)
self.legacy = LegacyApplicationServer(self.validator,
token_generator=self.inspect_client)
self.backend = BackendApplicationServer(self.validator,
token_generator=self.inspect_client)
self.token_uri = 'http://example.com/path'
self.auth_uri = 'http://example.com/path?client_id=abc&response_type=token'
# should be base64-encoded, but that adds no value in this unittest
self.basicauth_client_creds = {"Authorization": "john:doe"}
self.basicauth_client_id = {"Authorization": "john:"}
def set_client(self, request):
request.client = mock.MagicMock()
request.client.client_id = 'mocked'
return True
def set_client_id(self, client_id, request):
request.client = mock.MagicMock()
request.client.client_id = 'mocked'
return True
def basicauth_authenticate_client(self, request):
assert "Authorization" in request.headers
assert "john:doe" in request.headers["Authorization"]
request.client = mock.MagicMock()
request.client.client_id = 'mocked'
return True
def test_client_id_authentication(self):
token_uri = 'http://example.com/path'
# authorization code grant
self.validator.authenticate_client.return_value = False
self.validator.authenticate_client_id.return_value = False
_, body, _ = self.web.create_token_response(token_uri,
body='grant_type=authorization_code&code=mock')
self.assertEqual(json.loads(body)['error'], 'invalid_client')
self.validator.authenticate_client_id.return_value = True
self.validator.authenticate_client.side_effect = self.set_client
_, body, _ = self.web.create_token_response(token_uri,
body='grant_type=authorization_code&code=mock')
self.assertIn('access_token', json.loads(body))
# implicit grant
auth_uri = 'http://example.com/path?client_id=abc&response_type=token'
self.assertRaises(ValueError, self.mobile.create_authorization_response,
auth_uri, scopes=['random'])
self.validator.validate_client_id.side_effect = self.set_client_id
h, _, s = self.mobile.create_authorization_response(auth_uri, scopes=['random'])
self.assertEqual(302, s)
self.assertIn('Location', h)
self.assertIn('access_token', get_fragment_credentials(h['Location']))
def test_basicauth_web(self):
self.validator.authenticate_client.side_effect = self.basicauth_authenticate_client
_, body, _ = self.web.create_token_response(
self.token_uri,
body='grant_type=authorization_code&code=mock',
headers=self.basicauth_client_creds
)
self.assertIn('access_token', json.loads(body))
def test_basicauth_legacy(self):
self.validator.authenticate_client.side_effect = self.basicauth_authenticate_client
_, body, _ = self.legacy.create_token_response(
self.token_uri,
body='grant_type=password&username=abc&password=secret',
headers=self.basicauth_client_creds
)
self.assertIn('access_token', json.loads(body))
def test_basicauth_backend(self):
self.validator.authenticate_client.side_effect = self.basicauth_authenticate_client
_, body, _ = self.backend.create_token_response(
self.token_uri,
body='grant_type=client_credentials',
headers=self.basicauth_client_creds
)
self.assertIn('access_token', json.loads(body))
def METHOD_NAME(self):
self.validator.authenticate_client.side_effect = self.basicauth_authenticate_client
# legacy or any other uses the same RevocationEndpoint
_, body, status = self.legacy.create_revocation_response(
self.token_uri,
body='token=foobar',
headers=self.basicauth_client_creds
)
self.assertEqual(status, 200, body)
def test_basicauth_introspect(self):
self.validator.authenticate_client.side_effect = self.basicauth_authenticate_client
# legacy or any other uses the same IntrospectEndpoint
_, body, status = self.legacy.create_introspect_response(
self.token_uri,
body='token=foobar',
headers=self.basicauth_client_creds
)
self.assertEqual(status, 200, body)
def test_custom_authentication(self):
token_uri = 'http://example.com/path'
# authorization code grant
self.assertRaises(NotImplementedError,
self.web.create_token_response, token_uri,
body='grant_type=authorization_code&code=mock')
# password grant
self.validator.authenticate_client.return_value = True
self.assertRaises(NotImplementedError,
self.legacy.create_token_response, token_uri,
body='grant_type=password&username=abc&password=secret')
# client credentials grant
self.validator.authenticate_client.return_value = True
self.assertRaises(NotImplementedError,
self.backend.create_token_response, token_uri,
body='grant_type=client_credentials')
|
2,575 |
test altitude agl
|
""" Unit Tests for Py-ART's io/nexrad_cdm.py module. """
import bz2
import numpy as np
import pytest
from numpy.ma.core import MaskedArray
from numpy.testing import assert_almost_equal
import pyart
###################################################
# read_nexrad_cdm tests (verify radar attributes) #
###################################################
# read in the sample file and create the radar objects
# We need to decompress the bz2 file which contains this data
with pyart.testing.InTemporaryDirectory():
tmpfile = "tmp_nexrad.nc"
with open(tmpfile, "wb") as f:
f.write(bz2.BZ2File(pyart.testing.NEXRAD_CDM_FILE).read())
radar = pyart.io.read_nexrad_cdm(tmpfile)
# time attribute
def test_time():
assert "comment" in radar.time.keys()
assert "long_name" in radar.time.keys()
assert "standard_name" in radar.time.keys()
assert "units" in radar.time.keys()
assert "calendar" in radar.time.keys()
assert "data" in radar.time.keys()
assert radar.time["units"] == "seconds since 2013-07-17T19:50:21Z"
assert radar.time["data"].shape == (7200,)
assert_almost_equal(radar.time["data"][1], 0.677, 3)
# range attribute
def test_range():
assert "long_name" in radar.range
assert "standard_name" in radar.range
assert "meters_to_center_of_first_gate" in radar.range
assert "meters_between_gates" in radar.range
assert "units" in radar.range
assert "data" in radar.range
assert "spacing_is_constant" in radar.range
assert radar.range["data"].shape == (1832,)
assert_almost_equal(radar.range["data"][1], 2375, 0)
# fields attribute is tested later
# metadata attribute
def test_metadata():
assert "instrument_name" in radar.metadata
assert "source" in radar.metadata
# scan_type attribute
def test_scan_type():
assert radar.scan_type == "ppi"
# latitude attribute
def test_latitude():
assert "data" in radar.latitude
assert "standard_name" in radar.latitude
assert "units" in radar.latitude
assert radar.latitude["data"].shape == (1,)
assert_almost_equal(radar.latitude["data"], 48, 0)
# longitude attribute
def test_longitude():
assert "data" in radar.longitude
assert "standard_name" in radar.longitude
assert "units" in radar.longitude
assert radar.longitude["data"].shape == (1,)
assert_almost_equal(radar.longitude["data"], -122, 0)
# altitude attribute
def test_altitude():
assert "data" in radar.altitude
assert "standard_name" in radar.altitude
assert "units" in radar.altitude
assert "positive" in radar.altitude
assert radar.altitude["data"].shape == (1,)
assert_almost_equal(radar.altitude["data"], 151, 0) # 10 m different
# altitude_agl attribute
def METHOD_NAME():
assert radar.altitude_agl is None
# sweep_number attribute
def test_sweep_number():
assert "standard_name" in radar.sweep_number
assert np.all(radar.sweep_number["data"] == range(16))
# sweep_mode attribute
def test_sweep_mode():
assert "standard_name" in radar.sweep_mode
assert radar.sweep_mode["data"].shape == (16,)
assert radar.sweep_mode["data"].dtype.char == "S"
assert np.all(radar.sweep_mode["data"] == [b"azimuth_surveillance"])
# fixed_angle attribute
def test_fixed_angle():
assert "standard_name" in radar.fixed_angle
assert "units" in radar.fixed_angle
assert radar.fixed_angle["data"].shape == (16,)
assert_almost_equal(radar.fixed_angle["data"][0], 0.53, 2)
# sweep_start_ray_index attribute
def test_sweep_start_ray_index():
assert "long_name" in radar.sweep_start_ray_index
assert radar.sweep_start_ray_index["data"].shape == (16,)
assert_almost_equal(radar.sweep_start_ray_index["data"][0], 0, 0)
# sweep_end_ray_index attribute
def test_sweep_end_ray_index():
assert "long_name" in radar.sweep_end_ray_index
assert radar.sweep_end_ray_index["data"].shape == (16,)
assert_almost_equal(radar.sweep_end_ray_index["data"][0], 719, 0)
# target_scan_rate attribute
def test_target_scan_rate():
assert radar.target_scan_rate is None
# azimuth attribute
def test_azimuth():
assert "standard_name" in radar.azimuth
assert "long_name" in radar.azimuth
assert "units" in radar.azimuth
assert "axis" in radar.azimuth
assert_almost_equal(radar.azimuth["data"][0], 350, 0)
assert_almost_equal(radar.azimuth["data"][10], 355, 0)
# elevation attribute
def test_elevation():
assert "standard_name" in radar.elevation
assert "long_name" in radar.azimuth
assert "units" in radar.elevation
assert "axis" in radar.elevation
assert radar.elevation["data"].shape == (7200,)
assert_almost_equal(radar.elevation["data"][0], 0.75, 2)
# scan_rate attribute
def test_scan_rate():
assert radar.scan_rate is None
# antenna_transition attribute
def test_antenna_transition():
assert radar.antenna_transition is None
# instrument_parameters attribute
def test_instrument_parameters():
assert radar.instrument_parameters is None
# radar_calibration attribute
def test_radar_calibration():
assert radar.radar_calibration is None
# ngates attribute
def test_ngates():
assert radar.ngates == 1832
# nrays attribute
def test_nrays():
assert radar.nrays == 7200
# nsweeps attribute
def test_nsweeps():
assert radar.nsweeps == 16
####################
# fields attribute #
####################
fields = [
"differential_phase",
"spectrum_width",
"cross_correlation_ratio",
"reflectivity",
"differential_reflectivity",
"velocity",
]
@pytest.mark.parametrize("field", fields)
def test_field_dics(field):
description = "field : %s, dictionary" % field
check_field_dic.description = description
check_field_dic(field)
def check_field_dic(field):
"""Check that the required keys are present in a field dictionary."""
assert "standard_name" in radar.fields[field]
assert "units" in radar.fields[field]
assert "_FillValue" in radar.fields[field]
assert "coordinates" in radar.fields[field]
@pytest.mark.parametrize("field", fields)
def test_field_shapes(field):
description = "field : %s, shape" % field
check_field_shape.description = description
check_field_shape(field)
def check_field_shape(field):
assert radar.fields[field]["data"].shape == (7200, 1832)
fields = {
"differential_phase": MaskedArray,
"spectrum_width": MaskedArray,
"cross_correlation_ratio": MaskedArray,
"reflectivity": MaskedArray,
"differential_reflectivity": MaskedArray,
"velocity": MaskedArray,
}
@pytest.mark.parametrize("field, field_type", fields.items(), ids=list(fields.keys()))
def test_field_types(field, field_type):
description = "field : %s, type" % field
check_field_type.description = description
check_field_type(field, field_type)
def check_field_type(field, field_type):
assert type(radar.fields[field]["data"]) is field_type
fields = {
"differential_phase": 181.0,
"spectrum_width": np.ma.masked,
"cross_correlation_ratio": 0.0,
"reflectivity": -32.0,
"differential_reflectivity": -8.0,
"velocity": np.ma.masked,
}
@pytest.mark.parametrize("field, field_value", fields.items(), ids=list(fields.keys()))
def test_field_first_points(field, field_value):
# these values can be found using:
# [round(radar.fields[f]['data'][0,0]) for f in radar.fields]
description = "field : %s, first point" % field
check_field_first_point.description = description
check_field_first_point(field, field_value)
def check_field_first_point(field, value):
if np.ma.is_masked(value):
assert np.ma.is_masked(radar.fields[field]["data"][0, 0])
else:
assert_almost_equal(radar.fields[field]["data"][0, 0], value, 0)
|
2,576 |
create layer variables
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Toy models and input generation tools for testing trainer code."""
from lingvo import model_registry
import lingvo.compat as tf
from lingvo.core import base_input_generator
from lingvo.core import base_model
from lingvo.core import base_model_params
from lingvo.core import metrics as metrics_lib
from lingvo.core import program
from lingvo.core import py_utils
from lingvo.core import summary_utils
import numpy as np
class CountingInputGenerator(base_input_generator.BaseInputGenerator):
"""Produces deterministic inputs for IdentityRegressionModel.
src_ids increment by 1, so a 2x2 batch would look like:
[[0, 1], [2, 3]]
and the next batch would be:
[[4, 5], [6, 7]]
Targets are the sum of the src_ids:
[1, 5]
next batch:
[9, 13]
Since `sum(src_ids) = target`, we expect that the regression model of
`target = sum(m * src_ids) + b` will learn `m = 1` and `b = 0`.
"""
@classmethod
def Params(cls):
p = super().Params()
p.Delete('batch_size')
p.Define('batch_size', 2, 'batch size')
p.Define('shape', [2, 2], 'source shape.')
return p
def __init__(self, params):
super().__init__(params)
self.shape = params.shape
def _InputBatch(self):
length = tf.reduce_prod(self.shape)
counter = summary_utils.StatsCounter('CountingInputGenerator')
new_value = tf.cast(counter.IncBy(length), dtype=tf.int32) - length
new_value = tf.stop_gradient(new_value)
values = new_value + tf.range(length)
shaped_values = tf.reshape(tf.cast(values, dtype=tf.float32), self.shape)
targets = tf.reduce_sum(shaped_values, axis=0)
return py_utils.NestedMap(src_ids=shaped_values, tgt_ids=targets)
class IdentityRegressionTask(base_model.BaseTask):
"""A simple regression task for testing."""
@classmethod
def Params(cls):
p = super().Params()
p.Define('weight_init_value', 0.8, 'Initial value of the model weights.')
p.name = 'identity_regression_task'
return p
def __init__(self, params):
super().__init__(params)
self.global_steps = []
self.metrics = []
self.result_per_example_tensors = []
def METHOD_NAME(self):
super().METHOD_NAME()
p = self.params
self.CreateVariable(
'm',
py_utils.WeightParams(
shape=[], init=py_utils.WeightInit.Constant(p.weight_init_value)))
self.CreateVariable(
'b',
py_utils.WeightParams(
shape=[], init=py_utils.WeightInit.Constant(p.weight_init_value)))
def ComputePredictions(self, theta, input_batch):
"""sum(m * x) + b."""
return tf.reduce_sum(theta.m * input_batch.src_ids, axis=1) + theta.b
def ComputeLoss(self, theta, predicted, input_batch):
diff = predicted - input_batch.tgt_ids
per_example_loss = diff * diff
batch_dim = py_utils.GetShape(per_example_loss)[0]
def replicate_var(name):
return tf.convert_to_tensor(
[self._private_vars[name]] * batch_dim, dtype=tf.float32)
metrics = {'loss': (tf.reduce_sum(per_example_loss), batch_dim)}
per_example_tensors = {
'input': input_batch.src_ids,
'loss': per_example_loss,
'diff': diff,
'm': replicate_var('m'),
'b': replicate_var('b'),
}
return metrics, per_example_tensors
def FilterPerExampleTensors(self, per_example):
return per_example
def ProcessFPropResults(self, sess, global_step, metrics,
per_example_tensors):
self.global_steps.append(global_step)
self.metrics.append(metrics)
self.result_per_example_tensors.append(per_example_tensors)
def CreateDecoderMetrics(self):
return {
'num_samples_in_batch': metrics_lib.AverageMetric(),
'diff': metrics_lib.AverageMetric(),
}
def DecodeWithTheta(self, theta, input_batch):
diff = self.ComputePredictions(theta, input_batch) - input_batch.tgt_ids
return {'diff': diff}
def PostProcessDecodeOut(self, dec_out_dict, dec_metrics_dict):
diff = dec_out_dict['diff']
dec_metrics_dict['diff'].Update(np.mean(diff))
dec_metrics_dict['num_samples_in_batch'].Update(len(diff))
return []
class ModelTrackingFPropResults(base_model.SingleTaskModel):
"""Simple regression model."""
def __init__(self, params, **kwargs):
super().__init__(params, **kwargs)
self.global_steps = []
self.metrics = []
self.result_per_example_tensors = []
def ProcessFPropResults(self, sess, global_step, metrics,
per_example_tensors):
self.global_steps.append(global_step)
self.metrics.append(metrics)
self.result_per_example_tensors.append(per_example_tensors)
def RegisterIdentityRegressionModel( # pylint: disable=invalid-name
batch_size=2,
weight_init_value=0.8,
optimizer=None,
learning_rate=1.0,
max_train_steps=10,
train_steps_per_loop=2,
eval_decode_steps_per_loop=2,
eval_decode_samples_per_summary=10):
"""Register an IdentityRegressionTask model with given configuration.
Args:
batch_size: batch size of CountingInputGenerator.
weight_init_value: constant init value for the model variables.
optimizer: if set, the optimizer params to use.
learning_rate: the learning rate to use.
max_train_steps: maximum training steps.
train_steps_per_loop: number of training steps per TPU loop.
eval_decode_steps_per_loop: number of evaluation/decode steps per TPU loop.
eval_decode_samples_per_summary: number of samples to eval/decode for each
checkpoint.
"""
class IdentityRegressionModel(base_model_params.SingleTaskModelParams):
"""Model params for IdentityRegressionTask."""
def Train(self):
return CountingInputGenerator.Params().Set(batch_size=batch_size)
def Test(self):
return CountingInputGenerator.Params().Set(batch_size=batch_size)
def Task(self):
p = IdentityRegressionTask.Params().Set(
weight_init_value=weight_init_value)
if optimizer:
p.train.optimizer = optimizer
p.train.learning_rate = learning_rate
p.train.max_steps = max_train_steps
p.train.tpu_steps_per_loop = train_steps_per_loop
p.eval.samples_per_summary = eval_decode_samples_per_summary
p.eval.decoder_samples_per_summary = eval_decode_samples_per_summary
return p
def Model(self):
return ModelTrackingFPropResults.Params(self.Task())
def ProgramSchedule(self):
p = program.SimpleProgramScheduleForTask(
train_dataset_name='Train',
train_steps_per_loop=train_steps_per_loop,
eval_dataset_names=['Test'],
eval_steps_per_loop=eval_decode_steps_per_loop,
decode_steps_per_loop=eval_decode_steps_per_loop)
if max_train_steps == 0:
p.train_executions_per_eval = 0
return p
model_registry.RegisterSingleTaskModel(IdentityRegressionModel)
|
2,577 |
test stop scheduler many
|
# SPDX-FileCopyrightText: Red Hat, Inc.
# SPDX-License-Identifier: GPL-2.0-or-later
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import time
import vdsm.common.time
from vdsm import schedule
from testlib import VdsmTestCase
from testValidation import broken_on_ci
from testValidation import stresstest
from testlib import permutations, expandPermutations
@expandPermutations
class SchedulerTests(VdsmTestCase):
# Time to wait for completion, so test will not fail on overloaded
# machines. If tests fails on CI, increase this value.
GRACETIME = 0.1
MAX_TASKS = 1000
PERMUTATIONS = ((time.time,), (vdsm.common.time.monotonic_time,))
def setUp(self):
self.scheduler = None
def tearDown(self):
if self.scheduler:
self.scheduler.stop(wait=True)
@broken_on_ci("timing sensitive, may fail on overloaded machine")
@permutations(PERMUTATIONS)
def test_schedule_after(self, clock):
self.create_scheduler(clock)
delay = 0.3
task1 = Task(clock)
task2 = Task(clock)
deadline = self.clock() + delay
self.scheduler.schedule(delay, task1)
self.scheduler.schedule(delay + 1, task2)
task1.wait(delay + self.GRACETIME)
self.assertTrue(deadline <= task1.call_time)
self.assertTrue(task1.call_time < deadline + self.GRACETIME)
self.assertEqual(task2.call_time, None)
@broken_on_ci("timing sensitive, may fail on overloaded machine")
@permutations(PERMUTATIONS)
def test_schedule_before(self, clock):
self.create_scheduler(clock)
delay = 0.3
task1 = Task(clock)
task2 = Task(clock)
deadline = self.clock() + delay
self.scheduler.schedule(delay + 1, task2)
self.scheduler.schedule(delay, task1)
task1.wait(delay + self.GRACETIME)
self.assertTrue(deadline <= task1.call_time)
self.assertTrue(task1.call_time < deadline + self.GRACETIME)
self.assertEqual(task2.call_time, None)
@broken_on_ci("timing sensitive, may fail on overloaded machine")
@permutations(PERMUTATIONS)
def test_continue_after_failures(self, clock):
self.create_scheduler(clock)
self.scheduler.schedule(0.3, FailingTask())
task = Task(clock)
self.scheduler.schedule(0.4, task)
task.wait(0.4 + self.GRACETIME)
self.assertTrue(task.call_time is not None)
@permutations(PERMUTATIONS)
def test_cancel_call(self, clock):
self.create_scheduler(clock)
delay = 0.3
task = Task(clock)
call = self.scheduler.schedule(delay, task)
self.assertTrue(call.valid())
call.cancel()
self.assertFalse(call.valid())
task.wait(delay + self.GRACETIME)
self.assertEqual(task.call_time, None)
@stresstest
@permutations(PERMUTATIONS)
def test_cancel_call_many(self, clock):
self.create_scheduler(clock)
delay = 0.3
tasks = []
for i in range(self.MAX_TASKS):
task = Task(clock)
call = self.scheduler.schedule(delay, task)
tasks.append((task, call))
for task, call in tasks:
call.cancel()
last_task = tasks[-1][0]
last_task.wait(delay + self.GRACETIME)
for task, call in tasks:
self.assertEqual(task.call_time, None)
@permutations(PERMUTATIONS)
def test_stop_scheduler(self, clock):
self.create_scheduler(clock)
delay = 0.3
task = Task(clock)
self.scheduler.schedule(delay, task)
self.scheduler.stop()
task.wait(delay + self.GRACETIME)
self.assertEqual(task.call_time, None)
@stresstest
@permutations(PERMUTATIONS)
def METHOD_NAME(self, clock):
self.create_scheduler(clock)
delay = 0.3
tasks = []
for i in range(self.MAX_TASKS):
task = Task(clock)
call = self.scheduler.schedule(delay, task)
tasks.append((task, call))
self.scheduler.stop()
last_task = tasks[-1][0]
last_task.wait(delay + self.GRACETIME)
for task, call in tasks:
self.assertEqual(task.call_time, None)
@stresstest
@permutations(PERMUTATIONS)
def test_latency(self, clock):
# Test how the scheduler copes with a load of 1000 calls per second.
# This is not the typical use, but it is interesting to see how well we
# can do. This may also reveal bad changes to the scheduler code
# that otherwise may be hidden in the noise.
self.create_scheduler(clock)
interval = 1.0
tickers = []
for i in range(self.MAX_TASKS):
ticker = Ticker(self.scheduler, interval, clock)
tickers.append(ticker)
time.sleep(10)
for ticker in tickers:
ticker.stop()
ticker.latency.sort()
min = ticker.latency[0]
avg = sum(ticker.latency) / len(ticker.latency)
med = ticker.latency[len(ticker.latency) // 2]
max = ticker.latency[-1]
print('latency - avg: %.3f min: %.3f median: %.3f max: %.3f' % (
avg, min, med, max))
# This may be too strict on overloaded machines. We may need to
# increase this value if it breaks in CI. On my laptop I get an
# average latency of 1 millisecond.
self.assertTrue(max < 0.1)
# Helpers
def create_scheduler(self, clock):
self.clock = clock
self.scheduler = schedule.Scheduler(clock=clock)
self.scheduler.start()
class Task(object):
def __init__(self, clock):
self.clock = clock
self.cond = threading.Condition(threading.Lock())
self.call_time = None
def __call__(self):
with self.cond:
self.call_time = self.clock()
self.cond.notify()
def wait(self, timeout):
with self.cond:
if self.call_time is None:
self.cond.wait(timeout)
class Ticker(object):
def __init__(self, scheduler, interval, clock):
self.scheduler = scheduler
self.interval = interval
self.clock = clock
self.latency = []
self.running = True
self.last = self.clock()
self.scheduler.schedule(self.interval, self.tick)
def stop(self):
self.running = False
def tick(self):
if self.running:
now = self.clock()
self.latency.append(now - self.last - self.interval)
self.last = now
self.scheduler.schedule(self.interval, self.tick)
class FailingTask(object):
def __call__(self):
raise Exception("This task is broken")
class TestScheduledCall(VdsmTestCase):
def setUp(self):
self.count = 0
def callback(self):
self.count += 1
def test_create(self):
call = schedule.ScheduledCall(0, self.callback)
self.assertEqual(0, self.count)
self.assertTrue(call.valid())
def test_execute(self):
call = schedule.ScheduledCall(0, self.callback)
call._execute()
self.assertEqual(1, self.count)
self.assertFalse(call.valid())
def test_execute_callback_once(self):
call = schedule.ScheduledCall(0, self.callback)
call._execute()
call._execute()
self.assertEqual(1, self.count)
def test_order(self):
now = vdsm.common.time.monotonic_time()
call_soon = schedule.ScheduledCall(now, self.callback)
call_later = schedule.ScheduledCall(now + 1, self.callback)
self.assertLess(call_soon, call_later)
|
2,578 |
run test
|
# Copyright 2013-present Barefoot Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Thrift SAI interface ACL tests
"""
from switch import *
import sai_base_test
@group('acl')
class IPAclTest(sai_base_test.ThriftInterfaceDataPlane):
def METHOD_NAME(self):
print
print '----------------------------------------------------------------------------------------------'
print "Sending packet ptf_intf 2 -> ptf_intf 1 (192.168.0.1 ---> 10.10.10.1 [id = 105])"
switch_init(self.client)
port1 = port_list[1]
port2 = port_list[2]
v4_enabled = 1
v6_enabled = 1
mac = ''
vr_id = sai_thrift_create_virtual_router(self.client, v4_enabled, v6_enabled)
rif_id1 = sai_thrift_create_router_interface(self.client, vr_id, 1, port1, 0, v4_enabled, v6_enabled, mac)
rif_id2 = sai_thrift_create_router_interface(self.client, vr_id, 1, port2, 0, v4_enabled, v6_enabled, mac)
addr_family = SAI_IP_ADDR_FAMILY_IPV4
ip_addr1 = '10.10.10.1'
ip_mask1 = '255.255.255.255'
dmac1 = '00:11:22:33:44:55'
sai_thrift_create_neighbor(self.client, addr_family, rif_id1, ip_addr1, dmac1)
nhop1 = sai_thrift_create_nhop(self.client, addr_family, ip_addr1, rif_id1)
sai_thrift_create_route(self.client, vr_id, addr_family, ip_addr1, ip_mask1, rif_id1)
# send the test packet(s)
pkt = simple_tcp_packet(eth_dst=router_mac,
eth_src='00:22:22:22:22:22',
ip_dst='10.10.10.1',
ip_src='192.168.0.1',
ip_id=105,
ip_ttl=64)
exp_pkt = simple_tcp_packet(
eth_dst='00:11:22:33:44:55',
eth_src=router_mac,
ip_dst='10.10.10.1',
ip_src='192.168.0.1',
ip_id=105,
ip_ttl=63)
try:
print '#### NO ACL Applied ####'
print '#### Sending ', router_mac, '| 00:22:22:22:22:22 | 10.10.10.1 | 192.168.0.1 | @ ptf_intf 2'
send_packet(self, 2, str(pkt))
print '#### Expecting 00:11:22:33:44:55 |', router_mac, '| 10.10.10.1 | 192.168.0.1 | @ ptf_intf 1'
verify_packets(self, exp_pkt, [1])
finally:
print '----------------------------------------------------------------------------------------------'
print "Sending packet ptf_intf 2 -[acl]-> ptf_intf 1 (192.168.0.1 -[acl]-> 10.10.10.1 [id = 105])"
print 'ACL \'DROP, src 192.168.0.1/255.255.255.0, in_ports[ptf_intf_1,2]\' Applied '
# setup ACL to block based on Source IP
action = 1 #Drop
in_ports = [port1, port2]
ip_src = "192.168.0.1"
ip_src_mask = "255.255.255.0"
ip_dst = None
ip_dst_mask = None
ip_proto = None
in_port = None
out_port = None
out_ports = None
ingress_mirror_id = None
egress_mirror_id = None
acl_table_id = sai_thrift_create_acl_table(self.client,
addr_family,
ip_src,
ip_dst,
ip_proto,
in_ports,
out_ports,
in_port,
out_port)
acl_entry_id = sai_thrift_create_acl_entry(self.client, acl_table_id,
action, addr_family,
ip_src, ip_src_mask,
ip_dst, ip_dst_mask,
ip_proto,
in_ports, out_ports,
in_port, out_port,
ingress_mirror_id,
egress_mirror_id)
try:
assert acl_table_id > 0, 'acl_table_id is <= 0'
assert acl_entry_id > 0, 'acl_entry_id is <= 0'
print '#### ACL \'DROP, src 192.168.0.1/255.255.255.0, in_ports[ptf_intf_1,2]\' Applied ####'
print '#### Sending ', router_mac, '| 00:22:22:22:22:22 | 10.10.10.1 | 192.168.0.1 | @ ptf_intf 2'
# send the same packet
send_packet(self, 2, str(pkt))
# ensure packet is dropped
# check for absence of packet here!
print '#### NOT Expecting 00:11:22:33:44:55 |', router_mac, '| 10.10.10.1 | 192.168.0.1 | @ ptf_intf 1'
verify_no_packet(self, exp_pkt, 1)
#verify_packets(self, exp_pkt, [1])
finally:
# cleanup ACL
self.client.sai_thrift_delete_acl_entry(acl_entry_id)
self.client.sai_thrift_delete_acl_table(acl_table_id)
# cleanup
sai_thrift_remove_route(self.client, vr_id, addr_family, ip_addr1, ip_mask1, rif_id1)
self.client.sai_thrift_remove_next_hop(nhop1)
sai_thrift_remove_neighbor(self.client, addr_family, rif_id1, ip_addr1, dmac1)
self.client.sai_thrift_remove_router_interface(rif_id1)
self.client.sai_thrift_remove_router_interface(rif_id2)
self.client.sai_thrift_remove_virtual_router(vr_id)
|
2,579 |
name
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetPrivateEndpointConnectionProxyResult',
'AwaitableGetPrivateEndpointConnectionProxyResult',
'get_private_endpoint_connection_proxy',
'get_private_endpoint_connection_proxy_output',
]
@pulumi.output_type
class GetPrivateEndpointConnectionProxyResult:
"""
Private endpoint connection proxy details.
"""
def __init__(__self__, e_tag=None, id=None, METHOD_NAME=None, provisioning_state=None, remote_private_endpoint=None, status=None, system_data=None, type=None):
if e_tag and not isinstance(e_tag, str):
raise TypeError("Expected argument 'e_tag' to be a str")
pulumi.set(__self__, "e_tag", e_tag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", METHOD_NAME)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if remote_private_endpoint and not isinstance(remote_private_endpoint, dict):
raise TypeError("Expected argument 'remote_private_endpoint' to be a dict")
pulumi.set(__self__, "remote_private_endpoint", remote_private_endpoint)
if status and not isinstance(status, str):
raise TypeError("Expected argument 'status' to be a str")
pulumi.set(__self__, "status", status)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(METHOD_NAME="eTag")
def e_tag(self) -> str:
"""
ETag from NRP.
"""
return pulumi.get(self, "e_tag")
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(METHOD_NAME="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the private endpoint connection proxy resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(METHOD_NAME="remotePrivateEndpoint")
def remote_private_endpoint(self) -> Optional['outputs.RemotePrivateEndpointResponse']:
"""
Remote private endpoint details.
"""
return pulumi.get(self, "remote_private_endpoint")
@property
@pulumi.getter
def status(self) -> Optional[str]:
"""
Operation status.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter(METHOD_NAME="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Azure Resource Manager metadata containing createdBy and modifiedBy information.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetPrivateEndpointConnectionProxyResult(GetPrivateEndpointConnectionProxyResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetPrivateEndpointConnectionProxyResult(
e_tag=self.e_tag,
id=self.id,
METHOD_NAME=self.METHOD_NAME,
provisioning_state=self.provisioning_state,
remote_private_endpoint=self.remote_private_endpoint,
status=self.status,
system_data=self.system_data,
type=self.type)
def get_private_endpoint_connection_proxy(account_name: Optional[str] = None,
private_endpoint_connection_proxy_id: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPrivateEndpointConnectionProxyResult:
"""
(INTERNAL - DO NOT USE) Get the specified private endpoint connection proxy associated with the device update account.
:param str account_name: Account name.
:param str private_endpoint_connection_proxy_id: The ID of the private endpoint connection proxy object.
:param str resource_group_name: The resource group name.
"""
__args__ = dict()
__args__['accountName'] = account_name
__args__['privateEndpointConnectionProxyId'] = private_endpoint_connection_proxy_id
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:deviceupdate/v20230701:getPrivateEndpointConnectionProxy', __args__, opts=opts, typ=GetPrivateEndpointConnectionProxyResult).value
return AwaitableGetPrivateEndpointConnectionProxyResult(
e_tag=pulumi.get(__ret__, 'e_tag'),
id=pulumi.get(__ret__, 'id'),
METHOD_NAME=pulumi.get(__ret__, 'name'),
provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
remote_private_endpoint=pulumi.get(__ret__, 'remote_private_endpoint'),
status=pulumi.get(__ret__, 'status'),
system_data=pulumi.get(__ret__, 'system_data'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_private_endpoint_connection_proxy)
def get_private_endpoint_connection_proxy_output(account_name: Optional[pulumi.Input[str]] = None,
private_endpoint_connection_proxy_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetPrivateEndpointConnectionProxyResult]:
"""
(INTERNAL - DO NOT USE) Get the specified private endpoint connection proxy associated with the device update account.
:param str account_name: Account name.
:param str private_endpoint_connection_proxy_id: The ID of the private endpoint connection proxy object.
:param str resource_group_name: The resource group name.
"""
...
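# Hedged usage sketch (the account and resource group names below are made up,
# not real Azure resources): both helpers take the same three identifiers; the
# *_output variant accepts pulumi Inputs and returns an Output that resolves to
# the same result shape.
#
#   proxy = get_private_endpoint_connection_proxy(
#       account_name="myAccount",
#       private_endpoint_connection_proxy_id="peproxy-id",
#       resource_group_name="myResourceGroup")
#   pulumi.export("proxyStatus", proxy.status)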
|
2,580 |
factor rule8
|
# Leo colorizer control file for factor mode.
# This file is in the public domain.
# Properties for factor mode.
properties = {
"commentEnd": ")",
"commentStart": "(",
"doubleBracketIndent": "true",
"indentCloseBrackets": "]",
"indentNextLines": "^(\\*<<|:).*",
"indentOpenBrackets": "[",
"lineComment": "!",
"lineUpClosingBracket": "true",
"noWordSep": "+-*=><;.?/'",
}
# Attributes dict for factor_main ruleset.
factor_main_attributes_dict = {
"default": "null",
"digit_re": "-?\\d+([./]\\d+)?",
"escape": "\\",
"highlight_digits": "true",
"ignore_case": "false",
"no_word_sep": "+-*=><;.?/'",
}
# Attributes dict for factor_stack_effect ruleset.
factor_stack_effect_attributes_dict = {
"default": "COMMENT4",
"digit_re": "-?\\d+([./]\\d+)?",
"escape": "\\",
"highlight_digits": "true",
"ignore_case": "false",
"no_word_sep": "+-*=><;.?/'",
}
# Dictionary of attributes dictionaries for factor mode.
attributesDictDict = {
"factor_main": factor_main_attributes_dict,
"factor_stack_effect": factor_stack_effect_attributes_dict,
}
# Keywords dict for factor_main ruleset.
factor_main_keywords_dict = {
"#{": "operator",
"--": "label",
";": "markup",
"<": "label",
">": "label",
"[": "operator",
"]": "operator",
"f": "literal4",
"r": "keyword1",
"t": "literal3",
"{": "operator",
"|": "operator",
"}": "operator",
"~": "label",
}
# Keywords dict for factor_stack_effect ruleset.
factor_stack_effect_keywords_dict = {}
# Dictionary of keywords dictionaries for factor mode.
keywordsDictDict = {
"factor_main": factor_main_keywords_dict,
"factor_stack_effect": factor_stack_effect_keywords_dict,
}
# Rules for factor_main ruleset.
def factor_rule0(colorer, s, i):
return colorer.match_eol_span(s, i, kind="comment2", seq="#!")
def factor_rule1(colorer, s, i):
return colorer.match_eol_span(s, i, kind="comment1", seq="!")
def factor_rule2(colorer, s, i):
return colorer.match_seq_regexp(s, i, kind="markup", regexp=":\\s+(\\S+)")
def factor_rule3(colorer, s, i):
return colorer.match_seq_regexp(s, i, kind="markup", regexp="IN:\\s+(\\S+)")
def factor_rule4(colorer, s, i):
return colorer.match_seq_regexp(s, i, kind="markup", regexp="USE:\\s+(\\S+)")
def factor_rule5(colorer, s, i):
return colorer.match_seq_regexp(s, i, kind="markup", regexp="DEFER:\\s+(\\S+)")
def factor_rule6(colorer, s, i):
return colorer.match_seq_regexp(s, i, kind="markup", regexp="POSTPONE:\\s+(\\S+)")
def factor_rule7(colorer, s, i):
return colorer.match_seq_regexp(s, i, kind="literal2", regexp="CHAR:\\s+(\\S+)")
def METHOD_NAME(colorer, s, i):
return colorer.match_seq_regexp(s, i, kind="literal2", regexp="BIN:\\s+(\\S+)")
def factor_rule9(colorer, s, i):
return colorer.match_seq_regexp(s, i, kind="literal2", regexp="OCT:\\s+(\\S+)")
def factor_rule10(colorer, s, i):
return colorer.match_seq_regexp(s, i, kind="literal2", regexp="HEX:\\s+(\\S+)")
def factor_rule11(colorer, s, i):
return colorer.match_span(s, i, kind="comment3", begin="(", end=")",
delegate="factor::stack_effect")
def factor_rule12(colorer, s, i):
return colorer.match_span(s, i, kind="literal1", begin="\"", end="\"",
no_line_break=True)
def factor_rule13(colorer, s, i):
return colorer.match_keywords(s, i)
# Rules dict for factor_main ruleset.
rulesDict1 = {
"!": [factor_rule1,],
"\"": [factor_rule12,],
"#": [factor_rule0, factor_rule13,],
"(": [factor_rule11,],
"-": [factor_rule13,],
"0": [factor_rule13,],
"1": [factor_rule13,],
"2": [factor_rule13,],
"3": [factor_rule13,],
"4": [factor_rule13,],
"5": [factor_rule13,],
"6": [factor_rule13,],
"7": [factor_rule13,],
"8": [factor_rule13,],
"9": [factor_rule13,],
":": [factor_rule2,],
";": [factor_rule13,],
"<": [factor_rule13,],
">": [factor_rule13,],
"@": [factor_rule13,],
"A": [factor_rule13,],
"B": [METHOD_NAME, factor_rule13,],
"C": [factor_rule7, factor_rule13,],
"D": [factor_rule5, factor_rule13,],
"E": [factor_rule13,],
"F": [factor_rule13,],
"G": [factor_rule13,],
"H": [factor_rule10, factor_rule13,],
"I": [factor_rule3, factor_rule13,],
"J": [factor_rule13,],
"K": [factor_rule13,],
"L": [factor_rule13,],
"M": [factor_rule13,],
"N": [factor_rule13,],
"O": [factor_rule9, factor_rule13,],
"P": [factor_rule6, factor_rule13,],
"Q": [factor_rule13,],
"R": [factor_rule13,],
"S": [factor_rule13,],
"T": [factor_rule13,],
"U": [factor_rule4, factor_rule13,],
"V": [factor_rule13,],
"W": [factor_rule13,],
"X": [factor_rule13,],
"Y": [factor_rule13,],
"Z": [factor_rule13,],
"[": [factor_rule13,],
"]": [factor_rule13,],
"a": [factor_rule13,],
"b": [factor_rule13,],
"c": [factor_rule13,],
"d": [factor_rule13,],
"e": [factor_rule13,],
"f": [factor_rule13,],
"g": [factor_rule13,],
"h": [factor_rule13,],
"i": [factor_rule13,],
"j": [factor_rule13,],
"k": [factor_rule13,],
"l": [factor_rule13,],
"m": [factor_rule13,],
"n": [factor_rule13,],
"o": [factor_rule13,],
"p": [factor_rule13,],
"q": [factor_rule13,],
"r": [factor_rule13,],
"s": [factor_rule13,],
"t": [factor_rule13,],
"u": [factor_rule13,],
"v": [factor_rule13,],
"w": [factor_rule13,],
"x": [factor_rule13,],
"y": [factor_rule13,],
"z": [factor_rule13,],
"{": [factor_rule13,],
"|": [factor_rule13,],
"}": [factor_rule13,],
"~": [factor_rule13,],
}
# Rules for factor_stack_effect ruleset.
def factor_rule14(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="comment3", seq="--")
# Rules dict for factor_stack_effect ruleset.
rulesDict2 = {
"-": [factor_rule14,],
}
# x.rulesDictDict for factor mode.
rulesDictDict = {
"factor_main": rulesDict1,
"factor_stack_effect": rulesDict2,
}
# Import dict for factor mode.
importDict = {}
|
2,581 |
get argparser ctor args
|
# -------------------------------------------------------------------------
#
# Part of the CodeChecker project, under the Apache License v2.0 with
# LLVM Exceptions. See LICENSE for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# -------------------------------------------------------------------------
"""
Defines a subcommand for CodeChecker which prints version information.
"""
import argparse
import json
from typing import Dict, List, Tuple
from codechecker_report_converter import twodim
from codechecker_common import logger
from codechecker_common.output import USER_FORMATS
from codechecker_web.shared import webserver_context, version
LOG = logger.get_logger('system')
class Version:
def __init__(self):
context = webserver_context.get_context()
self.server_versions = [
f'{major}.{minor}'
for major, minor in version.SUPPORTED_VERSIONS.items()]
self.version = context.version
self.build_date = context.package_build_date
self.git_hash = context.package_git_hash
self.git_tag = context.package_git_tag
self.client_api = version.CLIENT_API
def is_release_candidate(self):
return 'rc' in self.version
def to_dict(self) -> Dict[str, str]:
""" Get version information in dictionary format. """
return {
"base_package_version": self.version,
"package_build_date": self.build_date,
"git_commit": self.git_hash,
"git_tag": self.git_tag,
"server_api_version": self.server_versions,
"client_api_version": self.client_api}
def to_list(self) -> List[Tuple[str, str]]:
""" Get version information in list format. """
server_versions = ', '.join(self.server_versions)
return [
("Base package version", self.version),
("Package build date", self.build_date),
("Git commit ID (hash)", self.git_hash),
("Git tag information", self.git_tag),
("Server supported Thrift API version", server_versions),
("Client Thrift API version", self.client_api)]
def print(self, output_format: str):
""" Print web server version information in the given format. """
if output_format == "json":
print(json.dumps(self.to_dict()))
else:
LOG.info("CodeChecker web version:")
print(twodim.to_str(
output_format, ["Kind", "Version"], self.to_list()),
flush=True)
if self.is_release_candidate():
LOG.warning("This version is only a release candidate! If you "
"encounter any problems, please submit a bug "
"report!")
def METHOD_NAME():
"""
This method returns a dict containing the kwargs for constructing an
argparse.ArgumentParser (either directly or as a subparser).
"""
return {
'prog': 'CodeChecker webserver version',
'formatter_class': argparse.ArgumentDefaultsHelpFormatter,
# Description is shown when the command's help is queried directly
'description': "Print the version of CodeChecker server package that "
"is being used.",
# Help is shown when the "parent" CodeChecker command lists the
# individual subcommands.
'help': "Print the version of CodeChecker server package that is "
"being used."
}
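# Hedged sketch (the parser wiring below is illustrative, not CodeChecker's
# actual plugin loader): the returned kwargs are intended to be splatted into
# an argparse parser, typically a subparser registered for this subcommand.
_example_subparsers = argparse.ArgumentParser(prog='CodeChecker').add_subparsers()
_example_version_parser = _example_subparsers.add_parser('version', **METHOD_NAME())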
def add_arguments_to_parser(parser):
"""
Add the subcommand's arguments to the given argparse.ArgumentParser.
"""
parser.add_argument('-o', '--output',
dest='output_format',
required=False,
default='table',
choices=USER_FORMATS,
help="The format to use when printing the version.")
logger.add_verbose_arguments(parser)
parser.set_defaults(func=main)
def main(args):
"""
Get and print the version information from the version config
file and Thrift API definition.
"""
# If the given output format is not 'table', redirect logger's output to
# the stderr.
stream = None
if 'output_format' in args and args.output_format != 'table':
stream = 'stderr'
logger.setup_logger(args.verbose if 'verbose' in args else None, stream)
Version().print(args.output_format)
|
2,582 |
lot solvent string
|
""" Utilities to determine level of theory, task type, and calculation type for Q-Chem calculations"""
from typing import Any, Dict, Optional
from emmet.core.qchem.calc_types import LevelOfTheory, CalcType, TaskType
from emmet.core.qchem.calc_types.calc_types import (
FUNCTIONALS,
BASIS_SETS,
)
__author__ = "Evan Spotte-Smith <[email protected]>"
functional_synonyms = {
"b97mv": "b97m-v",
"b97mrv": "b97m-rv",
"wb97xd": "wb97x-d",
"wb97xd3": "wb97x-d3",
"wb97xv": "wb97x-v",
"wb97mv": "wb97m-v",
}
smd_synonyms = {
"DIELECTRIC=7,230;N=1,410;ALPHA=0,000;BETA=0,859;GAMMA=36,830;PHI=0,000;PSI=0,000": "diglyme",
"DIELECTRIC=18,500;N=1,415;ALPHA=0,000;BETA=0,735;GAMMA=20,200;PHI=0,000;PSI=0,000": "3:7 EC:EMC",
}
def level_of_theory(parameters: Dict[str, Any]) -> LevelOfTheory:
"""
Returns the level of theory for a calculation,
based on the input parameters given to Q-Chem
Args:
parameters: Dict of Q-Chem input parameters
"""
funct_raw = parameters.get("rem", dict()).get("method")
basis_raw = parameters.get("rem", dict()).get("basis")
if funct_raw is None or basis_raw is None:
raise ValueError(
'Method and basis must be included in "rem" section ' "of parameters!"
)
disp_corr = parameters.get("rem", dict()).get("dft_d")
if disp_corr is None:
funct_lower = funct_raw.lower()
funct_lower = functional_synonyms.get(funct_lower, funct_lower)
else:
# Replace Q-Chem terms for D3 tails with more common expressions
disp_corr = disp_corr.replace("_bj", "(bj)").replace("_zero", "(0)")
funct_lower = f"{funct_raw}-{disp_corr}"
basis_lower = basis_raw.lower()
functional = [f for f in FUNCTIONALS if f.lower() == funct_lower]
if not functional:
raise ValueError(f"Unexpected functional {funct_lower}!")
functional = functional[0]
basis = [b for b in BASIS_SETS if b.lower() == basis_lower]
if not basis:
raise ValueError(f"Unexpected basis set {basis_lower}!")
basis = basis[0]
solvent_method = parameters["rem"].get("solvent_method", "").lower()
if solvent_method == "":
solvation = "VACUUM"
elif solvent_method in ["pcm", "cosmo"]:
solvation = "PCM"
# TODO: Add this once added into pymatgen and atomate
# elif solvent_method == "isosvp":
# if parameters.get("svp", {}).get("idefesr", 0):
# solvation = "CMIRS"
# else:
# solvation = "ISOSVP"
elif solvent_method == "smd":
solvation = "SMD"
else:
raise ValueError(f"Unexpected implicit solvent method {solvent_method}!")
lot = f"{functional}/{basis}/{solvation}"
return LevelOfTheory(lot)
def solvent(parameters: Dict[str, Any], custom_smd: Optional[str] = None) -> str:
"""
Returns the solvent used for this calculation.
Args:
parameters: Dict of Q-Chem input parameters
custom_smd: (Optional) string representing SMD parameters for a
non-standard solvent
"""
lot = level_of_theory(parameters)
solvation = lot.value.split("/")[-1]
if solvation == "PCM":
dielectric = float(parameters.get("solvent", {}).get("dielectric", 78.39))
dielectric_string = f"{dielectric:.2f}".replace(".", ",")
return f"DIELECTRIC={dielectric_string}"
# TODO: Add this once added into pymatgen and atomate
# elif solvation == "ISOSVP":
# dielectric = float(parameters.get("svp", {}).get("dielst", 78.39))
# rho = float(parameters.get("svp", {}).get("rhoiso", 0.001))
# return f"DIELECTRIC={round(dielectric, 2)},RHO={round(rho, 4)}"
# elif solvation == "CMIRS":
# dielectric = float(parameters.get("svp", {}).get("dielst", 78.39))
# rho = float(parameters.get("svp", {}).get("rhoiso", 0.001))
# a = parameters.get("pcm_nonels", {}).get("a")
# b = parameters.get("pcm_nonels", {}).get("b")
# c = parameters.get("pcm_nonels", {}).get("c")
# d = parameters.get("pcm_nonels", {}).get("d")
# solvrho = parameters.get("pcm_nonels", {}).get("solvrho")
# gamma = parameters.get("pcm_nonels", {}).get("gamma")
#
# string = f"DIELECTRIC={round(dielectric, 2)},RHO={round(rho, 4)}"
# for name, (piece, digits) in {"A": (a, 6), "B": (b, 6), "C": (c, 1), "D": (d, 3),
# "SOLVRHO": (solvrho, 2), "GAMMA": (gamma, 1)}.items():
# if piece is None:
# piecestring = "NONE"
# else:
# piecestring = f"{name}={round(float(piece), digits)}"
# string += "," + piecestring
# return string
elif solvation == "SMD":
solvent = parameters.get("smx", {}).get("solvent", "water")
if solvent == "other":
if custom_smd is None:
raise ValueError(
"SMD calculation with solvent=other requires custom_smd!"
)
names = ["DIELECTRIC", "N", "ALPHA", "BETA", "GAMMA", "PHI", "PSI"]
numbers = [float(x) for x in custom_smd.split(",")]
string = ""
for name, number in zip(names, numbers):
string += f"{name}={number:.3f};"
return string.rstrip(",").rstrip(";").replace(".", ",")
else:
return f"SOLVENT={solvent.upper()}"
else:
return "NONE"
def METHOD_NAME(
parameters: Dict[str, Any], custom_smd: Optional[str] = None
) -> str:
"""
Returns a string representation of the level of theory and solvent used for this calculation.
Args:
parameters: Dict of Q-Chem input parameters
custom_smd: (Optional) string representing SMD parameters for a
non-standard solvent
"""
lot = level_of_theory(parameters).value
solv = solvent(parameters, custom_smd=custom_smd)
return f"{lot}({solv})"
def task_type(orig: Dict[str, Any], special_run_type: Optional[str] = None) -> TaskType:
if special_run_type == "frequency_flattener":
return TaskType("Frequency Flattening Geometry Optimization")
elif special_run_type == "ts_frequency_flattener":
return TaskType("Frequency Flattening Transition State Geometry Optimization")
if orig["rem"].get("job_type") == "sp":
return TaskType("Single Point")
elif orig["rem"].get("job_type") == "force":
return TaskType("Force")
elif orig["rem"].get("job_type") == "opt":
return TaskType("Geometry Optimization")
elif orig["rem"].get("job_type") == "ts":
return TaskType("Transition State Geometry Optimization")
elif orig["rem"].get("job_type") == "freq":
return TaskType("Frequency Analysis")
return TaskType("Unknown")
def calc_type(special_run_type: str, orig: Dict[str, Any]) -> CalcType:
"""
Determines the calc type for a Q-Chem calculation.
Args:
special_run_type: Special run type (e.g. "frequency_flattener"), if any
orig: Dict of the original Q-Chem input parameters
"""
rt = level_of_theory(orig).value
tt = task_type(orig, special_run_type=special_run_type).value
return CalcType(f"{rt} {tt}")
|
2,583 |
predicate
|
# Copyright 2021 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Any, Dict, Tuple, Type, Union
import numpy as np
from cirq import ops, protocols, value
if TYPE_CHECKING:
import cirq
# Tag for gates to which noise must be applied.
PHYSICAL_GATE_TAG = 'physical_gate'
@value.value_equality(distinct_child_types=True)
class OpIdentifier:
"""Identifies an operation by gate and (optionally) target qubits."""
def __init__(self, gate_type: Type['cirq.Gate'], *qubits: 'cirq.Qid'):
self._gate_type = gate_type
self._gate_family = ops.GateFamily(gate_type)
self._qubits: Tuple['cirq.Qid', ...] = tuple(qubits)
@property
def gate_type(self) -> Type['cirq.Gate']:
# set to a type during initialization, never modified
return self._gate_type
@property
def qubits(self) -> Tuple['cirq.Qid', ...]:
return self._qubits
def METHOD_NAME(self, *args, **kwargs):
return self._gate_family.METHOD_NAME(*args, **kwargs)
def is_proper_subtype_of(self, op_id: 'OpIdentifier'):
"""Returns true if this is contained within op_id, but not equal to it.
If this returns true, (x in self) implies (x in op_id), but the reverse
implication does not hold. op_id must be more general than self (either
by accepting any qubits or having a more general gate type) for this
to return true.
"""
more_specific_qubits = self.qubits and not op_id.qubits
more_specific_gate = self.gate_type != op_id.gate_type and issubclass(
self.gate_type, op_id.gate_type
)
if more_specific_qubits:
return more_specific_gate or self.gate_type == op_id.gate_type
elif more_specific_gate:
return more_specific_qubits or self.qubits == op_id.qubits
else:
return False
def __contains__(self, item: Union[ops.Gate, ops.Operation]) -> bool:
if isinstance(item, ops.Gate):
return (not self._qubits) and self.METHOD_NAME(item)
return (
(not self.qubits or (item.qubits == self._qubits))
and item.gate is not None
and self.METHOD_NAME(item.gate)
)
def __str__(self):
return f'{self.gate_type}{self.qubits}'
def __repr__(self) -> str:
fullname = f'{self.gate_type.__module__}.{self.gate_type.__qualname__}'
qubits = ', '.join(map(repr, self.qubits))
return f'cirq.devices.noise_utils.OpIdentifier({fullname}, {qubits})'
def _value_equality_values_(self) -> Any:
return (self.gate_type, self.qubits)
def _json_dict_(self) -> Dict[str, Any]:
gate_json = protocols.json_cirq_type(self._gate_type)
return {'gate_type': gate_json, 'qubits': self._qubits}
@classmethod
def _from_json_dict_(cls, gate_type, qubits, **kwargs) -> 'OpIdentifier':
gate_type = protocols.cirq_type_from_json(gate_type)
return cls(gate_type, *qubits)
# TODO: expose all from top-level cirq?
def decay_constant_to_xeb_fidelity(decay_constant: float, num_qubits: int = 2) -> float:
"""Calculates the XEB fidelity from the depolarization decay constant.
Args:
decay_constant: Depolarization decay constant.
num_qubits: Number of qubits.
Returns:
Calculated XEB fidelity.
"""
N = 2**num_qubits
return 1 - ((1 - decay_constant) * (1 - 1 / N))
def decay_constant_to_pauli_error(decay_constant: float, num_qubits: int = 1) -> float:
"""Calculates pauli error from the depolarization decay constant.
Args:
decay_constant: Depolarization decay constant.
num_qubits: Number of qubits.
Returns:
Calculated Pauli error.
"""
N = 2**num_qubits
return (1 - decay_constant) * (1 - 1 / N / N)
def pauli_error_to_decay_constant(pauli_error: float, num_qubits: int = 1) -> float:
"""Calculates depolarization decay constant from pauli error.
Args:
pauli_error: The pauli error.
num_qubits: Number of qubits.
Returns:
Calculated depolarization decay constant.
"""
N = 2**num_qubits
return 1 - (pauli_error / (1 - 1 / N / N))
def xeb_fidelity_to_decay_constant(xeb_fidelity: float, num_qubits: int = 2) -> float:
"""Calculates the depolarization decay constant from XEB fidelity.
Args:
xeb_fidelity: The XEB fidelity.
num_qubits: Number of qubits.
Returns:
Calculated depolarization decay constant.
"""
N = 2**num_qubits
return 1 - (1 - xeb_fidelity) / (1 - 1 / N)
def pauli_error_from_t1(t_ns: float, t1_ns: float) -> float:
"""Calculates the pauli error from T1 decay constant.
This computes error for a specific duration, `t`.
Args:
t_ns: The duration of the gate in ns.
t1_ns: The T1 decay constant in ns.
Returns:
Calculated Pauli error resulting from T1 decay.
"""
t2 = 2 * t1_ns
return (1 - np.exp(-t_ns / t2)) / 2 + (1 - np.exp(-t_ns / t1_ns)) / 4
def average_error(decay_constant: float, num_qubits: int = 1) -> float:
"""Calculates the average error from the depolarization decay constant.
Args:
decay_constant: Depolarization decay constant.
num_qubits: Number of qubits.
Returns:
Calculated average error.
"""
N = 2**num_qubits
return (1 - decay_constant) * (1 - 1 / N)
def decoherence_pauli_error(t1_ns: float, tphi_ns: float, gate_time_ns: float) -> float:
"""The component of Pauli error caused by decoherence on a single qubit.
Args:
t1_ns: T1 time in nanoseconds.
tphi_ns: Tphi time in nanoseconds.
gate_time_ns: Duration in nanoseconds of the gate affected by this error.
Returns:
Calculated Pauli error resulting from decoherence.
"""
gamma_2 = (1 / (2 * t1_ns)) + 1 / tphi_ns
exp1 = np.exp(-gate_time_ns / t1_ns)
exp2 = np.exp(-gate_time_ns * gamma_2)
px = 0.25 * (1 - exp1)
py = px
pz = 0.5 * (1 - exp2) - px
return px + py + pz
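# Worked example (illustrative, using only the formulas defined above): a
# single-qubit depolarization decay constant of 0.99 corresponds to a Pauli
# error of (1 - 0.99) * (1 - 1/4) = 0.0075, and converting that Pauli error
# back recovers the same decay constant. On two qubits, the same decay
# constant gives an XEB fidelity of 1 - (1 - 0.99) * (1 - 1/4) = 0.9925.
_example_pauli = decay_constant_to_pauli_error(0.99, num_qubits=1)
assert abs(_example_pauli - 0.0075) < 1e-12
assert abs(pauli_error_to_decay_constant(_example_pauli, num_qubits=1) - 0.99) < 1e-12
assert abs(decay_constant_to_xeb_fidelity(0.99, num_qubits=2) - 0.9925) < 1e-12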
|
2,584 |
execute
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014-2022 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Santiago Dueñas <[email protected]>
#
import unittest
import unittest.mock
import click.testing
from sortinghat.cli.client import SortingHatClientError
from sortinghat.cli.cmds.add import add
ADD_CMD_OP = """mutation {{
addIdentity(
source: "{}"
email: "{}"
name: "{}"
username: "{}"
) {{
uuid
}}
}}"""
ADD_PARTIAL_CMD_OP = """mutation {{
addIdentity(source: "{}", email: "{}") {{
uuid
}}
}}"""
ADD_UUID_CMD_OP = """mutation {{
addIdentity(source: "{}", email: "{}", uuid: "{}") {{
uuid
}}
}}"""
ADD_OUTPUT = (
"New identity eda9f62ad321b1fbe5f283cc05e2484516203117 "
"added to eda9f62ad321b1fbe5f283cc05e2484516203117\n"
)
ADD_PARTIAL_OUTPUT = (
"New identity 322397ed782a798ffd9d0bc7e293df4292fe075d "
"added to 322397ed782a798ffd9d0bc7e293df4292fe075d\n"
)
ADD_UUID_OUTPUT = (
"New identity ffefc2e3f2a255e9450ac9e2d36f37c28f51bd73 "
"added to a9b403e150dd4af8953a52a4bb841051e4b705d9\n"
)
ADD_UTF8_OUTPUT = (
"New identity 843fcc3383ddfd6179bef87996fa761d88a43915 "
"added to 843fcc3383ddfd6179bef87996fa761d88a43915\n"
)
ADD_NOT_FOUND_ERROR = (
"FFFFFFFFFFFFFFF not found in the registry"
)
class MockClient:
"""Mock client"""
def __init__(self, responses):
self.responses = responses
self.ops = []
def connect(self):
pass
def disconnect(self):
pass
def METHOD_NAME(self, operation):
self.ops.append(operation)
response = self.responses.pop(0)
if isinstance(response, SortingHatClientError):
raise response
else:
return response
class TestAddCommand(unittest.TestCase):
"""Add command unit tests"""
@unittest.mock.patch('sortinghat.cli.utils.SortingHatClient')
def test_add(self, mock_client):
"""Check if it adds a new identity"""
responses = [
{'data': {'addIdentity': {'uuid': 'eda9f62ad321b1fbe5f283cc05e2484516203117'}}},
]
client = MockClient(responses)
mock_client.return_value = client
runner = click.testing.CliRunner()
# Create a new identity
params = [
'--source', 'scm',
'--email', '[email protected]',
'--name', 'Jane Roe',
'--username', 'jrae'
]
result = runner.invoke(add, params)
expected = ADD_CMD_OP.format('scm', '[email protected]', 'Jane Roe', 'jrae')
self.assertEqual(len(client.ops), 1)
self.assertEqual(str(client.ops[0]), expected)
self.assertEqual(result.stdout, ADD_OUTPUT)
self.assertEqual(result.exit_code, 0)
@unittest.mock.patch('sortinghat.cli.utils.SortingHatClient')
def test_add_partial_data(self, mock_client):
"""Check if it adds a new identity giving partial data"""
responses = [
{'data': {'addIdentity': {'uuid': '322397ed782a798ffd9d0bc7e293df4292fe075d'}}},
]
client = MockClient(responses)
mock_client.return_value = client
runner = click.testing.CliRunner()
# Create a new identity setting partial data
params = [
'--source', 'scm',
'--email', '[email protected]'
]
result = runner.invoke(add, params)
expected = ADD_PARTIAL_CMD_OP.format('scm', '[email protected]')
self.assertEqual(len(client.ops), 1)
self.assertEqual(str(client.ops[0]), expected)
self.assertEqual(result.stdout, ADD_PARTIAL_OUTPUT)
self.assertEqual(result.exit_code, 0)
@unittest.mock.patch('sortinghat.cli.utils.SortingHatClient')
def test_add_with_uuid(self, mock_client):
"""Check if it adds a new identity to an existing one"""
responses = [
{'data': {'addIdentity': {'uuid': 'ffefc2e3f2a255e9450ac9e2d36f37c28f51bd73'}}}
]
client = MockClient(responses)
mock_client.return_value = client
runner = click.testing.CliRunner()
# Assign to John Smith - a9b403e150dd4af8953a52a4bb841051e4b705d9
# individual
params = [
'--source', 'mls',
'--email', '[email protected]',
'--uuid', 'a9b403e150dd4af8953a52a4bb841051e4b705d9'
]
result = runner.invoke(add, params)
expected = ADD_UUID_CMD_OP.format('mls', '[email protected]',
'a9b403e150dd4af8953a52a4bb841051e4b705d9')
self.assertEqual(len(client.ops), 1)
self.assertEqual(str(client.ops[0]), expected)
self.assertEqual(result.stdout, ADD_UUID_OUTPUT)
self.assertEqual(result.exit_code, 0)
@unittest.mock.patch('sortinghat.cli.utils.SortingHatClient')
def test_add_with_utf8_4bytes(self, mock_client):
"""Check if it adds a new identity with utf-8 of 4 bytes"""
responses = [
{'data': {'addIdentity': {'uuid': '843fcc3383ddfd6179bef87996fa761d88a43915'}}}
]
client = MockClient(responses)
mock_client.return_value = client
runner = click.testing.CliRunner()
params = [
'--source', 'scm',
'--name', '😂',
'--email', '😂',
'--username', '😂'
]
result = runner.invoke(add, params)
expected = ADD_CMD_OP.format('scm', '😂', '😂', '😂',
'843fcc3383ddfd6179bef87996fa761d88a43915')
self.assertEqual(len(client.ops), 1)
self.assertEqual(str(client.ops[0]), expected)
self.assertEqual(result.stdout, ADD_UTF8_OUTPUT)
self.assertEqual(result.exit_code, 0)
@unittest.mock.patch('sortinghat.cli.utils.SortingHatClient')
def test_error(self, mock_client):
""""Check if it fails when an error is sent by the server"""
error = {
'message': ADD_NOT_FOUND_ERROR,
'extensions': {
'code': 9
}
}
responses = [
SortingHatClientError(error['message'], errors=[error])
]
client = MockClient(responses)
mock_client.return_value = client
runner = click.testing.CliRunner(mix_stderr=False)
params = [
'--source', 'scm',
'--email', '[email protected]',
'--uuid', 'FFFFFFFFFFFFFFF'
]
result = runner.invoke(add, params, obj=mock_client)
expected = ADD_UUID_CMD_OP.format('scm', '[email protected]',
'FFFFFFFFFFFFFFF')
self.assertEqual(len(client.ops), 1)
self.assertEqual(str(client.ops[0]), expected)
expected_err = "Error: " + ADD_NOT_FOUND_ERROR + '\n'
self.assertEqual(result.stderr, expected_err)
self.assertEqual(result.exit_code, 9)
if __name__ == '__main__':
unittest.main()
|
2,585 |
convert to text
|
from collections import OrderedDict, defaultdict
from typing import List
from deeplake.util.hash import hash_str_to_int32
from deeplake.util.exceptions import EmptyTensorError
from deeplake.client.log import logger
import numpy as np
import deeplake
def convert_to_idx(samples, class_names: List[str]):
class_idx = {class_names[i]: i for i in range(len(class_names))}
def convert(samples):
idxs = []
additions = []
for sample in samples:
if isinstance(sample, np.ndarray):
sample = sample.tolist()
if isinstance(sample, str):
idx = class_idx.get(sample)
if idx is None:
idx = len(class_idx)
class_idx[sample] = idx
additions.append((sample, idx))
idxs.append(idx)
elif isinstance(sample, list):
idxs_, additions_ = convert(sample)
idxs.append(idxs_)
additions.extend(additions_)
else:
idxs.append(sample)
return idxs, additions
return convert(samples)
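# Illustrative example of the function above (toy labels): classes missing from
# class_names are assigned new indices on the fly and reported back as
# (label, index) additions alongside the converted indices.
assert convert_to_idx(["cat", "dog", "cat"], ["cat"]) == ([0, 1, 0], [("dog", 1)])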
def convert_to_hash(samples, hash_label_map):
if isinstance(samples, np.ndarray):
samples = samples.tolist()
if isinstance(samples, list):
return [convert_to_hash(sample, hash_label_map) for sample in samples]
else:
if isinstance(samples, str):
hash_ = hash_str_to_int32(samples)
hash_label_map[hash_] = samples
else:
hash_ = samples
return hash_
def convert_hash_to_idx(hashes, hash_idx_map):
if isinstance(hashes, list):
return [convert_hash_to_idx(hash, hash_idx_map) for hash in hashes]
else:
try:
return hash_idx_map[hashes]
except KeyError:
return hashes
def METHOD_NAME(inp, class_names: List[str], return_original=False):
if isinstance(inp, np.integer):
idx = int(inp)
if idx < len(class_names):
return class_names[idx]
return idx if return_original else None
return [METHOD_NAME(item, class_names) for item in inp]
def sync_labels(
ds, label_temp_tensors, hash_label_maps, num_workers, scheduler, verbose=True
):
ds = ds.root
hl_maps = defaultdict(OrderedDict)
for map in hash_label_maps:
for tensor in map:
hl_maps[tensor].update(map[tensor])
hash_label_maps = hl_maps
@deeplake.compute
def class_label_sync(
hash_tensor_sample,
samples_out,
label_tensor: str,
hash_idx_map,
):
try:
hashes = hash_tensor_sample.numpy().tolist()
idxs = convert_hash_to_idx(hashes, hash_idx_map)
except EmptyTensorError:
idxs = None
samples_out[label_tensor].append(idxs)
for tensor, temp_tensor in label_temp_tensors.items():
if len(ds[temp_tensor]) == 0:
ds.delete_tensor(temp_tensor, large_ok=True)
else:
try:
target_tensor = ds[tensor]
hash_label_map = hash_label_maps[temp_tensor]
class_names = target_tensor.info.class_names
new_labels = [
label
for label in hash_label_map.values()
if label not in class_names
]
if verbose:
N = len(class_names)
for i in range(len(new_labels)):
logger.info(
f"'{new_labels[i]}' added to {tensor}.info.class_names at index {N + i}"
)
class_names.extend(new_labels)
label_idx_map = {class_names[i]: i for i in range(len(class_names))}
hash_idx_map = {
hash: label_idx_map[hash_label_map[hash]] for hash in hash_label_map
}
target_tensor.info.is_dirty = True
target_tensor.meta._disable_temp_transform = True
target_tensor.meta.is_dirty = True
logger.info("Synchronizing class labels...")
class_label_sync(label_tensor=tensor, hash_idx_map=hash_idx_map).eval(
ds[temp_tensor],
ds,
progressbar=True,
check_lengths=False,
skip_ok=True,
)
target_tensor.meta._disable_temp_transform = False
finally:
ds.delete_tensor(temp_tensor, large_ok=True)
|
2,586 |
is applied
|
# Copyright 2020-2023 Capypara and the SkyTemple Contributors
#
# This file is part of SkyTemple.
#
# SkyTemple is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SkyTemple is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SkyTemple. If not, see <https://www.gnu.org/licenses/>.
from __future__ import annotations
from typing import Callable
from ndspy.rom import NintendoDSRom
from range_typed_integers import u32_checked
from skytemple_files.common.ppmdu_config.data import (
GAME_REGION_EU,
GAME_REGION_US,
GAME_VERSION_EOS,
Pmd2Data,
)
from skytemple_files.common.util import (
create_file_in_rom,
write_u32,
read_u32,
)
from skytemple_files.common.i18n_util import _
from skytemple_files.hardcoded.dungeons import HardcodedDungeons
from skytemple_files.patch.category import PatchCategory
from skytemple_files.patch.handler.abstract import AbstractPatchHandler
PATCH_CHECK_ADDR_APPLIED_US = 0x4F7F8
PATCH_CHECK_INSTR_APPLIED_US = 0x359F1010
PATCH_CHECK_ADDR_APPLIED_EU = 0x4FB30
PATCH_CHECK_INSTR_APPLIED_EU = 0x359F1010
FLOOR_FORBID_TABLE_US = 0x9F714
ITEM_AVAILABLE_TABLE_US = 0x94D34
FLOOR_RANKS_TABLE_US = 0xA0AD4
FLOOR_FORBID_TABLE_EU = 0x9FC98
ITEM_AVAILABLE_TABLE_EU = 0x95130
FLOOR_RANKS_TABLE_EU = 0xA1058
NB_ITEMS_TABLE = 100
AVAILABLE_ITEMS_NB = 1024
ARM9_START = 0x02000000
FLOOR_FORBID_PATH = "BALANCE/fforbid.bin"
FLOOR_RANKS_PATH = "BALANCE/f_ranks.bin"
AVAILABLE_ITEMS_PATH = "BALANCE/a_items.bin"
# TODO: move this somewhere else
FLOORS_NB = [
3, 5, 6, 10, 8, 12, 9, 5, 14, 5,
11, 5, 16, 20, 15, 21, 11, 14, 8, 15,
15, 8, 12, 20, 15, 24, 24, 14, 25, 25,
20, 18, 50, 20, 23, 30, 18, 30, 20, 8,
13, 20, 10, 15, 20, 20, 30, 6, 5, 10,
5, 50, 20, 99, 30, 19, 19, 17, 25, 75,
40, 40, 99, 1, 50, 99, 10, 5, 15, 20,
25, 30, 40, 17, 7, 10, 15, 11, 16, 20,
8, 10, 15, 10, 18, 10, 11, 5, 5, 11,
19, 16, 5, 6, 7, 6, 5, 5, 5, 5,
]
class ExtractDungeonDataPatchHandler(AbstractPatchHandler):
@property
def name(self) -> str:
return "ExtractDungeonData"
@property
def description(self) -> str:
return _(
"Extracts the floor ranks, forbidden mission floors, items available in dungeon tables and put them in files. Provides support for reading them from the rom file system."
)
@property
def author(self) -> str:
return "Anonymous"
@property
def version(self) -> str:
return "0.0.1"
@property
def category(self) -> PatchCategory:
return PatchCategory.UTILITY
def METHOD_NAME(self, rom: NintendoDSRom, config: Pmd2Data) -> bool:
if config.game_version == GAME_VERSION_EOS:
if config.game_region == GAME_REGION_US:
return (
read_u32(rom.arm9, PATCH_CHECK_ADDR_APPLIED_US)
!= PATCH_CHECK_INSTR_APPLIED_US
)
if config.game_region == GAME_REGION_EU:
return (
read_u32(rom.arm9, PATCH_CHECK_ADDR_APPLIED_EU)
!= PATCH_CHECK_INSTR_APPLIED_EU
)
raise NotImplementedError()
def apply(
self, apply: Callable[[], None], rom: NintendoDSRom, config: Pmd2Data
) -> None:
if not self.METHOD_NAME(rom, config):
if config.game_version == GAME_VERSION_EOS:
if config.game_region == GAME_REGION_US:
rank_table = FLOOR_RANKS_TABLE_US
item_table = ITEM_AVAILABLE_TABLE_US
forbid_table = FLOOR_FORBID_TABLE_US
if config.game_region == GAME_REGION_EU:
rank_table = FLOOR_RANKS_TABLE_EU
item_table = ITEM_AVAILABLE_TABLE_EU
forbid_table = FLOOR_FORBID_TABLE_EU
header = bytearray(NB_ITEMS_TABLE * 4)
rank_data = bytearray(0)
forbid_data = bytearray(0)
current_ptr = u32_checked(len(header))
for i in range(NB_ITEMS_TABLE):
start = read_u32(rom.arm9, rank_table + i * 4) - ARM9_START
end = start + 1 + FLOORS_NB[i]
if end % 4 != 0:
end += 4 - (end % 4)
rdata = rom.arm9[start:end]
fdata = bytearray(len(rdata))
x = forbid_table
while rom.arm9[x] != 0x64:
if rom.arm9[x] == i:
fdata[rom.arm9[x + 1]] = 1
x += 2
rom.arm9 = (
rom.arm9[:start] + bytes([0xCC] * (end - start)) + rom.arm9[end:]
)
write_u32(header, current_ptr, i * 4)
rank_data += bytearray(rdata)
forbid_data += bytearray(fdata)
current_ptr += end - start # type: ignore
file_data = header + rank_data
if FLOOR_RANKS_PATH not in rom.filenames:
create_file_in_rom(rom, FLOOR_RANKS_PATH, file_data)
else:
rom.setFileByName(FLOOR_RANKS_PATH, file_data)
file_data = header + forbid_data
if FLOOR_FORBID_PATH not in rom.filenames:
create_file_in_rom(rom, FLOOR_FORBID_PATH, file_data)
else:
rom.setFileByName(FLOOR_FORBID_PATH, file_data)
dungeon_list = HardcodedDungeons.get_dungeon_list(rom.arm9, config)
groups = [d.mappa_index for d in dungeon_list]
print(hex(len(groups)))
list_available = []
for x in range(AVAILABLE_ITEMS_NB):
list_available.append(bytearray(0x100 // 8))
for i, g in enumerate(groups):
off = item_table + g * (AVAILABLE_ITEMS_NB // 8) + (x // 8)
if rom.arm9[off] & (1 << (x % 8)):
list_available[-1][i // 8] |= 1 << (i % 8)
file_data = bytearray().join(list_available)
if AVAILABLE_ITEMS_PATH not in rom.filenames:
create_file_in_rom(rom, AVAILABLE_ITEMS_PATH, file_data)
else:
rom.setFileByName(AVAILABLE_ITEMS_PATH, file_data)
try:
apply()
except RuntimeError as ex:
raise ex
def unapply(
self, unapply: Callable[[], None], rom: NintendoDSRom, config: Pmd2Data
) -> None:
raise NotImplementedError()
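# Hedged read-back sketch (the helper name and layout interpretation are mine,
# not part of the patch): judging from the write loop in apply(), a_items.bin
# stores one 0x100 // 8 = 32-byte row per item, and bit i of that row marks
# whether the item is available for the i-th dungeon in the hardcoded dungeon
# list (whose mappa group indexes the original ROM table).
def _item_available(file_data: bytes, item_id: int, dungeon_index: int) -> bool:
    row = file_data[item_id * 32:(item_id + 1) * 32]
    return bool(row[dungeon_index // 8] & (1 << (dungeon_index % 8)))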
|
2,587 |
run
|
from __future__ import absolute_import, division, print_function
# LIBTBX_SET_DISPATCHER_NAME phenix.resolution
from iotbx import reflection_file_reader
from cctbx import maptbx
from cctbx.array_family import flex
from libtbx.utils import Sorry
import sys, math, time
from scitbx import regular_grid_on_unit_sphere
import six
from six.moves import range
def one_d_image_along_axis(n, step, uc_length):
rho = flex.double()
dist = flex.double()
r = 0
while r < uc_length/2:
rho_ = 0
for n_key, n_value in six.iteritems(n):
rho_ += n_value*math.cos(2*math.pi*r*n_key/uc_length)
dist.append(r)
rho.append(rho_)
r+=step
return dist, rho
def second_derivatives(rho, delta):
rho_2nd = flex.double()
for i in range(rho.size()):
if(i>=1 and i<rho.size()-1): tau = (rho[i+1]+rho[i-1]-2*rho[i])/delta**2
elif(i==0): tau = (rho[i+1]+rho[i+1]-2*rho[i])/delta**2
else: tau = (rho[i+0]+rho[i-1]-2*rho[i])/delta**2
rho_2nd.append(tau)
result = flex.double()
for i in range(rho_2nd.size()):
rho_ave = 0
span = [-2,-1,0,1,2]
#span = [-5,-4,-3,-2,-1,0,1,2, 3,4,5]
for j in span:
ij = i+j
if(ij<0): ij=0
if(ij>=rho_2nd.size()): ij=rho_2nd.size()-1
rho_ave += rho_2nd[ij]
rho_ave = rho_ave/len(span)
result.append(rho_ave)
return result
def compute_d_eff(r, rho_2nd):
v0 = rho_2nd[0]
for i in range(rho_2nd.size()):
if(v0*rho_2nd[i]<0):
return r[i]*2.5 # formulas (9)-(10)
return None
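# Illustrative sketch (toy numbers, not real reflection data): with counts
# n = {0: 1, 1: 2} along an axis of length 10, the 1D image above is
# rho(r) = 1 + 2*cos(2*pi*r/10), sampled every `step` until r reaches half the
# cell edge; compute_d_eff then takes the first sign change of the smoothed
# second derivative and scales that distance by 2.5 (formulas (9)-(10)).
_dist, _rho = one_d_image_along_axis(n={0: 1, 1: 2}, step=1.0, uc_length=10.0)
assert _dist.size() == 5 and abs(_rho[0] - 3.0) < 1e-9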
def compute(miller_array, step_scale=0.0005):
miller_array.show_comprehensive_summary(prefix=" ")
step = miller_array.d_min()*step_scale
#
ma_p1 = miller_array.expand_to_p1()
#
n_h = {}
n_k = {}
n_l = {}
indices = ma_p1.indices()
for ind in indices:
h,k,l = ind
n_h.setdefault(h, flex.int()).append(1)
n_k.setdefault(k, flex.int()).append(1)
n_l.setdefault(l, flex.int()).append(1)
def count(d):
for k in d.keys():
d[k] = d[k].size()
return d
n_h = count(n_h)
n_k = count(n_k)
n_l = count(n_l)
# resolutions along axes
a,b,c = miller_array.unit_cell().parameters()[:3]
x, rho_x = one_d_image_along_axis(n=n_h, step=step, uc_length=a)
y, rho_y = one_d_image_along_axis(n=n_k, step=step, uc_length=b)
z, rho_z = one_d_image_along_axis(n=n_l, step=step, uc_length=c)
# 2nd derivatives
r2x = second_derivatives(rho=rho_x, delta=step)
r2y = second_derivatives(rho=rho_y, delta=step)
r2z = second_derivatives(rho=rho_z, delta=step)
# effective resolution along axes
d_eff_a = compute_d_eff(r=x, rho_2nd=r2x)
d_eff_b = compute_d_eff(r=y, rho_2nd=r2y)
d_eff_c = compute_d_eff(r=z, rho_2nd=r2z)
print(" Effective resolution along axes a,b,c: %6.3f %6.3f %6.3f"%(
d_eff_a, d_eff_b, d_eff_c))
# all directions
l = 0.8 * min(d_eff_a/2.5, d_eff_b/2.5, d_eff_c/2.5)
r = 1.2 * max(d_eff_a/2.5, d_eff_b/2.5, d_eff_c/2.5)
us = regular_grid_on_unit_sphere.rosca(m=9, hemisphere=True)
d_effs = flex.double()
o = maptbx.ft_analytical_1d_point_scatterer_at_origin(N=100000)
for i, u in enumerate(us):
o.compute(
miller_indices=indices,
step=step,
left=l,
right=r,
u_frac=miller_array.unit_cell().fractionalize(u))
dist, rho_ = o.distances(), o.rho()
rho2 = second_derivatives(rho=rho_, delta=step)
d_eff = compute_d_eff(r=dist, rho_2nd=rho2)
d_effs.append(d_eff)
print(" Effective resolution (min,max): %8.3f%8.3f"%(
flex.min(d_effs), flex.max(d_effs)))
def METHOD_NAME(args):
if(len(args)!=1):
raise Sorry("Reflection file expected.")
reflection_file = reflection_file_reader.any_reflection_file(
file_name = args[0])
miller_arrays = reflection_file.as_miller_arrays(
force_symmetry=True,
merge_equivalents=False)
if(miller_arrays is None):
raise Sorry("Warning: unknown file format:", file_name)
for ma in miller_arrays:
if(type(ma.data()) == type(flex.double())):
print("Processing data array with labels:", ma.info().label_string())
compute(miller_array=ma)
print()
if (__name__ == "__main__"):
t0 = time.time()
METHOD_NAME(args=sys.argv[1:])
print("Time: %8.3f"%(time.time()-t0))
|
2,588 |
command
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import re
from typing import Optional, Type
from lisa.executable import Tool
from lisa.operating_system import Alpine, Debian
from lisa.tools.ip import Ip
from lisa.util import UnsupportedDistroException
from lisa.util.process import Process
INTERNET_PING_ADDRESS = "8.8.8.8"
class Ping(Tool):
# ping: SO_BINDTODEVICE: Operation not permitted
# ping: icmp open socket: Operation not permitted
# ping: socket: Operation not permitted
_no_permission_pattern = re.compile(
r"ping: .* Operation not permitted",
re.M,
)
# ping: sendmsg: Operation not permitted
# The message indicates that the ICMP echo request packet has not been sent and is
# blocked by the Control Plane ACL. Run "iptables --list" to check.
no_sendmsg_permission_pattern = re.compile(
r"ping: sendmsg: Operation not permitted",
re.M,
)
@property
def METHOD_NAME(self) -> str:
return "ping"
@property
def can_install(self) -> bool:
return True
def install(self) -> bool:
if isinstance(self.node.os, Debian):
package_name = "iputils-ping"
else:
raise UnsupportedDistroException(self.node.os)
self.node.os.install_packages(package_name)
return self._check_exists()
def ping_async(
self,
target: str = "",
nic_name: str = "",
count: int = 5,
interval: float = 0.2,
package_size: Optional[int] = None,
sudo: bool = False,
) -> Process:
if not target:
target = INTERNET_PING_ADDRESS
args: str = f"{target} -c {count} -i {interval}"
# For Alpine, '-O' option is unrecognized, so remove '-O'
if not isinstance(self.node.os, Alpine):
args += " -O"
if nic_name:
args += f" -I {nic_name}"
if package_size:
args += f" -s {package_size}"
return self.run_async(args, force_run=True, sudo=sudo)
def ping(
self,
target: str = "",
nic_name: str = "",
count: int = 5,
interval: float = 0.2,
package_size: Optional[int] = None,
ignore_error: bool = False,
sudo: bool = False,
) -> bool:
if not target:
target = INTERNET_PING_ADDRESS
result = self.ping_async(
target=target,
nic_name=nic_name,
count=count,
interval=interval,
package_size=package_size,
sudo=sudo,
).wait_result()
# On some distros, like RHEL, ping with -I nic_name needs sudo;
# otherwise, ping fails with the output below:
# 'ping: SO_BINDTODEVICE: Operation not permitted'
if not sudo and self._no_permission_pattern.findall(result.stdout):
result = self.ping_async(
target=target,
nic_name=nic_name,
count=count,
interval=interval,
package_size=package_size,
sudo=True,
).wait_result()
if not ignore_error:
result.assert_exit_code(
message=(
"failed on ping. The server may not be reached."
f" ping result is {result.stdout}"
),
)
# return ping passed or not.
return result.exit_code == 0
@classmethod
def _freebsd_tool(cls) -> Optional[Type[Tool]]:
return FreeBSDPing
class FreeBSDPing(Ping):
def ping_async(
self,
target: str = "",
nic_name: str = "",
count: int = 5,
interval: float = 0.2,
package_size: Optional[int] = None,
sudo: bool = False,
) -> Process:
if not target:
target = INTERNET_PING_ADDRESS
args: str = ""
# The '-O' option is problematic on FreeBSD, so it is omitted here.
# Running 'ping -c 5 -i 0.2 bing.com' without sudo fails with:
# ping: -i interval too short: Operation not permitted
# Either run ping under sudo (there is no minimum interval in that case),
# or keep the interval >= 1 without sudo.
if interval < 1 and not sudo:
sudo = True
args = f"-c {count} -i {interval}"
if nic_name:
# pinging with interface name has issue in FreeBSD
# https://unix.stackexchange.com/questions/341590/invalid-multicast-interface
interface_inet = self.node.tools[Ip].get_ip_address(nic_name)
args += f" -S {interface_inet}"
if package_size:
args += f" -s {package_size}"
args += f" {target}"
return self.run_async(args, force_run=True, sudo=sudo)
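# Small standalone illustration (not part of the tool): the retry-with-sudo
# logic in Ping.ping keys off output such as the line below, which
# _no_permission_pattern matches in full.
_sample_output = "ping: SO_BINDTODEVICE: Operation not permitted"
assert Ping._no_permission_pattern.findall(_sample_output) == [_sample_output]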
|
2,589 |
config date
|
from bisect import bisect
from collections import OrderedDict
from decimal import Decimal
from itertools import groupby
from dateutil.parser import parse
from flask import current_app as app
from main import db
from sqlalchemy import true, inspect
from sqlalchemy.orm.base import NO_VALUE
from sqlalchemy.sql.functions import func
from sqlalchemy_continuum.utils import version_class, transaction_class
from typing import TypeAlias
# This alias *was* required to apply type annotations to the model objects,
# but I don't think it even does that any more.
# MyPy doesn't support this nested class syntax which flask-sqlalchemy uses,
# even though type annotations are now present. https://github.com/pallets-eco/flask-sqlalchemy/issues/1112
BaseModel: TypeAlias = db.Model # type: ignore[name-defined]
""" Type alias for ISO currency (GBP or EUR currently). """
# Note: A better type for this would be Union[Literal['GBP'], Literal['EUR']] but setting this
# results in a world of pain currently.
#
# Ideally needs to be unified with the Currency class in app/common/__init__.py, but this is
# non-trivial.
Currency = str
def event_start():
return METHOD_NAME("EVENT_START")
def event_year():
"""Year of the current event"""
return event_start().year
def event_end():
return METHOD_NAME("EVENT_END")
def exists(query):
return db.session.query(true()).filter(query.exists()).scalar()
def to_dict(obj):
return OrderedDict(
(a.key, getattr(obj, a.key))
for a in inspect(obj).attrs
if a.loaded_value != NO_VALUE
)
def count_groups(query, *entities):
return (
query.with_entities(func.count().label("count"), *entities)
.group_by(*entities)
.order_by(*entities)
)
def nest_count_keys(rows):
"""For JSON's sake, because it doesn't support tuples as keys"""
tree = OrderedDict()
for c, *key in rows:
node = tree
for k in key[:-1]:
node = node.setdefault(k, OrderedDict())
node[key[-1]] = c
return tree
def bucketise(vals, boundaries):
"""Sort values into bins, like pandas.cut"""
ranges = [
"%s-%s" % (a, b - 1) if isinstance(b, int) and b - 1 > a else str(a)
for a, b in zip(boundaries[:-1], boundaries[1:])
]
ranges.append("%s+" % boundaries[-1])
counts = OrderedDict.fromkeys(ranges, 0)
for val in vals:
if isinstance(val, tuple):
# As a convenience for fetching counts/single columns in sqla
val, *_ = val
i = bisect(boundaries, val)
if i == 0:
raise IndexError(
"{} is below the lowest boundary {}".format(val, boundaries[0])
)
counts[ranges[i - 1]] += 1
return counts
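# Illustrative usage of bucketise (toy numbers): boundaries behave like
# left-closed bins, so a value equal to a boundary falls into the bin that
# starts at that boundary (bisect is bisect_right), and anything below the
# first boundary raises IndexError.
assert bucketise([5, 18, 29, 70], [0, 18, 30, 65]) == OrderedDict(
    [("0-17", 1), ("18-29", 2), ("30-64", 0), ("65+", 1)]
)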
def export_intervals(query, date_entity, interval, fmt):
return nest_count_keys(
count_groups(query, func.to_char(func.date_trunc(interval, date_entity), fmt))
)
def export_counts(query, cols):
counts = OrderedDict()
for col in cols:
counts[col.name] = nest_count_keys(count_groups(query, col))
return counts
def export_attr_counts(cls, attrs):
cols = [getattr(cls, a) for a in attrs]
return export_counts(cls.query, cols)
def export_attr_edits(cls, attrs):
edits_iter = iter_attr_edits(cls, attrs)
maxes = dict.fromkeys(attrs, 0)
totals = dict.fromkeys(attrs, 0)
count = 0
for pk, attr_times in edits_iter:
for a in attrs:
maxes[a] = max(maxes[a], len(attr_times[a]))
totals[a] += len(attr_times[a])
count += 1
edits = OrderedDict()
for a in attrs:
if count == 0:
edits[a] = {"max": 0, "avg": Decimal("0.00")}
else:
avg = Decimal(totals[a]) / count
edits[a] = {"max": maxes[a], "avg": avg.quantize(Decimal("0.01"))}
return edits
def iter_attr_edits(cls, attrs, query=None):
pk_cols = [k for k in inspect(cls).primary_key]
cls_version = version_class(cls)
pk_cols_version = [getattr(cls_version, k.name) for k in pk_cols]
attrs_version = [getattr(cls_version, a) for a in attrs]
cls_transaction = transaction_class(cls)
if query is None:
query = cls_version.query
all_versions = (
query.join(cls_version.transaction)
.with_entities(*pk_cols_version + attrs_version + [cls_transaction.issued_at])
.order_by(*pk_cols_version + [cls_version.transaction_id])
)
def get_pk(row):
return [getattr(row, k.name) for k in pk_cols_version]
for pk, versions in groupby(all_versions, get_pk):
# We don't yet process inserts/deletes, but should
first = next(versions)
attr_vals = {a: getattr(first, a) for a in attrs}
attr_times = {a: [first.issued_at] for a in attrs}
for version in versions:
for attr in attrs:
val = getattr(version, attr)
if val != attr_vals[attr]:
attr_times[attr].append(version.issued_at)
attr_vals[attr] = val
yield (pk, attr_times)
def METHOD_NAME(key):
return parse(app.config.get(key))
from .user import * # noqa: F401,F403
from .payment import * # noqa: F401,F403
from .cfp import * # noqa: F401,F403
from .permission import * # noqa: F401,F403
from .email import * # noqa: F401,F403
from .ical import * # noqa: F401,F403
from .product import * # noqa: F401,F403
from .purchase import * # noqa: F401,F403
from .basket import * # noqa: F401,F403
from .admin_message import * # noqa: F401,F403
from .volunteer import * # noqa: F401,F403
from .village import * # noqa: F401,F403
from .scheduled_task import * # noqa: F401,F403
from .feature_flag import * # noqa: F401,F403
from .site_state import * # noqa: F401,F403
db.configure_mappers()
|
2,590 |
copy from local
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import shutil
from typing import List, Optional
logger = logging.getLogger(__file__)
try:
from iopath.common.file_io import g_pathmgr as IOPathManager
try:
# [FB only - for now] AWS PathHandler for PathManager
from .fb_pathhandlers import S3PathHandler
IOPathManager.register_handler(S3PathHandler())
except KeyError:
logging.warning("S3PathHandler already registered.")
except ImportError:
logging.debug(
"S3PathHandler couldn't be imported. Either missing fb-only files, or boto3 module."
)
except ImportError:
IOPathManager = None
class PathManager:
"""
Wrapper for insulating OSS I/O (using Python builtin operations) from
iopath's PathManager abstraction (for transparently handling various
internal backends).
"""
@staticmethod
def open(
path: str,
mode: str = "r",
buffering: int = -1,
encoding: Optional[str] = None,
errors: Optional[str] = None,
newline: Optional[str] = None,
):
if IOPathManager:
return IOPathManager.open(
path=path,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
)
return open(
path,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
)
@staticmethod
def copy(src_path: str, dst_path: str, overwrite: bool = False) -> bool:
if IOPathManager:
return IOPathManager.copy(
src_path=src_path, dst_path=dst_path, overwrite=overwrite
)
return shutil.copyfile(src_path, dst_path)
@staticmethod
def get_local_path(path: str, **kwargs) -> str:
if IOPathManager:
return IOPathManager.get_local_path(path, **kwargs)
return path
@staticmethod
def exists(path: str) -> bool:
if IOPathManager:
return IOPathManager.exists(path)
return os.path.exists(path)
@staticmethod
def isfile(path: str) -> bool:
if IOPathManager:
return IOPathManager.isfile(path)
return os.path.isfile(path)
@staticmethod
def ls(path: str) -> List[str]:
if IOPathManager:
return IOPathManager.ls(path)
return os.listdir(path)
@staticmethod
def mkdirs(path: str) -> None:
if IOPathManager:
return IOPathManager.mkdirs(path)
os.makedirs(path, exist_ok=True)
@staticmethod
def rm(path: str) -> None:
if IOPathManager:
return IOPathManager.rm(path)
os.remove(path)
@staticmethod
def chmod(path: str, mode: int) -> None:
if not PathManager.path_requires_pathmanager(path):
os.chmod(path, mode)
@staticmethod
def register_handler(handler) -> None:
if IOPathManager:
return IOPathManager.register_handler(handler=handler)
@staticmethod
def METHOD_NAME(
local_path: str, dst_path: str, overwrite: bool = False, **kwargs
) -> None:
if IOPathManager:
return IOPathManager.METHOD_NAME(
local_path=local_path, dst_path=dst_path, overwrite=overwrite, **kwargs
)
return shutil.copyfile(local_path, dst_path)
@staticmethod
def path_requires_pathmanager(path: str) -> bool:
"""Do we require PathManager to access given path?"""
if IOPathManager:
for p in IOPathManager._path_handlers.keys():
if path.startswith(p):
return True
return False
@staticmethod
def supports_rename(path: str) -> bool:
# PathManager doesn't yet support renames
return not PathManager.path_requires_pathmanager(path)
@staticmethod
def rename(src: str, dst: str):
os.rename(src, dst)
"""
ioPath async PathManager methods:
"""
@staticmethod
def opena(
path: str,
mode: str = "r",
buffering: int = -1,
encoding: Optional[str] = None,
errors: Optional[str] = None,
newline: Optional[str] = None,
):
"""
Return file descriptor with asynchronous write operations.
"""
global IOPathManager
if not IOPathManager:
logging.info("ioPath is initializing PathManager.")
try:
from iopath.common.file_io import PathManager
IOPathManager = PathManager()
except Exception:
logging.exception("Failed to initialize ioPath PathManager object.")
return IOPathManager.opena(
path=path,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
)
@staticmethod
def async_close() -> bool:
"""
Wait for files to be written and clean up asynchronous PathManager.
NOTE: `PathManager.async_close()` must be called at the end of any
script that uses `PathManager.opena(...)`.
"""
global IOPathManager
if IOPathManager:
return IOPathManager.async_close()
return False
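# Minimal usage sketch (illustrative; the paths are hypothetical). The wrapper
# falls back to the Python builtins when iopath is unavailable, so the same
# calls work with or without a registered handler:
#
#     if not PathManager.exists("/tmp/example"):
#         PathManager.mkdirs("/tmp/example")
#     with PathManager.open("/tmp/example/data.txt", "w") as f:
#         f.write("hello")
#     assert PathManager.isfile("/tmp/example/data.txt")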
|
2,591 |
test partial summarize transform
|
import pytest
from db.transforms.base import Summarize, Limit
from db.transforms.operations.serialize import serialize_transformation
fully_speced_summarize = \
Summarize(
dict(
aggregation_expressions=[
dict(
function='distinct_aggregate_to_array',
input_alias='col2',
output_alias='col2_agged'
)
],
base_grouping_column='col1',
grouping_expressions=[
dict(
input_alias='col1',
output_alias='col1_grouped',
)
]
)
)
@pytest.mark.parametrize(
'input_summarize, expected_summarize', [
[
Summarize(
dict(
base_grouping_column='col1',
aggregation_expressions=[
dict(
function='distinct_aggregate_to_array',
input_alias='col2',
output_alias='col2_agged'
)
],
),
),
fully_speced_summarize,
],
[
Summarize(
dict(
base_grouping_column='col1',
grouping_expressions=[
dict(
input_alias='col1',
output_alias='col1_grouped',
)
]
)
),
fully_speced_summarize,
],
[
Summarize(
dict(
base_grouping_column='col1',
)
),
fully_speced_summarize,
],
]
)
def METHOD_NAME(
create_patents_table, client, input_summarize, expected_summarize,
):
base_table = create_patents_table(table_name='patent_query_run_minimal_table')
initial_columns = [
{
'id': base_table.get_column_by_name('Center').id,
'alias': 'col1',
},
{
'id': base_table.get_column_by_name('Case Number').id,
'alias': 'col2',
},
]
input_summarize_transform_json = \
serialize_transformation(input_summarize)
expected_summarize_transform_json = \
serialize_transformation(expected_summarize)
limit_transform_json = serialize_transformation(Limit(5))
input_transformations = [
limit_transform_json,
input_summarize_transform_json,
]
output_transformations = [
limit_transform_json,
expected_summarize_transform_json,
]
data = {
'base_table': base_table.id,
'initial_columns': initial_columns,
'display_names': None,
'parameters': {
'order_by': [
{'field': 'col1_grouped', 'direction': 'asc'},
{'field': 'col2_agged', 'direction': 'desc'}
],
'limit': 2
},
'transformations': input_transformations,
}
expected_query = (
{k: v for k, v in data.items() if k not in {'parameters'}}
| {
'schema': base_table.schema.id,
'transformations': output_transformations,
'display_names': {
'col1': 'Center',
'col1_grouped': 'Center group',
'col2': 'Case Number',
'col2_agged': 'Case Number distinct list',
},
}
)
expect_response_json = {
'column_metadata': {
'col1': {
'alias': 'col1',
'display_name': 'Center',
'display_options': None,
'input_alias': None,
'input_column_name': 'Center',
'input_table_name': 'patent_query_run_minimal_table',
'input_table_id': base_table.id,
'is_initial_column': True,
'type': 'text',
'type_options': None
},
'col1_grouped': {
'alias': 'col1_grouped',
'display_name': 'Center group',
'display_options': None,
'input_alias': 'col1',
'input_column_name': None,
'input_table_name': None,
'input_table_id': None,
'is_initial_column': False,
'type': 'text',
'type_options': None
},
'col2': {
'alias': 'col2',
'display_name': 'Case Number',
'display_options': None,
'input_alias': None,
'input_column_name': 'Case Number',
'input_table_name': 'patent_query_run_minimal_table',
'input_table_id': base_table.id,
'is_initial_column': True,
'type': 'text',
'type_options': None
},
'col2_agged': {
'alias': 'col2_agged',
'display_name': 'Case Number distinct list',
'display_options': None,
'input_alias': 'col2',
'input_column_name': None,
'input_table_name': None,
'input_table_id': None,
'is_initial_column': False,
'type': '_array',
'type_options': {'item_type': 'text'}
}
},
'output_columns': [
'col1_grouped',
'col2_agged',
],
'parameters': {
'limit': 2,
'order_by': [
{'direction': 'asc', 'field': 'col1_grouped'},
{'direction': 'desc', 'field': 'col2_agged'}
]
},
'query': expected_query,
'records': {
'count': 2,
'grouping': None,
'preview_data': None,
'results': [
{
'col1_grouped': 'NASA Ames Research Center',
'col2_agged': [
'ARC-14048-1',
'ARC-14231-1',
'ARC-14231-2DIV',
'ARC-14231-3'
]
},
{
'col1_grouped': 'NASA Kennedy Space Center',
'col2_agged': ['KSC-12871']
}
]
}
}
response = client.post('/api/db/v0/queries/run/', data, format='json')
assert response.status_code == 200
assert response.json() == expect_response_json
|
2,592 |
test get vsicurl
|
# =================================================================
#
# Authors: Francesco Bartoli <[email protected]>
# Authors: Tom Kralidis <[email protected]>
#
# Copyright (c) 2020 Francesco Bartoli
# Copyright (c) 2022 Tom Kralidis
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
# Needs to be run like: python3 -m pytest
# https://sampleserver6.arcgisonline.com/arcgis/rest/services/CommunityAddressing/FeatureServer/0
import logging
import pytest
from pygeoapi.provider.base import ProviderItemNotFoundError
from pygeoapi.provider.ogr import OGRProvider
LOGGER = logging.getLogger(__name__)
@pytest.fixture()
def config_vsicurl_csv():
return {
'name': 'OGR',
'type': 'feature',
'data': {
'source_type': 'CSV',
'source': '/vsicurl/https://raw.githubusercontent.com/pcm-dpc/COVID-19/master/dati-regioni/dpc-covid19-ita-regioni.csv', # noqa
# 'source_srs': 'EPSG:4326',
# 'target_srs': 'EPSG:4326',
'source_capabilities': {
'paging': True
},
'open_options': {
'X_POSSIBLE_NAMES': 'long',
'Y_POSSIBLE_NAMES': 'lat',
},
'gdal_ogr_options': {
'EMPTY_AS_NULL': 'NO',
'GDAL_CACHEMAX': '64',
'CPL_DEBUG': 'NO'
},
},
'id_field': 'fid',
'time_field': 'data',
'layer': 'dpc-covid19-ita-regioni'
}
def test_get_fields_vsicurl(config_vsicurl_csv):
"""Testing field types"""
p = OGRProvider(config_vsicurl_csv)
results = p.get_fields()
assert results['denominazione_regione']['type'] == 'string'
assert results['totale_positivi']['type'] == 'string'
def METHOD_NAME(config_vsicurl_csv):
"""Testing query for a specific object"""
p = OGRProvider(config_vsicurl_csv)
result = p.get('32')
assert result['id'] == 32
assert '14' in result['properties']['codice_regione']
def test_get_not_existing_feature_raise_exception(
config_vsicurl_csv
):
"""Testing query for a not existing object"""
p = OGRProvider(config_vsicurl_csv)
with pytest.raises(ProviderItemNotFoundError):
p.get(-1)
def test_query_hits_vsicurl(config_vsicurl_csv):
"""Testing query on entire collection for hits"""
p = OGRProvider(config_vsicurl_csv)
feature_collection = p.query(resulttype='hits')
assert feature_collection.get('type') == 'FeatureCollection'
features = feature_collection.get('features')
assert len(features) == 0
hits = feature_collection.get('numberMatched')
assert hits is not None
assert hits > 100
def test_query_bbox_hits_vsicurl(config_vsicurl_csv):
"""Testing query for a valid JSON object with geometry"""
p = OGRProvider(config_vsicurl_csv)
feature_collection = p.query(
bbox=[10.497565, 41.520355, 15.111823, 43.308645],
resulttype='hits')
assert feature_collection.get('type') == 'FeatureCollection'
features = feature_collection.get('features')
assert len(features) == 0
hits = feature_collection.get('numberMatched')
assert hits is not None
assert hits > 1
def test_query_with_limit_vsicurl(config_vsicurl_csv):
"""Testing query for a valid JSON object with geometry"""
p = OGRProvider(config_vsicurl_csv)
feature_collection = p.query(limit=2, resulttype='results')
assert feature_collection.get('type') == 'FeatureCollection'
features = feature_collection.get('features')
assert len(features) == 2
hits = feature_collection.get('numberMatched')
assert hits is None
feature = features[0]
properties = feature.get('properties')
assert properties is not None
geometry = feature.get('geometry')
assert geometry is not None
def test_query_with_offset_vsicurl(config_vsicurl_csv):
"""Testing query for a valid JSON object with geometry"""
p = OGRProvider(config_vsicurl_csv)
feature_collection = p.query(offset=20, limit=10, resulttype='results')
assert feature_collection.get('type') == 'FeatureCollection'
features = feature_collection.get('features')
assert len(features) == 10
hits = feature_collection.get('numberMatched')
assert hits is None
feature = features[0]
properties = feature.get('properties')
assert properties is not None
assert feature['id'] == 21
assert 'Veneto' in properties['denominazione_regione']
geometry = feature.get('geometry')
assert geometry is not None
def test_query_with_property_vsicurl(config_vsicurl_csv):
"""Testing query for a valid JSON object with property filter"""
p = OGRProvider(config_vsicurl_csv)
feature_collection = p.query(
offset=20, limit=10, resulttype='results',
properties=[('denominazione_regione', 'Lazio')])
assert feature_collection.get('type') == 'FeatureCollection'
features = feature_collection.get('features')
assert len(features) == 10
for feature in features:
assert 'Lazio' in feature['properties']['denominazione_regione']
def test_query_with_skip_geometry_vsicurl(config_vsicurl_csv):
"""Testing query for a valid JSON object with property filter"""
p = OGRProvider(config_vsicurl_csv)
feature_collection = p.query(skip_geometry=True)
for feature in feature_collection['features']:
assert feature['geometry'] is None
|
2,593 |
get mi cols
|
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
"""Converter utilities between dask and pandas, with multiindex convention.
Converts between:
pd.DataFrames with ordinary (single-level) index or pd.Multiindex, and
dask DataFrame
If pd.DataFrame has ordinary index, converts using dask compute/from_pandas
if pd.DataFrame has MultiIndex, converts and back-converts
MultiIndex columns to DataFrame columns with the name:
__index__[indexname], if level has a name indexname
__index__[index_iloc], if level has no indexname and is index_iloc-th level
index is replaced by a string index where tuples are replaced with str coerced elements
"""
import pandas as pd
from sktime.datatypes._common import _req
from sktime.datatypes._common import _ret as ret
def _is_mi_col(x):
return isinstance(x, str) and x.startswith("__index__")
def METHOD_NAME(obj):
"""Get multiindex cols from a dask object.
Parameters
----------
obj : dask DataFrame
Returns
-------
list of pandas index elements
all column index elements of obj that start with __index__
i.e., columns that are interpreted as multiindex columns in the correspondence
"""
return [x for x in obj.columns if _is_mi_col(x)]
def convert_dask_to_pandas(obj):
"""Convert dask DataFrame to pandas DataFrame, preserving MultiIndex.
Parameters
----------
obj : dask DataFrame
Returns
-------
pandas.DataFrame
columns of obj with names of the form __index__[indexname] are converted
back into MultiIndex levels of the result; a name of the form
__index__[i] (integer position) becomes an unnamed level
other columns and column names are identical to those of obj
"""
obj = obj.compute()
def mi_name(x):
return x.split("__index__")[1]
def mi_names(names):
new_names = [mi_name(x) for x in names]
for i, name in enumerate(new_names):
if name == str(i):
new_names[i] = None
return new_names
multi_cols = METHOD_NAME(obj)
# if has multi-index cols, move to pandas MultiIndex
if len(multi_cols) > 0:
obj = obj.set_index(multi_cols)
names = obj.index.names
new_names = mi_names(names)
obj.index.names = new_names
return obj
def convert_pandas_to_dask(obj, npartitions=1, chunksize=None, sort=True):
"""Convert pandas DataFrame to dask DataFrame, preserving MultiIndex.
Parameters
----------
obj : pandas.DataFrame
npartitions : int or None, optional, default = 1
npartitions passed to dask from_pandas when converting obj to dask
chunksize : int or None, optional, default = None
chunksize passed to dask from_pandas when converting obj to dask
sort : bool, optional, default = True
sort passed to dask from_pandas when converting obj to dask
Returns
-------
dask DataFrame
MultiIndex levels 0 .. -1 of obj are converted to columns of name
__index__[indexname], where indexname is name of multiindex level,
or the integer index if the level has no name
other columns and column names are identical to those of obj
"""
from dask.dataframe import from_pandas
def dask_mi_names(names):
res = list(names).copy()
for i, name in enumerate(names):
if name is None:
res[i] = str(i)
return [f"__index__{x}" for x in res]
if isinstance(obj.index, pd.MultiIndex):
names = obj.index.names
new_names = dask_mi_names(names)
new_index = [str(x) for x in obj.index]
obj = obj.copy()
obj.index.names = new_names
obj = obj.reset_index()
obj.index = new_index
obj = from_pandas(obj, npartitions=npartitions, chunksize=chunksize, sort=sort)
return obj
def check_dask_frame(
obj, return_metadata=False, var_name="obj", freq_set_check=False, scitype="Series"
):
"""Check dask frame, generic for sktime check format."""
import dask
metadata = {}
if not isinstance(obj, dask.dataframe.core.DataFrame):
msg = f"{var_name} must be a dask DataFrame, found {type(obj)}"
return ret(False, msg, None, return_metadata)
# we now know obj is a dask DataFrame
index_cols = METHOD_NAME(obj)
# check right number of cols depending on scitype
if scitype == "Series":
cols_msg = (
f"{var_name} must have exactly one index column, "
f"found {len(index_cols)}, namely: {index_cols}"
)
right_no_index_cols = len(index_cols) <= 1
elif scitype == "Panel":
cols_msg = (
f"{var_name} must have exactly two index columns, "
f"found {len(index_cols)}, namely: {index_cols}"
)
right_no_index_cols = len(index_cols) == 2
elif scitype == "Hierarchical":
cols_msg = (
f"{var_name} must have three or more index columns, "
f"found {len(index_cols)}, namely: {index_cols}"
)
right_no_index_cols = len(index_cols) >= 3
else:
return RuntimeError(
'scitype arg of check_dask_frame must be one of strings "Series", '
f'"Panel", or "Hierarchical", but found {scitype}'
)
if not right_no_index_cols:
# dask series should have at most one __index__ col
return ret(False, cols_msg, None, return_metadata)
if _req("is_empty", return_metadata):
metadata["is_empty"] = len(obj.index) < 1 or len(obj.columns) < 1
if _req("is_univariate", return_metadata):
metadata["is_univariate"] = len(obj.columns) == 1
# check that columns are unique
if not obj.columns.is_unique:
msg = f"{var_name} must have unique column indices, but found {obj.columns}"
return ret(False, msg, None, return_metadata)
# check whether the time index is of valid type
# if not is_in_valid_index_types(index):
# msg = (
# f"{type(index)} is not supported for {var_name}, use "
# f"one of {VALID_INDEX_TYPES} or integer index instead."
# )
# return ret(False, msg, None, return_metadata)
# Check time index is ordered in time
if not obj.index.is_monotonic_increasing.compute():
msg = (
f"The (time) index of {var_name} must be sorted "
f"monotonically increasing, but found: {obj.index}"
)
return ret(False, msg, None, return_metadata)
if freq_set_check and isinstance(obj.index, pd.DatetimeIndex):
if obj.index.freq is None:
msg = f"{var_name} has DatetimeIndex, but no freq attribute set."
return ret(False, msg, None, return_metadata)
# check whether index is equally spaced or if there are any nans
# compute only if needed
if _req("is_equally_spaced", return_metadata):
# todo: logic for equal spacing
metadata["is_equally_spaced"] = True
if _req("has_nans", return_metadata):
metadata["has_nans"] = obj.isnull().values.any().compute()
if scitype in ["Panel", "Hierarchical"]:
if _req("n_instances", return_metadata):
instance_cols = index_cols[:-1]
metadata["n_instances"] = len(obj[instance_cols].drop_duplicates())
if scitype in ["Hierarchical"]:
if _req("n_panels", return_metadata):
panel_cols = index_cols[:-2]
metadata["n_panels"] = len(obj[panel_cols].drop_duplicates())
return ret(True, None, metadata, return_metadata)
|
2,594 |
add
|
#
# data_source.py -- utilities to assist in making time-series plots
#
from collections import deque
import numpy as np
class XYDataSource:
"""Monitor a data source efficiently.
Uses a static array circular queue and a sliding window algorithm to
efficiently keep track of the current set of points and its minmax.
"""
def __init__(self, arr_src, points=[], overwrite=False,
none_for_empty=False):
"""Constructor for Data Source
Parameters
----------
arr_src : ndarray
fixed-size array of shape (length, 2) that backs the circular buffer of (x, y) points; its length sets the capacity
points : array, list, tuple or sequence-like (optional)
An array of points to initialize the data source with
overwrite : bool (optional)
If ``True`` the buffer will eventually overwrite old entries
none_for_empty : bool (optional)
If ``True`` then don't raise a ValueError for an empty buffer
"""
self.buf = arr_src
self.length = len(arr_src)
self.overwrite = overwrite
self.none_for_empty = none_for_empty
# for circular queue
self.front = self.length - 1
self.rear = None
self.limits = np.array([[0.0, 0.0], [0.0, 0.0]])
self.set_points(points)
def get_limits(self):
"""Return the limits of this current data source. The return value
looks like: [(xmin, ymin), (xmax, ymax)]
"""
return np.copy(self.limits)
def update_limits(self):
"""Mostly internal routine to update the limits. Shouldn't need to
be called explicitly in most cases.
"""
if len(self) == 0:
self.limits = np.array([[0.0, 0.0], [0.0, 0.0]])
else:
x_min, x_max = self.buf[self.rear][0], self.buf[self.front][0]
y_min, y_max = self.slmm.get_minmax()
self.limits = np.array([[x_min, y_min], [x_max, y_max]])
def set_points(self, points):
"""Initialize the data source with a series of points.
`points` should be a list/array/sequence of (x, y)
"""
self.slmm = SlidingWindowMinMax()
self.front = self.length - 1
self.rear = None
self.add_points(points)
def add_points(self, points):
"""Add a series of points.
`points` should be a list/array/sequence of (x, y)
"""
for pt in points:
self._add(pt)
self.update_limits()
def is_fullp(self):
"""Returns True if there is no more room in the buffer."""
front = (self.front + 1) % self.length
return front == self.rear
def _add(self, pt):
x, y = pt
front = (self.front + 1) % self.length
if front == self.rear:
if not self.overwrite:
raise ValueError("Buffer is full")
# circular queue full, need to expunge an old element
_x, _y = self.buf[self.rear]
if not np.isnan(_y):
self.slmm.remove_head(_y)
self.rear = (self.rear + 1) % self.length
self.front = front
if self.rear is None:
self.rear = self.front
self.buf[self.front, :] = pt
if not np.isnan(y):
self.slmm.add_tail(y)
def METHOD_NAME(self, pt, update_limits=True):
"""Add a single data point and update the plot.
If the number of points exceeds the `length` of the data source,
the oldest point will be ejected when `overwrite` is set; otherwise
a ValueError is raised.
If `update_limits` is `True` (the default) then the limits of the
current data set are updated.
"""
self._add(pt)
if update_limits:
self.update_limits()
append = METHOD_NAME
push = METHOD_NAME
def peek(self):
"""Get the latest data point. Will return `None` if there are no
points recorded.
"""
if len(self) == 0:
if self.none_for_empty:
return None
raise ValueError("Buffer is empty")
return self.buf[self.front]
get_latest = peek
def pop(self):
"""Get and remove the latest data point.
Will return `None` if there are no points recorded.
"""
if len(self) == 0:
if self.none_for_empty:
return None
raise ValueError("Buffer is empty")
pt = self.buf[self.front]
if self.rear == self.front:
self.rear = None
else:
self.front = self.length - 1 if self.front == 0 else self.front - 1
return pt
def peek_rear(self):
"""Get the earliest data point. Will return `None` if there are no
points recorded.
"""
if len(self) == 0:
if self.none_for_empty:
return None
raise ValueError("Buffer is empty")
return self.buf[self.rear]
def pop_rear(self):
"""Get and remove the earliest data point.
Will return `None` if there are no points recorded.
"""
if len(self) == 0:
if self.none_for_empty:
return None
raise ValueError("Buffer is empty")
pt = self.buf[self.rear]
if self.rear == self.front:
self.rear = None
else:
self.rear = (self.rear + 1) % self.length
return pt
def get_points(self):
"""Get the entire set of data points as an `ndarray`.
"""
n = len(self)
arr = np.zeros((n, 2), dtype=float)
if n == 0:
return arr
if self.front >= self.rear:
arr[:n, :] = self.buf[self.rear:self.front + 1, :]
else:
m = self.length - self.rear
arr[0:m, :] = self.buf[self.rear:self.length, :]
arr[m:n, :] = self.buf[0:self.front + 1, :]
return arr
@property
def points(self):
return self.get_points()
def __len__(self):
if self.rear is None:
return 0
if self.front >= self.rear:
return self.front - self.rear + 1
else:
return self.length - self.rear + self.front + 1
class SlidingWindowMinMax:
"""Class to efficiently keep track of the minmax values of a changing
data set.
"""
def __init__(self):
self.min_deque = deque()
self.max_deque = deque()
def get_minmax(self):
return (self.min_deque[0], self.max_deque[0])
def add_tail(self, val):
while len(self.min_deque) > 0 and val < self.min_deque[-1]:
self.min_deque.pop()
self.min_deque.append(val)
while len(self.max_deque) > 0 and val > self.max_deque[-1]:
self.max_deque.pop()
self.max_deque.append(val)
def remove_head(self, val):
if val < self.min_deque[0]:
raise ValueError("Wrong value")
elif val == self.min_deque[0]:
self.min_deque.popleft()
if val > self.max_deque[0]:
raise ValueError("Wrong value")
elif val == self.max_deque[0]:
self.max_deque.popleft()
def update_plot_from_source(dsrc, xyplot, update_limits=False):
"""Update the associated plot with the current set of points.
If `update_limits` is `True` then the plot limits will be updated
with the current limits of the data.
"""
arr = dsrc.get_points()
if update_limits:
limits = dsrc.get_limits()
xyplot.plot(arr, limits=limits)
else:
xyplot.plot(arr)
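# Minimal usage sketch (illustrative values): back the source with a fixed-size
# array and let old points be overwritten once the buffer fills up.
#
#     buf = np.zeros((100, 2), dtype=float)
#     dsrc = XYDataSource(buf, overwrite=True)
#     dsrc.add_points([(0.0, 1.0), (1.0, 3.0), (2.0, 2.0)])
#     (xmin, ymin), (xmax, ymax) = dsrc.get_limits()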
|
2,595 |
form valid
|
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.db.models.functions import Concat
from django.forms import CharField, HiddenInput, ModelForm, Textarea
from django.http import HttpResponse
from django.shortcuts import get_object_or_404, redirect, render
from django.urls import reverse
from django.utils import timezone
from django.views import View
from django.views.generic.edit import CreateView
from django.views.generic.list import ListView
from ..models.code_golf import CodeTask, Result
class ResultForm(ModelForm):
"""Form for submitting code.
Discussion:
We cannot verify that the user's code actually outputs what he reports, but
we might at the very least verify that they include the correct output in
their response. We here extend the model form of the Result model to include
a field `output`, and verify that that field has the correct output (which
is passed to the form through the constructor).
Whether this is actually useful, or if a simple client side check is sufficient,
can be discussed. Future refactorings of code golf are free to remove this
server side check if they wish, which will have the benefit of making the
code simpler.
"""
output = CharField(
widget=HiddenInput(attrs={"v-model": "output"})
) # The output of the user's code
def __init__(self, *args, **kwargs):
self.task = kwargs.pop("task")
super().__init__(*args, **kwargs)
class Meta:
model = Result
fields = ["solution"]
labels = {
"solution": "", # Drop the label
}
widgets = {
"solution": Textarea(
attrs={"placeholder": "Your solution", "v-model": "user_code"}
),
}
def clean(self):
cleaned_data = super().clean()
output = cleaned_data.get("output")
if output != self.task.correct_output:
raise ValidationError("Output does not match correct output")
class CodeGolf(LoginRequiredMixin, CreateView):
"""View for writing and submitting solutions"""
model = Result
form_class = ResultForm
template_name = "interactive/code_golf.html"
def get_success_url(self):
return reverse("code_golf_score", kwargs={"task_id": self.task.id})
def dispatch(self, request, *args, **kwargs):
self.task = get_object_or_404(CodeTask, pk=self.kwargs.get("task_id"))
return super().dispatch(request, *args, **kwargs)
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs["task"] = self.task
# kwargs["auto_id"] = False
return kwargs
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["task"] = self.task
context["result_list"] = Result.objects.best_by_user(task=self.task)
best_result = Result.objects.best_specific_user(
task=self.task, user=self.request.user
)
if best_result is None:
best_result_length = -1
else:
best_result_length = best_result.length
context["best_attempt"] = best_result_length
return context
def METHOD_NAME(self, form):
"""Set other fields"""
form.instance.user = self.request.user
form.instance.task = self.task
form.instance.python_version = (
"python2.7" # NB! Change if changing Skulpt version
)
return super().METHOD_NAME(form)
def markdownify_code(code):
"""
Takes code as a string and converts it to a form where it will be
formatted correctly by markdown.
As of now, this just adds a tab in front of each line, but this is not very pretty...
"""
return "\n".join(["\t" + line for line in code.split("\n")])
@login_required
def code_golf_score(request, task_id):
"""
Show the user list with the number of chars for their code.
If the user is logged in, show the user's solution; if not, only show the results list.
"""
task = get_object_or_404(CodeTask, pk=task_id)
result_list = Result.objects.best_by_user(task=task)
output = markdownify_code(task.correct_output)
context = {"output": output, "result_list": result_list, "task": task}
user_has_solution = Result.objects.filter(user=request.user, task=task).exists()
context["has_solution"] = user_has_solution
if user_has_solution:
user_results = Result.objects.filter(user=request.user, task=task).order_by(
"length"
)
best_result = user_results.first()
length = best_result.length
code = best_result.solution # Add #!python for markdown
context["code"] = code
context["length"] = length
context["user_results"] = user_results
return render(request, "interactive/code_golf_score.html", context)
class CodeTaskListView(ListView):
model = CodeTask
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
task_list = CodeTask.objects.prefetch_related("result_set").order_by("-pk")
best_results = []
user_results = []
for obj in task_list:
best_results.append(obj.get_best_result)
if self.request.user.is_authenticated:
user_result = (
obj.result_set.filter(user=self.request.user)
.order_by("length")
.first()
)
if user_result is not None:
user_results.append(user_result.length)
else:
user_results.append("--")
else:
user_results.append("--")
tasks = zip(task_list, best_results, user_results)
newest_task = task_list.first()
context["newest_task"] = newest_task
context["tasks"] = tasks
return context
|
2,596 |
wrapped
|
#!/usr/bin/env python3
"""
This is a minimal, speed-optimized version of a FASTQ->FASTA conversion script. It lacks a
lot of options other scripts might have, but I got tired of runtimes taking hours with
billion-read FASTQ files.
Example timing information on a 5.4GB input file (with 21.7 million records)**:
- 55.3 seconds with default settings
- 2 minutes, 2 seconds when run with --detect_direction option
- 2 minutes, 8 seconds when run with the --width=60 option
- 3 minutes, 17 seconds when run with both --detect_direction and --width=60 options
** Timing was run on a laptop with an Intel(R) Core(TM) i7-3740QM CPU @ 2.70GHz,
utilizing only one core.
OUTPUT
------
Running with no options other than -i and -o, this script will transform headers like these:
@SN7001163:78:C0YG5ACXX:6:1101:1129:2043 1:N:0:CCTAGGT
@61JCNAAXX100503:5:100:10001:18267/2
to this in the FASTA output:
>SN7001163:78:C0YG5ACXX:6:1101:1129:2043 1:N:0:CCTAGGT
>61JCNAAXX100503:5:100:10001:18267/2
If you have headers like this:
@SN7001163:78:C0YG5ACXX:6:1101:1129:2043 1:N:0:CCTAGGT
The optional --detect_direction option will see the '1' or '2' after the whitespace and transform
the header to this instead (in FASTA):
>SN7001163:78:C0YG5ACXX:6:1101:1129:2043/1
Note that this can increase the runtime up to 3x (from my testing), since a regex is used.
The FASTQ format dictates that the sequence residues of each entry be all contained within a
single line, while the convention for FASTA format is a fixed-with for all residue lines (usually
60bp) where longer ones are wrapped to the next line. By default this script will simply copy
the residue lines over from the FASTQ to the FASTA file, but you can use the --width option
to specify that the lines in the output FASTA should be wrapped at some width. This comes with
a performance penalty.
Author: Joshua Orvis (jorvis AT gmail)
"""
import argparse
import re
import sys
def main():
parser = argparse.ArgumentParser( description='Convert FASTQ to FASTA format')
## output file to be written
parser.add_argument('-i', '--input_file', type=str, required=True, help='Path to an input file to be read' )
parser.add_argument('-o', '--output_file', type=str, required=False, help='Path to an output file to be created' )
parser.add_argument('-d', '--detect_direction', action='store_true', help='Pass this flag to auto-detect the mate pair direction. See full documentation for more info' )
parser.add_argument('-w', '--width', type=int, required=False, help='Defines the width (or number of bases) on each FASTA sequence line' )
args = parser.parse_args()
if args.output_file is None:
ofh = sys.stdout
else:
ofh = open( args.output_file, 'wt' )
line_count = 0
record_count = 0
last_header = None
for line in open(args.input_file, 'r'):
line_count += 1
if line_count % 4 == 1:
record_count += 1
if args.detect_direction:
m = re.search(r'^@(.+?) (\d)', line)
if m:
last_header = "{0}/{1}\n".format(m.group(1), m.group(2))
else:
raise Exception("ERROR: FASTQ header line found that didn't match expected format: {0}".format(line))
else:
line = line.lstrip('@')
last_header = line
elif line_count % 4 == 2:
if args.width:
ofh.write(">{0}{1}\n".format(last_header, METHOD_NAME(line, args.width)))
else:
ofh.write(">{0}{1}".format(last_header, line))
ofh.close()
print("{0} records written to the output FASTA file".format(record_count))
def METHOD_NAME(string, every=60):
''' this runs orders of magnitude faster than using the textwrap module '''
string = string.rstrip()
return '\n'.join(string[i:i+every] for i in range(0, len(string), every))
if __name__ == '__main__':
main()
|
2,597 |
run
|
"""
Contains objects that define UI related workers that perform tasks in the
background.
Copyright (c) 2015-2022 Nicholas H.Tollervey and others (see the AUTHORS file).
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import logging
import requests
from PyQt5.QtCore import pyqtSignal, QObject
logger = logging.getLogger(__name__)
WSGI = """# This file contains the WSGI configuration required to serve up your
# web application at http://<your-username>.pythonanywhere.com/
# It works by setting the variable 'application' to a WSGI handler of some
# description.
#
# The below has been auto-generated BY MU for your Flask project
import sys
# add your project directory to the sys.path
project_home = '/home/{username}/{app_name}'
if project_home not in sys.path:
sys.path = [project_home] + sys.path
# import flask app but need to call it "application" for WSGI to work
from {app_name} import app as application # noqa
"""
class PythonAnywhereWorker(QObject):
"""
Encapsulates deployment, configuration and restart of a website on
PythonAnywhere.
"""
finished = pyqtSignal(str) # Emit domain when successfully finished.
error = pyqtSignal(str) # Emitted with an error description if failed.
def __init__(self, instance, username, token, files, app_name, progress):
super().__init__(None)
self.instance = instance
self.username = username # PythonAnywhere username.
self.token = token # PythonAnywhere API token.
self.files = files # {"RemotePath": "LocalPath", ... } files dict.
self.app_name = app_name # Python module containing Flask app.
self.progress = progress # Progress modal to update.
# The root API URL. The instance should be either "www" (main
# PythonAnywhere instance) or "eu" (hosted in Europe). This is
# configured in the admin widget.
self.url = (
"https://{instance}.pythonanywhere.com/api/v0/user/{username}/"
).format(instance=instance, username=username)
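# e.g. with instance="www" and a hypothetical username "alice", this is
# "https://www.pythonanywhere.com/api/v0/user/alice/"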
# Path to where web application's files are to be uploaded.
self.files_path = "files/path/home/{username}/{app_name}/".format(
username=username, app_name=app_name
)
# Path to the WSGI configuration file for the application.
self.wsgi_path = (
"/files/path/var/www/{username}_pythonanywhere_com_wsgi.py"
).format(username=username)
# Path to where the web application's static files will be found.
self.static_path = "/home/{username}/{app_name}/static/".format(
username=username, app_name=app_name
)
# The content of the WSGI file to upload.
self.wsgi_config = WSGI.format(
username=self.username, app_name=self.app_name
)
def METHOD_NAME(self):
logger.info(
"Deploying to PythonAnywhere {instance} for: {app_name}".format(
instance=self.instance, app_name=self.app_name
)
)
headers = {"Authorization": "Token {token}".format(token=self.token)}
domain = "{username}.pythonanywhere.com".format(username=self.username)
if self.instance == "eu":
domain = "{username}.eu.pythonanywhere.com".format(
username=self.username
)
# Progress steps are calculated as the number of files to upload plus
# the five-ish additional API calls needed to configure and restart
# the web application.
self.progress.setMaximum(len(self.files) + 5)
# The counter tracks how much progress has been made.
counter = 1
try:
# Get web application details (if required).
self.progress.setValue(counter)
path = self.url + "webapps/"
response = requests.get(path, headers=headers)
response.raise_for_status()
apps = response.json()
# Create the application if it doesn't exist.
exists = False
for app in apps:
if app["domain_name"] == domain:
exists = True
break
if not exists:
# Create the app.
response = requests.post(
path,
data={
"domain_name": domain,
"python_version": "python38",
},
headers=headers,
)
response.raise_for_status()
# Configure serving of static files.
path = self.url + "webapps/{domain}/static_files/".format(
domain=domain
)
response = requests.post(
path,
data={
"url": "/static/",
"path": self.static_path,
},
headers=headers,
)
response.raise_for_status()
counter += 1
# Upload files.
for target, source in self.files.items():
self.progress.setValue(counter)
with open(source, "rb") as source_file:
path = self.url + self.files_path + target
response = requests.post(
path,
files={"content": source_file.read()},
headers=headers,
)
response.raise_for_status()
counter += 1
# Update WSGI settings.
self.progress.setValue(counter)
logger.info(self.wsgi_config)
path = self.url + self.wsgi_path
response = requests.post(
path,
files={"content": self.wsgi_config},
headers=headers,
)
response.raise_for_status()
counter += 1
# Force HTTPS and source directory.
self.progress.setValue(counter)
data = {
"source_directory": "/home/{username}/{app_name}/".format(
username=self.username, app_name=self.app_name
),
"force_https": True,
}
path = self.url + "webapps/{domain}/".format(domain=domain)
response = requests.put(path, data=data, headers=headers)
response.raise_for_status()
counter += 1
# Reload the application.
self.progress.setValue(counter)
path = self.url + "webapps/{domain}/reload/".format(domain=domain)
response = requests.post(path, headers=headers)
response.raise_for_status()
counter += 1
self.progress.setValue(counter)
self.finished.emit(domain)
except Exception as ex:
# Gracefully report all failures (logged and reported upstream).
self.error.emit(repr(ex))
|
2,598 |
get all
|
# Stubs for ply.yacc (Python 3.7)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from typing import Any, Optional, TypeVar, Generic, List
from ply.lex import Lexer
from inmanta.ast.statements import Statement
__tabversion__: str
yaccdebug: bool
debug_file: str
tab_module: str
default_lr: str
error_count: int
yaccdevel: bool
resultlimit: int
pickle_protocol: int
string_types = str
MAXINT: Any
class PlyLogger:
f: Any = ...
def __init__(self, f: Any) -> None: ...
def debug(self, msg: Any, *args: Any, **kwargs: Any) -> None: ...
info: Any = ...
def warning(self, msg: Any, *args: Any, **kwargs: Any) -> None: ...
def error(self, msg: Any, *args: Any, **kwargs: Any) -> None: ...
critical: Any = ...
class NullLogger: ...
class YaccError(Exception): ...
class YaccSymbol: ...
class YaccProduction:
slice: Any = ...
stack: Any = ...
lexer: Lexer = ...
parser: Any = ...
value: str
type: str
def __init__(self, s: Any, stack: Optional[Any] = ...) -> None: ...
def __getitem__(self, n: Any) -> Any: ...
def __setitem__(self, n: Any, v: Any) -> None: ...
def __len__(self) -> int: ...
def lineno(self, n: Any) -> int: ...
def set_lineno(self, n: Any, lineno: Any) -> None: ...
def lexpos(self, n: Any) -> int: ...
def set_lexpos(self, n: Any, lexpos: Any) -> None: ...
def error(self) -> None: ...
T = TypeVar("T")
class LRParser(Generic[T]):
productions: Any = ...
action: Any = ...
goto: Any = ...
errorfunc: Any = ...
errorok: bool = ...
def __init__(self, lrtab: Any, errorf: Any) -> None: ...
def errok(self) -> None: ...
def restart(self) -> None: ...
defaulted_states: Any = ...
def set_defaulted_states(self) -> None: ...
def disable_defaulted_states(self) -> None: ...
def parse(self, input: Optional[Any] = ..., lexer: Optional[Any] = ..., debug: bool = ..., tracking: bool = ..., tokenfunc: Optional[Any] = ...) -> List[T]: ...
token: Any = ...
statestack: Any = ...
symstack: Any = ...
state: Any = ...
class Production:
reduced: int = ...
name: Any = ...
prod: Any = ...
number: Any = ...
func: Any = ...
callable: Any = ...
file: Any = ...
line: Any = ...
prec: Any = ...
len: Any = ...
usyms: Any = ...
lr_items: Any = ...
lr_next: Any = ...
str: Any = ...
def __init__(self, number: Any, name: Any, prod: Any, precedence: Any = ..., func: Optional[Any] = ..., file: str = ..., line: int = ...) -> None: ...
def __len__(self) -> int: ...
def __getitem__(self, index: Any) -> Any: ...
def bind(self, pdict: Any) -> None: ...
class MiniProduction:
name: Any = ...
len: Any = ...
func: Any = ...
callable: Any = ...
file: Any = ...
line: Any = ...
str: Any = ...
def __init__(self, str: Any, name: Any, len: Any, func: Any, file: Any, line: Any) -> None: ...
def bind(self, pdict: Any) -> None: ...
class LRItem:
name: Any = ...
prod: Any = ...
number: Any = ...
lr_index: Any = ...
lookaheads: Any = ...
len: Any = ...
usyms: Any = ...
def __init__(self, p: Any, n: Any) -> None: ...
class GrammarError(YaccError): ...
class Grammar:
Productions: Any = ...
Prodnames: Any = ...
Prodmap: Any = ...
Terminals: Any = ...
Nonterminals: Any = ...
First: Any = ...
Follow: Any = ...
Precedence: Any = ...
UsedPrecedence: Any = ...
Start: Any = ...
def __init__(self, terminals: Any) -> None: ...
def __len__(self) -> int: ...
def __getitem__(self, index: Any) -> Any: ...
def set_precedence(self, term: Any, assoc: Any, level: Any) -> None: ...
def add_production(self, prodname: Any, syms: Any, func: Optional[Any] = ..., file: str = ..., line: int = ...) -> None: ...
def set_start(self, start: Optional[Any] = ...) -> None: ...
def build_lritems(self) -> None: ...
class VersionError(YaccError): ...
class LRTable:
lr_action: Any = ...
lr_goto: Any = ...
lr_productions: Any = ...
lr_method: Any = ...
def __init__(self) -> None: ...
def bind_callables(self, pdict: Any) -> None: ...
def traverse(x: Any, N: Any, stack: Any, F: Any, X: Any, R: Any, FP: Any) -> None: ...
class LALRError(YaccError): ...
class LRGeneratedTable(LRTable):
grammar: Any = ...
lr_method: Any = ...
log: Any = ...
lr_action: Any = ...
lr_goto: Any = ...
lr_productions: Any = ...
lr_goto_cache: Any = ...
lr0_cidhash: Any = ...
sr_conflict: int = ...
rr_conflict: int = ...
conflicts: Any = ...
sr_conflicts: Any = ...
rr_conflicts: Any = ...
def __init__(self, grammar: Any, method: str = ..., log: Optional[Any] = ...) -> None: ...
def add_lookaheads(self, lookbacks: Any, followset: Any) -> None: ...
def add_lalr_lookaheads(self, C: Any) -> None: ...
def lr_parse_table(self) -> None: ...
def write_table(self, tabmodule: Any, outputdir: str = ..., signature: str = ...) -> None: ...
def pickle_table(self, filename: Any, signature: str = ...) -> None: ...
class ParserReflect:
pdict: Any = ...
start: Any = ...
error_func: Any = ...
tokens: Any = ...
modules: Any = ...
grammar: Any = ...
error: bool = ...
log: Any = ...
def __init__(self, pdict: Any, log: Optional[Any] = ...) -> None: ...
def METHOD_NAME(self) -> None: ...
def validate_modules(self) -> None: ...
def get_start(self) -> None: ...
def validate_start(self) -> None: ...
def get_error_func(self) -> None: ...
prec: Any = ...
def get_precedence(self) -> None: ...
preclist: Any = ...
pfuncs: Any = ...
def yacc(
method: str = ...,
debug: Any = ...,
module: Optional[Any] = ...,
tabmodule: Any = ...,
start: Optional[Any] = ...,
check_recursion: bool = ...,
optimize: bool = ...,
write_tables: bool = ...,
debugfile: Any = ...,
outputdir: Optional[Any] = ...,
debuglog: Optional[Any] = ...,
errorlog: Optional[Any] = ...,
picklefile: Optional[Any] = ...
) -> LRParser[Statement]: ...
|
2,599 |
setup
|
# -*- coding: utf-8 -*-
import pytest
try:
from ddtrace.appsec.iast import oce
from ddtrace.appsec.iast._taint_tracking import as_formatted_evidence
from tests.appsec.iast.aspects.aspect_utils import BaseReplacement
from tests.appsec.iast.aspects.aspect_utils import create_taint_range_with_format
from tests.appsec.iast.aspects.conftest import _iast_patched_module
except (ImportError, AttributeError):
pytest.skip("IAST not supported for this Python version", allow_module_level=True)
mod = _iast_patched_module("tests.appsec.iast.fixtures.aspects.str_methods")
def METHOD_NAME():
oce._enabled = True
@pytest.mark.parametrize(
"obj, kwargs",
[
(3.5, {}),
("Hi", {}),
("🙀", {}),
(b"Hi", {}),
(b"Hi", {"encoding": "utf-8", "errors": "strict"}),
(b"Hi", {"encoding": "utf-8", "errors": "ignore"}),
({"a": "b", "c": "d"}, {}),
({"a", "b", "c", "d"}, {}),
(("a", "b", "c", "d"), {}),
(["a", "b", "c", "d"], {}),
],
)
def test_str_aspect(obj, kwargs):
import ddtrace.appsec.iast._taint_tracking.aspects as ddtrace_aspects
assert ddtrace_aspects.str_aspect(obj, **kwargs) == str(obj, **kwargs)
@pytest.mark.parametrize(
"obj, kwargs, should_be_tainted",
[
(3.5, {}, False),
("Hi", {}, True),
("🙀", {}, True),
(b"Hi", {}, True),
(bytearray(b"Hi"), {}, True),
(b"Hi", {"encoding": "utf-8", "errors": "strict"}, True),
(b"Hi", {"encoding": "utf-8", "errors": "ignore"}, True),
({"a": "b", "c": "d"}, {}, False),
({"a", "b", "c", "d"}, {}, False),
(("a", "b", "c", "d"), {}, False),
(["a", "b", "c", "d"], {}, False),
],
)
def test_str_aspect_tainting(obj, kwargs, should_be_tainted):
from ddtrace.appsec.iast._taint_tracking import OriginType
from ddtrace.appsec.iast._taint_tracking import is_pyobject_tainted
from ddtrace.appsec.iast._taint_tracking import taint_pyobject
import ddtrace.appsec.iast._taint_tracking.aspects as ddtrace_aspects
if should_be_tainted:
obj = taint_pyobject(
obj, source_name="test_str_aspect_tainting", source_value=obj, source_origin=OriginType.PARAMETER
)
result = ddtrace_aspects.str_aspect(obj, **kwargs)
assert is_pyobject_tainted(result) == should_be_tainted
assert result == str(obj, **kwargs)
@pytest.mark.parametrize(
"obj, expected_result",
[
("3.5", "'3.5'"),
("Hi", "'Hi'"),
("🙀", "'🙀'"),
(b"Hi", "b'Hi'"),
(bytearray(b"Hi"), "bytearray(b'Hi')"),
],
)
def test_repr_aspect_tainting(obj, expected_result):
from ddtrace.appsec.iast._taint_tracking import OriginType
from ddtrace.appsec.iast._taint_tracking import is_pyobject_tainted
from ddtrace.appsec.iast._taint_tracking import taint_pyobject
import ddtrace.appsec.iast._taint_tracking.aspects as ddtrace_aspects
assert repr(obj) == expected_result
obj = taint_pyobject(
obj, source_name="test_repr_aspect_tainting", source_value=obj, source_origin=OriginType.PARAMETER
)
result = ddtrace_aspects.repr_aspect(obj)
assert is_pyobject_tainted(result) is True
class TestOperatorsReplacement(BaseReplacement):
def test_aspect_ljust_str_tainted(self):
# type: () -> None
string_input = "foo"
# Not tainted
ljusted = mod.do_ljust(string_input, 4) # pylint: disable=no-member
assert as_formatted_evidence(ljusted) == ljusted
# Tainted
string_input = create_taint_range_with_format(":+-foo-+:")
ljusted = mod.do_ljust(string_input, 4) # pylint: disable=no-member
assert as_formatted_evidence(ljusted) == ":+-foo-+: "
def test_zfill(self):
# Not tainted
string_input = "-1234"
res = mod.do_zfill(string_input, 6) # pylint: disable=no-member
assert as_formatted_evidence(res) == "-01234"
# Tainted
string_input = create_taint_range_with_format(":+--12-+:34")
res = mod.do_zfill(string_input, 6) # pylint: disable=no-member
assert as_formatted_evidence(res) == ":+---+:0:+-12-+:34"
string_input = create_taint_range_with_format(":+-+12-+:34")
res = mod.do_zfill(string_input, 7) # pylint: disable=no-member
assert as_formatted_evidence(res) == ":+-+-+:00:+-12-+:34"
string_input = create_taint_range_with_format(":+-012-+:34")
res = mod.do_zfill(string_input, 7) # pylint: disable=no-member
assert as_formatted_evidence(res) == "00:+-012-+:34"
def test_format(self):
# type: () -> None
string_input = "foo"
result = mod.do_format_fill(string_input)
assert result == "foo "
string_input = create_taint_range_with_format(":+-foo-+:")
result = mod.do_format_fill(string_input) # pylint: disable=no-member
# TODO: format with params doesn't work correctly; the assert should be
# assert as_formatted_evidence(result) == ":+-foo -+:"
assert as_formatted_evidence(result) == "foo "
|