id (int64, 0-300k) | label (string, lengths 1-74, may be empty) | text (string, lengths 4k-8k) |
---|---|---|
4,200 | forward |
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from mmdet.registry import MODELS
from .utils import weight_reduce_loss
def varifocal_loss(pred: Tensor,
target: Tensor,
weight: Optional[Tensor] = None,
alpha: float = 0.75,
gamma: float = 2.0,
iou_weighted: bool = True,
reduction: str = 'mean',
avg_factor: Optional[int] = None) -> Tensor:
"""`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_
Args:
pred (Tensor): The prediction with shape (N, C), C is the
number of classes.
target (Tensor): The learning target of the iou-aware
classification score with shape (N, C), C is the number of classes.
weight (Tensor, optional): The weight of loss for each
prediction. Defaults to None.
alpha (float, optional): A balance factor for the negative part of
Varifocal Loss, which is different from the alpha of Focal Loss.
Defaults to 0.75.
gamma (float, optional): The gamma for calculating the modulating
factor. Defaults to 2.0.
iou_weighted (bool, optional): Whether to weight the loss of the
positive example with the iou target. Defaults to True.
reduction (str, optional): The method used to reduce the loss into
a scalar. Defaults to 'mean'. Options are "none", "mean" and
"sum".
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
Returns:
Tensor: Loss tensor.
"""
# pred and target should be of the same size
assert pred.size() == target.size()
pred_sigmoid = pred.sigmoid()
target = target.type_as(pred)
if iou_weighted:
focal_weight = target * (target > 0.0).float() + \
alpha * (pred_sigmoid - target).abs().pow(gamma) * \
(target <= 0.0).float()
else:
focal_weight = (target > 0.0).float() + \
alpha * (pred_sigmoid - target).abs().pow(gamma) * \
(target <= 0.0).float()
loss = F.binary_cross_entropy_with_logits(
pred, target, reduction='none') * focal_weight
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
@MODELS.register_module()
class VarifocalLoss(nn.Module):
def __init__(self,
use_sigmoid: bool = True,
alpha: float = 0.75,
gamma: float = 2.0,
iou_weighted: bool = True,
reduction: str = 'mean',
loss_weight: float = 1.0) -> None:
"""`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_
Args:
use_sigmoid (bool, optional): Whether the prediction is
used for sigmoid or softmax. Defaults to True.
alpha (float, optional): A balance factor for the negative part of
Varifocal Loss, which is different from the alpha of Focal
Loss. Defaults to 0.75.
gamma (float, optional): The gamma for calculating the modulating
factor. Defaults to 2.0.
iou_weighted (bool, optional): Whether to weight the loss of the
positive examples with the iou target. Defaults to True.
reduction (str, optional): The method used to reduce the loss into
a scalar. Defaults to 'mean'. Options are "none", "mean" and
"sum".
loss_weight (float, optional): Weight of loss. Defaults to 1.0.
"""
super().__init__()
assert use_sigmoid is True, \
'Only sigmoid varifocal loss supported now.'
assert alpha >= 0.0
self.use_sigmoid = use_sigmoid
self.alpha = alpha
self.gamma = gamma
self.iou_weighted = iou_weighted
self.reduction = reduction
self.loss_weight = loss_weight
def METHOD_NAME(self,
pred: Tensor,
target: Tensor,
weight: Optional[Tensor] = None,
avg_factor: Optional[int] = None,
reduction_override: Optional[str] = None) -> Tensor:
"""Forward function.
Args:
pred (Tensor): The prediction with shape (N, C), C is the
number of classes.
target (Tensor): The learning target of the iou-aware
classification score with shape (N, C), C is
the number of classes.
weight (Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Options are "none", "mean" and "sum".
Returns:
Tensor: The calculated loss
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
if self.use_sigmoid:
loss_cls = self.loss_weight * varifocal_loss(
pred,
target,
weight,
alpha=self.alpha,
gamma=self.gamma,
iou_weighted=self.iou_weighted,
reduction=reduction,
avg_factor=avg_factor)
else:
raise NotImplementedError
return loss_cls
|
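Since the sample above defines the varifocal weighting in terms of plain PyTorch ops, a self-contained sketch of the same computation may help clarify it; it re-implements the focal weighting inline with a simple mean reduction (the mmdet `weight_reduce_loss` helper is not used), and the shapes and values are illustrative only.

```python
# Illustrative sketch of the varifocal weighting, not a call into mmdet.
import torch
import torch.nn.functional as F

pred = torch.randn(8, 4)                                   # raw logits, shape (N, C)
target = torch.zeros(8, 4)                                  # IoU-aware targets in [0, 1]
target[torch.arange(8), torch.randint(0, 4, (8,))] = torch.rand(8)

alpha, gamma = 0.75, 2.0
pred_sigmoid = pred.sigmoid()
# Positives are weighted by the IoU target, negatives by a focal-style term.
focal_weight = target * (target > 0.0).float() + \
    alpha * (pred_sigmoid - target).abs().pow(gamma) * (target <= 0.0).float()
loss = (F.binary_cross_entropy_with_logits(pred, target, reduction='none')
        * focal_weight).mean()
print(loss.item())
```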
4,201 | build single handler application |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2023, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide utility functions for implementing the ``bokeh`` command.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import contextlib
import errno
import os
import sys
from typing import Iterator
# Bokeh imports
from bokeh.application import Application
from bokeh.application.handlers import (
DirectoryHandler,
Handler,
NotebookHandler,
ScriptHandler,
)
from bokeh.util.warnings import warn
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'build_single_handler_application',
'build_single_handler_applications',
'die',
'report_server_init_errors',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
def die(message: str, status: int = 1) -> None:
''' Print an error message and exit.
This function will call ``sys.exit`` with the given ``status`` and the
process will terminate.
Args:
message (str) : error message to print
status (int) : the exit status to pass to ``sys.exit``
'''
print(message, file=sys.stderr)
sys.exit(status)
DIRSTYLE_MAIN_WARNING = """
It looks like you might be running the main.py of a directory app directly.
If this is the case, to enable the features of directory style apps, you must
call "bokeh serve" on the directory instead. For example:
bokeh serve my_app_dir/
If this is not the case, renaming main.py will suppress this warning.
"""
def METHOD_NAME(path: str, argv: list[str] | None = None) -> Application:
''' Return a Bokeh application built using a single handler for a script,
notebook, or directory.
In general a Bokeh :class:`~bokeh.application.application.Application` may
have any number of handlers to initialize |Document| objects for new client
sessions. However, in many cases only a single handler is needed. This
function examines the ``path`` provided, and returns an ``Application``
initialized with one of the following handlers:
* :class:`~bokeh.application.handlers.script.ScriptHandler` when ``path``
is to a ``.py`` script.
* :class:`~bokeh.application.handlers.notebook.NotebookHandler` when
``path`` is to an ``.ipynb`` Jupyter notebook.
* :class:`~bokeh.application.handlers.directory.DirectoryHandler` when
``path`` is to a directory containing a ``main.py`` script.
Args:
path (str) : path to a file or directory for creating a Bokeh
application.
argv (seq[str], optional) : command line arguments to pass to the
application handler
Returns:
:class:`~bokeh.application.application.Application`
Raises:
RuntimeError
Notes:
If ``path`` ends with a file ``main.py`` then a warning will be printed
regarding running directory-style apps by passing the directory instead.
'''
argv = argv or []
path = os.path.abspath(os.path.expanduser(path))
handler: Handler
# There are certainly race conditions here if the file/directory is deleted
# in between the isdir/isfile tests and subsequent code. But it would be a
# failure if they were not there to begin with, too (just a different error)
if os.path.isdir(path):
handler = DirectoryHandler(filename=path, argv=argv)
elif os.path.isfile(path):
if path.endswith(".ipynb"):
handler = NotebookHandler(filename=path, argv=argv)
elif path.endswith(".py"):
if path.endswith("main.py"):
warn(DIRSTYLE_MAIN_WARNING)
handler = ScriptHandler(filename=path, argv=argv)
else:
raise ValueError("Expected a '.py' script or '.ipynb' notebook, got: '%s'" % path)
else:
raise ValueError("Path for Bokeh server application does not exist: %s" % path)
if handler.failed:
raise RuntimeError(f"Error loading {path}:\n\n{handler.error}\n{handler.error_detail} ")
application = Application(handler)
return application
def build_single_handler_applications(paths: list[str], argvs: dict[str, list[str]] | None = None) -> dict[str, Application]:
''' Return a dictionary mapping routes to Bokeh applications built using
single handlers, for specified files or directories.
This function iterates over ``paths`` and ``argvs`` and calls
:func:`~bokeh.command.util.build_single_handler_application` on each
to generate the mapping.
Args:
paths (seq[str]) : paths to files or directories for creating Bokeh
applications.
argvs (dict[str, list[str]], optional) : mapping of paths to command
line arguments to pass to the handler for each path
Returns:
dict[str, Application]
Raises:
RuntimeError
'''
applications: dict[str, Application] = {}
argvs = argvs or {}
for path in paths:
application = METHOD_NAME(path, argvs.get(path, []))
route = application.handlers[0].url_path()
if not route:
if '/' in applications:
raise RuntimeError("Don't know the URL path to use for %s" % (path))
route = '/'
applications[route] = application
return applications
@contextlib.contextmanager
def report_server_init_errors(address: str | None = None, port: int | None = None, **kwargs: str) -> Iterator[None]:
''' A context manager to help print more informative error messages when a
``Server`` cannot be started due to a network problem.
Args:
address (str) : network address that the server will be listening on
port (int) : network port that the server will be listening on
Example:
.. code-block:: python
with report_server_init_errors(**server_kwargs):
server = Server(applications, **server_kwargs)
If there are any errors (e.g. port or address already in use) then a
critical error will be logged and the process will terminate with a
call to ``sys.exit(1)``
'''
try:
yield
except OSError as e:
if e.errno == errno.EADDRINUSE:
log.critical("Cannot start Bokeh server, port %s is already in use", port)
elif e.errno == errno.EADDRNOTAVAIL:
log.critical("Cannot start Bokeh server, address '%s' not available", address)
else:
codename = errno.errorcode[e.errno]
log.critical("Cannot start Bokeh server [%s]: %r", codename, e)
sys.exit(1)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
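The docstring of `report_server_init_errors` already shows the intended pattern; a slightly fuller sketch combining it with `build_single_handler_applications` might look as follows. The script path is hypothetical, and the snippet assumes Bokeh's `Server` class is importable from `bokeh.server.server`.

```python
# Hypothetical script path; ScriptHandler is chosen because it ends in ".py".
from bokeh.command.util import (
    build_single_handler_applications,
    report_server_init_errors,
)
from bokeh.server.server import Server

apps = build_single_handler_applications(["dashboard.py"])  # served at "/dashboard"
server_kwargs = {"address": "127.0.0.1", "port": 5006}
with report_server_init_errors(**server_kwargs):
    # Bind failures (e.g. port already in use) are logged and exit the process.
    server = Server(apps, **server_kwargs)
    server.start()
```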
4,202 | test emg clean |
import biosppy
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pytest
import scipy.stats
import neurokit2 as nk
# =============================================================================
# EMG
# =============================================================================
def test_emg_simulate():
emg1 = nk.emg_simulate(duration=20, length=5000, burst_number=1)
assert len(emg1) == 5000
emg2 = nk.emg_simulate(duration=20, length=5000, burst_number=15)
assert scipy.stats.median_abs_deviation(emg1) < scipy.stats.median_abs_deviation(
emg2
)
emg3 = nk.emg_simulate(duration=20, length=5000, burst_number=1, burst_duration=2.0)
# pd.DataFrame({"EMG1":emg1, "EMG3": emg3}).plot()
assert len(nk.signal_findpeaks(emg3, height_min=1.0)["Peaks"]) > len(
nk.signal_findpeaks(emg1, height_min=1.0)["Peaks"]
)
def test_emg_activation():
emg = nk.emg_simulate(duration=10, burst_number=3)
cleaned = nk.emg_clean(emg)
emg_amplitude = nk.emg_amplitude(cleaned)
activity_signal, info = nk.emg_activation(emg_amplitude)
assert set(activity_signal.columns.to_list()) == set(list(info.keys()))
assert len(info["EMG_Onsets"]) == len(info["EMG_Offsets"])
for i, j in zip(info["EMG_Onsets"], info["EMG_Offsets"]):
assert i < j
def METHOD_NAME():
sampling_rate = 1000
emg = nk.emg_simulate(duration=20, sampling_rate=sampling_rate)
emg_cleaned = nk.emg_clean(emg, sampling_rate=sampling_rate)
assert emg.size == emg_cleaned.size
# Comparison to biosppy (https://github.com/PIA-Group/BioSPPy/blob/e65da30f6379852ecb98f8e2e0c9b4b5175416c3/biosppy/signals/emg.py)
original, _, _ = biosppy.tools.filter_signal(
signal=emg,
ftype="butter",
band="highpass",
order=4,
frequency=100,
sampling_rate=sampling_rate,
)
emg_cleaned_biosppy = nk.signal_detrend(original, order=0)
assert np.allclose((emg_cleaned - emg_cleaned_biosppy).mean(), 0, atol=1e-6)
def test_emg_plot():
sampling_rate = 1000
emg = nk.emg_simulate(duration=10, sampling_rate=1000, burst_number=3)
emg_summary, _ = nk.emg_process(emg, sampling_rate=sampling_rate)
# Plot data over samples.
fig = nk.emg_plot(emg_summary)
assert len(fig.axes) == 2
titles = ["Raw and Cleaned Signal", "Muscle Activation"]
for (ax, title) in zip(fig.get_axes(), titles):
assert ax.get_title() == title
assert fig.get_axes()[1].get_xlabel() == "Samples"
np.testing.assert_array_equal(fig.axes[0].get_xticks(), fig.axes[1].get_xticks())
plt.close(fig)
# Plot data over time.
fig = nk.emg_plot(emg_summary, sampling_rate=sampling_rate)
assert fig.get_axes()[1].get_xlabel() == "Time (seconds)"
def test_emg_eventrelated():
emg = nk.emg_simulate(duration=20, sampling_rate=1000, burst_number=3)
emg_signals, info = nk.emg_process(emg, sampling_rate=1000)
epochs = nk.epochs_create(
emg_signals,
events=[3000, 6000, 9000],
sampling_rate=1000,
epochs_start=-0.1,
epochs_end=1.9,
)
emg_eventrelated = nk.emg_eventrelated(epochs)
# Test amplitude features
no_activation = np.where(emg_eventrelated["EMG_Activation"] == 0)[0][0]
assert int(pd.DataFrame(emg_eventrelated.values[no_activation]).isna().sum()) == 5
assert np.alltrue(
np.nansum(np.array(emg_eventrelated["EMG_Amplitude_Mean"]))
< np.nansum(np.array(emg_eventrelated["EMG_Amplitude_Max"]))
)
assert len(emg_eventrelated["Label"]) == 3
# Test warning on missing columns
with pytest.warns(
nk.misc.NeuroKitWarning, match=r".*does not have an `EMG_Onsets`.*"
):
first_epoch_key = list(epochs.keys())[0]
first_epoch_copy = epochs[first_epoch_key].copy()
del first_epoch_copy["EMG_Onsets"]
nk.emg_eventrelated({**epochs, first_epoch_key: first_epoch_copy})
with pytest.warns(
nk.misc.NeuroKitWarning, match=r".*does not have an `EMG_Activity`.*"
):
first_epoch_key = list(epochs.keys())[0]
first_epoch_copy = epochs[first_epoch_key].copy()
del first_epoch_copy["EMG_Activity"]
nk.emg_eventrelated({**epochs, first_epoch_key: first_epoch_copy})
with pytest.warns(
nk.misc.NeuroKitWarning, match=r".*does not have an.*`EMG_Amplitude`.*"
):
first_epoch_key = list(epochs.keys())[0]
first_epoch_copy = epochs[first_epoch_key].copy()
del first_epoch_copy["EMG_Amplitude"]
nk.emg_eventrelated({**epochs, first_epoch_key: first_epoch_copy})
def test_emg_intervalrelated():
emg = nk.emg_simulate(duration=40, sampling_rate=1000, burst_number=3)
emg_signals, info = nk.emg_process(emg, sampling_rate=1000)
columns = ["EMG_Activation_N", "EMG_Amplitude_Mean"]
# Test with signal dataframe
features_df = nk.emg_intervalrelated(emg_signals)
assert all(
elem in columns for elem in np.array(features_df.columns.values, dtype=str)
)
assert features_df.shape[0] == 1 # Number of rows
# Test with dict
columns.append("Label")
epochs = nk.epochs_create(
emg_signals, events=[0, 20000], sampling_rate=1000, epochs_end=20
)
features_dict = nk.emg_intervalrelated(epochs)
assert all(
elem in columns for elem in np.array(features_dict.columns.values, dtype=str)
)
assert features_dict.shape[0] == 2 # Number of rows
@pytest.mark.parametrize(
"method_cleaning, method_activation, threshold",
[("none", "threshold", "default"),
("biosppy", "pelt", 0.5),
("biosppy", "mixture", 0.05),
("biosppy", "biosppy", "default"),
("biosppy", "silva", "default")],
)
def test_emg_report(tmp_path, method_cleaning, method_activation, threshold):
sampling_rate = 250
emg = nk.emg_simulate(
duration=30,
sampling_rate=sampling_rate,
random_state=0,
)
d = tmp_path / "sub"
d.mkdir()
p = d / "myreport.html"
signals, _ = nk.emg_process(
emg,
sampling_rate=sampling_rate,
report=str(p),
method_cleaning=method_cleaning,
method_activation=method_activation,
threshold=threshold
)
assert p.is_file()
assert "EMG_Activity" in signals.columns
|
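The tests above exercise a small EMG processing chain; a minimal sketch of that chain, using the same calls and the API version these tests assume, is:

```python
# Mirrors the calls exercised by the tests above; parameter values are illustrative.
import neurokit2 as nk

emg = nk.emg_simulate(duration=10, sampling_rate=1000, burst_number=3)
cleaned = nk.emg_clean(emg, sampling_rate=1000)
amplitude = nk.emg_amplitude(cleaned)
activity, info = nk.emg_activation(amplitude)
print(info["EMG_Onsets"], info["EMG_Offsets"])

# emg_process runs the whole chain at once and emg_plot visualises the result.
signals, info = nk.emg_process(emg, sampling_rate=1000)
fig = nk.emg_plot(signals, sampling_rate=1000)
```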
4,203 | test library nucleic acid depleted in term |
import pytest
def test_library_starting_quantity_post(testapp, library_starting_quantity):
testapp.post_json('/library', library_starting_quantity)
def test_library_fragmentation_method_string(testapp, library_with_invalid_fragmentation_methods_string):
res = testapp.post_json('/library', library_with_invalid_fragmentation_methods_string, status=422)
def test_library_fragmentation_method_list(testapp, library_with_valid_fragmentation_method_list):
testapp.post_json('/library', library_with_valid_fragmentation_method_list, status=201)
library_with_valid_fragmentation_method_list.update({'fragmentation_duration_time': 5})
testapp.post_json('/library', library_with_valid_fragmentation_method_list, status=422)
library_with_valid_fragmentation_method_list.update({'fragmentation_duration_time_units': 'minutes'})
testapp.post_json('/library', library_with_valid_fragmentation_method_list, status=201)
library_with_valid_fragmentation_method_list.pop('fragmentation_methods')
testapp.post_json('/library', library_with_valid_fragmentation_method_list, status=422)
def test_library_size_SD_and_CV_properties(testapp, library_size_range, library_fragment_length_CV):
# https://encodedcc.atlassian.net/browse/ENCD-5276
testapp.post_json('/library', library_size_range, status=201)
library_size_range.update({'average_fragment_size': 350})
testapp.post_json('/library', library_size_range, status=422)
library_size_range.pop('size_range')
testapp.post_json('/library', library_size_range, status=201)
testapp.post_json('/library', library_fragment_length_CV, status=201)
library_fragment_length_CV.update({'fragment_length_SD': 45})
testapp.post_json('/library', library_fragment_length_CV, status=422)
library_fragment_length_CV.pop('fragment_length_CV')
testapp.post_json('/library', library_fragment_length_CV, status=201)
def test_library_adapters(testapp, library, file):
file_adapters = {
**library,
'adapters': [
{
'type': "read1 3' adapter",
'file': file['@id'],
},
]
}
testapp.post_json('/library', file_adapters, status=201)
sequence_adapters = {
**library,
'adapters': [
{
'type': "read1 3' adapter",
'sequence': 'GGGGGGCNA',
},
{
'type': "read1 3' adapter",
'sequence': 'GGGGGGCNAT',
},
]
}
testapp.post_json('/library', sequence_adapters, status=201)
file_sequence_adapter1 = {
**library,
'adapters': [
{
'type': "read1 3' adapter",
'file': file['@id'],
'sequence': 'GGGGGGCNA',
},
]
}
testapp.post_json('/library', file_sequence_adapter1, status=422)
file_sequence_adapter2 = {
**library,
'adapters': [
{
'type': "read1 3' adapter",
'file': file['@id'],
},
{
'type': "read1 3' adapter",
'file': file['@id'],
'sequence': 'GGGGGGCNA',
},
]
}
testapp.post_json('/library', file_sequence_adapter2, status=422)
mixed_adapters = {
**library,
'adapters': [
{
'type': "read1 3' adapter",
'file': file['@id'],
},
{
'type': "read1 3' adapter",
'sequence': 'GGGGGGCNA',
},
]
}
testapp.post_json('/library', mixed_adapters, status=422)
def test_library_adapters_type(testapp, library, file):
adapters = {
**library,
'adapters': [
{
'type': "read1 3' adapter",
'file': file['@id'],
},
]
}
testapp.post_json('/library', adapters, status=201)
adapters_missing_type = {
**library,
'adapters': [
{
'sequence': 'GGGGGGCNA',
}
]
}
testapp.post_json('/library', adapters_missing_type, status=422)
def METHOD_NAME(testapp, library_schema_13, library_schema_capped_mRNA):
# https://encodedcc.atlassian.net/browse/ENCD-5368
testapp.post_json('/library', library_schema_13, status=422)
library_schema_13.update({'nucleic_acid_term_name': 'RNA'})
testapp.post_json('/library', library_schema_13, status=201)
# https://encodedcc.atlassian.net/browse/ENCD-5647
testapp.post_json('/library', library_schema_capped_mRNA, status=201)
library_schema_capped_mRNA.update({'depleted_in_term_name': ['capped mRNA', 'polyadenylated mRNA']})
testapp.post_json('/library', library_schema_capped_mRNA, status=422)
def test_library_biosample_and_mixed_biosample(testapp, library, biosample_1, biosample_2):
# https://encodedcc.atlassian.net/browse/ENCD-5674
testapp.post_json('/library', library, status=201)
library.update({'mixed_biosamples': [biosample_1['@id'], biosample_2['@id']]})
testapp.post_json('/library', library, status=201)
library.update({'biosample': biosample_2})
testapp.post_json('/library', library, status=422)
def test_library_strand_specificity_required_for_RNA(testapp, library, file):
# https://encodedcc.atlassian.net/browse/ENCD-5894
testapp.post_json('/library', library, status=201)
library.update({'nucleic_acid_term_name': 'RNA'})
testapp.post_json('/library', library, status=422)
library.update({'strand_specificity': 'unstranded'})
testapp.post_json('/library', library, status=201)
|
4,204 | skip if asan class |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import ctypes
import os
import random
import socket
import time
from contextlib import closing
from functools import wraps
from typing import Any, Callable, Dict, Optional, TypeVar
import numpy as np
import torch
import torch.distributed as dist
from pyre_extensions import ParameterSpecification
from torch import nn
TParams = ParameterSpecification("TParams")
TReturn = TypeVar("TReturn")
def get_free_port() -> int:
# INTERNAL
if os.getenv("SANDCASTLE") == "1" or os.getenv("TW_JOB_USER") == "sandcastle":
if socket.has_ipv6:
family = socket.AF_INET6
address = "localhost6"
else:
family = socket.AF_INET
address = "localhost4"
with socket.socket(family, socket.SOCK_STREAM) as s:
try:
s.bind((address, 0))
s.listen(0)
with closing(s):
return s.getsockname()[1]
except socket.gaierror:
if address == "localhost6":
address = "::1"
else:
address = "127.0.0.1"
s.bind((address, 0))
s.listen(0)
with closing(s):
return s.getsockname()[1]
except Exception as e:
raise Exception(
f"Binding failed with address {address} while getting free port {e}"
)
# OSS GHA: TODO remove when ipv6 is enabled on GHA @omkar
else:
try:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind(("127.0.0.1", 0))
s.listen(0)
with closing(s):
return s.getsockname()[1]
except Exception as e:
raise Exception(
f"Binding failed with address 127.0.0.1 while getting free port {e}"
)
def is_asan() -> bool:
"""Determines if the Python interpreter is running with ASAN"""
return hasattr(ctypes.CDLL(""), "__asan_init")
def is_tsan() -> bool:
"""Determines if the Python interpreter is running with TSAN"""
return hasattr(ctypes.CDLL(""), "__tsan_init")
def is_asan_or_tsan() -> bool:
return is_asan() or is_tsan()
def skip_if_asan(
func: Callable[TParams, TReturn]
) -> Callable[TParams, Optional[TReturn]]:
"""Skip test run if we are in ASAN mode."""
@wraps(func)
def wrapper(*args: TParams.args, **kwargs: TParams.kwargs) -> Optional[TReturn]:
if is_asan_or_tsan():
print("Skipping test run since we are in ASAN mode.")
return
return func(*args, **kwargs)
return wrapper
def METHOD_NAME(cls: TReturn) -> Optional[TReturn]:
if is_asan_or_tsan():
print("Skipping test run since we are in ASAN mode.")
return
return cls
def init_distributed_single_host(
rank: int, world_size: int, backend: str, local_size: Optional[int] = None
) -> dist.ProcessGroup:
os.environ["LOCAL_WORLD_SIZE"] = str(local_size if local_size else world_size)
os.environ["LOCAL_RANK"] = str(rank % local_size if local_size else rank)
dist.init_process_group(rank=rank, world_size=world_size, backend=backend)
# pyre-fixme[7]: Expected `ProcessGroup` but got
# `Optional[_distributed_c10d.ProcessGroup]`.
return dist.group.WORLD
# pyre-ignore [24]
def seed_and_log(wrapped_func: Callable) -> Callable:
# pyre-ignore [2, 3]
def _wrapper(*args, **kwargs):
seed = int(time.time() * 1000) % (1 << 31)
print(f"Using random seed: {seed}")
torch.manual_seed(seed)
random.seed(seed)
np.random.seed(seed)
return wrapped_func(*args, **kwargs)
return _wrapper
def get_state_buffers_parameters(model: nn.Module) -> Dict[str, Any]:
return {
"state_dict": model.state_dict(),
"named_buffers": dict(model.named_buffers()),
"named_parameters": dict(model.named_parameters()),
}
def assert_state_buffers_parameters_equal(
model_1: nn.Module,
model_2: nn.Module,
check_named_buffers: bool = True,
check_named_parameters: bool = True,
check_state_dict: bool = True,
) -> None:
"""
Checks to see if the keys of top level PyTorch API calls are the same
between two modules.
"""
model_characteristics = {}
model_characteristics["model_1"] = get_state_buffers_parameters(model_1)
model_characteristics["model_2"] = get_state_buffers_parameters(model_2)
assert (
not check_named_buffers
or model_characteristics["model_1"]["named_buffers"].keys()
== model_characteristics["model_2"]["named_buffers"].keys()
), "named buffers keys are not the same"
assert (
not check_named_parameters
or model_characteristics["model_1"]["named_parameters"].keys()
== model_characteristics["model_2"]["named_parameters"].keys()
), f"named parameter keys are not the same {model_characteristics['model_1']['named_parameters'].keys()} vs {model_characteristics['model_2']['named_parameters'].keys()}"
assert (
not check_state_dict
or model_characteristics["model_1"]["state_dict"].keys()
== model_characteristics["model_2"]["state_dict"].keys()
), f"state dict key are not the same, {model_characteristics['model_1']['state_dict'].keys()} vs {model_characteristics['model_2']['state_dict'].keys()}"
|
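A short usage sketch for the decorators above, assuming they are in scope (defined in, or imported from, the same module); the test bodies are placeholders and `METHOD_NAME` stands for the masked class-level skip decorator.

```python
@skip_if_asan
def test_allreduce_smoke() -> None:
    port = get_free_port()          # free TCP port, e.g. for a process group store
    ...

@METHOD_NAME                        # class-level variant: yields None under ASAN/TSAN
class DistributedTests:
    @seed_and_log                   # picks a clock-based seed, prints it, then runs
    def test_model_keys(self) -> None:
        ...
```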
4,205 | plugin init |
# -*- coding: utf-8 -*-
# FOGLAMP_BEGIN
# See: https://foglamp-foglamp-documentation.readthedocs-hosted.com
# FOGLAMP_END
""" Plugin module which adds a square block of specific monochrome shade on images """
import os
import logging
import datetime
import filter_ingest
import traceback
import copy
import json
import numpy as np
from fledge.common import logger
# local logger
_LOGGER = logger.setup(__name__, level=logging.DEBUG)
_DEFAULT_CONFIG = {
'plugin': { # mandatory filter
'description': 'Filter that overlays a square block on image',
'type': 'string',
'default': 'imageblock',
'readonly': 'true'
},
'enable': { # recommended filter
'description': 'Enable imageblock filter plugin',
'type': 'boolean',
'default': 'false',
'displayName': 'Enabled',
'order': "1"
},
'block_color': {
'description': 'Block color (0-255)',
'type': 'integer',
'default': '255',
'displayName': 'Block color',
'order': '2'
}
}
def plugin_info():
""" Returns information about the plugin.
Args:
Returns:
dict: plugin information
Raises:
"""
_LOGGER.info("imageblock - plugin_info called")
return {
'name': 'imageblock',
'version': '1.9.2',
'mode': 'none',
'type': 'filter',
'interface': '1.0',
'config': _DEFAULT_CONFIG
}
def METHOD_NAME(config, ingest_ref, callback):
""" Initialise the plugin.
Args:
config: JSON configuration document for the plugin configuration category
Returns:
data: JSON object to be used in future calls to the plugin
Raises:
"""
_LOGGER.info("imageblock - plugin_init called")
try:
_config = copy.deepcopy(config)
_config['ingest_ref'] = ingest_ref
_config['callback'] = callback
except:
_LOGGER.info("could not create configuration")
raise
return _config
def plugin_ingest(handle, data):
""" plugin_ingest -- log data we receive """
if handle['enable']['value'] == 'false':
_LOGGER.debug("imageblock - plugin_ingest: enable=FALSE, not processing data, forwarding received data")
filter_ingest.filter_ingest_callback(handle['callback'], handle['ingest_ref'], data)
return
_LOGGER.debug("imageblock - plugin_ingest: INPUT: type(data)={}, data={}".format(type(data), data))
color = int(handle['block_color']['value'])
try:
if type(data) == dict:
data = [data]
for entry in data:
_LOGGER.debug("np.pi={}, type(entry) = {}".format(np.pi, type(entry)))
for k in entry['readings'].keys():
v = entry['readings'][k]
_LOGGER.debug("k={}, type(v)={}, v.shape={}, v={}".format(k, type(v), v.shape, v))
import random
center = random.randint(v.shape[0]//4,v.shape[0]//4*3+1)
sz = random.randint(10,v.shape[0]//4-10)
_LOGGER.debug("imageblock - plugin_ingest: center={}, sz={}, color={}".format(center, sz, color))
v[center-sz:center+sz,center-sz:center+sz] = color
entry['readings'][k] = v
_LOGGER.debug("After adding a small block, pixel values: OUTPUT: data={}".format(data))
filter_ingest.filter_ingest_callback(handle['callback'], handle['ingest_ref'], data)
except Exception as ex:
_LOGGER.error("imageblock writer exception {}".format(traceback.format_exc()))
raise
def plugin_reconfigure(handle, new_config):
""" Reconfigures the plugin
Args:
handle: handle returned by the plugin initialisation call
new_config: JSON object representing the new configuration category for the category
Returns:
new_handle: new handle to be used in the future calls
"""
_LOGGER.info("imageblock - Old config for plugin {} \n new config {}".format(handle, new_config))
plugin_shutdown(handle)
# plugin_init
new_handle = METHOD_NAME(new_config, handle['ingest_ref'], handle['callback'])
return new_handle
def plugin_shutdown(handle):
""" Shut down the plugin.
Args:
handle: JSON configuration document for the plugin configuration category
Returns:
data: JSON object to be used in future calls to the plugin
Raises:
"""
_LOGGER.info("imageblock Shutdown")
|
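The heart of `plugin_ingest` above is a NumPy slice assignment that paints a monochrome square into each image reading; a standalone sketch of just that step, with an illustrative image size, is:

```python
import random

import numpy as np

image = np.zeros((256, 256), dtype=np.uint8)    # stand-in for a reading's image array
color = 255                                      # the plugin's 'block_color' setting

# Random square position and size, as in plugin_ingest above.
center = random.randint(image.shape[0] // 4, image.shape[0] // 4 * 3 + 1)
sz = random.randint(10, image.shape[0] // 4 - 10)
image[center - sz:center + sz, center - sz:center + sz] = color
print(image.sum())                               # non-zero: the block was painted
```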
4,206 | float to fixed to float |
"""
The `OpenType specification <https://docs.microsoft.com/en-us/typography/opentype/spec/otff#data-types>`_
defines two fixed-point data types:
``Fixed``
A 32-bit signed fixed-point number with a 16 bit twos-complement
magnitude component and 16 fractional bits.
``F2DOT14``
A 16-bit signed fixed-point number with a 2 bit twos-complement
magnitude component and 14 fractional bits.
To support reading and writing data with these data types, this module provides
functions for converting between fixed-point, float and string representations.
.. data:: MAX_F2DOT14
The maximum value that can still fit in an F2Dot14. (1.99993896484375)
"""
from .roundTools import otRound, nearestMultipleShortestRepr
import logging
log = logging.getLogger(__name__)
__all__ = [
"MAX_F2DOT14",
"fixedToFloat",
"floatToFixed",
"floatToFixedToFloat",
"floatToFixedToStr",
"fixedToStr",
"strToFixed",
"strToFixedToFloat",
"ensureVersionIsLong",
"versionToFixed",
]
MAX_F2DOT14 = 0x7FFF / (1 << 14)
def fixedToFloat(value, precisionBits):
"""Converts a fixed-point number to a float given the number of
precision bits.
Args:
value (int): Number in fixed-point format.
precisionBits (int): Number of precision bits.
Returns:
Floating point value.
Examples::
>>> import math
>>> f = fixedToFloat(-10139, precisionBits=14)
>>> math.isclose(f, -0.61883544921875)
True
"""
return value / (1 << precisionBits)
def floatToFixed(value, precisionBits):
"""Converts a float to a fixed-point number given the number of
precision bits.
Args:
value (float): Floating point value.
precisionBits (int): Number of precision bits.
Returns:
int: Fixed-point representation.
Examples::
>>> floatToFixed(-0.61883544921875, precisionBits=14)
-10139
>>> floatToFixed(-0.61884, precisionBits=14)
-10139
"""
return otRound(value * (1 << precisionBits))
def METHOD_NAME(value, precisionBits):
"""Converts a float to a fixed-point number and back again.
By converting the float to fixed, rounding it, and converting it back
to float again, this returns a floating point values which is exactly
representable in fixed-point format.
Note: this **is** equivalent to ``fixedToFloat(floatToFixed(value))``.
Args:
value (float): The input floating point value.
precisionBits (int): Number of precision bits.
Returns:
float: The transformed and rounded value.
Examples::
>>> import math
>>> f1 = -0.61884
>>> f2 = floatToFixedToFloat(-0.61884, precisionBits=14)
>>> f1 != f2
True
>>> math.isclose(f2, -0.61883544921875)
True
"""
scale = 1 << precisionBits
return otRound(value * scale) / scale
def fixedToStr(value, precisionBits):
"""Converts a fixed-point number to a string representing a decimal float.
This chooses the float that has the shortest decimal representation (the least
number of fractional decimal digits).
For example, to convert a fixed-point number in a 2.14 format, use
``precisionBits=14``::
>>> fixedToStr(-10139, precisionBits=14)
'-0.61884'
This is pretty slow compared to the simple division used in ``fixedToFloat``.
Use sporadically when you need to serialize or print the fixed-point number in
a human-readable form.
It uses nearestMultipleShortestRepr under the hood.
Args:
value (int): The fixed-point value to convert.
precisionBits (int): Number of precision bits, *up to a maximum of 16*.
Returns:
str: A string representation of the value.
"""
scale = 1 << precisionBits
return nearestMultipleShortestRepr(value / scale, factor=1.0 / scale)
def strToFixed(string, precisionBits):
"""Converts a string representing a decimal float to a fixed-point number.
Args:
string (str): A string representing a decimal float.
precisionBits (int): Number of precision bits, *up to a maximum of 16*.
Returns:
int: Fixed-point representation.
Examples::
>>> ## to convert a float string to a 2.14 fixed-point number:
>>> strToFixed('-0.61884', precisionBits=14)
-10139
"""
value = float(string)
return otRound(value * (1 << precisionBits))
def strToFixedToFloat(string, precisionBits):
"""Convert a string to a decimal float with fixed-point rounding.
This first converts string to a float, then turns it into a fixed-point
number with ``precisionBits`` fractional binary digits, then back to a
float again.
This is simply a shorthand for fixedToFloat(floatToFixed(float(s))).
Args:
string (str): A string representing a decimal float.
precisionBits (int): Number of precision bits.
Returns:
float: The transformed and rounded value.
Examples::
>>> import math
>>> s = '-0.61884'
>>> bits = 14
>>> f = strToFixedToFloat(s, precisionBits=bits)
>>> math.isclose(f, -0.61883544921875)
True
>>> f == fixedToFloat(floatToFixed(float(s), precisionBits=bits), precisionBits=bits)
True
"""
value = float(string)
scale = 1 << precisionBits
return otRound(value * scale) / scale
def floatToFixedToStr(value, precisionBits):
"""Convert float to string with fixed-point rounding.
This uses the shortest decimal representation (ie. the least
number of fractional decimal digits) to represent the equivalent
fixed-point number with ``precisionBits`` fractional binary digits.
It uses nearestMultipleShortestRepr under the hood.
>>> floatToFixedToStr(-0.61883544921875, precisionBits=14)
'-0.61884'
Args:
value (float): The float value to convert.
precisionBits (int): Number of precision bits, *up to a maximum of 16*.
Returns:
str: A string representation of the value.
"""
scale = 1 << precisionBits
return nearestMultipleShortestRepr(value, factor=1.0 / scale)
def ensureVersionIsLong(value):
"""Ensure a table version is an unsigned long.
OpenType table version numbers are expressed as a single unsigned long
comprising of an unsigned short major version and unsigned short minor
version. This function detects if the value to be used as a version number
looks too small (i.e. is less than ``0x10000``), and converts it to
fixed-point using :func:`floatToFixed` if so.
Args:
value (Number): a candidate table version number.
Returns:
int: A table version number, possibly corrected to fixed-point.
"""
if value < 0x10000:
newValue = floatToFixed(value, 16)
log.warning(
"Table version value is a float: %.4f; " "fix to use hex instead: 0x%08x",
value,
newValue,
)
value = newValue
return value
def versionToFixed(value):
"""Ensure a table version number is fixed-point.
Args:
value (str): a candidate table version number.
Returns:
int: A table version number, possibly corrected to fixed-point.
"""
value = int(value, 0) if value.startswith("0") else float(value)
value = ensureVersionIsLong(value)
return value
|
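A worked round-trip for the F2Dot14 format (``precisionBits=14``) using the module's own helpers; the numbers match the docstring examples above.

```python
from fontTools.misc.fixedTools import (
    MAX_F2DOT14,
    fixedToFloat,
    fixedToStr,
    floatToFixed,
    floatToFixedToFloat,
)

fixed = floatToFixed(-0.61884, precisionBits=14)       # -10139
back = fixedToFloat(fixed, precisionBits=14)           # -0.61883544921875
assert back == floatToFixedToFloat(-0.61884, precisionBits=14)
print(fixedToStr(fixed, precisionBits=14))             # '-0.61884'
print(MAX_F2DOT14)                                     # 1.99993896484375 = 0x7FFF / 2**14
```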
4,207 | test allineate inputs |
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ..preprocess import Allineate
def METHOD_NAME():
input_map = dict(
allcostx=dict(
argstr="-allcostx |& tee %s",
extensions=None,
position=-1,
xor=["out_file", "out_matrix", "out_param_file", "out_weight_file"],
),
args=dict(
argstr="%s",
),
autobox=dict(
argstr="-autobox",
),
automask=dict(
argstr="-automask+%d",
),
autoweight=dict(
argstr="-autoweight%s",
),
center_of_mass=dict(
argstr="-cmass%s",
),
check=dict(
argstr="-check %s",
),
convergence=dict(
argstr="-conv %f",
),
cost=dict(
argstr="-cost %s",
),
environ=dict(
nohash=True,
usedefault=True,
),
epi=dict(
argstr="-EPI",
),
final_interpolation=dict(
argstr="-final %s",
),
fine_blur=dict(
argstr="-fineblur %f",
),
in_file=dict(
argstr="-source %s",
copyfile=False,
extensions=None,
mandatory=True,
),
in_matrix=dict(
argstr="-1Dmatrix_apply %s",
extensions=None,
position=-3,
xor=["out_matrix"],
),
in_param_file=dict(
argstr="-1Dparam_apply %s",
extensions=None,
xor=["out_param_file"],
),
interpolation=dict(
argstr="-interp %s",
),
master=dict(
argstr="-master %s",
extensions=None,
),
maxrot=dict(
argstr="-maxrot %f",
),
maxscl=dict(
argstr="-maxscl %f",
),
maxshf=dict(
argstr="-maxshf %f",
),
maxshr=dict(
argstr="-maxshr %f",
),
newgrid=dict(
argstr="-newgrid %f",
),
nmatch=dict(
argstr="-nmatch %d",
),
no_pad=dict(
argstr="-nopad",
),
nomask=dict(
argstr="-nomask",
),
num_threads=dict(
nohash=True,
usedefault=True,
),
nwarp=dict(
argstr="-nwarp %s",
),
nwarp_fixdep=dict(
argstr="-nwarp_fixdep%s...",
),
nwarp_fixmot=dict(
argstr="-nwarp_fixmot%s...",
),
one_pass=dict(
argstr="-onepass",
),
out_file=dict(
argstr="-prefix %s",
extensions=None,
hash_files=False,
name_source="in_file",
name_template="%s_allineate",
xor=["allcostx"],
),
out_matrix=dict(
argstr="-1Dmatrix_save %s",
extensions=None,
xor=["in_matrix", "allcostx"],
),
out_param_file=dict(
argstr="-1Dparam_save %s",
extensions=None,
xor=["in_param_file", "allcostx"],
),
out_weight_file=dict(
argstr="-wtprefix %s",
extensions=None,
xor=["allcostx"],
),
outputtype=dict(),
overwrite=dict(
argstr="-overwrite",
),
quiet=dict(
argstr="-quiet",
),
reference=dict(
argstr="-base %s",
extensions=None,
),
replacebase=dict(
argstr="-replacebase",
),
replacemeth=dict(
argstr="-replacemeth %s",
),
source_automask=dict(
argstr="-source_automask+%d",
),
source_mask=dict(
argstr="-source_mask %s",
extensions=None,
),
two_best=dict(
argstr="-twobest %d",
),
two_blur=dict(
argstr="-twoblur %f",
),
two_first=dict(
argstr="-twofirst",
),
two_pass=dict(
argstr="-twopass",
),
usetemp=dict(
argstr="-usetemp",
),
verbose=dict(
argstr="-verb",
),
warp_type=dict(
argstr="-warp %s",
),
warpfreeze=dict(
argstr="-warpfreeze",
),
weight=dict(
argstr="-weight %s",
),
weight_file=dict(
argstr="-weight %s",
deprecated="1.0.0",
extensions=None,
new_name="weight",
),
zclip=dict(
argstr="-zclip",
),
)
inputs = Allineate.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_Allineate_outputs():
output_map = dict(
allcostx=dict(
extensions=None,
),
out_file=dict(
extensions=None,
),
out_matrix=dict(
extensions=None,
),
out_param_file=dict(
extensions=None,
),
out_weight_file=dict(
extensions=None,
),
)
outputs = Allineate.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
|
4,208 | get test suite id |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import functools
import json
import uuid
from typing import Any, Dict, List, Tuple
import boto3
import pandas as pd
import os
from botocore.config import Config
import pytest
from tests.integ.sagemaker.jumpstart.constants import (
TEST_ASSETS_SPECS,
TMP_DIRECTORY_PATH,
TRAINING_DATASET_MODEL_DICT,
ContentType,
)
from sagemaker.jumpstart.constants import JUMPSTART_DEFAULT_REGION_NAME
from sagemaker.jumpstart.utils import get_jumpstart_content_bucket
from sagemaker.session import Session
def get_test_artifact_bucket() -> str:
bucket_name = get_sm_session().default_bucket()
return bucket_name
def METHOD_NAME() -> str:
return str(uuid.uuid4())
def get_sm_session() -> Session:
return Session(boto_session=boto3.Session(region_name=JUMPSTART_DEFAULT_REGION_NAME))
def get_training_dataset_for_model_and_version(model_id: str, version: str) -> dict:
return TRAINING_DATASET_MODEL_DICT[(model_id, version)]
def x_fail_if_ice(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
if "CapacityError" in str(e):
pytest.xfail(str(e))
raise
return wrapper
def download_inference_assets():
if not os.path.exists(TMP_DIRECTORY_PATH):
os.makedirs(TMP_DIRECTORY_PATH)
for asset, s3_key in TEST_ASSETS_SPECS.items():
file_path = os.path.join(TMP_DIRECTORY_PATH, str(asset.value))
if not os.path.exists(file_path):
download_file(
file_path,
get_jumpstart_content_bucket(JUMPSTART_DEFAULT_REGION_NAME),
s3_key,
boto3.client("s3"),
)
def get_tabular_data(data_filename: str) -> Tuple[pd.DataFrame, pd.DataFrame]:
asset_file_path = os.path.join(TMP_DIRECTORY_PATH, data_filename)
test_data = pd.read_csv(asset_file_path, header=None)
label, features = test_data.iloc[:, :1], test_data.iloc[:, 1:]
return label, features
def download_file(local_download_path, s3_bucket, s3_key, s3_client) -> None:
s3_client.download_file(s3_bucket, s3_key, local_download_path)
class EndpointInvoker:
def __init__(
self,
endpoint_name: str,
region: str = JUMPSTART_DEFAULT_REGION_NAME,
boto_config: Config = Config(retries={"max_attempts": 10, "mode": "standard"}),
) -> None:
self.endpoint_name = endpoint_name
self.region = region
self.config = boto_config
self.sagemaker_runtime_client = self.get_sagemaker_runtime_client()
def _invoke_endpoint(
self,
body: Any,
content_type: ContentType,
) -> Dict[str, Any]:
response = self.sagemaker_runtime_client.invoke_endpoint(
EndpointName=self.endpoint_name, ContentType=content_type.value, Body=body
)
return json.loads(response["Body"].read())
def invoke_tabular_endpoint(self, data: pd.DataFrame) -> Dict[str, Any]:
return self._invoke_endpoint(
body=data.to_csv(header=False, index=False).encode("utf-8"),
content_type=ContentType.TEXT_CSV,
)
def invoke_spc_endpoint(self, text: List[str]) -> Dict[str, Any]:
return self._invoke_endpoint(
body=json.dumps(text).encode("utf-8"),
content_type=ContentType.LIST_TEXT,
)
def get_sagemaker_runtime_client(self) -> boto3.client:
return boto3.client(
service_name="runtime.sagemaker", config=self.config, region_name=self.region
)
|
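A sketch of how the `EndpointInvoker` helper above is meant to be used once a tabular JumpStart endpoint exists; the endpoint name and feature values are illustrative, and AWS credentials are assumed.

```python
import pandas as pd

invoker = EndpointInvoker(endpoint_name="my-jumpstart-tabular-endpoint")  # hypothetical name
features = pd.DataFrame([[5.1, 3.5, 1.4, 0.2]])          # one illustrative feature row
prediction = invoker.invoke_tabular_endpoint(features)    # CSV-encodes and calls the endpoint
print(prediction)
```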
4,209 | setup |
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'ScorpioBroker'
copyright = '2020, NECTI + NLE'
author = 'NECTI + NLE'
# The short X.Y version
version = '1.0'
# The full version, including alpha/beta/rc tags
release = ''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
#html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
def METHOD_NAME(app):
app.add_stylesheet('css/fiware_readthedocs.css')
app.add_stylesheet('css/fiware_readthedocs_core.css')
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'ScorpioBrokerdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'ScorpioBroker.tex', 'ScorpioBroker Documentation',
'NECTI', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'scorpiobroker', 'ScorpioBroker Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'ScorpioBroker', 'ScorpioBroker Documentation',
author, 'ScorpioBroker', 'One line description of project.',
'Miscellaneous'),
|
4,210 | add handle |
import logging
from abc import abstractmethod
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
List,
Mapping,
Optional,
Sequence,
Union,
)
from cloudevents.conversion import to_json
from cloudevents.http import CloudEvent
from _ert_com_protocol import DispatcherMessage
from _ert_job_runner.client import Client
from ert.ensemble_evaluator import state
from ert.ensemble_evaluator.snapshot import (
Job,
PartialSnapshot,
RealizationSnapshot,
Snapshot,
SnapshotDict,
Step,
)
from ert.serialization import evaluator_marshaller
from ._realization import Realization
if TYPE_CHECKING:
import asyncio
from ..config import EvaluatorServerConfig
logger = logging.getLogger(__name__)
_handle = Callable[..., Any]
class _EnsembleStateTracker:
def __init__(self, state_: str = state.ENSEMBLE_STATE_UNKNOWN) -> None:
self._state = state_
self._handles: Dict[str, _handle] = {}
self._msg = "Illegal state transition from %s to %s"
self.set_default_handles()
def METHOD_NAME(self, state_: str, handle: _handle) -> None:
self._handles[state_] = handle
def _handle_unknown(self) -> None:
if self._state != state.ENSEMBLE_STATE_UNKNOWN:
logger.warning(self._msg, self._state, state.ENSEMBLE_STATE_UNKNOWN)
self._state = state.ENSEMBLE_STATE_UNKNOWN
def _handle_started(self) -> None:
if self._state != state.ENSEMBLE_STATE_UNKNOWN:
logger.warning(self._msg, self._state, state.ENSEMBLE_STATE_STARTED)
self._state = state.ENSEMBLE_STATE_STARTED
def _handle_failed(self) -> None:
if self._state not in [
state.ENSEMBLE_STATE_UNKNOWN,
state.ENSEMBLE_STATE_STARTED,
]:
logger.warning(self._msg, self._state, state.ENSEMBLE_STATE_FAILED)
self._state = state.ENSEMBLE_STATE_FAILED
def _handle_stopped(self) -> None:
if self._state != state.ENSEMBLE_STATE_STARTED:
logger.warning(self._msg, self._state, state.ENSEMBLE_STATE_STOPPED)
self._state = state.ENSEMBLE_STATE_STOPPED
def _handle_canceled(self) -> None:
if self._state != state.ENSEMBLE_STATE_STARTED:
logger.warning(self._msg, self._state, state.ENSEMBLE_STATE_CANCELLED)
self._state = state.ENSEMBLE_STATE_CANCELLED
def set_default_handles(self) -> None:
self.METHOD_NAME(state.ENSEMBLE_STATE_UNKNOWN, self._handle_unknown)
self.METHOD_NAME(state.ENSEMBLE_STATE_STARTED, self._handle_started)
self.METHOD_NAME(state.ENSEMBLE_STATE_FAILED, self._handle_failed)
self.METHOD_NAME(state.ENSEMBLE_STATE_STOPPED, self._handle_stopped)
self.METHOD_NAME(state.ENSEMBLE_STATE_CANCELLED, self._handle_canceled)
def update_state(self, state_: str) -> str:
if state_ not in self._handles:
raise KeyError(f"Handle not defined for state {state_}")
# Call the state handle mapped to the new state
self._handles[state_]()
return self._state
class Ensemble:
def __init__(
self, reals: Sequence[Realization], metadata: Mapping[str, Any], id_: str
) -> None:
self.reals = reals
self.metadata = metadata
self._snapshot = self._create_snapshot()
self.status = self._snapshot.status
if self._snapshot.status:
self._status_tracker = _EnsembleStateTracker(self._snapshot.status)
else:
self._status_tracker = _EnsembleStateTracker()
self._id: str = id_
def __repr__(self) -> str:
return f"Ensemble with {len(self.reals)} members"
def evaluate(self, config: "EvaluatorServerConfig") -> None:
pass
async def evaluate_async(
self, config: "EvaluatorServerConfig", experiment_id: str
) -> None:
pass
def cancel(self) -> None:
pass
@property
def id_(self) -> str:
return self._id
@property
def cancellable(self) -> bool:
return False
@property
def active_reals(self) -> Sequence[Realization]:
return list(filter(lambda real: real.active, self.reals))
@property
def snapshot(self) -> Snapshot:
return self._snapshot
def update_snapshot(self, events: List[CloudEvent]) -> PartialSnapshot:
snapshot_mutate_event = PartialSnapshot(self._snapshot)
for event in events:
snapshot_mutate_event.from_cloudevent(event)
self._snapshot.merge_event(snapshot_mutate_event)
if self._snapshot.status is not None and self.status != self._snapshot.status:
self.status = self._status_tracker.update_state(self._snapshot.status)
return snapshot_mutate_event
async def send_cloudevent( # pylint: disable=too-many-arguments
self,
url: str,
event: CloudEvent,
token: Optional[str] = None,
cert: Optional[Union[str, bytes]] = None,
retries: int = 10,
) -> None:
async with Client(url, token, cert, max_retries=retries) as client:
await client._send(to_json(event, data_marshaller=evaluator_marshaller))
# TODO: make legacy-only?
# See https://github.com/equinor/ert/issues/3456
@property
@abstractmethod
def output_bus(
self,
) -> "asyncio.Queue[DispatcherMessage]":
raise NotImplementedError
# TODO: make legacy-only?
# See https://github.com/equinor/ert/issues/3456
async def queue_cloudevent(
self,
event: DispatcherMessage,
) -> None:
self.output_bus.put_nowait(event)
def get_successful_realizations(self) -> int:
return self._snapshot.get_successful_realizations()
def _create_snapshot(self) -> Snapshot:
reals: Dict[str, RealizationSnapshot] = {}
for real in self.active_reals:
reals[str(real.iens)] = RealizationSnapshot(
active=True,
status=state.REALIZATION_STATE_WAITING,
)
for step in real.steps:
reals[str(real.iens)].steps[str(step.id_)] = Step(
status=state.STEP_STATE_UNKNOWN
)
for job in step.jobs:
reals[str(real.iens)].steps[str(step.id_)].jobs[str(job.id_)] = Job(
status=state.JOB_STATE_START,
index=job.index,
name=job.name,
)
top = SnapshotDict(
reals=reals,
status=state.ENSEMBLE_STATE_UNKNOWN,
metadata=self.metadata,
)
return Snapshot(top.dict())
|
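A small sketch of the `_EnsembleStateTracker` state machine above, with the class and the imported `state` module in scope; illegal transitions are only logged as warnings, not raised.

```python
tracker = _EnsembleStateTracker()                           # starts in ENSEMBLE_STATE_UNKNOWN
print(tracker.update_state(state.ENSEMBLE_STATE_STARTED))   # UNKNOWN -> STARTED
print(tracker.update_state(state.ENSEMBLE_STATE_STOPPED))   # STARTED -> STOPPED
print(tracker.update_state(state.ENSEMBLE_STATE_STARTED))   # warns: illegal transition
```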
4,211 | prettify |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import json
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Evaluation
from pipeline.component import HeteroSSHELR
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config
def METHOD_NAME(response, verbose=True):
if verbose:
print(json.dumps(response, indent=4, ensure_ascii=False))
print()
return response
def main(config="../../config.yaml", namespace=""):
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
hosts = parties.host[0]
guest_train_data = {"name": "vehicle_scale_hetero_guest", "namespace": f"experiment{namespace}"}
host_train_data = {"name": "vehicle_scale_hetero_host", "namespace": f"experiment{namespace}"}
# guest_train_data = {"name": "default_credit_hetero_guest", "namespace": f"experiment{namespace}"}
# host_train_data = {"name": "default_credit_hetero_host", "namespace": f"experiment{namespace}"}
# initialize pipeline
pipeline = PipeLine()
# set job initiator
pipeline.set_initiator(role='guest', party_id=guest)
# set participants information
pipeline.set_roles(guest=guest, host=hosts)
# define Reader components to read in data
reader_0 = Reader(name="reader_0")
# configure Reader for guest
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
# configure Reader for host
reader_0.get_party_instance(role='host', party_id=hosts).component_param(table=host_train_data)
data_transform_0 = DataTransform(name="data_transform_0", output_format='dense')
# get DataTransform party instance of guest
data_transform_0_guest_party_instance = data_transform_0.get_party_instance(role='guest', party_id=guest)
# configure DataTransform for guest
data_transform_0_guest_party_instance.component_param(with_label=True)
# get and configure DataTransform party instance of host
data_transform_0.get_party_instance(role='host', party_id=hosts).component_param(with_label=False)
# define Intersection components
intersection_0 = Intersection(name="intersection_0")
pipeline.add_component(reader_0)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
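# Training parameters for the HeteroSSHELR component (hetero logistic regression based on secret sharing).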
lr_param = {
"name": "hetero_sshe_lr_0",
"penalty": "L2",
"optimizer": "adam",
"tol": 0.0001,
"alpha": 0.001,
"max_iter": 30,
"early_stop": "diff",
"batch_size": -1,
"learning_rate": 0.15,
"init_param": {
"init_method": "zeros",
"fit_intercept": False
},
"encrypt_param": {
"key_length": 1024
},
"reveal_every_iter": True,
"reveal_strategy": "respectively"
}
hetero_sshe_lr_0 = HeteroSSHELR(**lr_param)
pipeline.add_component(hetero_sshe_lr_0, data=Data(train_data=intersection_0.output.data))
evaluation_0 = Evaluation(name="evaluation_0", eval_type="multi")
pipeline.add_component(evaluation_0, data=Data(data=hetero_sshe_lr_0.output.data))
pipeline.compile()
# fit model
pipeline.fit()
# query component summary
METHOD_NAME(pipeline.get_component("hetero_sshe_lr_0").get_summary())
METHOD_NAME(pipeline.get_component("evaluation_0").get_summary())
pipeline.deploy_component([data_transform_0, intersection_0, hetero_sshe_lr_0])
predict_pipeline = PipeLine()
# add data reader onto predict pipeline
predict_pipeline.add_component(reader_0)
# add selected components from train pipeline onto predict pipeline
# specify data source
predict_pipeline.add_component(
pipeline, data=Data(
predict_input={
pipeline.data_transform_0.input.data: reader_0.output.data}))
# run predict model
predict_pipeline.predict()
return pipeline
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
|
4,212 |
map
|
import sys
from collections.abc import Callable, Iterable, Iterator, Mapping
from types import TracebackType
from typing import Any, Generic, TypeVar
from typing_extensions import Literal, Self
if sys.version_info >= (3, 9):
from types import GenericAlias
__all__ = ["Pool", "ThreadPool"]
_S = TypeVar("_S")
_T = TypeVar("_T")
class ApplyResult(Generic[_T]):
if sys.version_info >= (3, 8):
def __init__(
self, pool: Pool, callback: Callable[[_T], object] | None, error_callback: Callable[[BaseException], object] | None
) -> None: ...
else:
def __init__(
self,
cache: dict[int, ApplyResult[Any]],
callback: Callable[[_T], object] | None,
error_callback: Callable[[BaseException], object] | None,
) -> None: ...
def get(self, timeout: float | None = None) -> _T: ...
def wait(self, timeout: float | None = None) -> None: ...
def ready(self) -> bool: ...
def successful(self) -> bool: ...
if sys.version_info >= (3, 9):
def __class_getitem__(cls, item: Any) -> GenericAlias: ...
# alias created during issue #17805
AsyncResult = ApplyResult
class MapResult(ApplyResult[list[_T]]):
if sys.version_info >= (3, 8):
def __init__(
self,
pool: Pool,
chunksize: int,
length: int,
callback: Callable[[list[_T]], object] | None,
error_callback: Callable[[BaseException], object] | None,
) -> None: ...
else:
def __init__(
self,
cache: dict[int, ApplyResult[Any]],
chunksize: int,
length: int,
callback: Callable[[list[_T]], object] | None,
error_callback: Callable[[BaseException], object] | None,
) -> None: ...
class IMapIterator(Iterator[_T]):
if sys.version_info >= (3, 8):
def __init__(self, pool: Pool) -> None: ...
else:
def __init__(self, cache: dict[int, IMapIterator[Any]]) -> None: ...
def __iter__(self) -> Self: ...
def next(self, timeout: float | None = None) -> _T: ...
def __next__(self, timeout: float | None = None) -> _T: ...
class IMapUnorderedIterator(IMapIterator[_T]): ...
class Pool:
def __init__(
self,
processes: int | None = None,
initializer: Callable[..., object] | None = None,
initargs: Iterable[Any] = (),
maxtasksperchild: int | None = None,
context: Any | None = None,
) -> None: ...
def apply(self, func: Callable[..., _T], args: Iterable[Any] = (), kwds: Mapping[str, Any] = {}) -> _T: ...
def apply_async(
self,
func: Callable[..., _T],
args: Iterable[Any] = (),
kwds: Mapping[str, Any] = {},
callback: Callable[[_T], object] | None = None,
error_callback: Callable[[BaseException], object] | None = None,
) -> AsyncResult[_T]: ...
def METHOD_NAME(self, func: Callable[[_S], _T], iterable: Iterable[_S], chunksize: int | None = None) -> list[_T]: ...
def map_async(
self,
func: Callable[[_S], _T],
iterable: Iterable[_S],
chunksize: int | None = None,
callback: Callable[[_T], object] | None = None,
error_callback: Callable[[BaseException], object] | None = None,
) -> MapResult[_T]: ...
def imap(self, func: Callable[[_S], _T], iterable: Iterable[_S], chunksize: int | None = 1) -> IMapIterator[_T]: ...
def imap_unordered(self, func: Callable[[_S], _T], iterable: Iterable[_S], chunksize: int | None = 1) -> IMapIterator[_T]: ...
def starmap(self, func: Callable[..., _T], iterable: Iterable[Iterable[Any]], chunksize: int | None = None) -> list[_T]: ...
def starmap_async(
self,
func: Callable[..., _T],
iterable: Iterable[Iterable[Any]],
chunksize: int | None = None,
callback: Callable[[_T], object] | None = None,
error_callback: Callable[[BaseException], object] | None = None,
) -> AsyncResult[list[_T]]: ...
def close(self) -> None: ...
def terminate(self) -> None: ...
def join(self) -> None: ...
def __enter__(self) -> Self: ...
def __exit__(
self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None
) -> None: ...
class ThreadPool(Pool):
def __init__(
self, processes: int | None = None, initializer: Callable[..., object] | None = None, initargs: Iterable[Any] = ()
) -> None: ...
# undocumented
if sys.version_info >= (3, 8):
INIT: Literal["INIT"]
RUN: Literal["RUN"]
CLOSE: Literal["CLOSE"]
TERMINATE: Literal["TERMINATE"]
else:
RUN: Literal[0]
CLOSE: Literal[1]
TERMINATE: Literal[2]
|
4,213 |
set hash
|
from functools import reduce
from operator import or_
from django.db import models
from django.utils import timezone
from sentry.backup.scopes import RelocationScope
from sentry.constants import MAX_EMAIL_FIELD_LENGTH
from sentry.db.models import BoundedBigIntegerField, Model, region_silo_only_model, sane_repr
from sentry.utils.datastructures import BidirectionalMapping
from sentry.utils.hashlib import md5_text
# The order of these keys are significant to also indicate priority
# when used in hashing and determining uniqueness. If you change the order
# you will break stuff.
KEYWORD_MAP = BidirectionalMapping(
{
"ident": "id",
"username": "username",
"email": "email",
"ip_address": "ip",
}
)
@region_silo_only_model
class EventUser(Model):
__relocation_scope__ = RelocationScope.Excluded
project_id = BoundedBigIntegerField(db_index=True)
hash = models.CharField(max_length=32)
ident = models.CharField(max_length=128, null=True)
email = models.EmailField(null=True, max_length=MAX_EMAIL_FIELD_LENGTH)
username = models.CharField(max_length=128, null=True)
name = models.CharField(max_length=128, null=True)
ip_address = models.GenericIPAddressField(null=True)
date_added = models.DateTimeField(default=timezone.now, db_index=True)
class Meta:
app_label = "sentry"
db_table = "sentry_eventuser"
unique_together = (("project_id", "ident"), ("project_id", "hash"))
index_together = (
("project_id", "email"),
("project_id", "username"),
("project_id", "ip_address"),
)
__repr__ = sane_repr("project_id", "ident", "email", "username", "ip_address")
@classmethod
def attr_from_keyword(cls, keyword):
return KEYWORD_MAP.get_key(keyword)
@classmethod
def hash_from_tag(cls, value):
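# Drop the keyword prefix (e.g. "email:") from the tag value and hash the remainder.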
return md5_text(value.split(":", 1)[-1]).hexdigest()
@classmethod
def for_tags(cls, project_id, values):
"""
Find matching EventUser objects from a list of tag values.
Return a dictionary of {tag_value: event_user}.
"""
hashes = [cls.hash_from_tag(v) for v in values]
return {e.tag_value: e for e in cls.objects.filter(project_id=project_id, hash__in=hashes)}
def save(self, *args, **kwargs):
assert (
self.ident or self.username or self.email or self.ip_address
), "No identifying value found for user"
if not self.hash:
self.METHOD_NAME()
super().save(*args, **kwargs)
def METHOD_NAME(self):
self.hash = self.build_hash()
def build_hash(self):
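# Hash the first non-empty identifying attribute, iterating in KEYWORD_MAP priority order.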
for key, value in self.iter_attributes():
if value:
return md5_text(value).hexdigest()
@property
def tag_value(self):
"""
Return the identifier used with tags to link this user.
"""
for key, value in self.iter_attributes():
if value:
return f"{KEYWORD_MAP[key]}:{value}"
def iter_attributes(self):
"""
Iterate over key/value pairs for this EventUser in priority order.
"""
for key in KEYWORD_MAP.keys():
yield key, getattr(self, key)
def get_label(self):
return self.email or self.username or self.ident or self.ip_address
def get_display_name(self):
return self.name or self.email or self.username
def find_similar_users(self, user):
from sentry.models import OrganizationMemberTeam, Project
# limit to only teams user has opted into
project_ids = list(
Project.objects.filter(
teams__in=OrganizationMemberTeam.objects.filter(
organizationmember__user=user,
organizationmember__organization__project=self.project_id,
is_active=True,
).values("team")
).values_list("id", flat=True)[:1000]
)
if not project_ids:
return type(self).objects.none()
filters = []
if self.email:
filters.append(models.Q(email=self.email))
if self.ip_address:
filters.append(models.Q(ip_address=self.ip_address))
if not filters:
return type(self).objects.none()
return (
type(self)
.objects.exclude(id=self.id)
.filter(reduce(or_, filters), project_id__in=project_ids)
)
|
4,214 |
finalize
|
import logging
import numpy as np
import pymc3 as pm
from typing import Callable
from .inference_task_base import Sampler, Caller, CallerUpdateSummary, \
HybridInferenceTask, HybridInferenceParameters
from .. import config, types
from ..models.model_ploidy import PloidyModelConfig, PloidyModel, \
PloidyWorkspace, PloidyEmissionBasicSampler, PloidyBasicCaller
_logger = logging.getLogger(__name__)
class PloidyCaller(Caller):
"""This class is a wrapper around `PloidyBasicCaller` to be used in a `HybridInferenceTask`."""
def __init__(self,
hybrid_inference_params: HybridInferenceParameters,
ploidy_workspace: PloidyWorkspace):
self.hybrid_inference_params = hybrid_inference_params
self.ploidy_basic_caller = PloidyBasicCaller(hybrid_inference_params, ploidy_workspace)
def snapshot(self):
"""Snapshot is not necessary since there is no internal consistency loop."""
pass
def METHOD_NAME(self):
"""Finalizing is not necessary since there is no internal consistency loop."""
pass
def call(self) -> 'PloidyCallerUpdateSummary':
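# Run one round of ploidy calling and summarize the per-element update norms with the configured reducer.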
update_norm_sj = self.ploidy_basic_caller.call()
return PloidyCallerUpdateSummary(
update_norm_sj, self.hybrid_inference_params.caller_summary_statistics_reducer)
def update_auxiliary_vars(self):
pass
class PloidyCallerUpdateSummary(CallerUpdateSummary):
def __init__(self,
update_norm_sj: np.ndarray,
reducer: Callable[[np.ndarray], float]):
self.scalar_update = reducer(update_norm_sj)
def __repr__(self):
return "ploidy update size: {0:2.6}".format(self.scalar_update)
def reduce_to_scalar(self) -> float:
return self.scalar_update
class PloidyEmissionSampler(Sampler):
"""This class is a wrapper around `PloidyEmissionBasicSampler` to be used in a `HybridInferenceTask`."""
def __init__(self,
hybrid_inference_params: HybridInferenceParameters,
ploidy_model: PloidyModel,
ploidy_workspace: PloidyWorkspace):
super().__init__(hybrid_inference_params)
self.ploidy_workspace = ploidy_workspace
self.ploidy_emission_basic_sampler = PloidyEmissionBasicSampler(
ploidy_model, self.hybrid_inference_params.log_emission_samples_per_round)
def update_approximation(self, approx: pm.approximations.MeanField):
self.ploidy_emission_basic_sampler.update_approximation(approx)
def draw(self) -> np.ndarray:
return self.ploidy_emission_basic_sampler.draw()
def reset(self):
self.ploidy_workspace.log_ploidy_emission_sjk.set_value(
np.zeros((self.ploidy_workspace.num_samples,
self.ploidy_workspace.num_contigs,
self.ploidy_workspace.num_ploidy_states),
dtype=types.floatX), borrow=config.borrow_numpy)
def increment(self, update):
self.ploidy_workspace.log_ploidy_emission_sjk.set_value(
self.ploidy_workspace.log_ploidy_emission_sjk.get_value(borrow=True) + update)
def get_latest_log_emission_posterior_mean_estimator(self) -> np.ndarray:
return self.ploidy_workspace.log_ploidy_emission_sjk.get_value(borrow=True)
class CohortPloidyInferenceTask(HybridInferenceTask):
"""Cohort germline contig ploidy determination task."""
def __init__(self,
hybrid_inference_params: HybridInferenceParameters,
ploidy_config: PloidyModelConfig,
ploidy_workspace: PloidyWorkspace):
_logger.info("Instantiating the germline contig ploidy determination model...")
ploidy_model = PloidyModel(ploidy_config, ploidy_workspace)
_logger.info("Instantiating the ploidy emission sampler...")
ploidy_emission_sampler = PloidyEmissionSampler(hybrid_inference_params, ploidy_model, ploidy_workspace)
_logger.info("Instantiating the ploidy caller...")
ploidy_caller = PloidyCaller(hybrid_inference_params, ploidy_workspace)
elbo_normalization_factor = ploidy_workspace.num_samples * ploidy_workspace.num_contigs
super().__init__(hybrid_inference_params, ploidy_model, ploidy_emission_sampler, ploidy_caller,
elbo_normalization_factor=elbo_normalization_factor,
advi_task_name="denoising",
calling_task_name="ploidy calling")
self.ploidy_config = ploidy_config
self.ploidy_workspace = ploidy_workspace
def disengage(self):
pass
|
4,215 |
test results sorted by created desc
|
import datetime
from unittest import mock
import pytest
from h_matchers import Any
from h.models import Group
from h.models.group import ReadableBy
from h.services.group import GroupService, groups_factory
class TestGroupServiceFetch:
def test_it_proxies_to_fetch_by_groupid_if_groupid_valid(self, svc):
svc.fetch_by_groupid = mock.Mock()
result = svc.fetch("group:[email protected]")
assert svc.fetch_by_groupid.called_once_with("group:[email protected]")
assert result == svc.fetch_by_groupid.return_value
def test_it_proxies_to_fetch_by_pubid_if_not_groupid_syntax(self, svc):
svc.fetch_by_pubid = mock.Mock()
result = svc.fetch("abcdppp")
assert svc.fetch_by_pubid.called_once_with("abcdppp")
assert result == svc.fetch_by_pubid.return_value
class TestGroupServiceFetchByPubid:
def test_it_returns_group_model(self, svc, factories):
group = factories.Group()
fetched_group = svc.fetch_by_pubid(group.pubid)
assert fetched_group == group
assert isinstance(fetched_group, Group)
def test_it_returns_None_if_no_group_found(self, svc):
group = svc.fetch_by_pubid("abcdeff")
assert group is None
class TestGroupServiceFetchByGroupid:
def test_it_returns_group_model_of_matching_group(self, svc, factories):
group = factories.Group(authority_provided_id="dingdong", authority="foo.com")
fetched_group = svc.fetch_by_groupid(group.groupid)
assert isinstance(fetched_group, Group)
def test_it_raises_ValueError_if_invalid_groupid(self, svc):
with pytest.raises(ValueError, match="isn't a valid groupid"):
svc.fetch_by_groupid("fiddlesticks")
def test_it_returns_None_if_no_matching_group(self, svc):
assert svc.fetch_by_groupid("group:[email protected]") is None
@pytest.mark.usefixtures("groups")
class TestFilterByName:
def test_it_filters_by_name(self, svc):
filtered_groups = svc.filter_by_name(name="Hello")
assert len(filtered_groups.all()) == 1
assert filtered_groups.all() == [
Any.instance_of(Group).with_attrs({"name": "Hello"})
]
def test_it_returns_all_groups_if_name_is_None(self, svc, groups):
filtered_groups = svc.filter_by_name()
# results include public group in addition to ``groups``
assert len(filtered_groups.all()) == len(groups) + 1
def test_it_is_case_insensitive(self, svc):
filtered_groups = svc.filter_by_name(name="Amber")
assert len(filtered_groups.all()) == 2
def test_it_performs_wildcard_search(self, svc):
filtered_groups = svc.filter_by_name(name="Finger")
assert len(filtered_groups.all()) == 2
def METHOD_NAME(self, svc):
filtered_groups = svc.filter_by_name("Finger")
assert filtered_groups.all() == [
Any.instance_of(Group).with_attrs({"name": "Fingers"}),
Any.instance_of(Group).with_attrs({"name": "Finger"}),
]
@pytest.fixture
def groups(self, factories):
return [
factories.Group(name="Finger", created=datetime.datetime(2015, 8, 2)),
factories.Group(name="Fingers", created=datetime.datetime(2018, 2, 1)),
factories.Group(name="Hello"),
factories.Group(name="Amber"),
factories.Group(name="amber"),
]
class TestGroupServiceGroupIds:
"""
Unit tests for methods related to group IDs.
- :py:meth:`GroupService.groupids_readable_by`
- :py:meth:`GroupService.groupids_created_by`
"""
@pytest.mark.parametrize("with_user", [True, False])
def test_readable_by_includes_world(self, with_user, svc, db_session, factories):
user = None
if with_user:
user = factories.User()
db_session.flush()
assert "__world__" in svc.groupids_readable_by(user)
@pytest.mark.parametrize("with_user", [True, False])
def test_readable_by_includes_world_readable_groups(
self, with_user, svc, db_session, factories
):
# group readable by members
factories.Group(readable_by=ReadableBy.members)
# group readable by everyone
group = factories.Group(readable_by=ReadableBy.world)
user = None
if with_user:
user = factories.User()
db_session.flush()
assert group.pubid in svc.groupids_readable_by(user)
def test_readable_by_includes_memberships(self, svc, db_session, factories):
user = factories.User()
group = factories.Group(readable_by=ReadableBy.members)
group.members.append(user)
db_session.flush()
assert group.pubid in svc.groupids_readable_by(user)
def test_readable_by_applies_filter(self, svc, db_session, factories):
user = factories.User()
factories.Group(
readable_by=ReadableBy.world
) # Group that shouldn't be returned
group = factories.Group(readable_by=ReadableBy.world)
db_session.flush()
pubids = [group.pubid, "doesnotexist"]
assert svc.groupids_readable_by(user, group_ids=pubids) == [group.pubid]
def test_created_by_includes_created_groups(self, svc, factories):
user = factories.User()
group = factories.Group(creator=user)
assert group.pubid in svc.groupids_created_by(user)
def test_created_by_excludes_other_groups(self, svc, db_session, factories):
user = factories.User()
private_group = factories.Group()
private_group.members.append(user)
factories.Group(readable_by=ReadableBy.world)
db_session.flush()
assert svc.groupids_created_by(user) == []
def test_created_by_returns_empty_list_for_missing_user(self, svc):
assert svc.groupids_created_by(None) == []
@pytest.mark.usefixtures("user_service")
class TestGroupsFactory:
def test_returns_groups_service(self, pyramid_request):
svc = groups_factory(None, pyramid_request)
assert isinstance(svc, GroupService)
def test_provides_request_db_as_session(self, pyramid_request):
svc = groups_factory(None, pyramid_request)
assert svc.session == pyramid_request.db
def test_wraps_user_service_as_user_fetcher(self, pyramid_request, user_service):
svc = groups_factory(None, pyramid_request)
svc.user_fetcher("foo")
user_service.fetch.assert_called_once_with("foo")
@pytest.fixture
def svc(db_session, user_service):
return GroupService(db_session, user_service)
|
4,216 |
test bio degradation full run
|
'''
Test biodegradation module
'''
from datetime import timedelta
import pytest
import numpy as np
from gnome.environment import constant_wind, Water, Waves
# from gnome.spills.elements import floating
from gnome.weatherers import (Evaporation,
NaturalDispersion,
# Dissolution,
Biodegradation,
weatherer_sort)
from .conftest import weathering_data_arrays, test_oil
from ..conftest import (sample_model_weathering2)
from pprint import PrettyPrinter
pp = PrettyPrinter(indent=2, width=120)
wind = constant_wind(15., 270, 'knots')
water = Water()
waves = Waves(wind, water)
def test_init():
wind = constant_wind(15., 0)
waves = Waves(wind, Water())
bio_deg = Biodegradation(waves)
print(bio_deg.array_types)
assert all([(at in bio_deg.array_types)
for at in ('mass', 'droplet_avg_size')])
def test_sort_order():
'test sort order for Biodegradation weatherer'
wind = constant_wind(15., 0)
waves = Waves(wind, Water())
bio_deg = Biodegradation(waves)
assert weatherer_sort(bio_deg) == 11
@pytest.mark.skipif(reason="serialization for weatherers overall needs review")
def test_serialize_deseriailize():
'test serialize/deserialize for webapi'
wind = constant_wind(15., 0)
water = Water()
waves = Waves(wind, water)
bio_deg = Biodegradation(waves)
json_ = bio_deg.serialize()
pp.pprint(json_)
assert json_['waves'] == waves.serialize()
# deserialize and ensure the dict's are correct
d_ = Biodegradation.deserialize(json_)
assert d_['waves'] == Waves.deserialize(json_['waves'])
d_['waves'] = waves
bio_deg.update_from_dict(d_)
assert bio_deg.waves is waves
def test_prepare_for_model_run():
bio_deg = Biodegradation(waves)
(sc, time_step) = weathering_data_arrays(bio_deg.array_types,
water)[:2]
assert 'bio_degradation' not in sc.mass_balance
bio_deg.prepare_for_model_run(sc)
assert 'bio_degradation' in sc.mass_balance
@pytest.mark.skipif(reason="refactoring sara out of gnome oil")
@pytest.mark.parametrize(('oil', 'temp', 'num_elems', 'expected_mb', 'on'),
[('oil_ans_mp', 311.15, 3, 0.0, True),
#('ABU SAFAH', 311.15, 3, 0.0, True),
('oil_bahia', 311.15, 3, 0.0, True),
('oil_ans_mp', 311.15, 3, np.nan, False)])
def test_bio_degradation_mass_balance(oil, temp, num_elems, expected_mb, on):
bio_deg = Biodegradation(waves)
(sc, time_step) = weathering_data_arrays(bio_deg.array_types,
water,
num_elements=num_elems)[:2]
model_time = (sc.spills[0].release_time +
timedelta(seconds=time_step))
bio_deg.on = on
bio_deg.prepare_for_model_run(sc)
bio_deg.initialize_data(sc, sc.num_released)
bio_deg.prepare_for_model_step(sc, time_step, model_time)
bio_deg.weather_elements(sc, time_step, model_time)
if on:
assert np.isclose(sc.mass_balance['bio_degradation'], expected_mb)
else:
assert 'bio_degradation' not in sc.mass_balance
@pytest.mark.skipif(reason="refactoring sara out of gnome oil")
@pytest.mark.parametrize(('oil', 'temp', 'expected_balance'),
# TODO - expected balance values:
[(test_oil, 288.7, -1),
#('ABU SAFAH', 288.7, -1),
('oil_ans_mp', 288.7, -1),
#('ALASKA NORTH SLOPE, OIL & GAS', 279.261,
#-1),
('oil_bahia', 288.7, -1)])
def METHOD_NAME(sample_model_fcn2,
oil, temp, expected_balance):
'''
Test biodegradation outputs after each step for a full model run. Dumps json
for 'weathering_model.json' in the dump directory.
'''
model = sample_model_weathering2(sample_model_fcn2, oil, temp)
# model.duration = timedelta(days=5)
model.environment += [Water(temp), wind, waves]
model.weatherers += Evaporation()
model.weatherers += NaturalDispersion()
# model.weatherers += Dissolution(waves)
model.weatherers += Biodegradation()
for sc in model.spills.items():
print(sc.__dict__.keys())
print(sc._data_arrays)
print('num spills:', len(sc.spills))
print('spill[0] amount:', sc.spills[0].amount)
original_amount = sc.spills[0].amount
# set make_default_refs to True for objects contained in model after adding
# objects to the model
model.set_make_default_refs(True)
model.setup_model_run()
bio_degradated = []
for step in model:
for sc in model.spills.items():
if step['step_num'] > 0:
assert (sc.mass_balance['bio_degradation'] > 0)
if 'bio_degradation' in sc.mass_balance:
bio_degradated.append(sc.mass_balance['bio_degradation'])
print('Bio degradated amount: {}'
.format(bio_degradated[-1]))
print('Fraction bio degradated after full run: {}'
.format(bio_degradated[-1] / original_amount))
assert bio_degradated[0] == 0.0
# assert np.isclose(bio_degradated[-1], expected_balance)
|
4,217 |
test simple wrong body
|
#!/usr/bin/env python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mock tests
Unit tests for the Mocks.
"""
from __future__ import absolute_import
__author__ = "[email protected] (Joe Gregorio)"
import os
import unittest
import httplib2
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError, UnexpectedBodyError, UnexpectedMethodError
from googleapiclient.http import HttpMock, RequestMockBuilder
DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
def datafile(filename):
return os.path.join(DATA_DIR, filename)
class Mocks(unittest.TestCase):
def setUp(self):
self.http = HttpMock(datafile("plus.json"), {"status": "200"})
self.zoo_http = HttpMock(datafile("zoo.json"), {"status": "200"})
def test_default_response(self):
requestBuilder = RequestMockBuilder({})
plus = build(
"plus",
"v1",
http=self.http,
requestBuilder=requestBuilder,
static_discovery=False,
)
activity = plus.activities().get(activityId="tag:blah").execute()
self.assertEqual({}, activity)
def test_simple_response(self):
requestBuilder = RequestMockBuilder(
{"plus.activities.get": (None, '{"foo": "bar"}')}
)
plus = build(
"plus",
"v1",
http=self.http,
requestBuilder=requestBuilder,
static_discovery=False,
)
activity = plus.activities().get(activityId="tag:blah").execute()
self.assertEqual({"foo": "bar"}, activity)
def test_unexpected_call(self):
requestBuilder = RequestMockBuilder({}, check_unexpected=True)
plus = build(
"plus",
"v1",
http=self.http,
requestBuilder=requestBuilder,
static_discovery=False,
)
try:
plus.activities().get(activityId="tag:blah").execute()
self.fail("UnexpectedMethodError should have been raised")
except UnexpectedMethodError:
pass
def test_simple_unexpected_body(self):
requestBuilder = RequestMockBuilder(
{"zoo.animals.insert": (None, '{"data": {"foo": "bar"}}', None)}
)
zoo = build(
"zoo",
"v1",
http=self.zoo_http,
requestBuilder=requestBuilder,
static_discovery=False,
)
try:
zoo.animals().insert(body="{}").execute()
self.fail("UnexpectedBodyError should have been raised")
except UnexpectedBodyError:
pass
def test_simple_expected_body(self):
requestBuilder = RequestMockBuilder(
{"zoo.animals.insert": (None, '{"data": {"foo": "bar"}}', "{}")}
)
zoo = build(
"zoo",
"v1",
http=self.zoo_http,
requestBuilder=requestBuilder,
static_discovery=False,
)
try:
zoo.animals().insert(body="").execute()
self.fail("UnexpectedBodyError should have been raised")
except UnexpectedBodyError:
pass
def METHOD_NAME(self):
requestBuilder = RequestMockBuilder(
{
"zoo.animals.insert": (
None,
'{"data": {"foo": "bar"}}',
'{"data": {"foo": "bar"}}',
)
}
)
zoo = build(
"zoo",
"v1",
http=self.zoo_http,
requestBuilder=requestBuilder,
static_discovery=False,
)
try:
zoo.animals().insert(body='{"data": {"foo": "blah"}}').execute()
self.fail("UnexpectedBodyError should have been raised")
except UnexpectedBodyError:
pass
def test_simple_matching_str_body(self):
requestBuilder = RequestMockBuilder(
{
"zoo.animals.insert": (
None,
'{"data": {"foo": "bar"}}',
'{"data": {"foo": "bar"}}',
)
}
)
zoo = build(
"zoo",
"v1",
http=self.zoo_http,
requestBuilder=requestBuilder,
static_discovery=False,
)
activity = zoo.animals().insert(body={"data": {"foo": "bar"}}).execute()
self.assertEqual({"foo": "bar"}, activity)
def test_simple_matching_dict_body(self):
requestBuilder = RequestMockBuilder(
{
"zoo.animals.insert": (
None,
'{"data": {"foo": "bar"}}',
{"data": {"foo": "bar"}},
)
}
)
zoo = build(
"zoo",
"v1",
http=self.zoo_http,
requestBuilder=requestBuilder,
static_discovery=False,
)
activity = zoo.animals().insert(body={"data": {"foo": "bar"}}).execute()
self.assertEqual({"foo": "bar"}, activity)
def test_errors(self):
errorResponse = httplib2.Response({"status": 500, "reason": "Server Error"})
requestBuilder = RequestMockBuilder(
{"plus.activities.list": (errorResponse, b"{}")}
)
plus = build(
"plus",
"v1",
http=self.http,
requestBuilder=requestBuilder,
static_discovery=False,
)
try:
activity = (
plus.activities().list(collection="public", userId="me").execute()
)
self.fail("An exception should have been thrown")
except HttpError as e:
self.assertEqual(b"{}", e.content)
self.assertEqual(500, e.resp.status)
self.assertEqual("Server Error", e.resp.reason)
if __name__ == "__main__":
unittest.main()
|
4,218 |
name
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetWebAppSlotConfigurationNamesResult',
'AwaitableGetWebAppSlotConfigurationNamesResult',
'get_web_app_slot_configuration_names',
'get_web_app_slot_configuration_names_output',
]
@pulumi.output_type
class GetWebAppSlotConfigurationNamesResult:
"""
Slot Config names azure resource.
"""
def __init__(__self__, app_setting_names=None, azure_storage_config_names=None, connection_string_names=None, id=None, kind=None, METHOD_NAME=None, system_data=None, type=None):
if app_setting_names and not isinstance(app_setting_names, list):
raise TypeError("Expected argument 'app_setting_names' to be a list")
pulumi.set(__self__, "app_setting_names", app_setting_names)
if azure_storage_config_names and not isinstance(azure_storage_config_names, list):
raise TypeError("Expected argument 'azure_storage_config_names' to be a list")
pulumi.set(__self__, "azure_storage_config_names", azure_storage_config_names)
if connection_string_names and not isinstance(connection_string_names, list):
raise TypeError("Expected argument 'connection_string_names' to be a list")
pulumi.set(__self__, "connection_string_names", connection_string_names)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", METHOD_NAME)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(METHOD_NAME="appSettingNames")
def app_setting_names(self) -> Optional[Sequence[str]]:
"""
List of application settings names.
"""
return pulumi.get(self, "app_setting_names")
@property
@pulumi.getter(METHOD_NAME="azureStorageConfigNames")
def azure_storage_config_names(self) -> Optional[Sequence[str]]:
"""
List of external Azure storage account identifiers.
"""
return pulumi.get(self, "azure_storage_config_names")
@property
@pulumi.getter(METHOD_NAME="connectionStringNames")
def connection_string_names(self) -> Optional[Sequence[str]]:
"""
List of connection string names.
"""
return pulumi.get(self, "connection_string_names")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource Id.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def kind(self) -> Optional[str]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
Resource Name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(METHOD_NAME="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
The system metadata relating to this resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetWebAppSlotConfigurationNamesResult(GetWebAppSlotConfigurationNamesResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetWebAppSlotConfigurationNamesResult(
app_setting_names=self.app_setting_names,
azure_storage_config_names=self.azure_storage_config_names,
connection_string_names=self.connection_string_names,
id=self.id,
kind=self.kind,
METHOD_NAME=self.METHOD_NAME,
system_data=self.system_data,
type=self.type)
def get_web_app_slot_configuration_names(METHOD_NAME: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWebAppSlotConfigurationNamesResult:
"""
Gets the names of app settings and connection strings that stick to the slot (not swapped).
:param str name: Name of the app.
:param str resource_group_name: Name of the resource group to which the resource belongs.
"""
__args__ = dict()
__args__['name'] = METHOD_NAME
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:web/v20201001:getWebAppSlotConfigurationNames', __args__, opts=opts, typ=GetWebAppSlotConfigurationNamesResult).value
return AwaitableGetWebAppSlotConfigurationNamesResult(
app_setting_names=pulumi.get(__ret__, 'app_setting_names'),
azure_storage_config_names=pulumi.get(__ret__, 'azure_storage_config_names'),
connection_string_names=pulumi.get(__ret__, 'connection_string_names'),
id=pulumi.get(__ret__, 'id'),
kind=pulumi.get(__ret__, 'kind'),
METHOD_NAME=pulumi.get(__ret__, 'name'),
system_data=pulumi.get(__ret__, 'system_data'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_web_app_slot_configuration_names)
def get_web_app_slot_configuration_names_output(METHOD_NAME: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetWebAppSlotConfigurationNamesResult]:
"""
Gets the names of app settings and connection strings that stick to the slot (not swapped).
:param str name: Name of the app.
:param str resource_group_name: Name of the resource group to which the resource belongs.
"""
...
|
4,219 |
send request
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, TYPE_CHECKING
from azure.core.rest import HttpRequest, HttpResponse
from azure.mgmt.core import ARMPipelineClient
from . import models as _models
from .._serialization import Deserializer, Serializer
from ._configuration import ResourceManagementClientConfiguration
from .operations import (
DeploymentOperationsOperations,
DeploymentsOperations,
Operations,
ProvidersOperations,
ResourceGroupsOperations,
ResourcesOperations,
TagsOperations,
)
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials import TokenCredential
class ResourceManagementClient: # pylint: disable=client-accepts-api-version-keyword,too-many-instance-attributes
"""Provides operations for working with resources and resource groups.
:ivar operations: Operations operations
:vartype operations: azure.mgmt.resource.resources.v2019_08_01.operations.Operations
:ivar deployments: DeploymentsOperations operations
:vartype deployments:
azure.mgmt.resource.resources.v2019_08_01.operations.DeploymentsOperations
:ivar providers: ProvidersOperations operations
:vartype providers: azure.mgmt.resource.resources.v2019_08_01.operations.ProvidersOperations
:ivar resources: ResourcesOperations operations
:vartype resources: azure.mgmt.resource.resources.v2019_08_01.operations.ResourcesOperations
:ivar resource_groups: ResourceGroupsOperations operations
:vartype resource_groups:
azure.mgmt.resource.resources.v2019_08_01.operations.ResourceGroupsOperations
:ivar tags: TagsOperations operations
:vartype tags: azure.mgmt.resource.resources.v2019_08_01.operations.TagsOperations
:ivar deployment_operations: DeploymentOperationsOperations operations
:vartype deployment_operations:
azure.mgmt.resource.resources.v2019_08_01.operations.DeploymentOperationsOperations
:param credential: Credential needed for the client to connect to Azure. Required.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The ID of the target subscription. Required.
:type subscription_id: str
:param base_url: Service URL. Default value is "https://management.azure.com".
:type base_url: str
:keyword api_version: Api Version. Default value is "2019-08-01". Note that overriding this
default value may result in unsupported behavior.
:paramtype api_version: str
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
"""
def __init__(
self,
credential: "TokenCredential",
subscription_id: str,
base_url: str = "https://management.azure.com",
**kwargs: Any
) -> None:
self._config = ResourceManagementClientConfiguration(
credential=credential, subscription_id=subscription_id, **kwargs
)
self._client: ARMPipelineClient = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)
self.deployments = DeploymentsOperations(self._client, self._config, self._serialize, self._deserialize)
self.providers = ProvidersOperations(self._client, self._config, self._serialize, self._deserialize)
self.resources = ResourcesOperations(self._client, self._config, self._serialize, self._deserialize)
self.resource_groups = ResourceGroupsOperations(self._client, self._config, self._serialize, self._deserialize)
self.tags = TagsOperations(self._client, self._config, self._serialize, self._deserialize)
self.deployment_operations = DeploymentOperationsOperations(
self._client, self._config, self._serialize, self._deserialize
)
def METHOD_NAME(self, request: HttpRequest, **kwargs: Any) -> HttpResponse:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = client._send_request(request)
<HttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.HttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
def close(self) -> None:
self._client.close()
def __enter__(self) -> "ResourceManagementClient":
self._client.__enter__()
return self
def __exit__(self, *exc_details: Any) -> None:
self._client.__exit__(*exc_details)
|
4,220 |
test value at returns expected value
|
# -------------------------------------------------------------------------------------------------
# Copyright (C) 2015-2023 Nautech Systems Pty Ltd. All rights reserved.
# https://nautechsystems.io
#
# Licensed under the GNU Lesser General Public License Version 3.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at https://www.gnu.org/licenses/lgpl-3.0.en.html
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------------------------------
from nautilus_trader.indicators.rsi import RelativeStrengthIndex
from nautilus_trader.test_kit.providers import TestInstrumentProvider
from nautilus_trader.test_kit.stubs.data import TestDataStubs
AUDUSD_SIM = TestInstrumentProvider.default_fx_ccy("AUD/USD")
class TestRelativeStrengthIndex:
def setup(self):
# Fixture Setup
self.rsi = RelativeStrengthIndex(10)
def test_name_returns_expected_string(self):
# Arrange, Act, Assert
assert self.rsi.name == "RelativeStrengthIndex"
def test_str_repr_returns_expected_string(self):
# Arrange, Act, Assert
assert str(self.rsi) == "RelativeStrengthIndex(10, EXPONENTIAL)"
assert repr(self.rsi) == "RelativeStrengthIndex(10, EXPONENTIAL)"
def test_period_returns_expected_value(self):
# Arrange, Act, Assert
assert self.rsi.period == 10
def test_initialized_without_inputs_returns_false(self):
# Arrange, Act, Assert
assert self.rsi.initialized is False
def test_initialized_with_required_inputs_returns_true(self):
# Arrange
self.rsi.update_raw(1.00000)
self.rsi.update_raw(2.00000)
self.rsi.update_raw(3.00000)
self.rsi.update_raw(4.00000)
self.rsi.update_raw(5.00000)
self.rsi.update_raw(6.00000)
self.rsi.update_raw(7.00000)
self.rsi.update_raw(8.00000)
self.rsi.update_raw(9.00000)
self.rsi.update_raw(10.00000)
# Act, Assert
assert self.rsi.initialized is True
def test_handle_bar_updates_indicator(self):
# Arrange
indicator = RelativeStrengthIndex(10)
bar = TestDataStubs.bar_5decimal()
# Act
indicator.handle_bar(bar)
# Assert
assert indicator.has_inputs
assert indicator.value == 1.0
def test_value_with_one_input_returns_expected_value(self):
# Arrange
self.rsi.update_raw(1.00000)
# Act, Assert
assert self.rsi.value == 1
def test_value_with_all_higher_inputs_returns_expected_value(self):
# Arrange
self.rsi.update_raw(1.00000)
self.rsi.update_raw(2.00000)
self.rsi.update_raw(3.00000)
self.rsi.update_raw(4.00000)
# Act, Assert
assert self.rsi.value == 1
def test_value_with_all_lower_inputs_returns_expected_value(self):
# Arrange
self.rsi.update_raw(3.00000)
self.rsi.update_raw(2.00000)
self.rsi.update_raw(1.00000)
self.rsi.update_raw(0.50000)
# Act, Assert
assert self.rsi.value == 0
def test_value_with_various_inputs_returns_expected_value(self):
# Arrange
self.rsi.update_raw(3.00000)
self.rsi.update_raw(2.00000)
self.rsi.update_raw(5.00000)
self.rsi.update_raw(6.00000)
self.rsi.update_raw(7.00000)
self.rsi.update_raw(6.00000)
# Act, Assert
assert self.rsi.value == 0.6837363325825265
def METHOD_NAME(self):
# Arrange
self.rsi.update_raw(3.00000)
self.rsi.update_raw(2.00000)
self.rsi.update_raw(5.00000)
self.rsi.update_raw(6.00000)
self.rsi.update_raw(7.00000)
self.rsi.update_raw(6.00000)
self.rsi.update_raw(6.00000)
self.rsi.update_raw(7.00000)
# Act, Assert
assert self.rsi.value == 0.7615344667662725
def test_reset_successfully_returns_indicator_to_fresh_state(self):
# Arrange
self.rsi.update_raw(1.00020)
self.rsi.update_raw(1.00030)
self.rsi.update_raw(1.00050)
# Act
self.rsi.reset()
# Assert
assert not self.rsi.initialized
assert self.rsi.value == 0
|
4,221 |
wrapper
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 Pyresample developers
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
"""Registry of resampler classes."""
from __future__ import annotations
import functools
import warnings
from functools import lru_cache
from typing import Callable, Type
from pyresample._compat import entry_points
from .resampler import Resampler
RESAMPLER_REGISTRY: dict[str, Type[Resampler]] = {}
def register_resampler(resampler_name: str, resampler_cls: Type[Resampler]) -> None:
"""Register :class:`~pyresample.future.resampler.Resampler` subclass for future use.
Args:
resampler_name:
Name of the resampler in the registry. This name can then be used
in functions like
:func:`~pyresample.future.resamplers.registry.create_resampler`.
resampler_cls:
Subclass of
:class:`~pyresample.future.resamplers.resampler.Resampler` that
will be added to the registry.
Examples:
Register a custom class::
register_resampler("my_resampler", MyResamplerClass)
Register as a plugin from third-party package (in your setup.py)::
entry_points = {
"pyresample.resamplers": [
"my_resampler = mypkg.mymodule:MyResamplerClass",
],
}
"""
if resampler_name in RESAMPLER_REGISTRY:
raise ValueError(
f"Resampler with name '{resampler_name} is already registered. "
"Use 'unregister_resampler' to make the name available.")
RESAMPLER_REGISTRY[resampler_name] = resampler_cls
def unregister_resampler(resampler_name: str) -> None:
"""Remove previously registered Resampler so it can't be used anymore."""
del RESAMPLER_REGISTRY[resampler_name]
def with_loaded_registry(callable: Callable) -> Callable:
"""Load and verify registry plugins before calling the decorated object.
Note: This decorator is structured so that plugin loading only happens
when the provided callable is used, not at import time.
"""
def METHOD_NAME(*args, **kwargs) -> Callable:
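# Lazily load plugin resamplers (cached via lru_cache) before delegating to the wrapped callable.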
_load_entry_point_resamplers()
if not RESAMPLER_REGISTRY:
warnings.warn("No builtin resamplers found. This probably means you "
"installed pyresample in editable mode. Try reinstalling "
"pyresample to ensure builtin resamplers are included.", stacklevel=2)
return callable(*args, **kwargs)
return functools.update_wrapper(METHOD_NAME, callable)
@lru_cache(1)
def _load_entry_point_resamplers():
"""Load setuptools plugins via entry_points.
Based on https://packaging.python.org/guides/creating-and-discovering-plugins/#using-package-metadata.
"""
discovered_plugins = entry_points(group="pyresample.resamplers")
for entry_point in discovered_plugins:
try:
loaded_resampler = entry_point.load()
except ImportError:
warnings.warn(f"Unable to load resampler from plugin: {entry_point.name}", stacklevel=3)
else:
register_resampler(entry_point.name, loaded_resampler)
@with_loaded_registry
def list_resamplers() -> list[str]:
"""Get sorted list of registered resamplers."""
resampler_names = sorted(RESAMPLER_REGISTRY.keys())
return resampler_names
@with_loaded_registry
def create_resampler(
src_geom,
dst_geom,
resampler: str = None,
cache=None,
**kwargs
) -> Resampler:
"""Create instance of a :class:`~pyresample.future.resampler.Resampler` with the provided arguments.
Args:
src_geom:
Geometry object defining the source geographic region that input
data lies on and will be resampled from.
dst_geom:
Geometry object defining the destination geographic region that
input data will be resampled to.
resampler:
The name of a resampler class that has been previously
registered with :func:`~pyresample.future.resampler_registry` and
will be instantiated. If not provided then a registered Resampler
class will be chosen based on the geometry types provided. This
is currently always the 'nearest' (nearest neighbor) resampler.
cache:
ResampleCache instance used by
the resampler to cache intermediate results for improved resampling
performance on multiple executions or future use of the resampler.
kwargs:
Additional keyword arguments to pass to the Resampler. Note that
most resamplers do not have additional keyword arguments on
creation, but instead have extra arguments passed when their
``resample`` methods are called.
"""
if resampler is None:
resampler = "nearest"
resampler_cls = RESAMPLER_REGISTRY[resampler]
return resampler_cls(src_geom, dst_geom, cache=cache, **kwargs)
|
4,222 |
start
|
import logging
import typing
import rdflib
import rdflib.namespace
from tqdm import tqdm
import ontomatch.utils.blackboard
import ontomatch.utils.util
import ontomatch.knowledge.geocoding
import ontomatch.knowledge.geoNames
class Agent():
def METHOD_NAME(self, addr:str, add_knowledge:bool, http:bool=False) -> typing.Tuple[bool, str]:
enriched, addr, graph = self.load_rdflib_graph(addr, add_knowledge)
# Write the graph into the blackboard and return the handle
# such that other agent can read (serialized) graph from the blackboard.
# This is done both cases where the graph was enriched with addition background knowledge or not
# Thus, in either case, the graph is available via the blackboard and instance matchers can read it
# However, the conversion of a rdflib graph to str does not work properly:
#graph_str = ontomatch.utils.util.serialize_graph_to_str(graph)
#handle = ontomatch.utils.util.call_agent_blackboard_for_writing(addr, graph_str, http)
# Thus, we use a hack here (we use format='xml' because owlready2 does not support loading from turtle file)
handle = ontomatch.utils.blackboard.Agent.create_handle(addr) + '.xml'
path = ontomatch.utils.blackboard.LOCAL_BLACKBOARD_DIR + '/' + handle
logging.info('storing graph to path=%s', path)
graph.serialize(path, format='xml')
handle_turtle = ontomatch.utils.blackboard.Agent.create_handle(addr) + '.ttl'
path = ontomatch.utils.blackboard.LOCAL_BLACKBOARD_DIR + '/' + handle_turtle
logging.info('additionally, storing graph in turtle format to path=%s', path)
graph.serialize(path, format='turtle')
return enriched, handle
def load_rdflib_graph(self, addr, add_knowledge):
frmt = 'xml'
if addr.endswith('.ttl'):
frmt = 'turtle'
graph = rdflib.Graph()
graph.parse(addr, format=frmt)
enriched = False
new_addr = addr
if add_knowledge and add_knowledge != 'False':
logging.info('adding knowledge for %s', addr)
enriched = self.add_knowledge_fct(graph, add_knowledge, addr)
if enriched:
if addr.endswith('.xml') or addr.endswith('.ttl'):
new_addr = addr[:-4] + '_enriched' + addr[-4:]
else:
new_addr = addr + '_enriched'
logging.info('added knowledge, enriched=%s, new_addr=%s', enriched, new_addr)
else:
logging.info('no knowledge was added, enriched=%s, new_addr=%s', enriched, new_addr)
return enriched, new_addr, graph
def add_knowledge_fct(self, graph, agent_name, addr):
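# Enrich the graph with WGS84 coordinates from a geocoding agent; returns True only if coordinates were added
# (i.e. the ontology did not already contain coordinate-like properties).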
query = '''
SELECT DISTINCT ?pred
WHERE {
?subj ?pred ?obj .
}'''
tokens_coord = ['coordinate', 'latitude', 'longitude', 'lat', 'long']
found_coordinate_props = False
result = graph.query(query)
for row in result:
uri = row.pred.n3().lower()
for t in tokens_coord:
if t in uri:
found_coordinate_props = True
break
if found_coordinate_props:
break
if found_coordinate_props:
logging.info('no background knowledge has been added')
return False
logging.info('adding geographic coordinates')
query = '''
PREFIX owl: <http://www.w3.org/2002/07/owl#>
SELECT DISTINCT ?subj
WHERE {
?subj a owl:NamedIndividual .
}'''
if agent_name == 'ontomatch.knowledge.geocoding':
geocoding_agent = ontomatch.knowledge.geocoding.Agent()
elif agent_name == 'ontomatch.knowledge.geoNames':
#TODO-AE 211101 configure country for geoNames
#geocoding_agent = knowledge.geoNames.Agent(country="Germany")
geocoding_agent = ontomatch.knowledge.geoNames.Agent(country="UnitedKingdom")
else:
logging.error('not found geocoding agent with name=%s', agent_name)
geo = rdflib.Namespace('http://www.w3.org/2003/01/geo/wgs84_pos#')
graph.bind('geo', geo )
count_total = 0
count_geo = 0
for row in tqdm(graph.query(query)):
count_total += 1
# rdflib v5
# address = graph.triples((row.subj, rdflib.SDO['address'], None))
# rdflib v6.0.2
address = graph.triples((row.subj, rdflib.term.URIRef('https://schema.org/address'), None))
address_tmp = [obj for _, _, obj in address]
address = address_tmp[0]
location = None
zipcode = None
for _, pred, obj in graph.triples((address, None, None)):
obj = obj.toPython()
if 'Locality' in pred.n3():
location = obj
elif 'postalCode' in pred.n3():
zipcode = obj
if location or zipcode:
if agent_name == 'ontomatch.knowledge.geocoding':
#latitude, longitude = geocoding_agent.query(location, zipcode)
latitude, longitude = geocoding_agent.query(location, None)
else:
if location is None:
continue
latitude, longitude = geocoding_agent.query(location)
if latitude and longitude:
latitude = rdflib.Literal(latitude, datatype=rdflib.namespace.XSD.float)
graph.add((row.subj, geo['lat'], latitude ))
longitude = rdflib.Literal(longitude, datatype=rdflib.namespace.XSD.float)
graph.add((row.subj, geo['long'], longitude ))
count_geo += 1
else:
pass
logging.info('finished adding geographic coordinates, enriched individuals=%s, total individuals=%s', count_geo, count_total)
return True
|
4,223 |
test delete file success
|
#
# Copyright (c) 2015, Arista Networks, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of Arista Networks nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARISTA NETWORKS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
# pylint: disable=R0904,C0103
#
import unittest
from test.server.server_test_lib import enable_logging, random_string
from unittest.mock import mock_open, patch
from ztpserver.repository import (
FileObject,
FileObjectError,
FileObjectNotFound,
Repository,
RepositoryError,
)
from ztpserver.serializers import SerializerError
class FileObjectUnitTests(unittest.TestCase):
@patch("ztpserver.serializers.load")
def test_read_success(self, m_load):
m_load.return_value = random_string()
obj = FileObject(random_string())
result = obj.read()
self.assertEqual(m_load.return_value, result)
@patch("ztpserver.serializers.load")
def test_read_failure(self, m_load):
m_load.side_effect = SerializerError
obj = FileObject(random_string())
self.assertRaises(FileObjectError, obj.read)
@classmethod
@patch("ztpserver.serializers.dump")
def test_write_success(cls, _):
obj = FileObject(random_string())
obj.write(random_string())
@patch("ztpserver.serializers.dump")
def test_write_failure(self, m_dump):
m_dump.side_effect = SerializerError
obj = FileObject(random_string())
self.assertRaises(FileObjectError, obj.write, random_string())
@patch("builtins.open", new_callable=mock_open, read_data=b"some data")
def test_hash_success(self, _):
obj = FileObject(random_string())
self.assertEqual("baf34551fecb48acc3da868eb85e1b6dac9de356", obj.hash())
class RepositoryUnitTests(unittest.TestCase):
@classmethod
@patch("os.makedirs")
def test_add_folder_success(cls, _):
store = Repository(random_string())
store.add_folder(random_string())
@patch("os.makedirs")
def test_add_folder_failure(self, m_makedirs):
m_makedirs.side_effect = OSError
store = Repository(random_string())
self.assertRaises(RepositoryError, store.add_folder, random_string())
@patch("ztpserver.repository.FileObject")
@patch("os.chmod")
def test_create_file_success(self, _, m_fileobj):
store = Repository(random_string())
store.add_file(random_string())
self.assertFalse(m_fileobj.return_value.write.called)
@patch("ztpserver.repository.FileObject")
@patch("os.chmod")
def test_create_file_with_contents_success(self, _, m_fileobj):
store = Repository(random_string())
store.add_file(random_string(), random_string())
self.assertTrue(m_fileobj.return_value.write.called)
@patch("ztpserver.repository.FileObject")
def test_create_file_failure(self, m_fileobj):
m_fileobj.return_value.write.side_effect = FileObjectError
store = Repository(random_string())
self.assertRaises(FileObjectError, store.add_file, random_string(), random_string())
@patch("os.path.exists")
def test_exists_success(self, _):
store = Repository(random_string())
result = store.exists(random_string())
self.assertTrue(result)
@patch("os.path.exists")
def test_exists_missing_file(self, m_exists):
m_exists.return_value = False
store = Repository(random_string())
result = store.exists(random_string())
self.assertFalse(result)
@patch("os.path.exists")
@patch("ztpserver.repository.FileObject")
def test_get_file_success(self, m_fileobj, _):
store = Repository(random_string())
store.get_file(random_string())
self.assertTrue(m_fileobj.called)
@patch("os.path.exists")
@patch("ztpserver.repository.FileObject")
def test_get_file_failure(self, m_fileobj, m_exists):
m_exists.return_value = False
store = Repository(random_string())
self.assertRaises(FileObjectNotFound, store.get_file, random_string())
self.assertFalse(m_fileobj.called)
@patch("os.remove")
def METHOD_NAME(self, m_remove):
store = Repository(random_string())
store.delete_file(random_string())
self.assertTrue(m_remove.called)
@patch("os.remove")
def test_delete_file_failure(self, m_remove):
m_remove.side_effect = OSError
store = Repository(random_string())
self.assertRaises(RepositoryError, store.delete_file, random_string())
if __name__ == "__main__":
enable_logging()
unittest.main()
|
4,224 |
to json
|
import itertools
import web
import json
from infogami.utils import delegate
from infogami.utils.view import safeint
from openlibrary.core.models import Thing
from openlibrary.plugins.upstream import utils
from openlibrary.plugins.worksearch.search import get_solr
from openlibrary.utils import (
find_olid_in_string,
olid_to_key,
)
def METHOD_NAME(d):
web.header('Content-Type', 'application/json')
return delegate.RawText(json.dumps(d))
class autocomplete(delegate.page):
path = "/_autocomplete"
fq = ['-type:edition']
fl = 'key,type,name,title,score'
olid_suffix: str | None = None
query = 'title:"{q}"^2 OR title:({q}*) OR name:"{q}"^2 OR name:({q}*)'
def db_fetch(self, key: str) -> Thing | None:
if thing := web.ctx.site.get(key):
return thing.as_fake_solr_record()
else:
return None
def doc_wrap(self, doc: dict):
"""Modify the returned solr document in place."""
if 'name' not in doc:
doc['name'] = doc.get('title')
def doc_filter(self, doc: dict) -> bool:
"""Exclude certain documents"""
return True
def GET(self):
return self.direct_get()
def direct_get(self, fq: list[str] | None = None):
i = web.input(q="", limit=5)
i.limit = safeint(i.limit, 5)
solr = get_solr()
# look for ID in query string here
q = solr.escape(i.q).strip()
embedded_olid = None
if self.olid_suffix:
embedded_olid = find_olid_in_string(q, self.olid_suffix)
if embedded_olid:
solr_q = f'key:"{olid_to_key(embedded_olid)}"'
else:
solr_q = self.query.format(q=q)
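# Illustrative: a query containing an OLID such as "OL45883W" short-circuits to an exact
# key lookup, while plain text falls back to the boosted title/name query above.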
fq = fq or self.fq
params = {
'q_op': 'AND',
'rows': i.limit,
**({'fq': fq} if fq else {}),
# limit the fields returned for better performance
'fl': self.fl,
}
data = solr.select(solr_q, **params)
docs = data['docs']
if embedded_olid and not docs:
# Grumble! Work not in solr yet. Create a dummy.
fake_doc = self.db_fetch(olid_to_key(embedded_olid))
if fake_doc:
docs = [fake_doc]
result_docs = []
for d in docs:
if self.doc_filter(d):
self.doc_wrap(d)
result_docs.append(d)
return METHOD_NAME(result_docs)
class languages_autocomplete(delegate.page):
path = "/languages/_autocomplete"
def GET(self):
i = web.input(q="", limit=5)
i.limit = safeint(i.limit, 5)
return METHOD_NAME(
list(itertools.islice(utils.autocomplete_languages(i.q), i.limit))
)
class works_autocomplete(autocomplete):
path = "/works/_autocomplete"
fq = ['type:work']
fl = 'key,title,subtitle,cover_i,first_publish_year,author_name,edition_count'
olid_suffix = 'W'
query = 'title:"{q}"^2 OR title:({q}*)'
def doc_filter(self, doc: dict) -> bool:
# Exclude orphaned editions from autocomplete results
# Note: Do this here instead of with an `fq=key:*W` for performance
# reasons.
return doc['key'][-1] == 'W'
def doc_wrap(self, doc: dict):
doc['full_title'] = doc['title']
if 'subtitle' in doc:
doc['full_title'] += ": " + doc['subtitle']
doc['name'] = doc.get('title')
class authors_autocomplete(autocomplete):
path = "/authors/_autocomplete"
fq = ['type:author']
fl = 'key,name,alternate_names,birth_date,death_date,work_count,top_work,top_subjects'
olid_suffix = 'A'
query = 'name:({q}*) OR alternate_names:({q}*) OR name:"{q}"^2 OR alternate_names:"{q}"^2'
def doc_wrap(self, doc: dict):
if 'top_work' in doc:
doc['works'] = [doc.pop('top_work')]
else:
doc['works'] = []
doc['subjects'] = doc.pop('top_subjects', [])
class subjects_autocomplete(autocomplete):
# can't use /subjects/_autocomplete because the subjects endpoint = /subjects/[^/]+
path = "/subjects_autocomplete"
fq = ['type:subject']
fl = 'key,name'
query = 'name:({q}*)'
def GET(self):
i = web.input(type="")
fq = self.fq
if i.type:
fq = fq + [f'subject_type:{i.type}']
return super().direct_get(fq=fq)
def setup():
"""Do required setup."""
pass
|
4,225 |
test pickle definition syntax error
|
import pickle
import pytest
from pint import (
DefinitionSyntaxError,
DimensionalityError,
LogarithmicUnitCalculusError,
OffsetUnitCalculusError,
PintError,
Quantity,
RedefinitionError,
UndefinedUnitError,
UnitRegistry,
)
from pint.errors import LOG_ERROR_DOCS_HTML, OFFSET_ERROR_DOCS_HTML
class TestErrors:
def test_definition_syntax_error(self):
ex = DefinitionSyntaxError("foo")
assert str(ex) == "foo"
def test_redefinition_error(self):
ex = RedefinitionError("foo", "bar")
assert str(ex) == "Cannot redefine 'foo' (bar)"
with pytest.raises(PintError):
raise ex
def test_undefined_unit_error(self):
x = ("meter",)
msg = "'meter' is not defined in the unit registry"
ex = UndefinedUnitError(x)
assert str(ex) == msg
ex = UndefinedUnitError(list(x))
assert str(ex) == msg
ex = UndefinedUnitError(set(x))
assert str(ex) == msg
with pytest.raises(PintError):
raise ex
def test_undefined_unit_error_multi(self):
x = ("meter", "kg")
msg = "('meter', 'kg') are not defined in the unit registry"
ex = UndefinedUnitError(x)
assert str(ex) == msg
ex = UndefinedUnitError(list(x))
assert str(ex) == msg
with pytest.raises(PintError):
raise ex
def test_dimensionality_error(self):
ex = DimensionalityError("a", "b")
assert str(ex) == "Cannot convert from 'a' to 'b'"
ex = DimensionalityError("a", "b", "c")
assert str(ex) == "Cannot convert from 'a' (c) to 'b' ()"
ex = DimensionalityError("a", "b", "c", "d", extra_msg=": msg")
assert str(ex) == "Cannot convert from 'a' (c) to 'b' (d): msg"
with pytest.raises(PintError):
raise ex
def test_offset_unit_calculus_error(self):
ex = OffsetUnitCalculusError(Quantity("1 kg")._units)
assert (
str(ex)
== "Ambiguous operation with offset unit (kilogram). See "
+ OFFSET_ERROR_DOCS_HTML
+ " for guidance."
)
ex = OffsetUnitCalculusError(Quantity("1 kg")._units, Quantity("1 s")._units)
assert (
str(ex)
== "Ambiguous operation with offset unit (kilogram, second). See "
+ OFFSET_ERROR_DOCS_HTML
+ " for guidance."
)
with pytest.raises(PintError):
raise ex
def test_logarithmic_unit_calculus_error(self):
Quantity = UnitRegistry(autoconvert_offset_to_baseunit=True).Quantity
ex = LogarithmicUnitCalculusError(Quantity("1 dB")._units)
assert (
str(ex)
== "Ambiguous operation with logarithmic unit (decibel). See "
+ LOG_ERROR_DOCS_HTML
+ " for guidance."
)
ex = LogarithmicUnitCalculusError(
Quantity("1 dB")._units, Quantity("1 octave")._units
)
assert (
str(ex)
== "Ambiguous operation with logarithmic unit (decibel, octave). See "
+ LOG_ERROR_DOCS_HTML
+ " for guidance."
)
with pytest.raises(PintError):
raise ex
def METHOD_NAME(self, subtests):
# OffsetUnitCalculusError raised from a custom ureg must be pickleable even if
# the ureg is not registered as the application ureg
ureg = UnitRegistry(filename=None)
ureg.define("foo = [bar]")
ureg.define("bar = 2 foo")
q1 = ureg.Quantity("1 foo")
q2 = ureg.Quantity("1 bar")
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
for ex in (
DefinitionSyntaxError("foo"),
RedefinitionError("foo", "bar"),
UndefinedUnitError("meter"),
DimensionalityError("a", "b", "c", "d", extra_msg=": msg"),
OffsetUnitCalculusError(
Quantity("1 kg")._units, Quantity("1 s")._units
),
OffsetUnitCalculusError(q1._units, q2._units),
):
with subtests.test(protocol=protocol, etype=type(ex)):
pik = pickle.dumps(ureg.Quantity("1 foo"), protocol)
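# Unpickling the Quantity presumably resolves units against the application registry,
# where "foo" is not defined, hence the expected UndefinedUnitError below.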
with pytest.raises(UndefinedUnitError):
pickle.loads(pik)
# assert False, ex.__reduce__()
ex2 = pickle.loads(pickle.dumps(ex, protocol))
print(ex)
print(ex2)
assert type(ex) is type(ex2)
assert ex == ex2
# assert ex.__dict__ == ex2.__dict__
assert str(ex) == str(ex2)
with pytest.raises(PintError):
raise ex
|
4,226 |
test multapses true
|
# -*- coding: utf-8 -*-
#
# test_connect_fixed_outdegree.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import unittest
import scipy.stats
import connect_test_base
import nest
HAVE_OPENMP = nest.ll_api.sli_func("is_threaded")
@unittest.skipIf(not HAVE_OPENMP, "NEST was compiled without multi-threading")
@nest.ll_api.check_stack
class TestFixedOutDegree(connect_test_base.ConnectTestBase):
# specify connection pattern and specific params
rule = "fixed_outdegree"
conn_dict = {"rule": rule}
# sizes of source-, target-population and outdegree for connection test
N1 = 50
N2 = 70
Nout = 10
conn_dict["outdegree"] = Nout
# sizes of source-, target-population and outdegree for statistical test
N_s = 10
N_t = 10
C = 10
# Critical values and number of iterations of two level test
stat_dict = {"alpha2": 0.05, "n_runs": 400}
# tested on each mpi process separately
def testErrorMessages(self):
got_error = False
conn_params = self.conn_dict.copy()
conn_params["allow_autapses"] = True
conn_params["allow_multapses"] = False
conn_params["outdegree"] = self.N2 + 1
try:
self.setUpNetwork(conn_params)
except nest.kernel.NESTError:
got_error = True
self.assertTrue(got_error)
def testOutDegree(self):
conn_params = self.conn_dict.copy()
conn_params["allow_autapses"] = False
conn_params["allow_multapses"] = False
self.setUpNetwork(conn_params)
# make sure the outdegree is right
M = connect_test_base.get_connectivity_matrix(self.pop1, self.pop2)
outds = np.sum(M, axis=0)
connect_test_base.mpi_assert(outds, self.Nout * np.ones(self.N1), self)
# make sure no connections were drawn from the target to the source
# population
M = connect_test_base.get_connectivity_matrix(self.pop2, self.pop1)
M_none = np.zeros((len(self.pop1), len(self.pop2)))
connect_test_base.mpi_assert(M, M_none, self)
def testStatistics(self):
conn_params = self.conn_dict.copy()
conn_params["allow_autapses"] = True
conn_params["allow_multapses"] = True
conn_params["outdegree"] = self.C
expected = connect_test_base.get_expected_degrees_fixedDegrees(self.C, "out", self.N_s, self.N_t)
pvalues = []
for i in range(self.stat_dict["n_runs"]):
connect_test_base.reset_seed(i + 1, self.nr_threads)
self.setUpNetwork(conn_dict=conn_params, N1=self.N_s, N2=self.N_t)
degrees = connect_test_base.get_degrees("in", self.pop1, self.pop2)
degrees = connect_test_base.gather_data(degrees)
if degrees is not None:
chi, p = connect_test_base.chi_squared_check(degrees, expected)
pvalues.append(p)
connect_test_base.mpi_barrier()
if degrees is not None:
ks, p = scipy.stats.kstest(pvalues, "uniform")
self.assertGreater(p, self.stat_dict["alpha2"])
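# Two-level test: under the null hypothesis the per-run chi-squared p-values are
# uniformly distributed, which the KS test above checks at significance level alpha2.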
def testAutapsesTrue(self):
conn_params = self.conn_dict.copy()
N = 10
conn_params["allow_multapses"] = False
# test that autapses exist
conn_params["outdegree"] = N
conn_params["allow_autapses"] = True
pop = nest.Create("iaf_psc_alpha", N)
nest.Connect(pop, pop, conn_params)
# make sure all connections do exist
M = connect_test_base.get_connectivity_matrix(pop, pop)
connect_test_base.mpi_assert(np.diag(M), np.ones(N), self)
def testAutapsesFalse(self):
conn_params = self.conn_dict.copy()
N = 10
conn_params["allow_multapses"] = False
# test that autapses were excluded
conn_params["outdegree"] = N - 1
conn_params["allow_autapses"] = False
pop = nest.Create("iaf_psc_alpha", N)
nest.Connect(pop, pop, conn_params)
# make sure all connections do exist
M = connect_test_base.get_connectivity_matrix(pop, pop)
connect_test_base.mpi_assert(np.diag(M), np.zeros(N), self)
def METHOD_NAME(self):
conn_params = self.conn_dict.copy()
N = 3
conn_params["allow_autapses"] = True
# test that multapses were drawn
conn_params["outdegree"] = N + 1
conn_params["allow_multapses"] = True
pop = nest.Create("iaf_psc_alpha", N)
nest.Connect(pop, pop, conn_params)
nr_conns = len(nest.GetConnections(pop, pop))
connect_test_base.mpi_assert(nr_conns, conn_params["outdegree"] * N, self)
def testMultapsesFalse(self):
conn_params = self.conn_dict.copy()
N = 3
conn_params["allow_autapses"] = True
# test that no multapses exist
conn_params["outdegree"] = N
conn_params["allow_multapses"] = False
pop = nest.Create("iaf_psc_alpha", N)
nest.Connect(pop, pop, conn_params)
M = connect_test_base.get_connectivity_matrix(pop, pop)
M = connect_test_base.gather_data(M)
if M is not None:
self.assertTrue(np.array_equal(M.flatten(), np.ones(N * N)))
def suite():
suite = unittest.TestLoader().loadTestsFromTestCase(TestFixedOutDegree)
return suite
def run():
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
if __name__ == "__main__":
run()
|
4,227 |
double click callback
|
"""Test render window interactor"""
import time
import pytest
import pyvista as pv
from pyvista import _vtk
def empty_callback():
return
@pytest.mark.needs_vtk_version(9, 1)
def test_observers():
pl = pv.Plotter()
# Key events
with pytest.raises(TypeError):
pl.add_key_event('w', 1)
# Callback must not have any empty arguments.
def callback(a, b, *, c, d=1.0):
pass
with pytest.raises(TypeError):
pl.add_key_event('w', callback)
key = 'w'
pl.add_key_event(key, empty_callback)
assert key in pl.iren._key_press_event_callbacks
pl.clear_events_for_key(key)
assert key not in pl.iren._key_press_event_callbacks
# attempting to clear non-existing events doesn't raise by default
pl.clear_events_for_key(key)
with pytest.raises(ValueError, match='No events found for key'):
pl.clear_events_for_key(key, raise_on_missing=True)
# Custom events
assert not pl.iren.interactor.HasObserver(
"PickEvent"
), "Subsequent PickEvent HasObserver tests are wrong if this fails."
# Add different observers
obs_move = pl.iren.add_observer(_vtk.vtkCommand.MouseMoveEvent, empty_callback)
obs_double1 = pl.iren.add_observer(_vtk.vtkCommand.LeftButtonDoubleClickEvent, empty_callback)
obs_double2 = pl.iren.add_observer("LeftButtonDoubleClickEvent", empty_callback)
obs_picks = tuple(pl.iren.add_observer("PickEvent", empty_callback) for _ in range(5))
pl.iren.add_observer("SelectionChangedEvent", empty_callback)
assert pl.iren._observers[obs_move] == "MouseMoveEvent"
assert pl.iren.interactor.HasObserver("MouseMoveEvent")
assert pl.iren._observers[obs_double1] == "LeftButtonDoubleClickEvent"
assert pl.iren._observers[obs_double2] == "LeftButtonDoubleClickEvent"
assert pl.iren.interactor.HasObserver("LeftButtonDoubleClickEvent")
assert all(pl.iren._observers[obs_pick] == "PickEvent" for obs_pick in obs_picks)
assert pl.iren.interactor.HasObserver("SelectionChangedEvent")
# Remove a specific observer
pl.iren.remove_observer(obs_move)
assert obs_move not in pl.iren._observers
# Remove all observers of a specific event
pl.iren.remove_observers(_vtk.vtkCommand.LeftButtonDoubleClickEvent)
assert obs_double1 not in pl.iren._observers and obs_double2 not in pl.iren._observers
# Remove all (remaining) observers
pl.iren.remove_observers()
assert len(pl.iren._observers) == 0
assert not pl.iren.interactor.HasObserver("PickEvent")
def test_clear_key_event_callbacks():
pl = pv.Plotter()
pl.reset_key_events()
@pytest.mark.skip_plotting
def test_track_mouse_position():
pl = pv.Plotter()
pl.track_mouse_position()
pl.show(auto_close=False)
assert pl.mouse_position is None
x, y = 10, 20
pl.iren._mouse_move(x, y)
assert pl.mouse_position == (x, y)
pl.iren.untrack_mouse_position()
assert "MouseMoveEvent" not in pl.iren._observers.values()
@pytest.mark.skip_plotting
def test_track_click_position_multi_render():
points = []
def callback(mouse_point):
points.append(mouse_point)
pl = pv.Plotter()
with pytest.raises(TypeError):
pl.track_click_position(side='dark')
pl.track_click_position(callback=callback, side='left', viewport=True)
pl.show(auto_close=False)
x, y = 10, 20
pl.iren._mouse_right_button_click(2 * x, 2 * y)
pl.iren._mouse_left_button_click(x, y)
assert points[0] == (x, y)
# disable and ensure that clicking is no longer being tracked
pl.untrack_click_position(side='left')
pl.iren._mouse_left_button_click(50, 50)
assert len(points) == 1
@pytest.mark.skip_plotting
def test_track_click_position():
events = []
def single_click_callback(mouse_position):
events.append("single")
def METHOD_NAME(mouse_position):
events.append("double")
pl = pv.Plotter()
pl.track_click_position(callback=single_click_callback, side='left', double=False)
pl.track_click_position(callback=METHOD_NAME, side='left', double=True)
pl.show(auto_close=False)
# Test single and double clicks:
pl.iren._mouse_left_button_click(10, 10)
assert len(events) == 1 and events.pop(0) == "single"
pl.iren._mouse_left_button_click(50, 50, count=2)
assert len(events) == 2 and events.pop(1) == "double" and events.pop(0) == "single"
# Test triple click behaviour:
pl.iren._mouse_left_button_click(10, 10, count=3)
assert len(events) == 3
assert events.pop(2) == "single" and events.pop(1) == "double" and events.pop(0) == "single"
@pytest.mark.skipif(
type(_vtk.vtkRenderWindowInteractor()).__name__
not in ("vtkWin32RenderWindowInteractor", "vtkXRenderWindowInteractor"),
reason='Other RenderWindowInteractors do not invoke TimerEvents during ProcessEvents.',
)
@pytest.mark.needs_vtk_version(
(9, 2),
reason='vtkXRenderWindowInteractor (Linux) does not invoke TimerEvents during ProcessEvents until VTK9.2.',
)
def test_timer():
# Create a normal interactor from the offscreen plotter (not generic,
# which is the default for offscreen rendering)
pl = pv.Plotter()
iren = pv.plotting.render_window_interactor.RenderWindowInteractor(pl)
iren.set_render_window(pl.render_window)
duration = 50 # Duration of created timers
delay = 5 * duration # Extra time we wait for the timers to fire at least once
events = []
def on_timer(obj, event):
# TimerEvent callback
events.append(event)
def process_events(iren, duration):
# Helper function to call process_events for the given duration (in milliseconds).
t = 1000 * time.time()
while 1000 * time.time() - t < duration:
iren.process_events()
# Setup interactor
iren.add_observer("TimerEvent", on_timer)
iren.initialize()
# Test one-shot timer (only fired once for the extended duration)
iren.create_timer(duration, repeating=False)
process_events(iren, delay)
assert len(events) == 1
# Test repeating timer (fired multiple times for extended duration)
repeating_timer = iren.create_timer(duration, repeating=True)
process_events(iren, 2 * delay)
assert len(events) >= 3
E = len(events)
# Test timer destruction (no more events fired)
iren.destroy_timer(repeating_timer)
process_events(iren, delay)
assert len(events) == E
@pytest.mark.skip_plotting
def test_poked_subplot_loc():
pl = pv.Plotter(shape=(2, 2), window_size=(800, 800))
pl.iren._mouse_left_button_press(200, 600)
assert tuple(pl.iren.get_event_subplot_loc()) == (0, 0)
pl.iren._mouse_left_button_press(200, 200)
assert tuple(pl.iren.get_event_subplot_loc()) == (1, 0)
pl.iren._mouse_left_button_press(600, 600)
assert tuple(pl.iren.get_event_subplot_loc()) == (0, 1)
pl.iren._mouse_left_button_press(600, 200)
assert tuple(pl.iren.get_event_subplot_loc()) == (1, 1)
pl.close()
@pytest.mark.skip_plotting
def test_poked_subplot_context(verify_image_cache):
pl = pv.Plotter(shape=(2, 2), window_size=(800, 800))
pl.iren._mouse_left_button_press(200, 600)
with pl.iren.poked_subplot():
pl.add_mesh(pv.Cone(), color=True)
pl.iren._mouse_left_button_press(200, 200)
with pl.iren.poked_subplot():
pl.add_mesh(pv.Cube(), color=True)
pl.iren._mouse_left_button_press(600, 600)
with pl.iren.poked_subplot():
pl.add_mesh(pv.Sphere(), color=True)
pl.iren._mouse_left_button_press(600, 200)
with pl.iren.poked_subplot():
pl.add_mesh(pv.Arrow(), color=True)
pl.show()
|
4,228 |
get file type
|
import json
import re
from django import template
from django.conf import settings
from django.templatetags.static import static
from django.utils.html import format_html
from wagtail.models import Page
register = template.Library()
@register.filter
def clean_whitespace(value):
return re.sub(r'\s+', '-', value)
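# Illustrative: "New York City" -> "New-York-City"; any run of whitespace collapses to one dash.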
@register.filter
def lookup(dict, arg):
return dict.get(arg, '')
@register.simple_tag()
def formatted_title(page):
if hasattr(page, 'formatted_title'):
if page.formatted_title:
return format_html(page.formatted_title)
else:
return format_html(page.title)
else:
return page.title
@register.filter()
def districts(max):
"""Returns a list of numbers 1-100 for district filter"""
districts = range(max)
return districts
@register.filter()
def child_page_count(page):
"""Returns the number of pages that are children of a particular page"""
count = Page.objects.child_of(page).live().count()
return "{} {}".format(count, 'result' if count == 1 else 'results')
@register.filter()
def prepend_non_digit(string):
"""
Prepends a non-digit prefix when the string starts with a digit.
Useful in combination with built-in slugify in order to create strings
from titles that can be used as HTML IDs, which cannot begin with digits.
"""
if string[:1].isdigit():
string = "go-to-{0}".format(string)
return string
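# Illustrative: "2023 reports" -> "go-to-2023 reports", which slugify can then turn into
# an HTML id that does not start with a digit.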
@register.filter()
def web_app_url(path):
"""
Appends a path to the web app URL as defined in the settings
This is useful for StaticBlocks, which don't have access to the entire context
"""
return "{}{}".format(settings.FEC_APP_URL, path)
@register.filter()
def highlight_matches(text):
"""
Replaces the highlight markers with span tags for Search.gov website search results.
Because format_html uses str.format, remove { and } because they are special characters.
"""
cleaned_text = text.replace("{", "").replace("}", "")
highlighted_text = cleaned_text.replace(
"\ue000", '<span class="t-highlight">'
).replace("\ue001", "</span>")
return format_html(highlighted_text)
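# Illustrative: "\ue000campaign\ue001 finance" is rendered as
# '<span class="t-highlight">campaign</span> finance'.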
@register.filter(name='splitlines')
def splitlines(value):
"""
Returns the value turned into a list.
"""
return value.splitlines()
@register.filter(name='get_touch_icon')
def get_touch_icon(content_section, dimension):
"""
Returns a path to a touch icon for the given dimension and content_section
"""
if content_section in ['legal', 'help']:
return static('img/favicon/{}/apple-touch-icon-{}.png'.format(content_section, dimension))
else:
return static('img/favicon/general/apple-touch-icon-{}.png'.format(dimension))
@register.filter(name='get_meta_description')
def get_meta_description(content_section):
"""
Returns a meta description for social media
"""
return 'Find what you need to know about the federal campaign finance process. \
Explore legal resources, campaign finance data, help for candidates and committees, and more.'
@register.simple_tag
def asset_for_js(path):
"""Looks up the hashed asset path in rev-manifest-js.json
If the path doesn't exist there, then just return the path to the static file
without a hash"""
key = '/static/js/{}'.format(path)
assets = json.load(open(settings.DIST_DIR + '/fec/static/js/rev-manifest-js.json'))
return assets[key] if key in assets else key
@register.simple_tag
def asset_for_css(key):
"""Looks up the hashed asset key in rev-manifest-css.json
If the key doesn't exist there, then just return the key to the static file
without a hash"""
assets = json.load(open(settings.DIST_DIR + '/fec/static/css/rev-manifest-css.json'))
if key in assets:
return '/static/css/' + assets[key]
else:
return key
@register.filter(name='remove_word')
def remove_word(str, words):
"""
Removes a word or words from a string
Returns a new string
"""
return str.replace(words, '')
@register.filter(name='dot_or_not')
def dot_or_not(str):
"""
Appends a dot, but only if the string represents a number
Specifically for footnote lists on ReportingDatesTables
"""
try:
int(str)
return '.'
except ValueError:
return ''
@register.filter(name='get_social_image_path')
def get_social_image_path(identifier):
# """
# Returns a path to a social image for the given content section
# TODO: combine with fec/data/templatetags/filters.py ?
# Called by meta-tags.html
# """
imageFilename = identifier
if identifier == 'advisory-opinions':
imageFilename = 'fec-pen'
elif identifier in ['commission-meetings', 'meeting-page']:
imageFilename = 'fec-microphones'
elif identifier == 'press-release':
imageFilename = 'fec-microphone'
elif identifier == 'weekly-digest':
imageFilename = 'fec-seal'
elif identifier == 'data':
imageFilename = 'fec-data'
elif identifier in ['legal', 'help']:
imageFilename = 'fec-' + identifier
else:
imageFilename = 'fec-logo'
return 'https://www.fec.gov/static/img/social/{}.png'.format(imageFilename)
@register.filter(name='get_file_type')
def METHOD_NAME(value):
file_extension = value.rsplit('.', 1)[1].upper()
xl = (file_extension == 'XLS') or (file_extension == 'XLSX')
file_type = "EXCEL" if xl else file_extension
return file_type
|
4,229 |
verify arcsin 1d
|
from lpython import i32, f32, f64
from numpy import empty, arcsin, arccos, sin, cos, sqrt, arctan, tan, degrees, radians, float32, float64
from math import pi
def verify1d_same(array: f32[:], result: f32[:], size: i32):
i: i32
eps: f32
eps = f32(1e-6)
for i in range(size):
assert abs(array[i] - result[i]) <= eps
def METHOD_NAME(array: f32[:], result: f32[:], size: i32):
i: i32
eps: f32
eps = f32(1e-6)
for i in range(size):
assert abs(arcsin(array[i])**f32(2.0) - result[i]) <= eps
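# result is expected to hold arcsin(x)**2 element-wise, so each entry is recomputed
# scalar-wise here and compared against the array-level computation within eps.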
def verify_arcsin_2d(array: f64[:, :], result: f64[:, :], size1:i32, size2:i32):
i: i32
j: i32
eps: f64
eps = 1e-12
for i in range(size1):
for j in range(size2):
assert abs(arcsin(array[i, j])**2.0 - result[i, j]) <= eps
def verify_arccos_1d(array: f32[:], result: f32[:], size: i32):
i: i32
eps: f32
eps = f32(1e-6)
for i in range(size):
assert abs(arccos(array[i])**f32(2.0) - result[i]) <= eps
def verify_arccos_2d(array: f64[:, :], result: f64[:, :], size1:i32, size2:i32):
i: i32
j: i32
eps: f64
eps = 1e-12
for i in range(size1):
for j in range(size2):
assert abs(arccos(array[i, j])**2.0 - result[i, j]) <= eps
def verify_arctan_1d(array: f32[:], result: f32[:], size: i32):
i: i32
eps: f32
eps = f32(1e-6)
for i in range(size):
assert abs(arctan(array[i])**f32(2.0) - result[i]) <= eps
def verify_arctan_2d(array: f64[:, :], result: f64[:, :], size1:i32, size2:i32):
i: i32
j: i32
eps: f64
eps = 1e-12
for i in range(size1):
for j in range(size2):
assert abs(arctan(array[i, j])**2.0 - result[i, j]) <= eps
def elemental_arcsin():
i: i32
j: i32
array1d: f32[201] = empty(201, dtype=float32)
arcsin1d: f32[201] = empty(201, dtype=float32)
for i in range(201):
array1d[i] = f32((i - 100)/100)
arcsin1d = arcsin(array1d) ** f32(2.0)
METHOD_NAME(array1d, arcsin1d, 201)
array2d: f64[64, 64] = empty((64, 64), dtype=float64)
arcsin2d: f64[64, 64] = empty((64, 64), dtype=float64)
for i in range(64):
for j in range(64): # 2048 = 64 * 32
array2d[i,j]= float((i * 64 + j - 2048 )/2048)
arcsin2d = arcsin(array2d) ** 2.0
verify_arcsin_2d(array2d, arcsin2d, 64, 64)
def elemental_arccos():
i: i32
j: i32
array1d: f32[201] = empty(201, dtype=float32)
arccos1d: f32[201] = empty(201, dtype=float32)
for i in range(201):
array1d[i] = f32((i - 100)/100)
arccos1d = arccos(array1d) ** f32(2.0)
verify_arccos_1d(array1d, arccos1d, 201)
array2d: f64[64, 64] = empty((64, 64), dtype=float64)
arccos2d: f64[64, 64] = empty((64, 64), dtype=float64)
for i in range(64):
for j in range(64): # 2048 = 64 * 32
array2d[i,j]= float((i * 64 + j - 2048 )/2048)
arccos2d = arccos(array2d) ** 2.0
verify_arccos_2d(array2d, arccos2d, 64, 64)
def elemental_arctan():
i: i32
j: i32
eps: f32
eps = f32(1e-6)
array1d: f32[201] = empty(201, dtype=float32)
array1d_rec: f32[201] = empty(201, dtype=float32)
arctan1d: f32[201] = empty(201, dtype=float32)
for i in range(201):
array1d[i] = f32(i - 100)
arctan1d = arctan(array1d) ** f32(2.0)
verify_arctan_1d(array1d, arctan1d, 201)
for i in range(201):
array1d[i] = f32(i + 1)
array1d_rec[i] = f32(1.0/f64(i+1))
arctan1d = arctan(array1d) + arctan(array1d_rec)
for i in range(201):
assert abs(arctan1d[i] - f32(f64(pi) / 2.0)) <= eps
array2d: f64[64, 64] = empty((64, 64), dtype=float64)
arctan2d: f64[64, 64] = empty((64, 64), dtype=float64)
for i in range(64):
for j in range(64):
array2d[i,j]= float(64*i + j - 2048)
arctan2d = arctan(array2d) ** 2.0
verify_arctan_2d(array2d, arctan2d, 64, 64)
def elemental_trig_identity():
i: i32
eps: f32
eps = f32(1e-6)
array1d: f32[201] = empty(201, dtype=float32)
observed1d: f32[201] = empty(201, dtype=float32)
for i in range(201):
array1d[i] = f32((i - 100)/100)
observed1d = arcsin(array1d) + arccos(array1d)
for i in range(201):
assert abs(observed1d[i] - f32(pi / 2.0)) <= eps
def elemental_reverse():
i: i32
array1d: f32[201] = empty(201, dtype=float32)
observed1d: f32[201] = empty(201, dtype=float32)
for i in range(201):
array1d[i] = f32((i - 100)/100)
observed1d = sin(arcsin(array1d))
verify1d_same(observed1d, array1d, 201)
observed1d = cos(arccos(array1d))
verify1d_same(observed1d, array1d, 201)
observed1d = tan(arctan(array1d))
verify1d_same(observed1d, array1d, 201)
observed1d = degrees(radians(array1d))
verify1d_same(observed1d, array1d, 201)
def elemental_trig_identity_extra():
i: i32
array1d: f32[201] = empty(201, dtype=float32)
array_x: f32[201] = empty(201, dtype=float32)
array_y: f32[201] = empty(201, dtype=float32)
for i in range(201):
array1d[i] = f32((i - 100)/100)
array_x = sin(arccos(array1d))
array_y = cos(arcsin(array1d))
for i in range(201):
array1d[i] = f32(1.0) - array1d[i] ** f32(2.0)
array1d = sqrt(array1d)
verify1d_same(array_x, array_y, 201)
verify1d_same(array_x, array1d, 201)
def elemental_degrees():
i: i32
j: i32
eps_32: f32
eps_64: f64
eps_32 = f32(1e-6)
eps_64 = 1e-12
array1d: f32[200] = empty(200, dtype=float32)
degrees1d: f32[200] = empty(200, dtype=float32)
for i in range(200):
array1d[i] = f32(i)
degrees1d = sin(degrees(array1d))
for i in range(200):
assert abs(degrees1d[i] - sin(degrees(array1d[i]))) <= eps_32
array2d: f64[64, 64] = empty((64, 64), dtype=float64)
degrees2d: f64[64, 64] = empty((64, 64), dtype=float64)
for i in range(64):
for j in range(64):
array2d[i,j]= float(i*64+j)
degrees2d = sin(degrees(array2d))
for i in range(64):
for j in range(64):
assert abs(degrees2d[i, j] - sin(degrees(array2d[i, j]))) <= eps_64
def elemental_radians():
i: i32
j: i32
eps_32: f32
eps_64: f64
eps_32 = f32(1e-6)
eps_64 = 1e-12
array1d: f32[200] = empty(200, dtype=float32)
radians1d: f32[200] = empty(200, dtype=float32)
for i in range(200):
array1d[i] = f32(i)
radians1d = cos(radians(array1d))
for i in range(200):
assert abs(radians1d[i] - cos(radians(array1d[i]))) <= eps_32
array2d: f64[64, 64] = empty((64, 64), dtype=float64)
radians2d: f64[64, 64] = empty((64, 64), dtype=float64)
for i in range(64):
for j in range(64):
array2d[i,j]= float(i*64+j)
radians2d = cos(radians(array2d))
for i in range(64):
for j in range(64):
assert abs(radians2d[i, j] - cos(radians(array2d[i, j]))) <= eps_64
elemental_arcsin()
elemental_arccos()
elemental_arctan()
elemental_degrees()
elemental_radians()
elemental_trig_identity()
elemental_reverse()
elemental_trig_identity_extra()
|
4,230 |
set up
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
End-to-end testing for the model chat crowdsourcing task.
"""
import glob
import json
import os
import unittest
import parlai.utils.testing as testing_utils
# Inputs
AGENT_DISPLAY_IDS = ('Speaker 1',)
AGENT_MESSAGES = [
("What are you nervous about?",),
("Do you have any plans for the weekend?",),
("Yeah that sounds great! I like to bike and try new restaurants.",),
("Oh, Italian food is great. I also love Thai and Indian.",),
(
"Hmmm - anything with peanuts? Or I like when they have spicy licorice-like herbs.",
),
]
AGENT_TASK_DATA = [
(
{
'problem_data_for_prior_message': {
"bucket_0": False,
"bucket_1": False,
"bucket_2": True,
"bucket_3": False,
"bucket_4": True,
"none_all_good": False,
}
},
)
] * len(AGENT_MESSAGES)
FORM_MESSAGES = ("",)
# No info is sent through the 'text' field when submitting the form
FORM_TASK_DATA = (
{
"final_rating": 4,
"problem_data_for_prior_message": {
"bucket_0": False,
"bucket_1": False,
"bucket_2": True,
"bucket_3": False,
"bucket_4": True,
"none_all_good": False,
},
},
)
try:
import parlai.crowdsourcing.tasks.model_chat.worlds as world_module
from parlai.crowdsourcing.tasks.model_chat.run import TASK_DIRECTORY
from parlai.crowdsourcing.tasks.model_chat.model_chat_blueprint import (
SharedModelChatTaskState,
)
from parlai.crowdsourcing.tasks.model_chat.utils import AbstractModelChatTest
class TestModelChat(AbstractModelChatTest):
"""
Test the model chat crowdsourcing task.
"""
# TODO: remove the inheritance from unittest.TestCase once this test uses pytest
# regressions. Also use a pytest.fixture to call self._setup() and
# self._teardown(), like the other tests use, instead of calling them with
# self.setUp() and self.tearDown()
def METHOD_NAME(self) -> None:
self._setup()
def tearDown(self) -> None:
self._teardown()
@testing_utils.retry(ntries=3)
def test_base_task(self):
with testing_utils.tempdir() as tmpdir:
# Paths
expected_states_folder = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'expected_states'
)
expected_chat_data_path = os.path.join(
expected_states_folder, 'final_chat_data.json'
)
expected_state_path = os.path.join(expected_states_folder, 'state.json')
model_opt_path = os.path.join(tmpdir, 'model_opts.yaml')
chat_data_folder = os.path.join(tmpdir, 'final_chat_data')
# Create a model opt file for the fixed-response model
with open(model_opt_path, 'w') as f:
model_opt_contents = f"""\
fixed_response: >
--model fixed_response
"""
f.write(model_opt_contents)
# Set up the config and database
num_convos = 10
overrides = [
f'mephisto.blueprint.conversations_needed_string=\"fixed_response:{num_convos:d}\"',
f'mephisto.blueprint.chat_data_folder={chat_data_folder}',
f'mephisto.blueprint.model_opt_path={model_opt_path}',
]
self._set_up_config(task_directory=TASK_DIRECTORY, overrides=overrides)
# Set up the operator and server
shared_state = SharedModelChatTaskState(world_module=world_module)
self._set_up_server(shared_state=shared_state)
# Check that the agent states are as they should be
self._get_live_run().task_runner.task_run.get_blueprint().use_onboarding = (
False
)
# Don't require onboarding for this test agent
with open(expected_state_path) as f:
expected_state = json.load(f)
self._test_agent_states(
num_agents=1,
agent_display_ids=AGENT_DISPLAY_IDS,
agent_messages=AGENT_MESSAGES,
form_messages=FORM_MESSAGES,
form_task_data=FORM_TASK_DATA,
expected_states=(expected_state,),
agent_task_data=AGENT_TASK_DATA,
)
# Check that the contents of the chat data file are as expected
with open(expected_chat_data_path) as f:
expected_chat_data = json.load(f)
results_path = list(
glob.glob(os.path.join(chat_data_folder, '*/*_*_*_sandbox.json'))
)[0]
with open(results_path) as f:
actual_chat_data = json.load(f)
self._check_final_chat_data(
actual_value=actual_chat_data, expected_value=expected_chat_data
)
def _remove_non_deterministic_keys(self, actual_state: dict) -> dict:
actual_state = super()._remove_non_deterministic_keys(actual_state)
# This chat task additionally includes a non-deterministic key in the first message
custom_data = self._get_custom_data(actual_state)
del custom_data['dialog'][0]['update_id']
return actual_state
except ImportError:
pass
if __name__ == "__main__":
unittest.main()
|
4,231 |
update
|
#!/usr/bin/env python3
"""
Test program to demonstrate the remote config interfaces in
rclone.
This program can simulate
rclone config create
rclone config update
rclone config password - NOT implemented yet
rclone authorize - NOT implemented yet
Pass the desired action as the first argument then any parameters.
This assumes passwords will be passed in the clear.
"""
import argparse
import subprocess
import json
from pprint import pprint
sep = "-"*60
def rpc(args, command, params):
"""
Run the command. This could be either over the CLI or the API.
Here we run over the API, either using `rclone rc --loopback`, which
is useful for making sure state is saved properly, or against an
existing rclone rcd if `--rc` is used on the command line.
"""
if args.rc:
import requests
kwargs = {
"json": params,
}
if args.user:
kwargs["auth"] = (args.user, args.password)
r = requests.post('http://localhost:5572/'+command, **kwargs)
if r.status_code != 200:
raise ValueError(f"RC command failed: Error {r.status_code}: {r.text}")
return r.json()
cmd = ["rclone", "-vv", "rc", "--loopback", command, "--json", json.dumps(params)]
result = subprocess.run(cmd, stdout=subprocess.PIPE, check=True)
return json.loads(result.stdout)
def parse_parameters(parameters):
"""
Parse the incoming key=value parameters into a dict
"""
d = {}
for param in parameters:
parts = param.split("=", 1)
if len(parts) != 2:
raise ValueError("bad format for parameter need name=value")
d[parts[0]] = parts[1]
return d
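# Illustrative: ["user=alice", "token=abc=123"] -> {"user": "alice", "token": "abc=123"};
# only the first "=" is split on, so values may themselves contain "=".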
def ask(opt):
"""
Ask the user to enter the option
This is the user interface for asking a user a question.
If there are examples they should be presented.
"""
while True:
if opt["IsPassword"]:
print("*** Inputting a password")
print(opt['Help'])
examples = opt.get("Examples", ())
or_number = ""
if len(examples) > 0:
or_number = " or choice number"
for i, example in enumerate(examples):
print(f"{i:3} value: {example['Value']}")
print(f" help: {example['Help']}")
print(f"Enter a {opt['Type']} value{or_number}. Press Enter for the default ('{opt['DefaultStr']}')")
print(f"{opt['Name']}> ", end='')
s = input()
if s == "":
return opt["DefaultStr"]
try:
i = int(s)
if i >= 0 and i < len(examples):
return examples[i]["Value"]
except ValueError:
pass
if opt["Exclusive"]:
for example in examples:
if s == example["Value"]:
return s
# Exclusive is set but the value isn't one of the accepted
# ones so continue
print("Value isn't one of the acceptable values")
else:
return s
return s
def create_or_update(what, args):
"""
Run the equivalent of rclone config create
or rclone config update
what should be either "create" or "update"
"""
print(what, args)
params = parse_parameters(args.parameters)
inp = {
"name": args.name,
"parameters": params,
"opt": {
"nonInteractive": True,
"all": args.all,
"noObscure": args.obscured_passwords,
"obscure": not args.obscured_passwords,
},
}
if what == "create":
inp["type"] = args.type
while True:
print(sep)
print("Input to API")
pprint(inp)
print(sep)
out = rpc(args, "config/"+what, inp)
print(sep)
print("Output from API")
pprint(out)
print(sep)
if out["State"] == "":
return
if out["Error"]:
print("Error", out["Error"])
result = ask(out["Option"])
inp["opt"]["state"] = out["State"]
inp["opt"]["result"] = result
inp["opt"]["continue"] = True
def create(args):
"""Run the equivalent of rclone config create"""
create_or_update("create", args)
def METHOD_NAME(args):
"""Run the equivalent of rclone config update"""
create_or_update("update", args)
def password(args):
"""Run the equivalent of rclone config password"""
print("password", args)
raise NotImplementedError()
def authorize(args):
"""Run the equivalent of rclone authorize"""
print("authorize", args)
raise NotImplementedError()
def main():
"""
Make the command line parser and dispatch
"""
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument("-a", "--all", action='store_true',
help="Ask all the config questions if set")
parser.add_argument("-o", "--obscured-passwords", action='store_true',
help="If set assume the passwords are obscured")
parser.add_argument("--rc", action='store_true',
help="If set use the rc (you'll need to start an rclone rcd)")
parser.add_argument("--user", type=str, default="",
help="Username for use with --rc")
parser.add_argument("--pass", type=str, default="", dest='password',
help="Password for use with --rc")
subparsers = parser.add_subparsers(dest='command', required=True)
subparser = subparsers.add_parser('create')
subparser.add_argument("name", type=str, help="Name of remote to create")
subparser.add_argument("type", type=str, help="Type of remote to create")
subparser.add_argument("parameters", type=str, nargs='*', help="Config parameters name=value name=value")
subparser.set_defaults(func=create)
subparser = subparsers.add_parser('update')
subparser.add_argument("name", type=str, help="Name of remote to update")
subparser.add_argument("parameters", type=str, nargs='*', help="Config parameters name=value name=value")
subparser.set_defaults(func=METHOD_NAME)
subparser = subparsers.add_parser('password')
subparser.add_argument("name", type=str, help="Name of remote to update")
subparser.add_argument("parameters", type=str, nargs='*', help="Config parameters name=value name=value")
subparser.set_defaults(func=password)
subparser = subparsers.add_parser('authorize')
subparser.set_defaults(func=authorize)
args = parser.parse_args()
args.func(args)
if __name__ == "__main__":
main()
|
4,232 |
test parse op selection invalid
|
import pytest
from dagster import In, asset, define_asset_job, in_process_executor, job, op, repository
from dagster._core.errors import DagsterExecutionStepNotFoundError, DagsterInvalidSubsetError
from dagster._core.selector.subset_selector import (
MAX_NUM,
Traverser,
clause_to_subset,
generate_dep_graph,
parse_clause,
parse_op_queries,
parse_step_selection,
)
@op
def return_one():
return 1
@op
def return_two():
return 2
@op(ins={"num1": In(), "num2": In()})
def add_nums(num1, num2):
return num1 + num2
@op(ins={"num": In()})
def multiply_two(num):
return num * 2
@op(ins={"num": In()})
def add_one(num):
return num + 1
@job(executor_def=in_process_executor)
def foo_job():
"""return_one ---> add_nums --> multiply_two --> add_one
return_two --|.
"""
add_one(multiply_two(add_nums(return_one(), return_two())))
def test_generate_dep_graph():
graph = generate_dep_graph(foo_job)
assert graph == {
"upstream": {
"return_one": set(),
"return_two": set(),
"add_nums": {"return_one", "return_two"},
"multiply_two": {"add_nums"},
"add_one": {"multiply_two"},
},
"downstream": {
"return_one": {"add_nums"},
"return_two": {"add_nums"},
"add_nums": {"multiply_two"},
"multiply_two": {"add_one"},
"add_one": set(),
},
}
def test_traverser():
graph = generate_dep_graph(foo_job)
traverser = Traverser(graph)
assert traverser.fetch_upstream(item_name="return_one", depth=1) == set()
assert traverser.fetch_downstream(item_name="return_one", depth=1) == {"add_nums"}
assert traverser.fetch_upstream(item_name="multiply_two", depth=0) == set()
assert traverser.fetch_upstream(item_name="multiply_two", depth=2) == {
"add_nums",
"return_one",
"return_two",
}
assert traverser.fetch_downstream(item_name="multiply_two", depth=2) == {"add_one"}
def test_traverser_invalid():
graph = generate_dep_graph(foo_job)
traverser = Traverser(graph)
assert traverser.fetch_upstream(item_name="some_solid", depth=1) == set()
def test_parse_clause():
assert parse_clause("some_solid") == (0, "some_solid", 0)
assert parse_clause("*some_solid") == (MAX_NUM, "some_solid", 0)
assert parse_clause("some_solid+") == (0, "some_solid", 1)
assert parse_clause("+some_solid+") == (1, "some_solid", 1)
assert parse_clause("*some_solid++") == (MAX_NUM, "some_solid", 2)
def test_parse_clause_invalid():
assert parse_clause("1+some_solid") is None
def test_parse_op_selection_single():
op_selection_single = parse_op_queries(foo_job, ["add_nums"])
assert len(op_selection_single) == 1
assert op_selection_single == {"add_nums"}
op_selection_star = parse_op_queries(foo_job, ["add_nums*"])
assert len(op_selection_star) == 3
assert set(op_selection_star) == {"add_nums", "multiply_two", "add_one"}
op_selection_both = parse_op_queries(foo_job, ["*add_nums+"])
assert len(op_selection_both) == 4
assert set(op_selection_both) == {
"return_one",
"return_two",
"add_nums",
"multiply_two",
}
def test_parse_op_selection_multi():
op_selection_multi_disjoint = parse_op_queries(foo_job, ["return_one", "add_nums+"])
assert len(op_selection_multi_disjoint) == 3
assert set(op_selection_multi_disjoint) == {
"return_one",
"add_nums",
"multiply_two",
}
op_selection_multi_overlap = parse_op_queries(foo_job, ["*add_nums", "return_one+"])
assert len(op_selection_multi_overlap) == 3
assert set(op_selection_multi_overlap) == {
"return_one",
"return_two",
"add_nums",
}
with pytest.raises(
DagsterInvalidSubsetError,
match="No qualified ops to execute found for op_selection",
):
parse_op_queries(foo_job, ["*add_nums", "a"])
def METHOD_NAME():
with pytest.raises(
DagsterInvalidSubsetError,
match="No qualified ops to execute found for op_selection",
):
parse_op_queries(foo_job, ["some,solid"])
step_deps = {
"return_one": set(),
"return_two": set(),
"add_nums": {"return_one", "return_two"},
"multiply_two": {"add_nums"},
"add_one": {"multiply_two"},
}
@pytest.mark.parametrize(
"clause,expected_subset",
[
("a", "a"),
("b+", "b,c,d"),
("+f", "f,d,e"),
("++f", "f,d,e,c,a,b"),
("+++final", "final,a,d,start,b"),
("b++", "b,c,d,e,f,final"),
("start*", "start,a,d,f,final"),
],
)
def test_clause_to_subset(clause, expected_subset):
graph = {
"upstream": {
"start": set(),
"a": {"start"},
"b": set(),
"c": {"b"},
"d": {"a", "b"},
"e": {"c"},
"f": {"e", "d"},
"final": {"a", "d"},
},
"downstream": {
"start": {"a"},
"b": {"c", "d"},
"a": {"final", "d"},
"c": {"e"},
"d": {"final", "f"},
"e": {"f"},
},
}
assert set(clause_to_subset(graph, clause, lambda x: x)) == set(expected_subset.split(","))
def test_parse_step_selection_single():
step_selection_single = parse_step_selection(step_deps, ["add_nums"])
assert len(step_selection_single) == 1
assert step_selection_single == {"add_nums"}
step_selection_star = parse_step_selection(step_deps, ["add_nums*"])
assert len(step_selection_star) == 3
assert set(step_selection_star) == {
"add_nums",
"multiply_two",
"add_one",
}
step_selection_both = parse_step_selection(step_deps, ["*add_nums+"])
assert len(step_selection_both) == 4
assert set(step_selection_both) == {
"return_one",
"return_two",
"add_nums",
"multiply_two",
}
def test_parse_step_selection_multi():
step_selection_multi_disjoint = parse_step_selection(step_deps, ["return_one", "add_nums+"])
assert len(step_selection_multi_disjoint) == 3
assert set(step_selection_multi_disjoint) == {
"return_one",
"add_nums",
"multiply_two",
}
step_selection_multi_overlap = parse_step_selection(step_deps, ["*add_nums", "return_one+"])
assert len(step_selection_multi_overlap) == 3
assert set(step_selection_multi_overlap) == {
"return_one",
"return_two",
"add_nums",
}
with pytest.raises(
DagsterExecutionStepNotFoundError,
match="Step selection refers to unknown step: a",
):
parse_step_selection(step_deps, ["*add_nums", "a"])
def test_parse_step_selection_invalid():
with pytest.raises(
DagsterInvalidSubsetError,
match="No qualified steps to execute found for step_selection",
):
parse_step_selection(step_deps, ["1+some_solid"])
@asset
def my_asset(context):
assert context.job_def.asset_selection_data is not None
return 1
@asset
def asset_2(my_asset):
return my_asset
@repository
def asset_house():
return [
my_asset,
asset_2,
define_asset_job("asset_selection_job", selection="*", executor_def=in_process_executor),
]
def get_asset_selection_job():
return asset_house.get_job("asset_selection_job")
|
4,233 |
publish target info
|
#!/usr/bin/env python3
import sys
import cv2
import image_geometry
import mil_ros_tools
import numpy as np
import rospy
from geometry_msgs.msg import Pose2D
from mil_msgs.msg import RangeStamped
from std_msgs.msg import Header
from subjugator_msgs.srv import VisionRequest2D, VisionRequest2DResponse
def contour_sort(l_arr):
"""Sort contours by area largest to smallest."""
length = len(l_arr)
if length <= 1:
return l_arr
else:
pivot = l_arr.pop(int(length / 2))
less, more = [], []
for x in l_arr:
if cv2.contourArea(x) >= cv2.contourArea(pivot):
less.append(x)
else:
more.append(x)
return [*contour_sort(less), pivot, *contour_sort(more)]
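# Recursive quicksort keyed on cv2.contourArea, ordering contours from largest to smallest;
# note that l_arr is mutated via pop() on each call.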
def evaluate_bin(roi):
"""Check for orangeness."""
b1 = 163
g1 = 145
r1 = 223
b2 = 251
g2 = 240
r2 = 255
lower_value = np.array([b1, g1, r1], np.uint8)
upper_value = np.array([b2, g2, r2], np.uint8)
temp = np.array(0)
mask = cv2.inRange(roi, lower_value, upper_value)
bimg = cv2.bitwise_or(mask, temp)
orangeness = bimg.mean()
return orangeness
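# The mask from cv2.inRange is binary (0 or 255 per pixel), so its mean scales with the
# fraction of ROI pixels that fall inside the configured BGR range.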
class BinFinder:
def __init__(self):
rospy.sleep(1.0)
self.bin_type = None
self.last_image = None
self.last_draw_image = None
self.last_image_time = None
self.camera_model = None
self.pose_service = rospy.Service(
"vision/bin/2D",
VisionRequest2D,
self.request_bin,
)
self.image_sub = mil_ros_tools.Image_Subscriber(
"/down/left/image_rect_color",
self.image_cb,
)
self.image_pub = mil_ros_tools.Image_Publisher("/vision/bin_2d/target_info")
self.range = None
self.range_sub = rospy.Subscriber(
"dvl/range",
RangeStamped,
self.range_callback,
)
# Occasional status publisher
self.timer = rospy.Timer(rospy.Duration(1.0), self.METHOD_NAME)
self.bins = {
"orange": "/color/bin/orange",
"norange": "/color/bin/norange",
}
def request_bin(self, srv):
self.bin_type = srv.target_name
if self.last_image is not None:
response = self.find_single_bin(np.copy(self.last_image), srv.target_name)
if response is False or response is None:
rospy.loginfo("did not find")
resp = VisionRequest2DResponse(
header=mil_ros_tools.make_header(frame="/down"),
found=False,
)
else:
# Fill in
center, radius = response
resp = VisionRequest2DResponse(
header=Header(stamp=self.last_image_time, frame_id="/down"),
pose=Pose2D(x=center[0], y=center[1], theta=radius),
max_x=self.last_image.shape[0],
max_y=self.last_image.shape[1],
camera_info=self.image_sub.camera_info,
found=True,
)
return resp
def METHOD_NAME(self, *args):
if self.last_image is None:
return
self.find_bins(np.copy(self.last_image), self.bin_type)
if self.last_draw_image is not None:
self.image_pub.publish(self.last_draw_image)
def image_cb(self, image):
"""Hang on to last image"""
self.last_image = image
self.last_image_time = self.image_sub.last_image_time
if self.camera_model is None:
if self.image_sub.camera_info is None:
return
self.camera_model = image_geometry.PinholeCameraModel()
self.camera_model.fromCameraInfo(self.image_sub.camera_info)
def find_single_bin(self, img, bin_type):
"""Find the bins and their orientations."""
assert (
bin_type in self.bins
), f"Bins_2d does not know bin color: {bin_type}"
if img is not None:
kernel = np.ones((2, 2), np.float32) / 4
img = cv2.filter2D(img, -1, kernel)
debug_image = np.copy(img)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, img = cv2.threshold(img, 254, 255, cv2.THRESH_BINARY)
contours, hierarchy = cv2.findContours(
np.copy(img),
cv2.RETR_TREE,
cv2.CHAIN_APPROX_SIMPLE,
)
contours = contour_sort(contours)
"""This finds the bins and looks for the one that is orange or is not
orange. Each bin is given an orangeness rating and either the most
or least orange bin is selected.
"""
if len(contours) > 0:
bins = 2
orangeness = 0 if self.bin_type == "orange" else 100000
if len(contours) < bins:
bins = len(contours)
for i in range(0, bins + 1):
x, y, w, h = cv2.boundingRect(contours[i])
roi = debug_image[y : y + h, x : x + w]
temp = evaluate_bin(roi)
if (orangeness > temp and self.bin_type == "norange") or (
orangeness < temp and self.bin_type == "orange"
):
orangeness = temp
M = cv2.moments(contours[i])
cx = int(M["m10"] / M["m00"])
cy = int(M["m01"] / M["m00"])
img_h, img_w, _ = np.shape(debug_image)
point = (cx, cy)
(_, _), (_, _), rad = cv2.fitEllipse(contours[i])
cv2.rectangle(debug_image, (x, y), (x + w, y + h), (127), 2)
ellipse = cv2.fitEllipse(contours[i])
cv2.ellipse(debug_image, ellipse, (170), 2)
if point is not None:
cv2.circle(debug_image, point, 5, (0, 0, 255), -1)
pixels = np.copy(point)
point = [cx - (img_w / 2), cy - (img_h / 2)]
tuple_center = (point[0], point[1], 0)
rad = ((rad) * np.pi) / 180.0
P = np.asarray(self.image_sub.camera_info.P).reshape(3, 4)
_P = np.linalg.pinv(P)
pixels = np.asarray([pixels[0], pixels[1], 1])
ray = _P.dot(pixels)
tuple_center = self.range * ray
tuple_center[2] = (
-tuple_center[2] + 0.45 + 1
) # height of the bin and some buffer
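# Back-project the pixel through the pseudo-inverse of the camera matrix P to get a viewing
# ray, scale it by the DVL range, and flip/offset z by the bin height plus a small buffer
# to estimate the bin's 3-D position.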
self.last_draw_image = debug_image
return tuple_center, rad
def range_callback(self, msg):
"""Handle range data grabbed from dvl"""
self.range = msg.range
def find_bins(self, img, srv):
return self.find_single_bin(img, self.bin_type)
def main(args):
BinFinder()
rospy.spin()
if __name__ == "__main__":
rospy.init_node("bin_vision")
main(sys.argv)
|
4,234 |
transform legacy config
|
#
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
from enum import Enum
from typing import Any, List, Mapping, Optional, Type, Union
from airbyte_cdk.sources.file_based.config.avro_format import AvroFormat
from airbyte_cdk.sources.file_based.config.csv_format import CsvFormat
from airbyte_cdk.sources.file_based.config.jsonl_format import JsonlFormat
from airbyte_cdk.sources.file_based.config.parquet_format import ParquetFormat
from airbyte_cdk.sources.file_based.exceptions import ConfigValidationError, FileBasedSourceError
from airbyte_cdk.sources.file_based.schema_helpers import type_mapping_to_jsonschema
from pydantic import BaseModel, Field, validator
PrimaryKeyType = Optional[Union[str, List[str]]]
VALID_FILE_TYPES: Mapping[str, Type[BaseModel]] = {"avro": AvroFormat, "csv": CsvFormat, "jsonl": JsonlFormat, "parquet": ParquetFormat}
class ValidationPolicy(Enum):
emit_record = "Emit Record"
skip_record = "Skip Record"
wait_for_discover = "Wait for Discover"
class FileBasedStreamConfig(BaseModel):
name: str = Field(title="Name", description="The name of the stream.")
file_type: str = Field(title="File Type", description="The data file type that is being extracted for a stream.")
globs: Optional[List[str]] = Field(
title="Globs",
description='The pattern used to specify which files should be selected from the file system. For more information on glob pattern matching look <a href="https://en.wikipedia.org/wiki/Glob_(programming)">here</a>.',
)
legacy_prefix: Optional[str] = Field(
title="Legacy Prefix",
description="The path prefix configured in v3 versions of the S3 connector. This option is deprecated in favor of a single glob.",
airbyte_hidden=True,
)
validation_policy: ValidationPolicy = Field(
title="Validation Policy",
description="The name of the validation policy that dictates sync behavior when a record does not adhere to the stream schema.",
default=ValidationPolicy.emit_record,
)
input_schema: Optional[str] = Field(
title="Input Schema",
description="The schema that will be used to validate records extracted from the file. This will override the stream schema that is auto-detected from incoming files.",
)
primary_key: Optional[str] = Field(
title="Primary Key", description="The column or columns (for a composite key) that serves as the unique identifier of a record."
)
days_to_sync_if_history_is_full: int = Field(
title="Days To Sync If History Is Full",
description="When the state history of the file store is full, syncs will only read files that were last modified in the provided day range.",
default=3,
)
format: Optional[Union[AvroFormat, CsvFormat, JsonlFormat, ParquetFormat]] = Field(
title="Format",
description="The configuration options that are used to alter how to read incoming files that deviate from the standard formatting.",
)
schemaless: bool = Field(
title="Schemaless",
description="When enabled, syncs will not validate or structure records against the stream's schema.",
default=False,
)
@validator("file_type", pre=True)
def validate_file_type(cls, v: str) -> str:
if v not in VALID_FILE_TYPES:
raise ValueError(f"Format filetype {v} is not a supported file type")
return v
@classmethod
def METHOD_NAME(cls, legacy_config: Mapping[str, Any], file_type: str) -> Mapping[str, Any]:
if file_type.casefold() not in VALID_FILE_TYPES:
raise ValueError(f"Format filetype {file_type} is not a supported file type")
if file_type.casefold() == "parquet" or file_type.casefold() == "avro":
legacy_config = cls._transform_legacy_parquet_or_avro_config(legacy_config)
return {file_type: VALID_FILE_TYPES[file_type.casefold()].parse_obj({key: val for key, val in legacy_config.items()})}
@classmethod
def _transform_legacy_parquet_or_avro_config(cls, config: Mapping[str, Any]) -> Mapping[str, Any]:
"""
The legacy parquet parser converts decimal fields to numbers. This isn't desirable because it can lead to precision loss.
To avoid introducing a breaking change with the new default, we will set decimal_as_float to True in the legacy configs.
"""
filetype = config.get("filetype")
if filetype != "parquet" and filetype != "avro":
raise ValueError(
f"Expected {filetype} format, got {config}. This is probably due to a CDK bug. Please reach out to the Airbyte team for support."
)
if config.get("decimal_as_float"):
raise ValueError(
f"Received legacy {filetype} file form with 'decimal_as_float' set. This is unexpected. Please reach out to the Airbyte team for support."
)
return {**config, **{"decimal_as_float": True}}
@validator("input_schema", pre=True)
def validate_input_schema(cls, v: Optional[str]) -> Optional[str]:
if v:
if type_mapping_to_jsonschema(v):
return v
else:
raise ConfigValidationError(FileBasedSourceError.ERROR_PARSING_USER_PROVIDED_SCHEMA)
return None
def get_input_schema(self) -> Optional[Mapping[str, Any]]:
"""
User defined input_schema is defined as a string in the config. This method takes the string representation
and converts it into a Mapping[str, Any] which is used by file-based CDK components.
"""
if self.input_schema:
schema = type_mapping_to_jsonschema(self.input_schema)
if not schema:
raise ValueError(f"Unable to create JSON schema from input schema {self.input_schema}")
return schema
return None
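# --- Hedged usage sketch (illustration only, not part of the original module) ---
# Shows how a legacy parquet config might be passed through METHOD_NAME; the minimal
# legacy config below is an assumption, real legacy configs typically carry more options.
def _transform_legacy_parquet_example() -> Mapping[str, Any]:
    legacy_parquet_config = {"filetype": "parquet"}
    # Expected shape: {"parquet": ParquetFormat(..., decimal_as_float=True)}
    return FileBasedStreamConfig.METHOD_NAME(legacy_parquet_config, "parquet")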
|
4,235 |
get measurement
|
# coding=utf-8
#
# Created in part with code with the following copyright:
#
# Copyright (c) 2014 D. Alex Gray
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import time
import copy
from mycodo.inputs.base_input import AbstractInput
from mycodo.inputs.sensorutils import calculate_dewpoint
from mycodo.inputs.sensorutils import calculate_vapor_pressure_deficit
# Measurements
measurements_dict = {
0: {
'measurement': 'temperature',
'unit': 'C'
},
1: {
'measurement': 'humidity',
'unit': 'percent'
},
2: {
'measurement': 'dewpoint',
'unit': 'C'
},
3: {
'measurement': 'vapor_pressure_deficit',
'unit': 'Pa'
}
}
# Input information
INPUT_INFORMATION = {
'input_name_unique': 'HTU21D',
'input_manufacturer': 'TE Connectivity',
'input_name': 'HTU21D',
'input_library': 'pigpio',
'measurements_name': 'Humidity/Temperature',
'measurements_dict': measurements_dict,
'url_manufacturer': 'https://www.te.com/usa-en/product-CAT-HSC0004.html',
'url_datasheet': 'https://www.te.com/commerce/DocumentDelivery/DDEController?Action=showdoc&DocId=Data+Sheet%7FHPC199_6%7FA6%7Fpdf%7FEnglish%7FENG_DS_HPC199_6_A6.pdf%7FCAT-HSC0004',
'url_product_purchase': 'https://www.adafruit.com/product/1899',
'options_enabled': [
'i2c_location',
'measurements_select',
'period',
'pre_output'
],
'options_disabled': ['interface'],
'dependencies_module': [
('internal', 'file-exists /opt/mycodo/pigpio_installed', 'pigpio'),
('pip-pypi', 'pigpio', 'pigpio==1.78')
],
'interfaces': ['I2C'],
'i2c_location': ['0x40'],
'i2c_address_editable': False
}
class InputModule(AbstractInput):
"""
A sensor support class that measures the HTU21D's humidity and temperature
and calculates the dew point
"""
def __init__(self, input_dev, testing=False):
super().__init__(input_dev, testing=testing, name=__name__)
self.pi = None
self.i2c_bus = None
self.i2c_address = 0x40 # HTU21D-F Address
if not testing:
self.try_initialize()
def initialize(self):
import pigpio
self.i2c_bus = self.input_dev.i2c_bus
self.pi = pigpio.pi()
def METHOD_NAME(self):
"""Gets the humidity and temperature."""
if not self.pi.connected:
self.logger.error("Could not connect to pigpiod. Ensure it is running and try again.")
return None
self.return_dict = copy.deepcopy(measurements_dict)
self.htu_reset()
# wtreg = 0xE6
# rdreg = 0xE7
rdtemp = 0xE3
rdhumi = 0xE5
handle = self.pi.i2c_open(self.i2c_bus, self.i2c_address) # open i2c bus
self.pi.i2c_write_byte(handle, rdtemp) # send read temp command
time.sleep(0.055) # readings take up to 50 ms, let's give it some time
(_, byte_array) = self.pi.i2c_read_device(handle, 3) # vacuum up those bytes
self.pi.i2c_close(handle) # close the i2c bus
t1 = byte_array[0] # most significant byte msb
t2 = byte_array[1] # least significant byte lsb
temp_reading = (t1 * 256) + t2 # combine both bytes into one big integer
temp_reading = float(temp_reading)
temperature = ((temp_reading / 65536) * 175.72) - 46.85 # formula from datasheet
handle = self.pi.i2c_open(self.i2c_bus, self.i2c_address) # open i2c bus
self.pi.i2c_write_byte(handle, rdhumi) # send read humi command
time.sleep(0.055) # readings take up to 50 ms, let's give it some time
(_, byte_array) = self.pi.i2c_read_device(handle, 3) # vacuum up those bytes
self.pi.i2c_close(handle) # close the i2c bus
h1 = byte_array[0] # most significant byte msb
h2 = byte_array[1] # least significant byte lsb
humi_reading = (h1 * 256) + h2 # combine both bytes into one big integer
humi_reading = float(humi_reading)
uncomp_humidity = ((humi_reading / 65536) * 125) - 6 # formula from datasheet
humidity = ((25 - temperature) * -0.15) + uncomp_humidity # temperature compensation coefficient from the datasheet
if self.is_enabled(0):
self.value_set(0, temperature)
if self.is_enabled(1):
self.value_set(1, humidity)
if self.is_enabled(2) and self.is_enabled(0) and self.is_enabled(1):
self.value_set(2, calculate_dewpoint(self.value_get(0), self.value_get(1)))
if self.is_enabled(3) and self.is_enabled(0) and self.is_enabled(1):
self.value_set(3, calculate_vapor_pressure_deficit(self.value_get(0), self.value_get(1)))
return self.return_dict
def htu_reset(self):
reset = 0xFE
handle = self.pi.i2c_open(self.i2c_bus, self.i2c_address) # open i2c bus
self.pi.i2c_write_byte(handle, reset) # send reset command
self.pi.i2c_close(handle) # close i2c bus
time.sleep(0.2) # reset takes 15ms so let's give it some time
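# --- Worked example (comment-only sketch, assumed raw readings) of the formulas above ---
# raw_temp = 0x6A8C = 27276 -> T = (27276 / 65536) * 175.72 - 46.85 ~= 26.3 C
# raw_humi = 0x7C80 = 31872 -> RH_uncomp = (31872 / 65536) * 125 - 6 ~= 54.8 %
# RH = (25 - T) * -0.15 + RH_uncomp ~= 55.0 % (temperature-compensated)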
|
4,236 |
test invalid launch 2
|
# Copyright (c) Robocorp Technologies Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robocorp_code_debug_adapter_tests.fixtures import _DebuggerAPI
import pytest
from robocorp_code.rcc import Rcc
def test_invalid_launch_1(debugger_api: _DebuggerAPI, rcc_config_location: str):
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import LaunchRequest
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import (
LaunchRequestArguments,
)
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import Response
debugger_api.initialize(rcc_config_location=rcc_config_location)
debugger_api.write(
LaunchRequest(
LaunchRequestArguments(
__sessionId="some_id",
noDebug=True,
# robot=robot, -- error: don't add robot
terminal="none",
cwd=None,
)
)
)
launch_response = debugger_api.read(Response)
assert launch_response.success == False
def METHOD_NAME(debugger_api: _DebuggerAPI, rcc_config_location: str):
debugger_api.initialize(rcc_config_location=rcc_config_location)
debugger_api.launch("invalid_file.robot", "task1", debug=False, success=False)
def test_simple_launch(debugger_api: _DebuggerAPI, rcc: Rcc, rcc_config_location: str):
"""
This is an integrated test of the debug adapter. It communicates with it as if it was
VSCode.
"""
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import TerminatedEvent
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import OutputEvent
rcc.check_conda_installed()
debugger_api.initialize(rcc_config_location=rcc_config_location)
robot = debugger_api.get_dap_case_file("minimal/robot.yaml")
debugger_api.launch(robot, "task1", debug=False)
debugger_api.configuration_done()
# i.e.: Big timeout because creating the environment may be slow.
debugger_api.read(TerminatedEvent, timeout=360)
debugger_api.assert_message_found(
OutputEvent, lambda msg: "Task 1 executed" in msg.body.output
)
with pytest.raises(AssertionError):
debugger_api.assert_message_found(
OutputEvent, lambda msg: "Task 2 executed" in msg.body.output
)
@pytest.mark.parametrize("override", [True, False])
def test_work_item_variables_not_overridden(
debugger_api: _DebuggerAPI, rcc: Rcc, rcc_config_location: str, override: bool
):
"""
Verifies that variables from env.json don't override variables related
to work items set by Robocorp Code:
- RPA_INPUT_WORKITEM_PATH
- RPA_OUTPUT_WORKITEM_PATH
- RPA_WORKITEMS_ADAPTER
"""
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import TerminatedEvent
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import OutputEvent
rcc.check_conda_installed()
debugger_api.initialize(rcc_config_location=rcc_config_location)
robot = debugger_api.get_dap_case_file("project_with_env/robot.yaml")
environ = {}
if override:
environ = {
"RPA_INPUT_WORKITEM_PATH": "input workitem path",
"RPA_OUTPUT_WORKITEM_PATH": "output workitem path",
"RPA_WORKITEMS_ADAPTER": "workitems adapter",
}
debugger_api.launch(
robot,
"task1",
debug=False,
environ=environ,
)
debugger_api.configuration_done()
# i.e.: Big timeout because creating the environment may be slow.
debugger_api.read(TerminatedEvent, timeout=360)
debugger_api.assert_message_found(
OutputEvent,
lambda msg: "SOME_OTHER_VAR: some other variable" in msg.body.output,
)
if override:
with pytest.raises(AssertionError):
# The environment variables from env.json were overridden and shouldn't be printed to stdout.
debugger_api.assert_message_found(
OutputEvent, lambda msg: "Will be ignored" in msg.body.output
)
for val in environ.values():
debugger_api.assert_message_found(
OutputEvent, lambda msg: val in msg.body.output
)
else:
# Variables used will be the ones in env.json.
debugger_api.assert_message_found(
OutputEvent, lambda msg: "Will be ignored" in msg.body.output
)
def not_supported_test_launch_in_external_terminal(
debugger_api: _DebuggerAPI, rcc_config_location: str
):
"""
This is an integrated test of the debug adapter. It communicates with it as if it was
VSCode.
Note: we don't currently support launching in an external terminal because there's
no easy way to get the pid. It would be possible by creating a wrapper script that
really launches rcc and then connects back to some port to report the pid of the
spawned process, but the value gained versus the effort seems low. For now we only
run without a terminal, which gives us an easy way of tracking the RCC process pid.
"""
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import TerminatedEvent
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import RunInTerminalRequest
import os
from robocorp_ls_core.basic import as_str
from robocorp_ls_core.subprocess_wrapper import subprocess
debugger_api.initialize(rcc_config_location=rcc_config_location)
robot = debugger_api.get_dap_case_file("minimal/robot.yaml")
debugger_api.launch(robot, "task2", debug=False, terminal="external")
debugger_api.configuration_done()
run_in_terminal_request = debugger_api.read(RunInTerminalRequest)
env = os.environ.copy()
for key, val in run_in_terminal_request.arguments.env.to_dict().items():
env[as_str(key)] = as_str(val)
cwd = run_in_terminal_request.arguments.cwd
popen_args = run_in_terminal_request.arguments.args
subprocess.Popen(popen_args, cwd=cwd, env=env)
# i.e.: Big timeout because creating the environment may be slow.
debugger_api.read(TerminatedEvent, timeout=120)
|
4,237 |
produce sdf
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017-2020 The Project X-Ray Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
import json
import argparse
from prjxray.util import OpenSafeFile
def get_elems_count(timings, slice, site, bel_type):
combinational = 0
sequential = 0
for delay in timings[slice][site][bel_type]:
if 'sequential' in timings[slice][site][bel_type][delay]:
sequential += 1
else:
combinational += 1
return combinational, sequential
def METHOD_NAME(timings, outdir):
for slice in timings:
sdf = \
"""
(DELAYFILE
(SDFVERSION \"3.0\")
(TIMESCALE 1ns)
"""
for site in sorted(timings[slice]):
for bel_type in sorted(timings[slice][site]):
combinational, sequential = get_elems_count(
timings, slice, site, bel_type)
#define CELL
cell= \
"""
(CELL
(CELLTYPE \"{name}\")
(INSTANCE {location})""".format(name=bel_type.upper(), location=site)
sdf += cell
#define delay header (if needed)
if combinational > 0:
delay_hdr = \
"""
(DELAY
(ABSOLUTE"""
sdf += delay_hdr
# add all delays definitions
for delay in sorted(timings[slice][site][bel_type]):
if 'sequential' in timings[slice][site][bel_type][
delay]:
continue
dly = \
"""
(IOPATH {input} {output} ({FAST_MIN}::{FAST_MAX})({SLOW_MIN}::{SLOW_MAX}))""".format(**timings[slice][site][bel_type][delay])
if 'extra_ports' in timings[slice][site][bel_type][delay]:
dly += \
""" #extra ports {}""".format(timings[slice][site][bel_type][delay]['extra_ports'])
sdf += dly
# close DELAY definition
enddelay = \
"""
)
)"""
sdf += enddelay
# define TIMINGCHECK header (if needed)
if sequential > 0:
timingcheck_hdr = \
"""
(TIMINGCHECK"""
sdf += timingcheck_hdr
for delay in sorted(timings[slice][site][bel_type]):
if 'sequential' not in timings[slice][site][bel_type][
delay]:
continue
timingcheck = \
"""
({prop} {input} (posedge {clock}) ({SLOW_MIN}::{SLOW_MAX}))""".format(
prop=timings[slice][site][bel_type][delay]['sequential'].upper(),
**timings[slice][site][bel_type][delay])
if 'extra_ports' in timings[slice][site][bel_type][delay]:
timingcheck += \
""" #extra ports {}""".format(timings[slice][site][bel_type][delay]['extra_ports'])
sdf += timingcheck
# close TIMINGCHECK definition
endtimingcheck = \
"""
)"""
sdf += endtimingcheck
endcell = \
"""
)"""
sdf += endcell
# end of SDF
sdf += \
"""
)"""
with OpenSafeFile(outdir + '/' + slice + '.sdf', "w") as fp:
fp.write(sdf)
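# --- Illustrative output fragment (assumed timing values, not taken from a real run) ---
# For a combinational entry with input "I0", output "O", FAST 0.05/0.07 ns and
# SLOW 0.09/0.12 ns, the emitted SDF would contain roughly:
#   (CELL
#     (CELLTYPE "LUT6")
#     (INSTANCE SLICE_X0Y0)
#     (DELAY
#       (ABSOLUTE
#         (IOPATH I0 O (0.05::0.07)(0.09::0.12))
#       )
#     )
#   )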
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--json', type=str, help="Input JSON file")
parser.add_argument('--sdf', type=str, help="SDF files output directory")
args = parser.parse_args()
with OpenSafeFile(args.json, 'r') as fp:
timings = json.load(fp)
METHOD_NAME(timings, args.sdf)
if __name__ == '__main__':
main()
|
4,238 |
read homepage
|
import asyncio
from tempfile import NamedTemporaryFile
import time
from typing import Optional
from fastapi import BackgroundTasks
from fastapi import FastAPI
from fastapi import HTTPException
from fastapi import Header
from fastapi.responses import FileResponse
from fastapi.responses import StreamingResponse
from pydantic import BaseModel
fake_secret_token = "DataDog"
fake_db = {
"foo": {"id": "foo", "name": "Foo", "description": "This item's description is foo."},
"bar": {"id": "bar", "name": "Bar", "description": "The bartenders"},
"testUserID": {"userid": "testUserID", "name": "Test User"},
}
class Item(BaseModel):
id: str
name: str
description: Optional[str] = None
class User(BaseModel):
userid: int
name: str
def get_app():
app = FastAPI()
@app.get("/")
async def METHOD_NAME(sleep: str = Header(...)): # noqa: B008
if sleep == "True":
time.sleep(2)
return {"Homepage Read": "Sleep"}
return {"Homepage Read": "Success"}
@app.get("/items/{item_id}", response_model=Item)
async def read_item(item_id: str, x_token: str = Header(...)): # noqa: B008
if x_token != fake_secret_token:
raise HTTPException(status_code=401, detail="Invalid X-Token header")
if item_id not in fake_db:
raise HTTPException(status_code=404, detail="Item not found")
return fake_db[item_id]
@app.post("/items/", response_model=Item)
async def create_item(item: Item, x_token: str = Header(...)): # noqa: B008
if x_token != fake_secret_token:
raise HTTPException(status_code=401, detail="Invalid X-Token header")
if item.id in fake_db:
raise HTTPException(status_code=400, detail="Item already exists")
fake_db[item.id] = item
return item
@app.get("/users/{userid:str}")
async def get_user(userid: str, x_token: str = Header(...)): # noqa: B008
if x_token != fake_secret_token:
raise HTTPException(status_code=401, detail="Invalid X-Token header")
if userid not in fake_db:
raise HTTPException(status_code=404, detail="User not found")
return fake_db[userid]
@app.get("/users/{userid:str}/info")
async def get_user_info(userid: str, x_token: str = Header(...)): # noqa: B008
if x_token != fake_secret_token:
raise HTTPException(status_code=401, detail="Invalid X-Token header")
if userid not in fake_db:
raise HTTPException(status_code=404, detail="User not found")
return {"User Info": "Here"}
@app.get("/users/{userid:str}/{attribute:str}")
async def get_user_attribute(userid: str, attribute: str, x_token: str = Header(...)): # noqa: B008
if x_token != fake_secret_token:
raise HTTPException(status_code=401, detail="Invalid X-Token header")
if userid not in fake_db:
raise HTTPException(status_code=404, detail="User not found")
return {"User Attribute": fake_db[userid].get(attribute, "Fake Attribute")}
@app.get("/500")
async def error():
"""
An example error. Switch the `debug` setting to see either tracebacks or 500 pages.
"""
raise RuntimeError("Server error")
@app.get("/stream")
async def stream():
def stream_response():
yield b"streaming"
return StreamingResponse(stream_response())
@app.get("/file")
async def file():
with NamedTemporaryFile(delete=False) as fp:
fp.write(b"Datadog says hello!")
fp.flush()
return FileResponse(fp.name)
async def custom_task():
await asyncio.sleep(1)
@app.get("/asynctask")
async def asynctask(bg_tasks: BackgroundTasks):
bg_tasks.add_task(custom_task)
return "task added"
subapp = FastAPI()
@subapp.get("/hello/{name}")
def hello():
return {"Greeting": "Hello"}
app.mount("/sub-app", subapp)
return app
|
4,239 |
test basic api
|
import json
import logging
from unittest.case import skipIf
from tenacity import after_log, retry_if_exception_type, stop_after_attempt, wait_exponential, wait_random
from integration.config.service_names import MODE, REST_API
from integration.helpers.base_test import BaseTest
from integration.helpers.exception import StatusCodeError
from integration.helpers.resource import current_region_does_not_support
LOG = logging.getLogger(__name__)
@skipIf(current_region_does_not_support([REST_API]), "Rest API is not supported in this testing region")
class TestBasicApi(BaseTest):
"""
Basic AWS::Serverless::Api tests
"""
def METHOD_NAME(self):
"""
Creates an API and updates its DefinitionUri
"""
self.create_and_verify_stack("single/basic_api")
first_dep_ids = self.get_stack_deployment_ids()
self.assertEqual(len(first_dep_ids), 1)
self.set_template_resource_property("MyApi", "DefinitionUri", self.get_s3_uri("swagger2.json"))
self.update_stack()
second_dep_ids = self.get_stack_deployment_ids()
self.assertEqual(len(second_dep_ids), 1)
self.assertEqual(len(set(first_dep_ids).intersection(second_dep_ids)), 0)
@skipIf(current_region_does_not_support([MODE]), "Mode is not supported in this testing region")
def test_basic_api_with_mode(self):
"""
Creates an API with mode and updates it to remove the get path
"""
# Create an API with get and put
self.create_and_verify_stack("single/basic_api_with_mode")
stack_output = self.get_stack_outputs()
api_endpoint = stack_output.get("ApiEndpoint")
self.verify_get_request_response(f"{api_endpoint}/get", 200)
# Removes get from the API
self.update_and_verify_stack(file_path="single/basic_api_with_mode_update")
# API Gateway by default returns 403 if a path does not exist
self.verify_get_request_response.retry_with(
stop=stop_after_attempt(20),
wait=wait_exponential(multiplier=1, min=4, max=10) + wait_random(0, 1),
retry=retry_if_exception_type(StatusCodeError),
after=after_log(LOG, logging.WARNING),
reraise=True,
)(self, f"{api_endpoint}/get", 403)
LOG.log(msg=f"retry times {self.verify_get_request_response.retry.statistics}", level=logging.WARNING)
def test_basic_api_inline_openapi(self):
"""
Creates an API with and inline OpenAPI and updates its DefinitionBody basePath
"""
self.create_and_verify_stack("single/basic_api_inline_openapi")
first_dep_ids = self.get_stack_deployment_ids()
self.assertEqual(len(first_dep_ids), 1)
body = self.get_template_resource_property("MyApi", "DefinitionBody")
body["basePath"] = "/newDemo"
self.set_template_resource_property("MyApi", "DefinitionBody", body)
self.update_stack()
second_dep_ids = self.get_stack_deployment_ids()
self.assertEqual(len(second_dep_ids), 1)
self.assertEqual(len(set(first_dep_ids).intersection(second_dep_ids)), 0)
def test_basic_api_inline_swagger(self):
"""
Creates an API with an inline Swagger and updates its DefinitionBody basePath
"""
self.create_and_verify_stack("single/basic_api_inline_swagger")
first_dep_ids = self.get_stack_deployment_ids()
self.assertEqual(len(first_dep_ids), 1)
body = self.get_template_resource_property("MyApi", "DefinitionBody")
body["basePath"] = "/newDemo"
self.set_template_resource_property("MyApi", "DefinitionBody", body)
self.update_stack()
second_dep_ids = self.get_stack_deployment_ids()
self.assertEqual(len(second_dep_ids), 1)
self.assertEqual(len(set(first_dep_ids).intersection(second_dep_ids)), 0)
def test_basic_api_with_tags(self):
"""
Creates an API with tags
"""
self.create_and_verify_stack("single/basic_api_with_tags")
stages = self.get_api_stack_stages()
self.assertEqual(len(stages), 2)
stage = next(s for s in stages if s["stageName"] == "my-new-stage-name")
self.assertIsNotNone(stage)
self.assertEqual(stage["tags"]["TagKey1"], "TagValue1")
self.assertEqual(stage["tags"]["TagKey2"], "")
def test_state_machine_with_api_single_quotes_input(self):
"""
Pass single quotes in input JSON to a StateMachine
See https://github.com/aws/serverless-application-model/issues/1895
"""
self.create_and_verify_stack("single/state_machine_with_api")
stack_output = self.get_stack_outputs()
api_endpoint = stack_output.get("ApiEndpoint")
input_json = {"f'oo": {"hello": "'wor'l'd'''"}}
response = self.verify_post_request(api_endpoint, input_json, 200)
execution_arn = response.json()["executionArn"]
execution = self.client_provider.sfn_client.describe_execution(executionArn=execution_arn)
execution_input = json.loads(execution["input"])
self.assertEqual(execution_input, input_json)
|
4,240 |
test logistic mean
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for initializers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import stats
from tensorflow.contrib.distributions.python.ops import logistic
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.platform import test
class LogisticTest(test.TestCase):
def testReparameterizable(self):
batch_size = 6
np_loc = np.array([2.0] * batch_size, dtype=np.float32)
loc = constant_op.constant(np_loc)
scale = 1.5
dist = logistic.Logistic(loc, scale)
self.assertTrue(
dist.reparameterization_type == distribution.FULLY_REPARAMETERIZED)
def testLogisticLogProb(self):
with self.cached_session():
batch_size = 6
np_loc = np.array([2.0] * batch_size, dtype=np.float32)
loc = constant_op.constant(np_loc)
scale = 1.5
x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)
dist = logistic.Logistic(loc, scale)
expected_log_prob = stats.logistic.logpdf(x, np_loc, scale)
log_prob = dist.log_prob(x)
self.assertEqual(log_prob.get_shape(), (6,))
self.assertAllClose(log_prob.eval(), expected_log_prob)
prob = dist.prob(x)
self.assertEqual(prob.get_shape(), (6,))
self.assertAllClose(prob.eval(), np.exp(expected_log_prob))
def testLogisticCDF(self):
with self.cached_session():
batch_size = 6
np_loc = np.array([2.0] * batch_size, dtype=np.float32)
loc = constant_op.constant(np_loc)
scale = 1.5
dist = logistic.Logistic(loc, scale)
x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)
cdf = dist.cdf(x)
expected_cdf = stats.logistic.cdf(x, np_loc, scale)
self.assertEqual(cdf.get_shape(), (6,))
self.assertAllClose(cdf.eval(), expected_cdf)
def testLogisticLogCDF(self):
with self.cached_session():
batch_size = 6
np_loc = np.array([2.0] * batch_size, dtype=np.float32)
loc = constant_op.constant(np_loc)
scale = 1.5
dist = logistic.Logistic(loc, scale)
x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)
logcdf = dist.log_cdf(x)
expected_logcdf = stats.logistic.logcdf(x, np_loc, scale)
self.assertEqual(logcdf.get_shape(), (6,))
self.assertAllClose(logcdf.eval(), expected_logcdf)
def testLogisticSurvivalFunction(self):
with self.cached_session():
batch_size = 6
np_loc = np.array([2.0] * batch_size, dtype=np.float32)
loc = constant_op.constant(np_loc)
scale = 1.5
dist = logistic.Logistic(loc, scale)
x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)
survival_function = dist.survival_function(x)
expected_survival_function = stats.logistic.sf(x, np_loc, scale)
self.assertEqual(survival_function.get_shape(), (6,))
self.assertAllClose(survival_function.eval(), expected_survival_function)
def testLogisticLogSurvivalFunction(self):
with self.cached_session():
batch_size = 6
np_loc = np.array([2.0] * batch_size, dtype=np.float32)
loc = constant_op.constant(np_loc)
scale = 1.5
dist = logistic.Logistic(loc, scale)
x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)
logsurvival_function = dist.log_survival_function(x)
expected_logsurvival_function = stats.logistic.logsf(x, np_loc, scale)
self.assertEqual(logsurvival_function.get_shape(), (6,))
self.assertAllClose(logsurvival_function.eval(),
expected_logsurvival_function)
def METHOD_NAME(self):
with self.cached_session():
loc = [2.0, 1.5, 1.0]
scale = 1.5
expected_mean = stats.logistic.mean(loc, scale)
dist = logistic.Logistic(loc, scale)
self.assertAllClose(dist.mean().eval(), expected_mean)
def testLogisticVariance(self):
with self.cached_session():
loc = [2.0, 1.5, 1.0]
scale = 1.5
expected_variance = stats.logistic.var(loc, scale)
dist = logistic.Logistic(loc, scale)
self.assertAllClose(dist.variance().eval(), expected_variance)
def testLogisticEntropy(self):
with self.cached_session():
batch_size = 3
np_loc = np.array([2.0] * batch_size, dtype=np.float32)
loc = constant_op.constant(np_loc)
scale = 1.5
expected_entropy = stats.logistic.entropy(np_loc, scale)
dist = logistic.Logistic(loc, scale)
self.assertAllClose(dist.entropy().eval(), expected_entropy)
def testLogisticSample(self):
with self.cached_session():
loc = [3.0, 4.0, 2.0]
scale = 1.0
dist = logistic.Logistic(loc, scale)
sample = dist.sample(seed=100)
self.assertEqual(sample.get_shape(), (3,))
self.assertAllClose(sample.eval(), [6.22460556, 3.79602098, 2.05084133])
def testDtype(self):
loc = constant_op.constant([0.1, 0.4], dtype=dtypes.float32)
scale = constant_op.constant(1.0, dtype=dtypes.float32)
dist = logistic.Logistic(loc, scale)
self.assertEqual(dist.dtype, dtypes.float32)
self.assertEqual(dist.loc.dtype, dist.scale.dtype)
self.assertEqual(dist.dtype, dist.sample(5).dtype)
self.assertEqual(dist.dtype, dist.mode().dtype)
self.assertEqual(dist.loc.dtype, dist.mean().dtype)
self.assertEqual(dist.loc.dtype, dist.variance().dtype)
self.assertEqual(dist.loc.dtype, dist.stddev().dtype)
self.assertEqual(dist.loc.dtype, dist.entropy().dtype)
self.assertEqual(dist.loc.dtype, dist.prob(0.2).dtype)
self.assertEqual(dist.loc.dtype, dist.log_prob(0.2).dtype)
loc = constant_op.constant([0.1, 0.4], dtype=dtypes.float64)
scale = constant_op.constant(1.0, dtype=dtypes.float64)
dist64 = logistic.Logistic(loc, scale)
self.assertEqual(dist64.dtype, dtypes.float64)
self.assertEqual(dist64.dtype, dist64.sample(5).dtype)
if __name__ == "__main__":
test.main()
|
4,241 |
json response from error
|
from typing import Any, Dict, Iterator, List, Mapping, Optional
import orjson
from django.http import HttpRequest, HttpResponse, HttpResponseNotAllowed
from zerver.lib.exceptions import JsonableError, UnauthorizedError
class MutableJsonResponse(HttpResponse):
def __init__(
self,
data: Dict[str, Any],
*,
content_type: str,
status: int,
) -> None:
# Mirror the behavior of Django's TemplateResponse and pass an
# empty string for the initial content value. Because that will
# set _needs_serialization to False, we initialize it to True
# after the call to super __init__.
super().__init__("", content_type=content_type, status=status)
self._data = data
self._needs_serialization = True
def get_data(self) -> Dict[str, Any]:
"""Get data for this MutableJsonResponse. Calling this method
after the response's content has already been serialized
will mean the next time the response's content is accessed
it will be reserialized because the caller may have mutated
the data."""
self._needs_serialization = True
return self._data
# This always returns bytes, but in Django's HttpResponse the return
# value can be bytes, an iterable of bytes or some other object. Any
# is used here to encompass all of those return values.
# See https://github.com/typeddjango/django-stubs/commit/799b41fe47cfe2e56be33eee8cfbaf89a9853a8e
# and https://github.com/python/mypy/issues/3004.
@property
def content(self) -> Any:
"""Get content for the response. If the content hasn't been
overridden by the property setter, it will be the response data
serialized lazily to JSON."""
if self._needs_serialization:
# Because we don't pass a default handler, OPT_PASSTHROUGH_DATETIME
# actually causes orjson to raise a TypeError on datetime objects. This
# helps us avoid relying on the particular serialization used by orjson.
self.content = orjson.dumps(
self._data,
option=orjson.OPT_APPEND_NEWLINE | orjson.OPT_PASSTHROUGH_DATETIME,
)
return super().content
# There are two ways this might be called. The first is in the getter when
# the response data is being serialized into JSON. The second is when it
# is called from some other part of the code. This happens for instance in
# the parent class constructor. In this case, the new content overrides the
# serialized JSON.
@content.setter
def content(self, value: Any) -> None:
"""Set the content for the response."""
assert isinstance(HttpResponse.content, property)
assert HttpResponse.content.fset is not None
HttpResponse.content.fset(self, value)
self._needs_serialization = False
# The superclass HttpResponse defines an iterator that doesn't access the content
# property, so in order to not break the implementation of the superclass with
# our lazy content generation, we override the iterator to access `self.content`
# through our getter.
def __iter__(self) -> Iterator[bytes]:
return iter([self.content])
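# --- Hedged usage sketch (illustration only, not part of the original module) ---
# Demonstrates the lazy serialization described in the docstrings above: mutating the
# data after the first access causes the content to be re-serialized on the next access.
def _mutable_json_response_example() -> None:
    resp = MutableJsonResponse({"result": "success"}, content_type="application/json", status=200)
    first = resp.content                    # serialized lazily on first access
    resp.get_data()["msg"] = "updated"      # marks the response for re-serialization
    second = resp.content                   # reflects the mutation
    assert first != second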
def json_unauthorized(
message: Optional[str] = None, www_authenticate: Optional[str] = None
) -> HttpResponse:
return METHOD_NAME(
UnauthorizedError(msg=message, www_authenticate=www_authenticate)
)
def json_method_not_allowed(methods: List[str]) -> HttpResponseNotAllowed:
resp = HttpResponseNotAllowed(methods)
resp.content = orjson.dumps(
{"result": "error", "msg": "Method Not Allowed", "allowed_methods": methods}
)
return resp
def json_response(
res_type: str = "success", msg: str = "", data: Mapping[str, Any] = {}, status: int = 200
) -> MutableJsonResponse:
content = {"result": res_type, "msg": msg}
content.update(data)
return MutableJsonResponse(
data=content,
content_type="application/json",
status=status,
)
def json_success(request: HttpRequest, data: Mapping[str, Any] = {}) -> MutableJsonResponse:
return json_response(data=data)
def json_partial_success(request: HttpRequest, data: Mapping[str, Any] = {}) -> MutableJsonResponse:
return json_response(res_type="partially_completed", data=data, status=200)
def METHOD_NAME(exception: JsonableError) -> MutableJsonResponse:
"""
This should only be needed in middleware; in app code, just raise.
When app code raises a JsonableError, the JsonErrorHandler
middleware takes care of transforming it into a response by
calling this function.
"""
response_type = "error"
if 200 <= exception.http_status_code < 300:
response_type = "success"
response = json_response(
response_type, msg=exception.msg, data=exception.data, status=exception.http_status_code
)
for header, value in exception.extra_headers.items():
response[header] = value
return response
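# --- Hedged sketch (illustration only) of the middleware flow described in the docstring above ---
# App code raises a JsonableError; the middleware catches it and converts it into a response.
def _error_to_response_example() -> MutableJsonResponse:
    try:
        raise JsonableError("Invalid request")
    except JsonableError as exc:
        return METHOD_NAME(exc)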
class AsynchronousResponse(HttpResponse):
"""
This response is just a sentinel to be discarded by Tornado and replaced
with a real response later; see zulip_finish.
"""
status_code = 399
|
4,242 |
set command entry
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import enum
from abc import ABC, abstractmethod
from nvflare.fuel.common.ctx import SimpleContext
from nvflare.fuel.hci.reg import CommandModule
from nvflare.fuel.hci.table import Table
class CommandCtxKey(object):
API = "api"
CMD = "cmd"
CMD_ENTRY = "cmd_entry"
CMD_ARGS = "cmd_args"
REPLY_PROCESSOR = "reply_processor"
RESULT = "result"
JSON_PROCESSOR = "json_processor"
META = "meta"
CUSTOM_PROPS = "custom_props"
class CommandContext(SimpleContext):
def set_command_result(self, result):
self.set_prop(CommandCtxKey.RESULT, result)
def get_command_result(self):
return self.get_prop(CommandCtxKey.RESULT)
def set_api(self, api):
self.set_prop(CommandCtxKey.API, api)
def get_api(self):
return self.get_prop(CommandCtxKey.API)
def set_command(self, command):
self.set_prop(CommandCtxKey.CMD, command)
def get_command(self):
return self.get_prop(CommandCtxKey.CMD)
def get_command_name(self):
args = self.get_command_args()
full_name = args[0]
parts = full_name.split(".")
return parts[-1]
def set_command_args(self, cmd_args):
self.set_prop(CommandCtxKey.CMD_ARGS, cmd_args)
def get_command_args(self):
return self.get_prop(CommandCtxKey.CMD_ARGS)
def METHOD_NAME(self, entry):
self.set_prop(CommandCtxKey.CMD_ENTRY, entry)
def get_command_entry(self):
return self.get_prop(CommandCtxKey.CMD_ENTRY)
def set_reply_processor(self, processor):
self.set_prop(CommandCtxKey.REPLY_PROCESSOR, processor)
def get_reply_processor(self):
return self.get_prop(CommandCtxKey.REPLY_PROCESSOR)
def set_json_processor(self, processor):
self.set_prop(CommandCtxKey.JSON_PROCESSOR, processor)
def get_json_processor(self):
return self.get_prop(CommandCtxKey.JSON_PROCESSOR)
def set_meta(self, meta):
self.set_prop(CommandCtxKey.META, meta)
def get_meta(self):
return self.get_prop(CommandCtxKey.META)
def set_custom_props(self, value):
self.set_prop(CommandCtxKey.CUSTOM_PROPS, value)
def get_custom_props(self):
return self.get_prop(CommandCtxKey.CUSTOM_PROPS)
class ApiPocValue(object):
ADMIN = "admin"
class CommandInfo(enum.Enum):
OK = 0
UNKNOWN = 1
AMBIGUOUS = 2
CONFIRM_PWD = 3
CONFIRM_YN = 4
CONFIRM_USER_NAME = 5
CONFIRM_AUTH = 6
class ReplyProcessor:
"""A base class for parsing server's response."""
def reply_start(self, ctx: CommandContext, reply_json):
pass
def process_string(self, ctx: CommandContext, item: str, meta: {}):
pass
def process_success(self, ctx: CommandContext, item: str):
pass
def process_error(self, ctx: CommandContext, err: str):
pass
def process_table(self, ctx: CommandContext, table: Table):
pass
def process_dict(self, ctx: CommandContext, data: dict):
pass
def process_shutdown(self, ctx: CommandContext, msg: str):
pass
def process_token(self, ctx: CommandContext, token: str):
pass
def protocol_error(self, ctx: CommandContext, err: str):
pass
def reply_done(self, ctx: CommandContext):
pass
class AdminAPISpec(ABC):
@abstractmethod
def is_ready(self) -> bool:
"""Whether the API is ready for executing commands."""
pass
@abstractmethod
def do_command(self, command: str):
"""Executes a command.
The command could be a client command or a server command.
Args:
command: The command to be executed.
"""
pass
@abstractmethod
def server_execute(self, command: str, reply_processor=None):
"""Executes a command on server side.
Args:
command: The command to be executed.
reply_processor: processor to process reply from server
"""
pass
@abstractmethod
def check_command(self, command: str) -> CommandInfo:
"""Checks the specified command for processing info.
The command could be a client command or a server command.
Args:
command: command to be checked
Returns: command processing info
"""
pass
def service_address_changed_cb_signature(host: str, port: int, ssid: str):
pass
class ServiceFinder(ABC):
@abstractmethod
def start(self, service_address_changed_cb):
pass
@abstractmethod
def stop(self):
pass
def set_secure_context(self, ca_cert_path: str, cert_path: str, private_key_path: str):
pass
def get_command_module(self) -> CommandModule:
pass
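# --- Hedged sketch (illustration only, not part of the original module) ---
# A minimal ReplyProcessor that stashes plain-string reply items on the CommandContext;
# the "collected_items" prop key is an assumption made up for this example.
class _CollectingReplyProcessor(ReplyProcessor):
    def process_string(self, ctx: CommandContext, item: str, meta: {}):
        items = ctx.get_prop("collected_items") or []
        items.append(item)
        ctx.set_prop("collected_items", items)
        ctx.set_command_result(items)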
|
4,243 |
average hash and sizes
|
import os
import platform
import sys
from env_indigo import getPlatform, isIronPython, isJython
if sys.version_info > (3, 0):
from .bistring3 import BitString
else:
from .bistring1 import BitString
HASH_SIZE = 32
if isIronPython():
import math
elif isJython():
from java.awt import Image, RenderingHints
from java.awt.image import BufferedImage
from java.io import File
from java.lang import System
from javax.imageio import ImageIO
else:
try:
import Image
except ImportError:
from PIL import Image
class RenderingTestException(Exception):
pass
class ImageHash(object):
def __init__(self, path, size=HASH_SIZE):
self.image_path = path
self.hash_size = size
def getBitString(self, pixels):
avg = sum(pixels) / len(pixels)
diff = []
for pixel in pixels:
value = 1 if pixel > avg else 0
diff.append(str(value))
bits = BitString(bin="".join(diff))
return bits
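# --- Worked example (comment-only) of the average-hash step above ---
# For pixels [10, 200, 30, 220] the average is 115, so the above/below-average
# bits are [0, 1, 0, 1] -> BitString(bin="0101").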
def METHOD_NAME(self):
height = 0
width = 0
if isJython():
image = ImageIO.read(File(self.image_path))
height = image.getHeight()
width = image.getWidth()
newImage = BufferedImage(
self.hash_size, self.hash_size, BufferedImage.TYPE_INT_ARGB
)
g = newImage.createGraphics()
g.setRenderingHint(
RenderingHints.KEY_INTERPOLATION,
RenderingHints.VALUE_INTERPOLATION_BICUBIC,
)
g.setRenderingHint(
RenderingHints.KEY_RENDERING,
RenderingHints.VALUE_RENDER_QUALITY,
)
g.setRenderingHint(
RenderingHints.KEY_ANTIALIASING,
RenderingHints.VALUE_ANTIALIAS_ON,
)
g.drawImage(image, 0, 0, self.hash_size, self.hash_size, None)
g.dispose()
allchannelpixels = [[], [], [], []]
for i in range(self.hash_size):
for j in range(self.hash_size):
pixel = int(newImage.getRGB(i, j))
allchannelpixels[0].append((pixel >> 16) & 0xFF)
allchannelpixels[1].append((pixel >> 8) & 0xFF)
allchannelpixels[2].append((pixel) & 0xFF)
allchannelpixels[3].append((pixel >> 24) & 0xFF)
elif isIronPython():
allchannelpixels = [[1], [1], [1], [0]]
file_size = round(os.path.getsize(self.image_path))
width = round(math.sqrt(file_size))
height = round(math.sqrt(file_size))
else:
self.image = Image.open(self.image_path)
width, height = self.image.size
image = self.image.resize(
(self.hash_size, self.hash_size), Image.ANTIALIAS
)
# image.show()
allchannelpixels = [
list(channel.getdata()) for channel in image.split()
]
bits = []
for pixels in allchannelpixels:
bits.append(self.getBitString(pixels))
return bits, width, height
def imageDiff(imp1, imp2):
imh1, im1_width, im1_height = ImageHash(
imp1, HASH_SIZE
).METHOD_NAME()
imh2, im2_width, im2_height = ImageHash(
imp2, HASH_SIZE
).METHOD_NAME()
if (abs((float(im1_width) / float(im2_width)) - 1.0) > 0.2) or (
abs((float(im1_height) / float(im2_height)) - 1.0) > 0.2
):
raise RenderingTestException(
"Images have different sizes: %sx%s (ref) and %sx%s (out)"
% (im1_width, im1_height, im2_width, im2_height)
)
if len(imh1) != len(imh2):
raise RenderingTestException(
"Images have different channels count: %s (ref) and %s (out)"
% (len(imh1), len(imh2))
)
results = []
for i in range(len(imh1)):
results.append((imh1[i] ^ imh2[i]).bin.count("1"))
return results
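# --- Hedged usage sketch (comment-only; file paths are placeholders) ---
# results = imageDiff("ref/render.png", "out/render.png")
# Each entry is the per-channel Hamming distance between the two average hashes,
# ranging from 0 (identical channel) to HASH_SIZE**2 (completely different).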
# def checkSvgSimilarity(filename):
# def blankId(n):
# return 'id=""'
#
# def roundFloat(n):
# try:
# return str(int(round(float(n.group(0)))))
# except TypeError, e:
# return n.group(0)
#
# f = open('ref/%s' % filename)
# ref = f.read()
# f.close()
# f = open('out/%s' % filename)
# out = f.read()
# f.close()
#
# pattern = re.compile('([0-9]+\.[0-9]+)')
# ref = pattern.sub(roundFloat, ref)
# out = pattern.sub(roundFloat, out)
#
# pattern = re.compile('id=\"(.+)\"')
# ref = pattern.sub(blankId, ref)
# out = pattern.sub(blankId, out)
#
# value = difflib.SequenceMatcher(None, ref, out).ratio()
# if value >= 0.75:
# print '%s rendering status: OK' % filename
# else:
# print '%s rendering status: Problem: SVG similarity is %s' % (filename, round(value, 2))
def checkBitmapSimilarity(filename, ref_filename):
if ref_filename is None:
ref_filename = filename
try:
system = getPlatform()
if system != "mac" and system != "linux":
if os.name == "nt":
system = "win"
elif os.name == "posix":
if not platform.mac_ver()[0]:
system = "linux"
else:
system = "mac"
elif os.name == "java":
osName = System.getProperty("os.name")
if osName.find("Windows") != -1:
system = "win"
elif osName.find("Linux") != -1:
system = "linux"
elif osName.find("Mac OS") != -1:
system = "mac"
else:
raise RenderingTestException(
"No reference images for this operating system: {0}".format(
osName
)
)
else:
raise RenderingTestException(
"No reference images for this operating system: {0}".format(
os.name
)
)
dirname = os.path.normpath(
os.path.abspath(
os.path.join(
os.path.dirname(__file__), "..", "..", "tests", "rendering"
)
)
)
results = imageDiff(
"%s/ref/%s/%s" % (dirname, system, ref_filename),
"%s/out/%s" % (dirname, filename),
)
except RenderingTestException as e:
return "%s rendering status: Problem: %s" % (filename, str(e))
channels = ["red", "green", "blue", "alpha"]
for i, result in enumerate(results):
if result > (HASH_SIZE**2) * 0.1:
return (
"%s rendering status: Problem: PNG similarity is %s for %s channel"
% (
filename,
round(1 - (result / float(HASH_SIZE**2)), 2),
channels[i],
)
)
return "%s rendering status: OK" % filename
def checkImageSimilarity(filename, ref_filename=None):
if filename.endswith(".svg"):
# checkSvgSimilarity(filename)
return ""
else:
return checkBitmapSimilarity(filename, ref_filename)
|
4,244 |
test has identites
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2023 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Alvaro del Castillo <[email protected]>
# Valerio Cosentino <[email protected]>
#
import logging
import unittest
from base import TestBaseBackend
from grimoire_elk.enriched.utils import REPO_LABELS
class TestPuppetForge(TestBaseBackend):
"""Test PuppetForge backend"""
connector = "puppetforge"
ocean_index = "test_" + connector
enrich_index = "test_" + connector + "_enrich"
def METHOD_NAME(self):
"""Test value of has_identities method"""
enrich_backend = self.connectors[self.connector][2]()
self.assertTrue(enrich_backend.has_identities())
def test_items_to_raw(self):
"""Test whether JSON items are properly inserted into ES"""
result = self._test_items_to_raw()
self.assertEqual(result['items'], 3)
self.assertEqual(result['raw'], 3)
def test_raw_to_enrich(self):
"""Test whether the raw index is properly enriched"""
result = self._test_raw_to_enrich()
self.assertEqual(result['raw'], 3)
self.assertEqual(result['enrich'], 3)
def test_enrich_repo_labels(self):
"""Test whether the field REPO_LABELS is present in the enriched items"""
self._test_raw_to_enrich()
enrich_backend = self.connectors[self.connector][2]()
for item in self.items:
eitem = enrich_backend.get_rich_item(item)
self.assertIn(REPO_LABELS, eitem)
def test_raw_to_enrich_sorting_hat(self):
"""Test enrich with SortingHat"""
result = self._test_raw_to_enrich(sortinghat=True)
self.assertEqual(result['raw'], 3)
self.assertEqual(result['enrich'], 3)
enrich_backend = self.connectors[self.connector][2]()
url = self.es_con + "/" + self.enrich_index + "/_search"
response = enrich_backend.requests.get(url, verify=False).json()
for hit in response['hits']['hits']:
source = hit['_source']
if 'author_uuid' in source:
self.assertIn('author_domain', source)
self.assertIn('author_gender', source)
self.assertIn('author_gender_acc', source)
self.assertIn('author_org_name', source)
self.assertIn('author_bot', source)
self.assertIn('author_multi_org_names', source)
def test_raw_to_enrich_projects(self):
"""Test enrich with Projects"""
result = self._test_raw_to_enrich(projects=True)
self.assertEqual(result['raw'], 3)
self.assertEqual(result['enrich'], 3)
def test_copy_raw_fields(self):
"""Test copied raw fields"""
self._test_raw_to_enrich()
enrich_backend = self.connectors[self.connector][2]()
for item in self.items:
eitem = enrich_backend.get_rich_item(item)
for attribute in enrich_backend.RAW_FIELDS_COPY:
if attribute in item:
self.assertEqual(item[attribute], eitem[attribute])
else:
self.assertIsNone(eitem[attribute])
def test_refresh_identities(self):
"""Test refresh identities"""
result = self._test_refresh_identities()
# ... ?
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
logging.getLogger("urllib3").setLevel(logging.WARNING)
logging.getLogger("requests").setLevel(logging.WARNING)
unittest.main(warnings='ignore')
|
4,245 |
location
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetTopicAuthorizationRuleResult',
'AwaitableGetTopicAuthorizationRuleResult',
'get_topic_authorization_rule',
'get_topic_authorization_rule_output',
]
@pulumi.output_type
class GetTopicAuthorizationRuleResult:
"""
Description of a namespace authorization rule.
"""
def __init__(__self__, id=None, METHOD_NAME=None, name=None, rights=None, system_data=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", METHOD_NAME)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if rights and not isinstance(rights, list):
raise TypeError("Expected argument 'rights' to be a list")
pulumi.set(__self__, "rights", rights)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
The geo-location where the resource lives
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def rights(self) -> Sequence[str]:
"""
The rights associated with the rule.
"""
return pulumi.get(self, "rights")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
The system meta data relating to this resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.EventHub/Namespaces" or "Microsoft.EventHub/Namespaces/EventHubs"
"""
return pulumi.get(self, "type")
class AwaitableGetTopicAuthorizationRuleResult(GetTopicAuthorizationRuleResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetTopicAuthorizationRuleResult(
id=self.id,
METHOD_NAME=self.METHOD_NAME,
name=self.name,
rights=self.rights,
system_data=self.system_data,
type=self.type)
def get_topic_authorization_rule(authorization_rule_name: Optional[str] = None,
namespace_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
topic_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetTopicAuthorizationRuleResult:
"""
Returns the specified authorization rule.
:param str authorization_rule_name: The authorization rule name.
:param str namespace_name: The namespace name
:param str resource_group_name: Name of the Resource group within the Azure subscription.
:param str topic_name: The topic name.
"""
__args__ = dict()
__args__['authorizationRuleName'] = authorization_rule_name
__args__['namespaceName'] = namespace_name
__args__['resourceGroupName'] = resource_group_name
__args__['topicName'] = topic_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:servicebus/v20221001preview:getTopicAuthorizationRule', __args__, opts=opts, typ=GetTopicAuthorizationRuleResult).value
return AwaitableGetTopicAuthorizationRuleResult(
id=pulumi.get(__ret__, 'id'),
METHOD_NAME=pulumi.get(__ret__, 'location'),
name=pulumi.get(__ret__, 'name'),
rights=pulumi.get(__ret__, 'rights'),
system_data=pulumi.get(__ret__, 'system_data'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_topic_authorization_rule)
def get_topic_authorization_rule_output(authorization_rule_name: Optional[pulumi.Input[str]] = None,
namespace_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
topic_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetTopicAuthorizationRuleResult]:
"""
Returns the specified authorization rule.
:param str authorization_rule_name: The authorization rule name.
:param str namespace_name: The namespace name
:param str resource_group_name: Name of the Resource group within the Azure subscription.
:param str topic_name: The topic name.
"""
...
|
4,246 |
check max ber size
|
# --- BEGIN COPYRIGHT BLOCK ---
# Copyright (C) 2022 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
import logging
import pytest
from lib389.tasks import *
from lib389.topologies import topology_st
from lib389._constants import DEFAULT_SUFFIX, DN_DM, PASSWORD
pytestmark = pytest.mark.tier2
log = logging.getLogger(__name__)
MYSUFFIX = 'dc=example,dc=com'
MYSUFFIXBE = 'userRoot'
def getMaxBerSizeFromDseLdif(topology_st):
topology_st.standalone.log.info(" +++++ Get maxbersize from dse.ldif +++++\n")
dse_ldif = topology_st.standalone.confdir + '/dse.ldif'
grepMaxBerCMD = "egrep nsslapd-maxbersize " + dse_ldif
topology_st.standalone.log.info(" Run CMD: %s\n" % grepMaxBerCMD)
grepMaxBerOUT = os.popen(grepMaxBerCMD, "r")
running = True
maxbersize = -1
while running:
l = grepMaxBerOUT.readline()
if l == "":
topology_st.standalone.log.info(" Empty: %s\n" % l)
running = False
elif "nsslapd-maxbersize:" in l.lower():
running = False
fields = l.split()
if len(fields) >= 2:
maxbersize = fields[1]
topology_st.standalone.log.info(" Right format - %s %s\n" % (fields[0], fields[1]))
else:
topology_st.standalone.log.info(" Wrong format - %s\n" % l)
else:
topology_st.standalone.log.info(" Else?: %s\n" % l)
return maxbersize
def METHOD_NAME(topology_st):
topology_st.standalone.log.info(" +++++ Check Max Ber Size +++++\n")
maxbersizestr = getMaxBerSizeFromDseLdif(topology_st)
maxbersize = int(maxbersizestr)
isdefault = True
defaultvalue = 2097152
if maxbersize < 0:
topology_st.standalone.log.info(" No nsslapd-maxbersize found in dse.ldif\n")
elif maxbersize == 0:
topology_st.standalone.log.info(" nsslapd-maxbersize: %d\n" % maxbersize)
else:
isdefault = False
topology_st.standalone.log.info(" nsslapd-maxbersize: %d\n" % maxbersize)
try:
entry = topology_st.standalone.search_s('cn=config', ldap.SCOPE_BASE,
"(cn=*)",
['nsslapd-maxbersize'])
if entry:
searchedsize = entry[0].getValue('nsslapd-maxbersize')
topology_st.standalone.log.info(" ldapsearch returned nsslapd-maxbersize: %s\n" % searchedsize)
else:
topology_st.standalone.log.fatal('ERROR: cn=config is not found?')
assert False
except ldap.LDAPError as e:
topology_st.standalone.log.error('ERROR: Failed to search for user entry: ' + e.message['desc'])
assert False
if isdefault:
topology_st.standalone.log.info(" Checking %d vs %d\n" % (int(searchedsize), defaultvalue))
assert int(searchedsize) == defaultvalue
def test_ticket48214_run(topology_st):
"""
Check ldapsearch returns the correct maxbersize when it is not explicitly set.
"""
log.info('Testing Ticket 48214 - ldapsearch on nsslapd-maxbersize returns 0 instead of current value')
# bind as directory manager
topology_st.standalone.log.info("Bind as %s" % DN_DM)
topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
topology_st.standalone.log.info("\n\n######################### Out of Box ######################\n")
METHOD_NAME(topology_st)
topology_st.standalone.log.info("\n\n######################### Add nsslapd-maxbersize: 0 ######################\n")
topology_st.standalone.modify_s('cn=config', [(ldap.MOD_REPLACE, 'nsslapd-maxbersize', b'0')])
METHOD_NAME(topology_st)
topology_st.standalone.log.info(
"\n\n######################### Add nsslapd-maxbersize: 10000 ######################\n")
topology_st.standalone.modify_s('cn=config', [(ldap.MOD_REPLACE, 'nsslapd-maxbersize', b'10000')])
METHOD_NAME(topology_st)
topology_st.standalone.log.info("ticket48214 was successfully verified.")
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
CURRENT_FILE = os.path.realpath(__file__)
pytest.main("-s %s" % CURRENT_FILE)
|
4,247 |
read topographic data
|
############################################################
# Program is part of MintPy #
# Copyright (c) 2013, Zhang Yunjun, Heresh Fattahi #
# Author: Zhang Yunjun, Heresh Fattahi, 2013 #
############################################################
import os
import numpy as np
from mintpy.mask import mask_matrix
from mintpy.multilook import multilook_data
from mintpy.objects import timeseries
from mintpy.utils import readfile, writefile
############################################################################
def design_matrix(dem, poly_order=1):
"""Design matrix for phase/elevation ratio estimation
Parameters: dem : 1D array in size of (length*width, ), or
2D array in size of (length, width)
poly_order : int
Returns: A : 2D array in size of (length*width, poly_order+1)
"""
dem = np.reshape(dem, (-1, 1))
A = np.ones((dem.size, 1), np.float64)
for i in range(poly_order):
Ai = np.array(dem**(i+1), np.float64)
A = np.hstack((A, Ai))
return A
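# Illustrative note (added, not from the original source): for a DEM with N valid
# pixels and poly_order=2, the returned A has columns [1, dem, dem**2], i.e. its
# shape is (N, 3).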
def METHOD_NAME(geom_file, meta):
print('read height & incidenceAngle from file: '+geom_file)
dem = readfile.read(geom_file, datasetName='height', print_msg=False)[0]
inc_angle = readfile.read(geom_file, datasetName='incidenceAngle', print_msg=False)[0]
dem *= 1.0/np.cos(inc_angle*np.pi/180.0)
ref_y = int(meta['REF_Y'])
ref_x = int(meta['REF_X'])
dem -= dem[ref_y, ref_x]
# Design matrix for elevation v.s. phase
# dem = dem.flatten()
return dem
def estimate_phase_elevation_ratio(dem, ts_data, inps):
"""Estimate phase/elevation ratio for each acquisition of timeseries
Parameters: dem : 2D array in size of ( length, width)
ts_data : 3D array in size of (num_date, length, width)
inps : Namespace
Returns: X : 2D array in size of (poly_order+1, num_date)
"""
num_date = ts_data.shape[0]
# prepare phase and elevation data
print('reading mask from file: '+inps.mask_file)
mask = readfile.read(inps.mask_file, datasetName='mask')[0]
dem = mask_matrix(np.array(dem), mask)
ts_data = mask_matrix(np.array(ts_data), mask)
# display
# 1. effect of multilooking --> narrow phase range --> better ratio estimation
debug_mode = False
if debug_mode:
import matplotlib.pyplot as plt
d_index = 47 # np.argmax(topo_trop_corr)
data = ts_data[d_index, :, :]
title = inps.date_list[d_index]
plt.figure()
plt.plot(dem[~np.isnan(dem)],
data[~np.isnan(dem)],
'.', label='Number of Looks = 1')
mli_dem = multilook_data(dem, 8, 8)
mli_data = multilook_data(data, 8, 8)
plt.plot(mli_dem[~np.isnan(mli_dem)],
mli_data[~np.isnan(mli_dem)],
'.', label='Number of Looks = 8')
plt.legend()
plt.xlabel('Elevation (m)')
plt.ylabel('Range Change (m)')
plt.title(title)
out_file = f'phase_elevation_ratio_{title}.png'
plt.savefig(out_file, bbox_inches='tight', transparent=True, dpi=300)
print(f'save to {out_file}')
plt.show()
print('----------------------------------------------------------')
print('Empirical tropospheric delay correction based on phase/elevation ratio (Doin et al., 2009)')
print(f'polynomial order: {inps.poly_order}')
if inps.num_multilook > 1:
print(f'number of multilook: {inps.num_multilook} (multilook data for estimation only)')
mask = multilook_data(mask, inps.num_multilook, inps.num_multilook)
dem = multilook_data(dem, inps.num_multilook, inps.num_multilook)
ts_data = multilook_data(ts_data, inps.num_multilook, inps.num_multilook)
if inps.threshold > 0.:
print(f'correlation threshold: {inps.threshold}')
mask_nan = ~np.isnan(dem)
dem = dem[mask_nan]
ts_data = ts_data[:, mask_nan]
# calculate correlation coefficient
print('----------------------------------------------------------')
print('calculate correlation of DEM with each acquisition')
topo_trop_corr = np.zeros(num_date, np.float32)
for i in range(num_date):
phase = ts_data[i, :]
cc = 0.
if np.count_nonzero(phase) > 0:
comp_data = np.vstack((dem, phase))
cc = np.corrcoef(comp_data)[0, 1]
topo_trop_corr[i] = cc
print(f'{inps.date_list[i]}: {cc:>5.2f}')
topo_trop_corr = np.abs(topo_trop_corr)
print(f'average correlation magnitude: {np.nanmean(topo_trop_corr):>5.2f}')
# estimate ratio parameter
print('----------------------------------------------------------')
print('estimate phase/elevation ratio')
A = design_matrix(dem=dem, poly_order=inps.poly_order)
X = np.dot(np.linalg.pinv(A), ts_data.T)
X = np.array(X, dtype=np.float32)
X[:, topo_trop_corr < inps.threshold] = 0.
return X
def estimate_tropospheric_delay(dem, X, metadata):
poly_order = X.shape[0]-1
num_date = X.shape[1]
length, width = dem.shape
print('estimate the stratified tropospheric delay')
B = design_matrix(dem=dem, poly_order=poly_order)
trop_data = np.array(np.dot(B, X).T, dtype=np.float32)
ref_index = int(metadata['REF_Y']) * width + int(metadata['REF_X'])
ref_value = trop_data[:, ref_index].reshape(-1, 1)
trop_data -= np.tile(ref_value, (1, length*width))
trop_data = np.reshape(trop_data, (num_date, length, width))
return trop_data
############################################################################
def run_tropo_phase_elevation(inps):
# read time-series data
ts_obj = timeseries(inps.timeseries_file)
ts_obj.open()
ts_data = ts_obj.read()
inps.date_list = list(ts_obj.dateList)
# read topographic data (DEM)
dem = METHOD_NAME(inps.geom_file, ts_obj.metadata)
# estimate tropo delay
X = estimate_phase_elevation_ratio(dem, ts_data, inps)
trop_data = estimate_tropospheric_delay(dem, X, ts_obj.metadata)
# correct for trop delay
mask = ts_data == 0.
ts_data -= trop_data
ts_data[mask] = 0.
# write corrected time-series file
meta = dict(ts_obj.metadata)
meta['mintpy.troposphericDelay.polyOrder'] = str(inps.poly_order)
if not inps.outfile:
fbase = os.path.splitext(inps.timeseries_file)[0]
inps.outfile = f'{fbase}_tropHgt.h5'
writefile.write(
ts_data,
out_file=inps.outfile,
metadata=meta,
ref_file=inps.timeseries_file,
)
return
|
4,248 |
serve
|
from typing import TYPE_CHECKING, Optional
from django import shortcuts
from django.core import paginator
from django.db import models
from modelcluster.fields import ParentalKey
from wagtail import models as wagtail_models
from wagtail.admin.panels import FieldPanel, InlinePanel
from wagtail.contrib.routable_page import models as routable_models
from wagtail.models import Orderable, TranslatableMixin
from wagtail_localize.fields import SynchronizedField, TranslatableField
from networkapi.utility import orderables
from networkapi.wagtailpages.pagemodels.base import BasePage
from networkapi.wagtailpages.pagemodels.buyersguide.utils import (
get_buyersguide_featured_cta,
get_categories_for_locale,
)
from networkapi.wagtailpages.utils import get_language_from_request
if TYPE_CHECKING:
from django import http
from networkapi.wagtailpages import models as pagemodels
class BuyersGuideEditorialContentIndexPage(
routable_models.RoutablePageMixin,
BasePage,
):
parent_page_types = ["wagtailpages.BuyersGuidePage"]
subpage_types = [
"wagtailpages.BuyersGuideArticlePage",
"wagtailpages.BuyersGuideCampaignPage",
]
template = "pages/buyersguide/editorial_content_index_page.html"
content_panels = wagtail_models.Page.content_panels + [
InlinePanel(
"related_article_relations",
heading="Popular articles",
label="Article",
max_num=3,
),
]
items_per_page: int = 10
translatable_fields = [
# Content tab fields
TranslatableField("title"),
SynchronizedField("related_article_relations"),
# Promote tab fields
SynchronizedField("slug"),
TranslatableField("seo_title"),
SynchronizedField("show_in_menus"),
TranslatableField("search_description"),
SynchronizedField("search_image"),
]
def METHOD_NAME(self, request: "http.HttpRequest", *args, **kwargs) -> "http.HttpResponse":
if request.htmx:
# This is an HTMX request and we are only interested in the items list.
items = self.get_items()
paginated_items = self.paginate_items(
items=items,
page=request.GET.get("page"),
)
return self.render_items(request=request, items=paginated_items)
return super().METHOD_NAME(request, *args, **kwargs)
def render_items(
self,
request: "http.HttpRequest",
items: "models.QuerySet[pagemodels.BuyersGuideArticlePage]",
) -> "http.HttpResponse":
"""
Method to return only the content index items.
This method does not return a full page, but only an HTML fragment of list
items that is meant to be requested with AJAX and used to extend an existing
list of items.
Because this method is only meant for AJAX requests, we can also assume that JS works
and thus show the 'load more' button immediately.
"""
return shortcuts.render(
request=request,
template_name="fragments/buyersguide/editorial_content_index_items.html",
context={
"index_page": self,
"items": items,
"show_load_more_button_immediately": True,
},
)
def get_context(self, request, *args, **kwargs):
context = super().get_context(request, *args, **kwargs)
context["home_page"] = self.get_parent().specific
context["featured_cta"] = get_buyersguide_featured_cta(self)
language_code = get_language_from_request(request)
context["categories"] = get_categories_for_locale(language_code)
items = self.get_items()
context["items"] = self.paginate_items(
items=items,
page=request.GET.get("page"),
expanded=request.GET.get("expanded", "false") == "true",
)
return context
def paginate_items(
self,
items: "models.QuerySet[pagemodels.BuyersGuideArticlePage]",
page: "Optional[str]" = None,
expanded: bool = False,
) -> "paginator.Page[pagemodels.BuyersGuideArticlePage]":
"""
Paginate the given items.
Return only the requested page of items. The number of items per page is
defined by `self.items_per_page`.
The page can be expanded. This means the page will include the items from
all previous pages as well. It does not include items from following pages.
"""
items_paginator = paginator.Paginator(
object_list=items,
per_page=self.items_per_page,
)
page_of_items = items_paginator.get_page(page)
if not expanded:
return page_of_items
# Override the object_list on the page with the full object list, but trimmed to the last index
# that the page would display.
index_of_last_item_on_page = page_of_items.end_index()
page_of_items.object_list = items_paginator.object_list[:index_of_last_item_on_page]
# The page is expanded, so there should be no previous page. All items are already on the page.
page_of_items.has_previous = lambda: False
return page_of_items
def get_items(self) -> "models.QuerySet[pagemodels.BuyersGuideArticlePage]":
"""Get items to list in the index."""
return self.get_descendants().order_by("-first_published_at").public().live().specific()
def get_related_articles(self) -> list["pagemodels.BuyersGuideArticlePage"]:
return orderables.get_related_items(
self.related_article_relations.all(),
"article",
)
class BuyersGuideEditorialContentIndexPageArticlePageRelation(TranslatableMixin, Orderable):
page = ParentalKey(
"wagtailpages.BuyersGuideEditorialContentIndexPage",
related_name="related_article_relations",
)
article = models.ForeignKey(
"wagtailpages.BuyersGuideArticlePage",
on_delete=wagtail_models.models.CASCADE,
null=False,
blank=False,
)
panels = [FieldPanel("article")]
def __str__(self):
return f"{self.category.name} -> {self.article.title}"
class Meta(TranslatableMixin.Meta, Orderable.Meta):
pass
|
4,249 |
supports device
|
# Copyright (c) ONNX Project Contributors
#
# SPDX-License-Identifier: Apache-2.0
# pylint: disable=W0613
from collections import namedtuple
from typing import Any, Dict, NewType, Optional, Sequence, Tuple, Type
import numpy
import onnx.checker
import onnx.onnx_cpp2py_export.checker as c_checker
from onnx import IR_VERSION, ModelProto, NodeProto
class DeviceType:
"""
Describes device type.
"""
_Type = NewType("_Type", int)
CPU: _Type = _Type(0)
CUDA: _Type = _Type(1)
class Device:
"""
Describes device type and device id
syntax: device_type:device_id(optional)
example: 'CPU', 'CUDA', 'CUDA:1'
"""
def __init__(self, device: str) -> None:
options = device.split(":")
self.type = getattr(DeviceType, options[0])
self.device_id = 0
if len(options) > 1:
self.device_id = int(options[1])
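# Illustrative usage (added comment, not part of the original file):
#   dev = Device("CUDA:1")  # dev.type == DeviceType.CUDA, dev.device_id == 1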
def namedtupledict(
typename: str, field_names: Sequence[str], *args: Any, **kwargs: Any
) -> Type[Tuple[Any, ...]]:
field_names_map = {n: i for i, n in enumerate(field_names)}
# Some output names are invalid python identifier, e.g. "0"
kwargs.setdefault("rename", True)
data = namedtuple(typename, field_names, *args, **kwargs) # type: ignore
def getitem(self: Any, key: Any) -> Any:
if isinstance(key, str):
key = field_names_map[key]
return super(type(self), self).__getitem__(key) # type: ignore
data.__getitem__ = getitem # type: ignore[assignment]
return data
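# Sketch (added for illustration): invalid identifiers such as "0" are renamed by
# namedtuple, but the patched __getitem__ still allows lookup by the original name:
#   Outputs = namedtupledict("Outputs", ["0", "1"])
#   Outputs(3, 4)["0"]  # -> 3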
class BackendRep:
"""
BackendRep is the handle that a Backend returns after preparing to execute
a model repeatedly. Users will then pass inputs to the run function of
BackendRep to retrieve the corresponding results.
"""
def run(self, inputs: Any, **kwargs: Any) -> Tuple[Any, ...]:
"""Abstract function."""
return (None,)
class Backend:
"""
Backend is the entity that will take an ONNX model with inputs,
perform a computation, and then return the output.
For one-off execution, users can use run_node and run_model to obtain results quickly.
For repeated execution, users should use prepare, in which the Backend
does all of the preparation work for executing the model repeatedly
(e.g., loading initializers), and returns a BackendRep handle.
"""
@classmethod
def is_compatible(
cls, model: ModelProto, device: str = "CPU", **kwargs: Any
) -> bool:
# Return whether the model is compatible with the backend.
return True
@classmethod
def prepare(
cls, model: ModelProto, device: str = "CPU", **kwargs: Any
) -> Optional[BackendRep]:
# TODO Remove Optional from return type
onnx.checker.check_model(model)
return None
@classmethod
def run_model(
cls, model: ModelProto, inputs: Any, device: str = "CPU", **kwargs: Any
) -> Tuple[Any, ...]:
backend = cls.prepare(model, device, **kwargs)
assert backend is not None
return backend.run(inputs)
@classmethod
def run_node(
cls,
node: NodeProto,
inputs: Any,
device: str = "CPU",
outputs_info: Optional[Sequence[Tuple[numpy.dtype, Tuple[int, ...]]]] = None,
**kwargs: Dict[str, Any],
) -> Optional[Tuple[Any, ...]]:
"""Simple run one operator and return the results.
Args:
outputs_info: a list of tuples, which contains the element type and
shape of each output. First element of the tuple is the dtype, and
the second element is the shape. More use case can be found in
https://github.com/onnx/onnx/blob/main/onnx/backend/test/runner/__init__.py
"""
# TODO Remove Optional from return type
if "opset_version" in kwargs:
special_context = c_checker.CheckerContext()
special_context.ir_version = IR_VERSION
special_context.opset_imports = {"": kwargs["opset_version"]} # type: ignore
onnx.checker.check_node(node, special_context)
else:
onnx.checker.check_node(node)
return None
@classmethod
def METHOD_NAME(cls, device: str) -> bool:
"""
Checks whether the backend is compiled with particular device support.
In particular it's used in the testing suite.
"""
return True
|
4,250 |
freeze field value
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
"""The `BuildFileDefaultsParserState.set_defaults` is used by the pants.engine.internals.Parser,
exposed as the `__defaults__` BUILD file symbol.
When parsing a BUILD (from the rule `pants.engine.internals.build_files.parse_address_family`) the
defaults from the closest parent BUILD file are passed as input to the parser, and the new defaults
resulting after the BUILD file has been parsed are returned in the `AddressFamily`.
These defaults are then applied when creating the `TargetAdaptor` targets by the `Registrar` in the
parser.
"""
from __future__ import annotations
from dataclasses import dataclass
from typing import Any, Callable, Iterable, Mapping, Tuple, Union
from pants.engine.addresses import Address
from pants.engine.internals.parametrize import Parametrize
from pants.engine.target import (
Field,
ImmutableValue,
InvalidFieldException,
RegisteredTargetTypes,
Target,
TargetGenerator,
)
from pants.engine.unions import UnionMembership
from pants.util.frozendict import FrozenDict
SetDefaultsValueT = Mapping[str, Any]
SetDefaultsKeyT = Union[str, Tuple[str, ...]]
SetDefaultsT = Mapping[SetDefaultsKeyT, SetDefaultsValueT]
class BuildFileDefaults(FrozenDict[str, FrozenDict[str, ImmutableValue]]):
"""Map target types to default field values."""
class ParametrizeDefault(Parametrize):
"""Parametrize for default field values.
This is to have eager validation on the field values rather than erroring first when applied on
an actual target.
"""
@classmethod
def create(
cls, freeze: Callable[[Any], ImmutableValue], parametrize: Parametrize
) -> ParametrizeDefault:
return cls(
*map(freeze, parametrize.args),
**{kw: freeze(arg) for kw, arg in parametrize.kwargs.items()},
)
@dataclass
class BuildFileDefaultsParserState:
address: Address
defaults: dict[str, Mapping[str, Any]]
registered_target_types: RegisteredTargetTypes
union_membership: UnionMembership
@classmethod
def create(
cls,
path: str,
defaults: BuildFileDefaults,
registered_target_types: RegisteredTargetTypes,
union_membership: UnionMembership,
) -> BuildFileDefaultsParserState:
return cls(
address=Address(path, generated_name="__defaults__"),
defaults=dict(defaults),
registered_target_types=registered_target_types,
union_membership=union_membership,
)
def METHOD_NAME(self, field_type: type[Field], value: Any) -> ImmutableValue:
if isinstance(value, ParametrizeDefault):
return value
elif isinstance(value, Parametrize):
def freeze(v: Any) -> ImmutableValue:
return self.METHOD_NAME(field_type, v)
return ParametrizeDefault.create(freeze, value)
else:
return field_type.compute_value(raw_value=value, address=self.address)
def get_frozen_defaults(self) -> BuildFileDefaults:
types = self.registered_target_types.aliases_to_types
return BuildFileDefaults(
{
target_alias: FrozenDict(
{
field_type.alias: self.METHOD_NAME(field_type, default)
for field_alias, default in fields.items()
for field_type in self._target_type_field_types(types[target_alias])
if field_alias in (field_type.alias, field_type.deprecated_alias)
}
)
for target_alias, fields in self.defaults.items()
}
)
def get(self, target_alias: str) -> Mapping[str, Any]:
# Used by `pants.engine.internals.parser.Parser._generate_symbols.Registrar.__call__`
return self.defaults.get(target_alias, {})
def set_defaults(
self,
*args: SetDefaultsT,
all: SetDefaultsValueT | None = None,
extend: bool = False,
ignore_unknown_fields: bool = False,
ignore_unknown_targets: bool = False,
) -> None:
defaults: dict[str, dict[str, Any]] = (
{} if not extend else {k: dict(v) for k, v in self.defaults.items()}
)
if all is not None:
self._process_defaults(
defaults,
{tuple(self.registered_target_types.aliases): all},
ignore_unknown_fields=True,
ignore_unknown_targets=ignore_unknown_targets,
)
for arg in args:
self._process_defaults(
defaults,
arg,
ignore_unknown_fields=ignore_unknown_fields,
ignore_unknown_targets=ignore_unknown_targets,
)
# Update with new defaults, dropping targets without any default values.
for tgt, default in defaults.items():
if not default:
self.defaults.pop(tgt, None)
else:
self.defaults[tgt] = default
def _target_type_field_types(self, target_type: type[Target]) -> tuple[type[Field], ...]:
return (
*target_type.class_field_types(self.union_membership),
*(target_type.moved_fields if issubclass(target_type, TargetGenerator) else ()),
)
def _process_defaults(
self,
defaults: dict[str, dict[str, Any]],
targets_defaults: SetDefaultsT,
ignore_unknown_fields: bool = False,
ignore_unknown_targets: bool = False,
):
if not isinstance(targets_defaults, dict):
raise ValueError(
f"Expected dictionary mapping targets to default field values for {self.address} "
f"but got: {type(targets_defaults).__name__}."
)
types = self.registered_target_types.aliases_to_types
for target, default in targets_defaults.items():
if not isinstance(default, dict):
raise ValueError(
f"Invalid default field values in {self.address} for target type {target}, "
f"must be an `dict` but was {default!r} with type `{type(default).__name__}`."
)
targets: Iterable[str]
targets = target if isinstance(target, tuple) else (target,)
for target_alias in map(str, targets):
if target_alias in types:
target_type = types[target_alias]
elif ignore_unknown_targets:
continue
else:
raise ValueError(f"Unrecognized target type {target_alias} in {self.address}.")
# Copy default dict if we may mutate it.
raw_values = dict(default) if ignore_unknown_fields else default
# Validate that field exists on target
valid_field_aliases = set(
target_type._get_field_aliases_to_field_types(
self._target_type_field_types(target_type)
).keys()
)
for field_alias in default.keys():
if field_alias not in valid_field_aliases:
if ignore_unknown_fields:
del raw_values[field_alias]
else:
raise InvalidFieldException(
f"Unrecognized field `{field_alias}` for target {target_type.alias}. "
f"Valid fields are: {', '.join(sorted(valid_field_aliases))}.",
)
# Merge all provided defaults for this call.
defaults.setdefault(target_type.alias, {}).update(raw_values)
|
4,251 |
tgs to webm
|
# mautrix-telegram - A Matrix-Telegram puppeting bridge
# Telegram lottie sticker converter
# Copyright (C) 2019 Randall Eramde Lawrence
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from __future__ import annotations
from typing import Any, Awaitable, Callable
import asyncio.subprocess
import logging
import os
import os.path
import shutil
import tempfile
from attr import dataclass
from mautrix.util import ffmpeg
log: logging.Logger = logging.getLogger("mau.util.tgs")
@dataclass
class ConvertedSticker:
mime: str
data: bytes
thumbnail_mime: str | None = None
thumbnail_data: bytes | None = None
width: int = 0
height: int = 0
Converter = Callable[[bytes, int, int, Any], Awaitable[ConvertedSticker]]
converters: dict[str, Converter] = {}
def abswhich(program: str | None) -> str | None:
path = shutil.which(program)
return os.path.abspath(path) if path else None
lottieconverter = abswhich("lottieconverter")
async def _run_lottieconverter(args: tuple[str, ...], input_data: bytes) -> bytes:
proc = await asyncio.create_subprocess_exec(
lottieconverter,
*args,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
stdin=asyncio.subprocess.PIPE,
)
stdout, stderr = await proc.communicate(input_data)
if proc.returncode == 0:
return stdout
else:
err_text = stderr.decode("utf-8") if stderr else f"unknown ({proc.returncode})"
raise ffmpeg.ConverterError(f"lottieconverter error: {err_text}")
if lottieconverter:
async def tgs_to_png(file: bytes, width: int, height: int, **_: Any) -> ConvertedSticker:
frame = 1
try:
converted_png = await _run_lottieconverter(
args=("-", "-", "png", f"{width}x{height}", str(frame)),
input_data=file,
)
return ConvertedSticker("image/png", converted_png)
except ffmpeg.ConverterError as e:
log.error(str(e))
return ConvertedSticker("application/gzip", file)
async def tgs_to_gif(
file: bytes, width: int, height: int, fps: int = 25, **_: Any
) -> ConvertedSticker:
try:
converted_gif = await _run_lottieconverter(
args=("-", "-", "gif", f"{width}x{height}", str(fps)),
input_data=file,
)
return ConvertedSticker("image/gif", converted_gif)
except ffmpeg.ConverterError as e:
log.error(str(e))
return ConvertedSticker("application/gzip", file)
converters["png"] = tgs_to_png
converters["gif"] = tgs_to_gif
if lottieconverter and ffmpeg.ffmpeg_path:
async def METHOD_NAME(
file: bytes, width: int, height: int, fps: int = 30, **_: Any
) -> ConvertedSticker:
with tempfile.TemporaryDirectory(prefix="tgs_") as tmpdir:
file_template = tmpdir + "/out_"
try:
await _run_lottieconverter(
args=("-", file_template, "pngs", f"{width}x{height}", str(fps)),
input_data=file,
)
first_frame_name = min(os.listdir(tmpdir))
with open(f"{tmpdir}/{first_frame_name}", "rb") as first_frame_file:
first_frame_data = first_frame_file.read()
webm_data = await ffmpeg.convert_path(
input_args=("-framerate", str(fps), "-pattern_type", "glob"),
input_file=f"{file_template}*.png",
output_args=("-c:v", "libvpx-vp9", "-pix_fmt", "yuva420p", "-f", "webm"),
output_path_override="-",
output_extension=None,
)
return ConvertedSticker("video/webm", webm_data, "image/png", first_frame_data)
except ffmpeg.ConverterError as e:
log.error(str(e))
return ConvertedSticker("application/gzip", file)
async def tgs_to_webp(
file: bytes, width: int, height: int, fps: int = 30, **_: Any
) -> ConvertedSticker:
with tempfile.TemporaryDirectory(prefix="tgs_") as tmpdir:
file_template = tmpdir + "/out_"
try:
await _run_lottieconverter(
args=("-", file_template, "pngs", f"{width}x{height}", str(fps)),
input_data=file,
)
first_frame_name = min(os.listdir(tmpdir))
with open(f"{tmpdir}/{first_frame_name}", "rb") as first_frame_file:
first_frame_data = first_frame_file.read()
webp_data = await ffmpeg.convert_path(
input_args=("-framerate", str(fps), "-pattern_type", "glob"),
input_file=f"{file_template}*.png",
output_args=("-c:v", "libwebp_anim", "-pix_fmt", "yuva420p", "-f", "webp"),
output_path_override="-",
output_extension=None,
)
return ConvertedSticker("image/webp", webp_data, "image/png", first_frame_data)
except ffmpeg.ConverterError as e:
log.error(str(e))
return ConvertedSticker("application/gzip", file)
converters["webm"] = METHOD_NAME
converters["webp"] = tgs_to_webp
async def convert_tgs_to(
file: bytes, convert_to: str, width: int, height: int, **kwargs: Any
) -> ConvertedSticker:
if convert_to in converters:
converter = converters[convert_to]
converted = await converter(file, width, height, **kwargs)
converted.width = width
converted.height = height
return converted
elif convert_to != "disable":
log.warning(f"Unable to convert animated sticker, type {convert_to} not supported")
return ConvertedSticker("application/gzip", file)
|
4,252 |
tear down
|
import sys
from importlib import reload, import_module
from django.contrib.auth.models import User
from django.http import JsonResponse, HttpResponse
from django.urls import reverse, clear_url_caches
from django.test import override_settings
from django.conf import settings
from tethys_apps.base.testing.testing import TethysTestCase
class TethysPortalApiTests(TethysTestCase):
def reload_urlconf(self, urlconf=None):
clear_url_caches()
if urlconf is None:
urlconf = settings.ROOT_URLCONF
if urlconf in sys.modules:
reload(sys.modules[urlconf])
else:
import_module(urlconf)
def set_up(self):
self.user = User.objects.create_user(username="foo")
self.user.save()
pass
@override_settings(PREFIX_URL="/")
def METHOD_NAME(self):
self.user.delete()
self.reload_urlconf()
pass
def test_get_csrf_not_authenticated(self):
"""Test get_csrf API endpoint not authenticated."""
response = self.client.get(reverse("api:get_csrf"))
self.assertEqual(response.status_code, 401)
def test_get_csrf_authenticated(self):
"""Test get_csrf API endpoint authenticated."""
self.client.force_login(self.user)
response = self.client.get(reverse("api:get_csrf"))
self.assertEqual(response.status_code, 200)
self.assertIsInstance(response, HttpResponse)
self.assertIn("X-CSRFToken", response.headers)
def test_get_session_not_authenticated(self):
"""Test get_session API endpoint not authenticated."""
response = self.client.get(reverse("api:get_session"))
self.assertEqual(response.status_code, 401)
def test_get_session_authenticated(self):
"""Test get_session API endpoint authenticated."""
self.client.force_login(self.user)
response = self.client.get(reverse("api:get_session"))
self.assertEqual(response.status_code, 200)
self.assertIsInstance(response, JsonResponse)
# self.assertIn('Set-Cookie', response.headers)
json = response.json()
self.assertIn("isAuthenticated", json)
self.assertTrue(json["isAuthenticated"])
def test_get_whoami_not_authenticated(self):
"""Test get_whoami API endpoint not authenticated."""
response = self.client.get(reverse("api:get_whoami"))
self.assertEqual(response.status_code, 401)
def test_get_whoami_authenticated(self):
"""Test get_whoami API endpoint authenticated."""
self.client.force_login(self.user)
response = self.client.get(reverse("api:get_whoami"))
self.assertEqual(response.status_code, 200)
self.assertIsInstance(response, JsonResponse)
json = response.json()
self.assertIn("username", json)
self.assertIn("firstName", json)
self.assertIn("lastName", json)
self.assertIn("email", json)
self.assertIn("isAuthenticated", json)
self.assertIn("isStaff", json)
self.assertEqual("foo", json["username"])
self.assertTrue(json["isAuthenticated"])
@override_settings(STATIC_URL="/static")
@override_settings(PREFIX_URL="/")
@override_settings(LOGIN_URL="/accounts/login/")
def test_get_app_valid_id(self):
"""Test get_app API endpoint with valid app id."""
self.reload_urlconf()
response = self.client.get(reverse("api:get_app", kwargs={"app": "test-app"}))
self.assertEqual(response.status_code, 200)
self.assertIsInstance(response, JsonResponse)
json = response.json()
self.assertIn("title", json)
self.assertIn("description", json)
self.assertIn("tags", json)
self.assertIn("package", json)
self.assertIn("urlNamespace", json)
self.assertIn("color", json)
self.assertIn("icon", json)
self.assertIn("exitUrl", json)
self.assertIn("rootUrl", json)
self.assertIn("settingsUrl", json)
self.assertEqual("Test App", json["title"])
self.assertEqual(
"Place a brief description of your app here.", json["description"]
)
self.assertEqual("", json["tags"])
self.assertEqual("test_app", json["package"])
self.assertEqual("test_app", json["urlNamespace"])
self.assertEqual("#2c3e50", json["color"])
self.assertEqual("/static/test_app/images/icon.gif", json["icon"])
self.assertEqual("/apps/", json["exitUrl"])
self.assertEqual("/apps/test-app/", json["rootUrl"])
self.assertRegex(
json["settingsUrl"],
r"^/admin/tethys_apps/tethysapp/[0-9]+/change/$",
)
@override_settings(PREFIX_URL="test/prefix")
@override_settings(LOGIN_URL="/test/prefix/test/login/")
@override_settings(STATIC_URL="/test/prefix/test/static/")
def test_get_app_valid_id_with_prefix(self):
"""Test get_app API endpoint with valid app id when a URL prefix is configured."""
self.reload_urlconf()
response = self.client.get(reverse("api:get_app", kwargs={"app": "test-app"}))
self.assertEqual(response.status_code, 200)
self.assertIsInstance(response, JsonResponse)
json = response.json()
self.assertIn("title", json)
self.assertIn("description", json)
self.assertIn("tags", json)
self.assertIn("package", json)
self.assertIn("urlNamespace", json)
self.assertIn("color", json)
self.assertIn("icon", json)
self.assertIn("exitUrl", json)
self.assertIn("rootUrl", json)
self.assertIn("settingsUrl", json)
self.assertEqual("Test App", json["title"])
self.assertEqual(
"Place a brief description of your app here.", json["description"]
)
self.assertEqual("", json["tags"])
self.assertEqual("test_app", json["package"])
self.assertEqual("test_app", json["urlNamespace"])
self.assertEqual("#2c3e50", json["color"])
self.assertEqual(
"/test/prefix/test/static/test_app/images/icon.gif", json["icon"]
)
self.assertEqual("/test/prefix/apps/", json["exitUrl"])
self.assertEqual("/test/prefix/apps/test-app/", json["rootUrl"])
self.assertRegex(
json["settingsUrl"],
r"^/test/prefix/admin/tethys_apps/tethysapp/[0-9]+/change/$",
)
def test_get_app_invalid_id(self):
"""Test get_app API endpoint with invalid app id."""
response = self.client.get(reverse("api:get_app", kwargs={"app": "foo-bar"}))
self.assertEqual(response.status_code, 200)
self.assertIsInstance(response, JsonResponse)
json = response.json()
self.assertIn("error", json)
self.assertEqual('Could not find app "foo-bar".', json["error"])
|
4,253 |
to str
|
# coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from __future__ import absolute_import, unicode_literals
from pprint import pformat
from six import iteritems
class SeriesImagesQueryParam(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
SeriesImagesQueryParam - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'key_type': 'text_type',
'language_id': 'text_type',
'resolution': 'list[text_type]',
'sub_key': 'list[text_type]'
}
self.attribute_map = {
'key_type': 'keyType',
'language_id': 'languageId',
'resolution': 'resolution',
'sub_key': 'subKey'
}
self._key_type = None
self._language_id = None
self._resolution = None
self._sub_key = None
@property
def key_type(self):
"""
Gets the key_type of this SeriesImagesQueryParam.
:return: The key_type of this SeriesImagesQueryParam.
:rtype: text_type
"""
return self._key_type
@key_type.setter
def key_type(self, key_type):
"""
Sets the key_type of this SeriesImagesQueryParam.
:param key_type: The key_type of this SeriesImagesQueryParam.
:type: text_type
"""
self._key_type = key_type
@property
def language_id(self):
"""
Gets the language_id of this SeriesImagesQueryParam.
:return: The language_id of this SeriesImagesQueryParam.
:rtype: text_type
"""
return self._language_id
@language_id.setter
def language_id(self, language_id):
"""
Sets the language_id of this SeriesImagesQueryParam.
:param language_id: The language_id of this SeriesImagesQueryParam.
:type: text_type
"""
self._language_id = language_id
@property
def resolution(self):
"""
Gets the resolution of this SeriesImagesQueryParam.
:return: The resolution of this SeriesImagesQueryParam.
:rtype: list[text_type]
"""
return self._resolution
@resolution.setter
def resolution(self, resolution):
"""
Sets the resolution of this SeriesImagesQueryParam.
:param resolution: The resolution of this SeriesImagesQueryParam.
:type: list[text_type]
"""
self._resolution = resolution
@property
def sub_key(self):
"""
Gets the sub_key of this SeriesImagesQueryParam.
:return: The sub_key of this SeriesImagesQueryParam.
:rtype: list[text_type]
"""
return self._sub_key
@sub_key.setter
def sub_key(self, sub_key):
"""
Sets the sub_key of this SeriesImagesQueryParam.
:param sub_key: The sub_key of this SeriesImagesQueryParam.
:type: list[text_type]
"""
self._sub_key = sub_key
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
else:
result[attr] = value
return result
def METHOD_NAME(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.METHOD_NAME()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
4,254 |
prepare
|
#
# Copyright (C) 2012 Uninett AS
#
# This file is part of Network Administration Visualized (NAV).
#
# NAV is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 3 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details. You should have received a copy of the GNU General Public
# License along with NAV. If not, see <http://www.gnu.org/licenses/>.
#
"""cam record storage and handling.
Updating NAV's cam table goes roughly like this:
* Load all open CAM records for the current netbox.
- In the database, an open cam record is considered one whose
end_time='infinity' or whose miss_count is NON-NULL.
* An ifindex+mac combination the collector found, which wasn't already in the
list of open records is added as a new record, with end_time='infinity' and
miss_count=0.
* An ifindex+mac combination the collector found which is also among the open
records is left untouched, unless its miss_count <> 0; in such a case the
miss_count is reset to 0.
* If an ifindex+mac combination from the open records list is not found by the
collector, we ensure its end_time is set to anything but infinity, and its
miss_count is incremented by one.
- If a record's miss_count becomes greater or equal to MAX_MISS_COUNT, the
miss_count is set to a NULL value.
The point of miss_count in this algorithm is that closed cam records have a
grace period of MAX_MISS_COUNT collector runs. If a "closed" cam record is
found again within MAX_MISS_COUNT collector runs, the existing record can be
reclaimed by resetting end_time to infinity.
"""
import datetime
import logging
from collections import namedtuple
from django.db.models import Q
from django.db import transaction
from nav.models import manage
from nav.models.fields import INFINITY
from nav.ipdevpoll.storage import DefaultManager
from .netbox import Netbox
from .interface import Interface
MAX_MISS_COUNT = 3
Cam = namedtuple('Cam', 'ifindex mac')
Cam.sentinel = Cam(None, None)
CamDetails = namedtuple('CamDetails', 'id end_time miss_count')
class CamManager(DefaultManager):
"""Manages Cam records"""
_previously_open = None
_now_open = None
_keepers = None
_missing = None
_new = None
_ifnames = None
def __init__(self, *args, **kwargs):
super(CamManager, self).__init__(*args, **kwargs)
self.netbox = self.containers.get(None, Netbox)
def METHOD_NAME(self):
self._remove_sentinel()
self._load_open_records()
self._map_found_to_open()
self._log_stats()
def _remove_sentinel(self):
if Cam.sentinel in self.containers[Cam]:
del self.containers[Cam][Cam.sentinel]
def _load_open_records(self):
match_open = Q(end_time__gte=INFINITY) | Q(miss_count__gte=0)
camlist = manage.Cam.objects.filter(netbox__id=self.netbox.id)
camlist = camlist.filter(match_open).values_list(
'ifindex', 'mac', 'id', 'end_time', 'miss_count'
)
self._previously_open = dict(
(Cam(*cam[0:2]), CamDetails(*cam[2:])) for cam in camlist
)
def _map_found_to_open(self):
self._now_open = set(self.get_managed())
self._new = self._now_open.difference(self._previously_open)
missing = set(self._previously_open).difference(self._now_open)
self._missing = set(self._previously_open[key] for key in missing)
self._keepers = self._now_open.intersection(self._previously_open)
def _log_stats(self):
if not self._logger.isEnabledFor(logging.DEBUG):
return
reclaimable_count = sum(
1 for cam in self._previously_open.values() if cam.end_time < INFINITY
)
self._logger.debug(
"existing=%d (reclaimable=%d) / " "found=%d (known=%d new=%d missing=%d)",
len(self._previously_open),
reclaimable_count,
len(self._now_open),
len(self._keepers),
len(self._new),
len(self._missing),
)
@transaction.atomic()
def save(self):
# Reuse the same object over and over in an attempt to avoid the
# overhead of Python object creation
record = manage.Cam(
netbox_id=self.netbox.id,
sysname=self.netbox.sysname,
start_time=datetime.datetime.now(),
end_time=INFINITY,
)
for cam in self._new:
record.id = None
record.port = self._get_port_for(cam.ifindex)
record.ifindex = cam.ifindex
record.mac = cam.mac
record.save()
# reclaim recently closed records
keepers = (self._previously_open[cam] for cam in self._keepers)
reclaim = [cam.id for cam in keepers if cam.end_time < INFINITY]
if reclaim:
self._logger.debug("reclaiming %r", reclaim)
manage.Cam.objects.filter(id__in=reclaim).update(
end_time=INFINITY, miss_count=0
)
def _get_port_for(self, ifindex):
"""Gets a port name from an ifindex, either from newly collected or
previously saved data.
"""
port = self.containers.get(ifindex, Interface)
if port and port.ifname:
return port.ifname
else:
return self._get_saved_ifname_for(ifindex)
def _get_saved_ifname_for(self, ifindex):
if not self._ifnames:
ifcs = manage.Interface.objects.filter(
netbox__id=self.netbox.id, ifindex__isnull=False
).values('ifindex', 'ifname', 'ifdescr')
self._ifnames = dict(
(row['ifindex'], row['ifname'] or row['ifdescr']) for row in ifcs
)
return self._ifnames.get(ifindex, '')
def cleanup(self):
for cam_detail in self._missing:
self._close_missing(cam_detail)
@classmethod
def _close_missing(cls, cam_detail):
upd = {}
cls._logger.debug("closing %r", cam_detail)
if cam_detail.end_time >= INFINITY:
upd['end_time'] = datetime.datetime.now()
if cam_detail.miss_count >= 0:
miss_count = cam_detail.miss_count + 1
upd['miss_count'] = miss_count if miss_count < MAX_MISS_COUNT else None
if upd:
manage.Cam.objects.filter(id=cam_detail.id).update(**upd)
@classmethod
def add_sentinel(cls, containers):
"""Adds a Cam cleanup sentinel to a ContainerRepository, signifying
that a full CAM collection has taken place and that old CAM records
can be safely expired.
"""
containers.setdefault(Cam, {})[Cam.sentinel] = Cam.sentinel
Cam.manager = CamManager
CamManager.sentinel = Cam.sentinel
|
4,255 |
get seconds
|
# SPDX-License-Identifier: Apache-2.0
# Copyright Contributors to the Rez Project
from Qt import QtCore, QtWidgets, QtGui
from rezgui.util import update_font, create_pane
from rez.utils.formatting import readable_time_duration
import math
class Canvas(QtWidgets.QWidget):
secondsHover = QtCore.Signal(int)
secondsClicked = QtCore.Signal(int)
def __init__(self, width, height, parent=None):
super(Canvas, self).__init__(parent)
self.setCursor(QtCore.Qt.CrossCursor)
self.setMouseTracking(True)
self._width = width
self._height = height
def paintEvent(self, event):
rect = self.rect()
w = rect.width()
h = rect.height()
margin = 5
j = h // 4
p = QtGui.QPainter(self)
update_font(p, italic=True)
pal = QtGui.QPalette()
bg_brush = pal.brush(QtGui.QPalette.Active, QtGui.QPalette.Light)
p.fillRect(rect, bg_brush)
p.setPen(QtCore.Qt.DotLine)
p.drawLine(0, j, w, j)
p.drawLine(0, j * 2, w, j * 2)
p.drawLine(0, j * 3, w, j * 3)
p.setPen(pal.color(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText))
p.drawText(margin, j - margin, "days")
p.drawText(margin, j * 2 - margin, "hours")
p.drawText(margin, j * 3 - margin, "minutes")
p.drawText(margin, j * 4 - margin, "seconds")
def leaveEvent(self, event):
self.secondsHover.emit(-1)
def mousePressEvent(self, event):
secs = self.METHOD_NAME(event.pos())
self.secondsClicked.emit(secs)
def mouseMoveEvent(self, event):
secs = self.METHOD_NAME(event.pos())
self.secondsHover.emit(secs)
def sizeHint(self):
return QtCore.QSize(self._width, self._height)
def METHOD_NAME(self, pos):
rect = self.rect()
x_norm = pos.x() / float(rect.width())
y_norm = min(1.0 - (pos.y() / float(rect.height())), 0.99)
unit = int(y_norm / 0.25)
y_norm -= unit * 0.25
y_norm *= 4.0
x_norm = max(min(x_norm, 0.99), 0.0)
y_norm = max(min(y_norm, 0.99), 0.0)
j = 2.5 * (1.0 - y_norm)
x_pow = 0.5 + (j * j / 2.5)
f = math.pow(x_norm, x_pow)
if unit == 0: # seconds
j = int(1.0 + f * 59)
secs = min(j, 59)
elif unit == 1: # minutes
j = int((1.0 + f * 60) * 60)
secs = min(j, 3600)
elif unit == 2: # hours
j = int((1.0 + f * 24) * 3600)
secs = min(j, 3600 * 24)
else: # days
j = int((1.0 + f * 7) * 3600 * 24)
secs = min(j, 3600 * 24 * 7)
return secs
class TimeSelecterPopup(QtWidgets.QFrame):
secondsClicked = QtCore.Signal(int)
def __init__(self, pivot_widget, width=240, height=160, parent=None):
super(TimeSelecterPopup, self).__init__(parent)
self.setFrameStyle(QtWidgets.QFrame.Panel | QtWidgets.QFrame.Raised)
self.setWindowFlags(QtCore.Qt.Popup)
self.seconds = None
self.label = QtWidgets.QLabel("")
canvas_frame = QtWidgets.QFrame()
canvas_frame.setFrameStyle(QtWidgets.QFrame.Panel | QtWidgets.QFrame.Sunken)
canvas = Canvas(width, height)
layout = QtWidgets.QVBoxLayout()
layout.setSpacing(2)
layout.setContentsMargins(2, 2, 2, 2)
layout.addWidget(canvas)
canvas_frame.setLayout(layout)
create_pane([self.label, canvas_frame], False, compact=True,
parent_widget=self)
self.adjustSize()
pt = pivot_widget.rect().topLeft()
global_pt = pivot_widget.mapToGlobal(pt)
self.move(global_pt - QtCore.QPoint(0, self.height()))
canvas.secondsHover.connect(self._secondsHover)
canvas.secondsClicked.connect(self._secondsClicked)
def _secondsHover(self, seconds):
if seconds == -1:
self.label.setText("")
else:
secs_txt = readable_time_duration(seconds)
self.label.setText("%s ago" % secs_txt)
def _secondsClicked(self, seconds):
self.secondsClicked.emit(seconds)
self.close()
|
4,256 |
test
|
from typing import Any, Text, Dict, Union, List, Optional, TYPE_CHECKING
import rasa.shared.constants
# WARNING: Be careful about adding any top level imports at this place!
# These functions are imported in `rasa.__init__` and any top level import
# added here will get executed as soon as someone runs `import rasa`.
# Some imports are very slow (e.g. `tensorflow`) and we don't want them to get
# imported when running `import rasa`. If you add more imports here,
# please check that in the chain you are importing, no slow packages
# are getting imported.
if TYPE_CHECKING:
from rasa.model_training import TrainingResult
def run(
model: "Text",
endpoints: "Text",
connector: "Text" = None,
credentials: "Text" = None,
**kwargs: "Dict[Text, Any]",
) -> None:
"""Runs a Rasa model.
Args:
model: Path to model archive.
endpoints: Path to endpoints file.
connector: Connector which should be use (overwrites `credentials`
field).
credentials: Path to channel credentials file.
**kwargs: Additional arguments which are passed to
`rasa.core.run.serve_application`.
"""
import rasa.core.run
from rasa.core.utils import AvailableEndpoints
from rasa.shared.utils.cli import print_warning
import rasa.shared.utils.common
from rasa.shared.constants import DOCS_BASE_URL
_endpoints = AvailableEndpoints.read_endpoints(endpoints)
if not connector and not credentials:
connector = "rest"
print_warning(
f"No chat connector configured, falling back to the "
f"REST input channel. To connect your bot to another channel, "
f"read the docs here: {DOCS_BASE_URL}/messaging-and-voice-channels"
)
kwargs = rasa.shared.utils.common.minimal_kwargs(
kwargs, rasa.core.run.serve_application
)
rasa.core.run.serve_application(
model,
channel=connector,
credentials=credentials,
endpoints=_endpoints,
**kwargs,
)
def train(
domain: "Text",
config: "Text",
training_files: "Union[Text, List[Text]]",
output: "Text" = rasa.shared.constants.DEFAULT_MODELS_PATH,
dry_run: bool = False,
force_training: bool = False,
fixed_model_name: "Optional[Text]" = None,
persist_nlu_training_data: bool = False,
core_additional_arguments: "Optional[Dict]" = None,
nlu_additional_arguments: "Optional[Dict]" = None,
model_to_finetune: "Optional[Text]" = None,
finetuning_epoch_fraction: float = 1.0,
) -> "TrainingResult":
"""Runs Rasa Core and NLU training in `async` loop.
Args:
domain: Path to the domain file.
config: Path to the config for Core and NLU.
training_files: Paths to the training data for Core and NLU.
output: Output path.
dry_run: If `True` then no training will be done, and the information about
whether the training needs to be done will be printed.
force_training: If `True` retrain model even if data has not changed.
fixed_model_name: Name of model to be stored.
persist_nlu_training_data: `True` if the NLU training data should be persisted
with the model.
core_additional_arguments: Additional training parameters for core training.
nlu_additional_arguments: Additional training parameters forwarded to training
method of each NLU component.
model_to_finetune: Optional path to a model which should be finetuned or
a directory in case the latest trained model should be used.
finetuning_epoch_fraction: The fraction currently specified training epochs
in the model configuration which should be used for finetuning.
Returns:
An instance of `TrainingResult`.
"""
from rasa.model_training import train
return train(
domain=domain,
config=config,
training_files=training_files,
output=output,
dry_run=dry_run,
force_training=force_training,
fixed_model_name=fixed_model_name,
persist_nlu_training_data=persist_nlu_training_data,
core_additional_arguments=core_additional_arguments,
nlu_additional_arguments=nlu_additional_arguments,
model_to_finetune=model_to_finetune,
finetuning_epoch_fraction=finetuning_epoch_fraction,
)
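# Illustrative programmatic usage (assumes a standard Rasa project layout; added
# comment, not part of the original module):
#   import rasa
#   result = rasa.train(domain="domain.yml", config="config.yml", training_files="data/")
#   rasa.run(model=result.model, endpoints="endpoints.yml")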
def METHOD_NAME(
model: "Text",
stories: "Text",
nlu_data: "Text",
output: "Text" = rasa.shared.constants.DEFAULT_RESULTS_PATH,
additional_arguments: "Optional[Dict]" = None,
) -> None:
"""Test a Rasa model against a set of test data.
Args:
model: model to test
stories: path to the dialogue test data
nlu_data: path to the NLU test data
output: path to folder where all output will be stored
additional_arguments: additional arguments for the test call
"""
from rasa.model_testing import test_core
from rasa.model_testing import test_nlu
if additional_arguments is None:
additional_arguments = {}
test_core(model, stories, output, additional_arguments) # type: ignore[unused-coroutine] # noqa: E501
test_nlu(model, nlu_data, output, additional_arguments) # type: ignore[unused-coroutine] # noqa: E501
|
4,257 |
test contains conversions
|
#!/usr/bin/python3
import pytest
from brownie.convert.datatypes import EthAddress, HexString, ReturnValue, Wei
from brownie.project import compile_source
string_fixture = "bar baz"
@pytest.fixture
def return_value(accounts, tester):
yield tester.manyValues(
88, [False, False, False], accounts[2], [("0x1234", "0x6666")], string_fixture
)
def test_type(return_value):
assert isinstance(return_value, ReturnValue)
assert isinstance(return_value["_addr"], EthAddress)
assert isinstance(return_value["_bool"], ReturnValue)
assert isinstance(return_value["_bool"][0], bool)
assert isinstance(return_value["_num"], Wei)
assert isinstance(return_value["_bytes"], ReturnValue)
assert isinstance(return_value["_bytes"][0][0], HexString)
def test_len(return_value):
assert len(return_value) == 5
def test_count(return_value):
assert return_value.count(2) == 0
assert return_value.count([("0x1234", "0x6666")]) == 1
def test_index(return_value):
assert return_value.index([("0x1234", "0x6666")]) == 3
assert return_value.index([("0x1234", "0x6666")], 1, 4) == 3
with pytest.raises(ValueError):
return_value.index([("0x1234", "0x6666")], stop=2)
with pytest.raises(ValueError):
return_value.index("foo")
def METHOD_NAME(accounts, return_value):
assert 88 in return_value
assert "88 wei" in return_value
assert False in return_value[1]
assert True not in return_value[1]
assert 0 not in return_value[1]
assert accounts[2] in return_value
assert str(accounts[2]) in return_value
assert accounts[1] not in return_value
assert "0x1234" in return_value[3][0]
assert "0x00001234" in return_value[3][0]
def test_eq_conversions(accounts, return_value):
data = [88, [False, False, False], accounts[2], [("0x1234", "0x6666")], string_fixture]
assert return_value == data
assert return_value == tuple(data)
data[1] = tuple(data[1])
data[3] = tuple(data[3])
assert return_value == tuple(data)
def test_ne_conversions(accounts, return_value):
data = [88, [False, False, False], accounts[2], [("0x1234", "0x6666")], string_fixture]
assert not return_value != data
assert not return_value != tuple(data)
data[1] = tuple(data[1])
data[3] = tuple(data[3])
assert not return_value != tuple(data)
def test_dict(accounts, return_value):
d = return_value.dict()
assert isinstance(d, dict)
assert len(d) == 5
assert len(d["_bool"]) == 3
assert sorted(d) == ["_addr", "_bool", "_bytes", "_num", "_string"]
assert d["_addr"] == accounts[2]
def test_keys(return_value):
assert list(return_value.keys()) == ["_num", "_bool", "_addr", "_bytes", "_string"]
def test_items(return_value):
assert return_value.items() == return_value.dict().items()
def test_getitem(accounts, return_value):
assert return_value[2] == return_value["_addr"] == accounts[2]
assert return_value[0] == return_value["_num"] == 88
def test_getitem_slice(accounts, return_value):
s = return_value[1:3]
assert s == [[False, False, False], accounts[2]]
assert isinstance(s, ReturnValue)
assert s[0] == s["_bool"]
assert "_num" not in s
def test_ethaddress_typeerror():
e = EthAddress("0x0063046686E46Dc6F15918b61AE2B121458534a5")
with pytest.raises(TypeError):
e == "potato"
with pytest.raises(TypeError):
e == "0x00"
assert str(e) != "potato"
def test_hexstring_typeerror():
b = HexString("0x1234", "bytes32")
with pytest.raises(TypeError):
b == "potato"
with pytest.raises(TypeError):
b == "1234"
assert str(b) != "potato"
def test_hexstring_length():
b = HexString("0x1234", "bytes32")
assert b == "0x1234"
assert b == "0x000000000000001234"
def test_hashable():
assert hash(ReturnValue([1, 2])) == hash(tuple([1, 2]))
assert set(ReturnValue([3, 1, 3, 3, 7])) == set([3, 1, 3, 3, 7])
def test_decimals(vypertester):
ret = vypertester.fixedType("1.234", ["-42", "3.1337"])
assert ret == ["1.234", "-42", "3.1337"]
def test_dynamic_tuple_array(accounts):
code = """
pragma solidity ^0.6.0;
pragma experimental ABIEncoderV2;
contract Test {
struct Foo { uint256 a; }
Foo[] bar;
function foo() public returns (Foo[] memory a) {
bar.push(Foo(1));
bar.push(Foo(6));
return bar;
}
}
"""
contract = compile_source(code).Test.deploy({"from": accounts[0]})
assert contract.foo.call() == [(1,), (6,)]
def test_fixed_tuple_array(accounts):
code = """
pragma solidity ^0.6.0;
pragma experimental ABIEncoderV2;
contract Test {
struct Foo { uint256 a; string b; }
Foo[2][2] bar;
function foo() public returns (Foo[2][2] memory, Foo[2] memory) {
bar[0][0].a = 42;
bar[0][0].b = "hello";
bar[1][1].a = 69;
return (bar, bar[1]);
}
}
"""
contract = compile_source(code).Test.deploy({"from": accounts[0]})
assert contract.foo.call() == [
([(42, "hello"), (0, "")], [(0, ""), (69, "")]),
[(0, ""), (69, "")],
]
|
4,258 |
authenticate
|
import datetime
from org.gluu.service.cdi.util import CdiUtil
from org.gluu.oxauth.security import Identity
from org.gluu.model.custom.script.type.auth import PersonAuthenticationType
from org.gluu.oxauth.service import UserService, AuthenticationService
from org.gluu.util import StringHelper, ArrayHelper
from com.unboundid.util import StaticUtils
from java.util import GregorianCalendar, TimeZone
from java.util import Arrays
# This script expects that the user has the attribute oxPasswordExpirationDate with a valid expiration date
class PersonAuthentication(PersonAuthenticationType):
def __init__(self, currentTimeMillis):
self.currentTimeMillis = currentTimeMillis
def init(self, customScript, configurationAttributes):
print "Basic (with password update). Initialization"
print "Basic (with password update). Initialized successfully"
return True
def destroy(self, configurationAttributes):
print "Basic (with password update). Destroy"
print "Basic (with password update). Destroyed successfully"
return True
def getApiVersion(self):
return 11
def getAuthenticationMethodClaims(self, requestParameters):
return None
def isValidAuthenticationMethod(self, usageType, configurationAttributes):
return True
def getAlternativeAuthenticationMethod(self, usageType, configurationAttributes):
return None
def METHOD_NAME(self, configurationAttributes, requestParameters, step):
authenticationService = CdiUtil.bean(AuthenticationService)
userService = CdiUtil.bean(UserService)
identity = CdiUtil.bean(Identity)
credentials = identity.getCredentials()
if step == 1:
print "Basic (with password update). Authenticate for step 1"
user_name = credentials.getUsername()
user_password = credentials.getPassword()
logged_in = False
if StringHelper.isNotEmptyString(user_name) and StringHelper.isNotEmptyString(user_password):
logged_in = authenticationService.METHOD_NAME(user_name, user_password)
if not logged_in:
return False
find_user_by_uid = authenticationService.getAuthenticatedUser()
user_expDate = find_user_by_uid.getAttribute("oxPasswordExpirationDate", False)
if user_expDate == None:
print "Basic (with password update). Authenticate for step 1. User has no oxPasswordExpirationDate date"
return False
dt = StaticUtils.decodeGeneralizedTime(user_expDate)
# Get Current Date
calendar = GregorianCalendar(TimeZone.getTimeZone("UTC"))
now = calendar.getTime()
if now.compareTo(dt) > 0:
# Add 90 days to the current date to compute the new expiration date
calendar.setTime(now)
calendar.add(calendar.DATE, 90)
dt_plus_90 = calendar.getTime()
expDate = StaticUtils.encodeGeneralizedTime(dt_plus_90)
identity.setWorkingParameter("expDate", expDate)
return True
elif step == 2:
print "Basic (with password update). Authenticate for step 2"
user = authenticationService.getAuthenticatedUser()
if user == None:
print "Basic (with password update). Authenticate for step 2. Failed to determine user name"
return False
user_name = user.getUserId()
find_user_by_uid = userService.getUser(user_name)
newExpDate = identity.getWorkingParameter("expDate")
if find_user_by_uid == None:
print "Basic (with password update). Authenticate for step 2. Failed to find user"
return False
print "Basic (with password update). Authenticate for step 2"
update_button = requestParameters.get("loginForm:updateButton")
if ArrayHelper.isEmpty(update_button):
return True
find_user_by_uid.setAttribute("oxPasswordExpirationDate", newExpDate)
new_password_array = requestParameters.get("loginForm:password")
if ArrayHelper.isEmpty(new_password_array) or StringHelper.isEmpty(new_password_array[0]):
print "Basic (with password update). Authenticate for step 2. New password is empty"
return False
new_password = new_password_array[0]
find_user_by_uid.setAttribute("userPassword", new_password)
print "Basic (with password update). Authenticate for step 2. Attempting to set new user '%s' password" % user_name
userService.updateUser(find_user_by_uid)
print "Basic (with password update). Authenticate for step 2. Password updated successfully"
return True
else:
return False
def prepareForStep(self, configurationAttributes, requestParameters, step):
if step == 1:
print "Basic (with password update). Prepare for Step 1"
return True
elif step == 2:
print "Basic (with password update). Prepare for Step 2"
return True
else:
return False
def getExtraParametersForStep(self, configurationAttributes, step):
return Arrays.asList("expDate")
def getCountAuthenticationSteps(self, configurationAttributes):
identity = CdiUtil.bean(Identity)
if identity.isSetWorkingParameter("expDate"):
return 2
else:
return 1
def getPageForStep(self, configurationAttributes, step):
if step == 2:
return "/auth/pwd/newpassword.xhtml"
return ""
def getNextStep(self, configurationAttributes, requestParameters, step):
return -1
def getLogoutExternalUrl(self, configurationAttributes, requestParameters):
print "Get external logout URL call"
return None
def logout(self, configurationAttributes, requestParameters):
return True
|
4,259 |
url
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"network public-ip prefix wait",
)
class Wait(AAZWaitCommand):
"""Place the CLI in a waiting state until a condition is met.
"""
_aaz_info = {
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.network/publicipprefixes/{}", "2018-11-01"],
]
}
def _handler(self, command_args):
super()._handler(command_args)
self._execute_operations()
return self._output()
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.name = AAZStrArg(
options=["-n", "--name"],
help="The name of the public IP prefix.",
required=True,
id_part="name",
)
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
_args_schema.expand = AAZStrArg(
options=["--expand"],
help="Expands referenced resources.",
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
self.PublicIPPrefixesGet(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
def _output(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance, client_flatten=False)
return result
class PublicIPPrefixesGet(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [200]:
return self.on_200(session)
return self.on_error(session.http_response)
@property
def METHOD_NAME(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}",
**self.url_parameters
)
@property
def method(self):
return "GET"
@property
def error_format(self):
return "ODataV4Format"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"publicIpPrefixName", self.ctx.args.name,
required=True,
),
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"$expand", self.ctx.args.expand,
),
**self.serialize_query_param(
"api-version", "2018-11-01",
required=True,
),
}
return parameters
@property
def header_parameters(self):
parameters = {
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
def on_200(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
)
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZObjectType()
_schema_on_200 = cls._schema_on_200
_schema_on_200.etag = AAZStrType()
_schema_on_200.id = AAZStrType()
_schema_on_200.location = AAZStrType()
_schema_on_200.name = AAZStrType(
flags={"read_only": True},
)
_schema_on_200.properties = AAZObjectType(
flags={"client_flatten": True},
)
_schema_on_200.sku = AAZObjectType()
_schema_on_200.tags = AAZDictType()
_schema_on_200.type = AAZStrType(
flags={"read_only": True},
)
_schema_on_200.zones = AAZListType()
properties = cls._schema_on_200.properties
properties.ip_prefix = AAZStrType(
serialized_name="ipPrefix",
)
properties.ip_tags = AAZListType(
serialized_name="ipTags",
)
properties.load_balancer_frontend_ip_configuration = AAZObjectType(
serialized_name="loadBalancerFrontendIpConfiguration",
)
properties.prefix_length = AAZIntType(
serialized_name="prefixLength",
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
)
properties.public_ip_address_version = AAZStrType(
serialized_name="publicIPAddressVersion",
)
properties.public_ip_addresses = AAZListType(
serialized_name="publicIPAddresses",
)
properties.resource_guid = AAZStrType(
serialized_name="resourceGuid",
)
ip_tags = cls._schema_on_200.properties.ip_tags
ip_tags.Element = AAZObjectType()
_element = cls._schema_on_200.properties.ip_tags.Element
_element.ip_tag_type = AAZStrType(
serialized_name="ipTagType",
)
_element.tag = AAZStrType()
load_balancer_frontend_ip_configuration = cls._schema_on_200.properties.load_balancer_frontend_ip_configuration
load_balancer_frontend_ip_configuration.id = AAZStrType()
public_ip_addresses = cls._schema_on_200.properties.public_ip_addresses
public_ip_addresses.Element = AAZObjectType()
_element = cls._schema_on_200.properties.public_ip_addresses.Element
_element.id = AAZStrType()
sku = cls._schema_on_200.sku
sku.name = AAZStrType()
tags = cls._schema_on_200.tags
tags.Element = AAZStrType()
zones = cls._schema_on_200.zones
zones.Element = AAZStrType()
return cls._schema_on_200
class _WaitHelper:
"""Helper class for Wait"""
__all__ = ["Wait"]
|
4,260 |
test 01 create token
|
# coding: utf-8
"""
This test file tests the lib.tokens.sshkeytoken
This depends on lib.tokenclass
"""
from privacyidea.lib.error import TokenAdminError
from .base import MyTestCase
from privacyidea.lib.tokenclass import ROLLOUTSTATE
from privacyidea.lib.tokens.sshkeytoken import SSHkeyTokenClass
from privacyidea.models import Token
class SSHTokenTestCase(MyTestCase):
otppin = "topsecret"
serial1 = "ser1"
serial2 = "ser2"
serial3 = "ser3"
serial4 = "ser4"
sshkey = "ssh-rsa " \
"AAAAB3NzaC1yc2EAAAADAQABAAACAQDJy0rLoxqc8SsY8DVAFijMsQyCv" \
"hBu4K40hdZOacXK4O6OgnacnSKN56MP6pzz2+4svzvDzwvkFsvf34pbsgD" \
"F67PPSCsimmjEQjf0UfamBKh0cl181CbPYsph3UTBOCgHh3FFDXBduPK4DQz" \
"EVQpmqe80h+lsvQ81qPYagbRW6fpd0uWn9H7a/qiLQZsiKLL07HGB+NwWue4os" \
"0r9s4qxeG76K6QM7nZKyC0KRAz7CjAf+0X7YzCOu2pzyxVdj/T+KArFcMmq8V" \
"dz24mhcFFXTzU3wveas1A9rwamYWB+Spuohh/OrK3wDsrryStKQv7yofgnPMs" \
"TdaL7XxyQVPCmh2jVl5ro9BPIjTXsre9EUxZYFVr3EIECRDNWy3xEnUHk7Rzs" \
"734Rp6XxGSzcSLSju8/MBzUVe35iXfXDRcqTcoA0700pIb1ANYrPUO8Up05v4" \
"EjIyBeU61b4ilJ3PNcEVld6FHwP3Z7F068ef4DXEC/d7pibrp4Up61WYQIXV/" \
"utDt3NDg/Zf3iqoYcJNM/zIZx2j1kQQwqtnbGqxJMrL6LtClmeWteR4420uZx" \
"afLE9AtAL4nnMPuubC87L0wJ88un9teza/N02KJMHy01Yz3iJKt3Ou9eV6kqO" \
"ei3kvLs5dXmriTHp6g9whtnN6/Liv9SzZPJTs8YfThi34Wccrw== " \
"NetKnights GmbH Descröption"
unsupported_keytype = "ssh-something AAAAA comment"
sshkey_ecdsa = "ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzd" \
"HAyNTYAAABBBHGCdIk0pO1HFr/mF4oLb43ZRyQJ4K7ICLrAhAiQERVa0tUvyY5TE" \
"zurWTqxSMx203rY77t6xnHLZBMPPpv8rk0= cornelius@puck"
sshkey_ed25519 = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIC38dIb3tM6nPrT" \
"3j1UfsQxOCBbf3JogwsKeVPM893Pi cornelius@puck"
ecdsa_sk = "[email protected] AAAAInNrLWVjZHNhLXNoYTItbmlz" \
"dHAyNTZAb3BlbnNzaC5jb20AAAAIbmlzdHAyNTYAAABBBOStamg+GO4TSgtoWjc82p" \
"OKZIDuOeAt/8PU/jbzEmth6VuNhghRTCPqPMFtR6mB3Pb12yMDRiLH/t1VwkvWWYIA" \
"AAAEc3NoOg=="
wrong_sshkey = """---- BEGIN SSH2 PUBLIC KEY ----
AAAAB3NzaC1kc3MAAACBAKrFC6uDvuxl9vnYL/Fu/Vq+12KJF4
RyMSQe4mn8oHJma2VzepBRBpLt7Q==
---- END SSH2 PUBLIC KEY ----"""
INVALID_SSH = "ssh-rsa"
def METHOD_NAME(self):
db_token = Token(self.serial1, tokentype="sshkey")
db_token.save()
token = SSHkeyTokenClass(db_token)
# An invalid key, raises an exception
self.assertRaises(TokenAdminError, token.update, {"sshkey": "InvalidKey"})
self.assertEqual(token.rollout_state, ROLLOUTSTATE.BROKEN)
# An invalid key, raises an exception
self.assertRaises(TokenAdminError, token.update, {"sshkey": self.INVALID_SSH})
self.assertEqual(token.rollout_state, ROLLOUTSTATE.BROKEN)
# An invalid key, raises an exception
self.assertRaises(TokenAdminError, token.update, {"sshkey": self.wrong_sshkey})
self.assertEqual(token.rollout_state, ROLLOUTSTATE.BROKEN)
# An unsupported keytype
self.assertRaises(TokenAdminError, token.update, {"sshkey": self.unsupported_keytype})
self.assertEqual(token.rollout_state, ROLLOUTSTATE.BROKEN)
# Set valid key
token.update({"sshkey": self.sshkey})
self.assertTrue(token.token.serial == self.serial1, token)
self.assertTrue(token.token.tokentype == "sshkey",
token.token.tokentype)
self.assertTrue(token.type == "sshkey", token)
class_prefix = token.get_class_prefix()
self.assertTrue(class_prefix == "SSHK", class_prefix)
self.assertTrue(token.get_class_type() == "sshkey", token)
# ecdsa
db_token = Token(self.serial2, tokentype="sshkey")
db_token.save()
token = SSHkeyTokenClass(db_token)
token.update({"sshkey": self.sshkey_ecdsa})
# ed25519
db_token = Token(self.serial3, tokentype="sshkey")
db_token.save()
token = SSHkeyTokenClass(db_token)
token.update({"sshkey": self.sshkey_ed25519})
# ecdsa_sk
db_token = Token(self.serial4, tokentype="sshkey")
db_token.save()
token = SSHkeyTokenClass(db_token)
token.update({"sshkey": self.ecdsa_sk})
def test_02_class_methods(self):
db_token = Token.query.filter(Token.serial == self.serial1).first()
token = SSHkeyTokenClass(db_token)
info = token.get_class_info()
self.assertTrue(info.get("title") == "SSHkey Token",
"{0!s}".format(info.get("title")))
info = token.get_class_info("title")
self.assertTrue(info == "SSHkey Token", info)
def test_03_get_sshkey(self):
db_token = Token.query.filter(Token.serial == self.serial1).first()
token = SSHkeyTokenClass(db_token)
sshkey = token.get_sshkey()
self.assertTrue(sshkey == self.sshkey, sshkey)
self.assertIsInstance(sshkey, str)
db_token = Token.query.filter(Token.serial == self.serial2).first()
token = SSHkeyTokenClass(db_token)
sshkey = token.get_sshkey()
self.assertTrue(sshkey == self.sshkey_ecdsa, sshkey)
self.assertIsInstance(sshkey, str)
db_token = Token.query.filter(Token.serial == self.serial3).first()
token = SSHkeyTokenClass(db_token)
sshkey = token.get_sshkey()
self.assertTrue(sshkey == self.sshkey_ed25519, sshkey)
self.assertIsInstance(sshkey, str)
db_token = Token.query.filter(Token.serial == self.serial4).first()
token = SSHkeyTokenClass(db_token)
sshkey = token.get_sshkey()
self.assertEqual(self.ecdsa_sk, sshkey)
self.assertIsInstance(sshkey, str)
|
4,261 |
list
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._extension_type_versions_operations import build_list_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExtensionTypeVersionsOperations:
"""ExtensionTypeVersionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.kubernetesconfiguration.v2022_01_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def METHOD_NAME(
self,
location: str,
extension_type_name: str,
**kwargs: Any
) -> AsyncIterable["_models.ExtensionVersionList"]:
"""List available versions for an Extension Type.
:param location: extension location.
:type location: str
:param extension_type_name: Extension type name.
:type extension_type_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ExtensionVersionList or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.kubernetesconfiguration.v2022_01_01_preview.models.ExtensionVersionList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExtensionVersionList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
location=location,
extension_type_name=extension_type_name,
template_url=self.METHOD_NAME.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
location=location,
extension_type_name=extension_type_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ExtensionVersionList", pipeline_response)
list_of_elem = deserialized.versions
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
METHOD_NAME.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.KubernetesConfiguration/locations/{location}/extensionTypes/{extensionTypeName}/versions'} # type: ignore
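# Hedged usage sketch (illustration only, not generated code): the operation above returns an
# AsyncItemPaged, so callers consume it with ``async for``. The ``extension_type_versions``
# attribute name on the client object is an assumption made for this sketch.
async def _example_iterate_versions(client, location: str, extension_type_name: str) -> list:
    versions = []
    async for version in client.extension_type_versions.list(
        location=location, extension_type_name=extension_type_name
    ):
        versions.append(version)
    return versions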
|
4,262 |
make log msg
|
# Copyright 2019 ICON Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from enum import Flag
from logging import Logger as builtinLogger, FileHandler, Formatter
from typing import Union
class OutputType(Flag):
NONE = 0
CONSOLE = 1
FILE = 2
CUSTOM = 4
class LogConfig:
default_fmt = "[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s > %(message)s"
def __init__(self,
name: str,
level: str,
file_path: str,
fmt: str,
output_type: 'OutputType'):
self.name: str = name
self.level: str = level
self.file_path: str = file_path
self.fmt: str = fmt
self.output_type: 'OutputType' = output_type
@classmethod
def from_dict(cls, src_config: dict):
config: dict = src_config.get('log')
if config is None:
return
name: str = config.get('name', "Logger")
level: str = config.get('level', 'info').upper()
file_path: str = config.get('filePath', "")
fmt: str = config.get('format', cls.default_fmt)
output_type: 'OutputType' = OutputType.NONE
output_types: str = config.get('outputType')
if output_types:
outputs = output_types.split('|')
for output in outputs:
output_type |= OutputType[output.upper()]
return LogConfig(name, level, file_path, fmt, output_type)
class LoggerUtil(object):
_formatter: 'Formatter' = None
@classmethod
def apply_config(cls, logger: 'builtinLogger', config: dict, handler=None):
log_config: 'LogConfig' = LogConfig.from_dict(config)
logger.handlers.clear()
logger.name = log_config.name
logger.setLevel(log_config.level)
cls._apply_config(logger, log_config, handler)
@classmethod
def print_config(cls, logger: 'builtinLogger', config: dict):
logger.info(f'====================LOG CONFIG START====================')
cls._view_config_info(logger, config, "CONFIG")
logger.info(f'====================LOG CONFIG END======================')
@classmethod
def _view_config_info(cls, logger: 'builtinLogger', conf: dict, prefix: str):
for key, value in conf.items():
if not isinstance(value, dict):
tmp_prefix = '{}.{}'.format(prefix, key)
logger.info(f'[{tmp_prefix}] > {value}')
else:
tmp_prefix = '{}.{}'.format(prefix, key)
cls._view_config_info(logger, value, tmp_prefix)
@classmethod
def _apply_config(cls, logger: 'builtinLogger', log_config: 'LogConfig', custom_handler=None):
cls._formatter = Formatter(log_config.fmt)
if cls._is_flag_on(log_config.output_type, OutputType.CONSOLE):
handler = logging.StreamHandler()
handler.setFormatter(cls._formatter)
logger.addHandler(handler)
if cls._is_flag_on(log_config.output_type, OutputType.FILE):
cls._ensure_dir(log_config.file_path)
handler = FileHandler(log_config.file_path, 'a')
handler.setFormatter(cls._formatter)
logger.addHandler(handler)
if cls._is_flag_on(log_config.output_type, OutputType.CUSTOM):
if custom_handler:
handler = custom_handler
handler.setFormatter(cls._formatter)
logger.addHandler(handler)
@classmethod
def _is_flag_on(cls, src_flag: 'Flag', dest_flag: 'Flag') -> bool:
return src_flag & dest_flag == dest_flag
@classmethod
def _ensure_dir(cls, file_path: str):
directory = os.path.dirname(file_path)
if not os.path.exists(directory):
os.makedirs(directory)
@classmethod
def METHOD_NAME(cls, tag: str, msg: Union[str, BaseException]):
return f'[{tag}] {msg}'
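# Hedged usage sketch (not part of the original module): builds a config dict in the shape
# LogConfig.from_dict() expects and applies it to a standard library logger. The logger name
# and file path below are example values only.
def _example_apply_logging_config() -> None:
    example_config = {
        "log": {
            "name": "example-service",
            "level": "debug",
            "filePath": "./logs/example.log",
            "format": LogConfig.default_fmt,
            "outputType": "console|file",
        }
    }
    logger = logging.getLogger("example-service")
    LoggerUtil.apply_config(logger, example_config)
    LoggerUtil.print_config(logger, example_config)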
|
4,263 |
export sources
|
from conan import ConanFile
from conan.errors import ConanInvalidConfiguration
from conan.tools.microsoft import check_min_vs, is_msvc_static_runtime, is_msvc
from conan.tools.files import apply_conandata_patches, export_conandata_patches, get, copy, rmdir
from conan.tools.build import check_min_cppstd
from conan.tools.scm import Version
from conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout
import os
required_conan_version = ">=1.53.0"
class LogrConan(ConanFile):
name = "logr"
description = "Logger frontend substitution for spdlog, glog, etc for server/desktop applications"
license = "BSD-3-Clause"
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://github.com/ngrodzitski/logr"
topics = ("logger", "development", "util", "utils")
package_type = "header-library"
settings = "os", "arch", "compiler", "build_type"
options = {
"backend": ["spdlog", "glog", "log4cplus", "log4cplus-unicode", None],
}
default_options = {
"backend": "spdlog",
}
@property
def _min_cppstd(self):
return 17
@property
def _compilers_minimum_version(self):
return {
"gcc": "7",
"clang": "7",
"apple-clang": "10",
"Visual Studio": "16",
"msvc": "192",
}
def METHOD_NAME(self):
export_conandata_patches(self)
def layout(self):
cmake_layout(self, src_folder="src")
def requirements(self):
self.requires("fmt/9.1.0")
if self.options.backend == "spdlog":
self.requires("spdlog/1.11.0")
elif self.options.backend == "glog":
self.requires("glog/0.6.0")
elif self.options.backend in ["log4cplus", "log4cplus-unicode"]:
self.requires("log4cplus/2.0.5")
def package_id(self):
self.info.clear()
def validate(self):
if self.settings.compiler.get_safe("cppstd"):
check_min_cppstd(self, self._min_cppstd)
minimum_version = self._compilers_minimum_version.get(str(self.settings.compiler), False)
if minimum_version and Version(self.settings.compiler.version) < minimum_version:
raise ConanInvalidConfiguration(
f"{self.ref} requires C++{self._min_cppstd}, which your compiler does not support."
)
if self.options.backend == "log4cplus" and self.options["log4cplus"].unicode:
raise ConanInvalidConfiguration("backend='log4cplus' requires log4cplus:unicode=False")
elif self.options.backend == "log4cplus-unicode" and not self.options["log4cplus"].unicode:
raise ConanInvalidConfiguration("backend='log4cplus-unicode' requires log4cplus:unicode=True")
def source(self):
get(self, **self.conan_data["sources"][self.version], strip_root=True)
def generate(self):
tc = CMakeToolchain(self)
tc.cache_variables["LOGR_WITH_SPDLOG_BACKEND"] = self.options.backend == "spdlog"
tc.cache_variables["LOGR_WITH_GLOG_BACKEND"] = self.options.backend == "glog"
tc.cache_variables["LOGR_WITH_LOG4CPLUS_BACKEND"] = self.options.backend in ["log4cplus", "log4cplus-unicode"]
tc.cache_variables["LOGR_INSTALL"] = True
tc.cache_variables["LOGR_BUILD_TESTS"] = False
tc.cache_variables["LOGR_BUILD_EXAMPLES"] = False
tc.cache_variables["LOGR_BUILD_BENCHMARKS"] = False
tc.generate()
deps = CMakeDeps(self)
deps.generate()
def build(self):
apply_conandata_patches(self)
cmake = CMake(self)
cmake.configure()
cmake.build()
def package(self):
copy(self, pattern="LICENSE", dst=os.path.join(self.package_folder, "licenses"), src=self.source_folder)
cmake = CMake(self)
cmake.install()
rmdir(self, os.path.join(self.package_folder, "lib"))
def package_info(self):
self.cpp_info.bindirs = []
self.cpp_info.libdirs = []
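# Hedged consumption sketch (not part of this recipe): a downstream Conan 2 recipe would pull
# logr in as a regular requirement and select the backend through the package option. The
# version placeholder and option value below are examples only, kept as a comment so this
# file stays a single recipe.
#
#     class ConsumerConan(ConanFile):
#         settings = "os", "arch", "compiler", "build_type"
#         requires = "logr/<version>"
#         default_options = {"logr/*:backend": "spdlog"}
#         generators = "CMakeDeps", "CMakeToolchain"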
|
4,264 |
client random
|
import socket
from _socket import _Address, _RetAddress
from _typeshed import Incomplete, ReadableBuffer
from collections.abc import Callable, MutableSequence, Sequence
from typing import Any, TypeVar
from OpenSSL.crypto import X509, PKey, X509Name
OPENSSL_VERSION_NUMBER: int
SSLEAY_VERSION: int
SSLEAY_CFLAGS: int
SSLEAY_PLATFORM: int
SSLEAY_DIR: int
SSLEAY_BUILT_ON: int
SENT_SHUTDOWN: int
RECEIVED_SHUTDOWN: int
SSLv23_METHOD: int
TLSv1_METHOD: int
TLSv1_1_METHOD: int
TLSv1_2_METHOD: int
TLS_METHOD: int
TLS_SERVER_METHOD: int
TLS_CLIENT_METHOD: int
SSL3_VERSION: int
TLS1_VERSION: int
TLS1_1_VERSION: int
TLS1_2_VERSION: int
TLS1_3_VERSION: int
OP_NO_SSLv2: int
OP_NO_SSLv3: int
OP_NO_TLSv1: int
OP_NO_TLSv1_1: int
OP_NO_TLSv1_2: int
OP_NO_TLSv1_3: int
MODE_RELEASE_BUFFERS: int
OP_SINGLE_DH_USE: int
OP_SINGLE_ECDH_USE: int
OP_EPHEMERAL_RSA: int
OP_MICROSOFT_SESS_ID_BUG: int
OP_NETSCAPE_CHALLENGE_BUG: int
OP_NETSCAPE_REUSE_CIPHER_CHANGE_BUG: int
OP_SSLREF2_REUSE_CERT_TYPE_BUG: int
OP_MICROSOFT_BIG_SSLV3_BUFFER: int
OP_MSIE_SSLV2_RSA_PADDING: int
OP_SSLEAY_080_CLIENT_DH_BUG: int
OP_TLS_D5_BUG: int
OP_TLS_BLOCK_PADDING_BUG: int
OP_DONT_INSERT_EMPTY_FRAGMENTS: int
OP_CIPHER_SERVER_PREFERENCE: int
OP_TLS_ROLLBACK_BUG: int
OP_PKCS1_CHECK_1: int
OP_PKCS1_CHECK_2: int
OP_NETSCAPE_CA_DN_BUG: int
OP_NETSCAPE_DEMO_CIPHER_CHANGE_BUG: int
OP_NO_COMPRESSION: int
OP_NO_QUERY_MTU: int
OP_COOKIE_EXCHANGE: int
OP_NO_TICKET: int
OP_ALL: int
VERIFY_PEER: int
VERIFY_FAIL_IF_NO_PEER_CERT: int
VERIFY_CLIENT_ONCE: int
VERIFY_NONE: int
SESS_CACHE_OFF: int
SESS_CACHE_CLIENT: int
SESS_CACHE_SERVER: int
SESS_CACHE_BOTH: int
SESS_CACHE_NO_AUTO_CLEAR: int
SESS_CACHE_NO_INTERNAL_LOOKUP: int
SESS_CACHE_NO_INTERNAL_STORE: int
SESS_CACHE_NO_INTERNAL: int
SSL_ST_CONNECT: int
SSL_ST_ACCEPT: int
SSL_ST_MASK: int
SSL_CB_LOOP: int
SSL_CB_EXIT: int
SSL_CB_READ: int
SSL_CB_WRITE: int
SSL_CB_ALERT: int
SSL_CB_READ_ALERT: int
SSL_CB_WRITE_ALERT: int
SSL_CB_ACCEPT_LOOP: int
SSL_CB_ACCEPT_EXIT: int
SSL_CB_CONNECT_LOOP: int
SSL_CB_CONNECT_EXIT: int
SSL_CB_HANDSHAKE_START: int
SSL_CB_HANDSHAKE_DONE: int
NO_OVERLAPPING_PROTOCOLS: object
class Error(Exception): ...
class WantReadError(Error): ...
class WantWriteError(Error): ...
class WantX509LookupError(Error): ...
class ZeroReturnError(Error): ...
class SysCallError(Error): ...
def SSLeay_version(type: int) -> bytes: ...
class Session: ...
class Connection:
def __getattr__(self, name: str) -> Any: ... # takes attributes from `self._socket`
def __init__(self, context: Context, socket: socket.socket | None = None) -> None: ...
def get_context(self) -> Context: ...
def set_context(self, context: Context) -> None: ...
def get_servername(self) -> bytes | None: ...
def set_tlsext_host_name(self, name: bytes) -> None: ...
def pending(self) -> int: ...
def send(self, buf: ReadableBuffer | str, flags: int = 0) -> int: ...
write = send
def sendall(self, buf: ReadableBuffer | str, flags: int = 0) -> int: ...
def recv(self, bufsiz: int, flags: int | None = None) -> bytes: ...
read = recv
def recv_into(self, buffer: MutableSequence[int], nbytes: int | None = None, flags: int | None = None) -> int: ...
def connect(self, addr: str | bytes | Sequence[str | int]) -> None: ...
def connect_ex(self, addr: _Address | bytes) -> int: ...
def accept(self) -> tuple[Connection, _RetAddress]: ...
def DTLSv1_listen(self) -> None: ...
def DTLSv1_get_timeout(self) -> float | None: ...
def DTLSv1_handle_timeout(self) -> bool: ...
def shutdown(self) -> bool: ...
def do_handshake(self) -> None: ...
def get_certificate(self) -> X509 | None: ...
def get_peer_certificate(self) -> X509 | None: ...
def get_peer_cert_chain(self) -> list[X509] | None: ...
def get_verified_chain(self) -> list[X509] | None: ...
def bio_read(self, bufsiz: int) -> bytes: ...
def bio_write(self, buf: bytes) -> int: ...
def bio_shutdown(self) -> None: ...
def renegotiate(self) -> bool: ...
def renegotiate_pending(self) -> bool: ...
def total_renegotiations(self) -> int: ...
def set_accept_state(self) -> None: ...
def set_connect_state(self) -> None: ...
def get_client_ca_list(self) -> list[X509Name]: ...
def get_cipher_list(self) -> list[str]: ...
def get_cipher_name(self) -> str | None: ...
def get_cipher_bits(self) -> int | None: ...
def get_cipher_version(self) -> str | None: ...
def get_protocol_version_name(self) -> str: ...
def get_protocol_version(self) -> int: ...
def get_shutdown(self) -> int: ...
def set_shutdown(self, state: int) -> None: ...
def get_state_string(self) -> bytes: ...
def server_random(self) -> bytes | None: ...
def METHOD_NAME(self) -> bytes | None: ...
def master_key(self) -> bytes | None: ...
def export_keying_material(
self, label: Incomplete, olen: Incomplete, context: Incomplete = None
) -> Incomplete: ... # TODO: type, see RFC-5705
def get_app_data(self) -> Any: ...
def set_app_data(self, data: Any) -> None: ...
def sock_shutdown(self, __how: int) -> None: ... # alias to `_socket.socket.shutdown`
def want_read(self) -> bool: ...
def want_write(self) -> bool: ...
def get_session(self) -> Session | None: ...
def set_session(self, session: Session) -> None: ...
def get_finished(self) -> bytes | None: ...
def get_peer_finished(self) -> bytes | None: ...
def set_alpn_protos(self, protos: Sequence[bytes]) -> None: ...
def get_alpn_proto_negotiated(self) -> bytes: ...
def request_ocsp(self) -> None: ...
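# Hedged usage sketch (application code, not part of the stub; kept as a comment so the stub
# continues to describe the interface only): the per-connection randoms and master_key()
# return ``bytes | None`` depending on handshake state, and a master secret is only
# meaningful for TLS <= 1.2 sessions.
#
#     def dump_session_secrets(conn: Connection) -> None:
#         print("server random:", (conn.server_random() or b"").hex())
#         print("master key:", (conn.master_key() or b"").hex())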
_T = TypeVar("_T")
class Context:
def __getattr__(self, name: str) -> Incomplete: ...
def __init__(self, method: int) -> None: ...
def load_verify_locations(self, cafile: str | None, capath: str | None = None) -> None: ...
def set_options(self, options: int) -> None: ...
def set_verify(self, mode: int, callback: Callable[[Connection, X509, int, int, int], bool] | None = None) -> None: ...
def set_min_proto_version(self, version: int) -> None: ...
def set_max_proto_version(self, version: int) -> None: ...
def use_certificate_chain_file(self, certfile: str | bytes) -> None: ...
def use_certificate_file(self, certfile: str | bytes, filetype: int = 1) -> None: ...
def use_certificate(self, cert: X509) -> None: ...
def use_privatekey_file(self, keyfile: str | bytes, filetype: int | None = ...) -> None: ...
def use_privatekey(self, pkey: PKey) -> None: ...
def add_extra_chain_cert(self, certobj: X509) -> None: ...
def set_cipher_list(self, cipher_list: bytes) -> None: ...
def set_keylog_callback(self, callback: Callable[[Connection, bytes], object]) -> None: ...
def set_alpn_protos(self, protos: Sequence[bytes]) -> None: ...
def set_alpn_select_callback(self, callback: Callable[[Connection, list[bytes]], bytes]) -> None: ...
def set_ocsp_server_callback(self, callback: Callable[[Connection, _T | None], bytes], data: _T | None = None) -> None: ...
def set_ocsp_client_callback(
self, callback: Callable[[Connection, bytes, _T | None], bool], data: _T | None = None
) -> None: ...
|
4,265 |
discover
|
#
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
from typing import Any, Iterator, List, Mapping, MutableMapping, Optional, Tuple
from airbyte_cdk.logger import AirbyteLogger
from airbyte_cdk.models import (
AirbyteCatalog,
AirbyteConnectionStatus,
AirbyteMessage,
AirbyteStream,
ConfiguredAirbyteCatalog,
Status,
SyncMode,
)
from airbyte_cdk.sources import AbstractSource
from airbyte_cdk.sources.connector_state_manager import ConnectorStateManager
from airbyte_cdk.sources.streams import Stream
from airbyte_cdk.sources.utils.schema_helpers import split_config
from airbyte_cdk.utils.event_timing import create_timer
from .azure_table import AzureTableReader
from .streams import AzureTableStream
class SourceAzureTable(AbstractSource):
"""This source helps to sync data from one azure data table a time"""
def check_connection(self, logger: AirbyteLogger, config: Mapping[str, Any]) -> Tuple[bool, Optional[Any]]:
pass
def _as_airbyte_record(self, stream_name: str, data: Mapping[str, Any]):
return data
@property
def get_typed_schema(self) -> object:
"""Static schema for tables"""
return {
"$schema": "http://json-schema.org/draft-07/schema#",
"type": "object",
"properties": {"PartitionKey": {"type": "string"}},
}
def check(self, logger: AirbyteLogger, config: Mapping[str, Any]) -> AirbyteConnectionStatus:
try:
reader = AzureTableReader(logger, config)
client = reader.get_table_service_client()
tables_iterator = client.list_tables(results_per_page=1)
next(tables_iterator)
return AirbyteConnectionStatus(status=Status.SUCCEEDED)
except StopIteration:
logger.log("No tables found, but credentials are correct.")
return AirbyteConnectionStatus(status=Status.SUCCEEDED)
except Exception as e:
return AirbyteConnectionStatus(status=Status.FAILED, message=f"An exception occurred: {str(e)}")
def METHOD_NAME(self, logger: AirbyteLogger, config: Mapping[str, Any]) -> AirbyteCatalog:
reader = AzureTableReader(logger, config)
tables = reader.get_tables()
streams = []
for table in tables:
stream_name = table.name
stream = AirbyteStream(
name=stream_name,
json_schema=self.get_typed_schema,
supported_sync_modes=[SyncMode.full_refresh, SyncMode.incremental],
source_defined_cursor=True,
default_cursor_field=["PartitionKey"],
)
streams.append(stream)
logger.info(f"Total {streams.count} streams found.")
return AirbyteCatalog(streams=streams)
def streams(self, logger: AirbyteLogger, config: Mapping[str, Any]) -> List[Stream]:
"""
:param config: The user-provided configuration as specified by the source's spec.
Any stream construction related operation should happen here.
:return: A list of the streams in this source connector.
"""
try:
reader = AzureTableReader(logger, config)
tables = reader.get_tables()
streams = []
for table in tables:
stream_name = table.name
stream = AzureTableStream(stream_name=stream_name, reader=reader)
streams.append(stream)
return streams
except Exception as e:
raise Exception(f"An exception occurred: {str(e)}")
def read(
self, logger: AirbyteLogger, config: Mapping[str, Any], catalog: ConfiguredAirbyteCatalog, state: MutableMapping[str, Any] = None
) -> Iterator[AirbyteMessage]:
"""
This method is overridden to check whether the stream `quotes` exists in the source, if not skip reading that stream.
"""
stream_instances = {s.name: s for s in self.streams(logger=logger, config=config)}
state_manager = ConnectorStateManager(stream_instance_map=stream_instances, state=state)
logger.info(f"Starting syncing {self.name}")
config, internal_config = split_config(config)
self._stream_to_instance_map = stream_instances
with create_timer(self.name) as timer:
for configured_stream in catalog.streams:
stream_instance = stream_instances.get(configured_stream.stream.name)
stream_instance.cursor_field = configured_stream.cursor_field
if not stream_instance and configured_stream.stream.name == "quotes":
logger.warning("Stream `quotes` does not exist in the source. Skip reading `quotes` stream.")
continue
if not stream_instance:
raise KeyError(
f"The requested stream {configured_stream.stream.name} was not found in the source. Available streams: {stream_instances.keys()}"
)
try:
yield from self._read_stream(
logger=logger,
stream_instance=stream_instance,
configured_stream=configured_stream,
state_manager=state_manager,
internal_config=internal_config,
)
except Exception as e:
logger.exception(f"Encountered an exception while reading stream {self.name}")
raise e
finally:
logger.info(f"Finished syncing {self.name}")
logger.info(timer.report())
logger.info(f"Finished syncing {self.name}")
|
4,266 |
test anydbm keys
|
"""Test script for the dbm.open function based on testdumbdbm.py"""
import unittest
import dbm
import os
from test.support import import_helper
from test.support import os_helper
try:
from dbm import ndbm
except ImportError:
ndbm = None
dirname = os_helper.TESTFN
_fname = os.path.join(dirname, os_helper.TESTFN)
#
# Iterates over every database module supported by dbm currently available.
#
def dbm_iterator():
for name in dbm._names:
try:
mod = __import__(name, fromlist=['open'])
except ImportError:
continue
dbm._modules[name] = mod
yield mod
#
# Clean up all scratch databases we might have created during testing
#
def cleanup_test_dir():
os_helper.rmtree(dirname)
def setup_test_dir():
cleanup_test_dir()
os.mkdir(dirname)
class AnyDBMTestCase:
_dict = {'a': b'Python:',
'b': b'Programming',
'c': b'the',
'd': b'way',
'f': b'Guido',
'g': b'intended',
}
def init_db(self):
f = dbm.open(_fname, 'n')
for k in self._dict:
f[k.encode("ascii")] = self._dict[k]
f.close()
def keys_helper(self, f):
keys = sorted(k.decode("ascii") for k in f.keys())
dkeys = sorted(self._dict.keys())
self.assertEqual(keys, dkeys)
return keys
def test_error(self):
self.assertTrue(issubclass(self.module.error, OSError))
def test_anydbm_not_existing(self):
self.assertRaises(dbm.error, dbm.open, _fname)
def test_anydbm_creation(self):
f = dbm.open(_fname, 'c')
self.assertEqual(list(f.keys()), [])
for key in self._dict:
f[key.encode("ascii")] = self._dict[key]
self.read_helper(f)
f.close()
def test_anydbm_creation_n_file_exists_with_invalid_contents(self):
# create an empty file
os_helper.create_empty_file(_fname)
with dbm.open(_fname, 'n') as f:
self.assertEqual(len(f), 0)
def test_anydbm_modification(self):
self.init_db()
f = dbm.open(_fname, 'c')
self._dict['g'] = f[b'g'] = b"indented"
self.read_helper(f)
# setdefault() works as in the dict interface
self.assertEqual(f.setdefault(b'xxx', b'foo'), b'foo')
self.assertEqual(f[b'xxx'], b'foo')
f.close()
def test_anydbm_read(self):
self.init_db()
f = dbm.open(_fname, 'r')
self.read_helper(f)
# get() works as in the dict interface
self.assertEqual(f.get(b'a'), self._dict['a'])
self.assertEqual(f.get(b'xxx', b'foo'), b'foo')
self.assertIsNone(f.get(b'xxx'))
with self.assertRaises(KeyError):
f[b'xxx']
f.close()
def METHOD_NAME(self):
self.init_db()
f = dbm.open(_fname, 'r')
keys = self.keys_helper(f)
f.close()
def test_empty_value(self):
if getattr(dbm._defaultmod, 'library', None) == 'Berkeley DB':
self.skipTest("Berkeley DB doesn't distinguish the empty value "
"from the absent one")
f = dbm.open(_fname, 'c')
self.assertEqual(f.keys(), [])
f[b'empty'] = b''
self.assertEqual(f.keys(), [b'empty'])
self.assertIn(b'empty', f)
self.assertEqual(f[b'empty'], b'')
self.assertEqual(f.get(b'empty'), b'')
self.assertEqual(f.setdefault(b'empty'), b'')
f.close()
def test_anydbm_access(self):
self.init_db()
f = dbm.open(_fname, 'r')
key = "a".encode("ascii")
self.assertIn(key, f)
self.assertEqual(f[key], b"Python:")
f.close()
def test_open_with_bytes(self):
dbm.open(os.fsencode(_fname), "c").close()
def test_open_with_pathlib_path(self):
dbm.open(os_helper.FakePath(_fname), "c").close()
def test_open_with_pathlib_path_bytes(self):
dbm.open(os_helper.FakePath(os.fsencode(_fname)), "c").close()
def read_helper(self, f):
keys = self.keys_helper(f)
for key in self._dict:
self.assertEqual(self._dict[key], f[key.encode("ascii")])
def test_keys(self):
with dbm.open(_fname, 'c') as d:
self.assertEqual(d.keys(), [])
a = [(b'a', b'b'), (b'12345678910', b'019237410982340912840198242')]
for k, v in a:
d[k] = v
self.assertEqual(sorted(d.keys()), sorted(k for (k, v) in a))
for k, v in a:
self.assertIn(k, d)
self.assertEqual(d[k], v)
self.assertNotIn(b'xxx', d)
self.assertRaises(KeyError, lambda: d[b'xxx'])
def setUp(self):
self.addCleanup(setattr, dbm, '_defaultmod', dbm._defaultmod)
dbm._defaultmod = self.module
self.addCleanup(cleanup_test_dir)
setup_test_dir()
class WhichDBTestCase(unittest.TestCase):
def test_whichdb(self):
self.addCleanup(setattr, dbm, '_defaultmod', dbm._defaultmod)
_bytes_fname = os.fsencode(_fname)
fnames = [_fname, os_helper.FakePath(_fname),
_bytes_fname, os_helper.FakePath(_bytes_fname)]
for module in dbm_iterator():
# Check whether whichdb correctly guesses module name
# for databases opened with "module" module.
name = module.__name__
setup_test_dir()
dbm._defaultmod = module
# Try with empty files first
with module.open(_fname, 'c'): pass
for path in fnames:
self.assertEqual(name, self.dbm.whichdb(path))
# Now add a key
with module.open(_fname, 'w') as f:
f[b"1"] = b"1"
# and test that we can find it
self.assertIn(b"1", f)
# and read it
self.assertEqual(f[b"1"], b"1")
for path in fnames:
self.assertEqual(name, self.dbm.whichdb(path))
@unittest.skipUnless(ndbm, reason='Test requires ndbm')
def test_whichdb_ndbm(self):
# Issue 17198: check that ndbm which is referenced in whichdb is defined
with open(_fname + '.db', 'wb'): pass
_bytes_fname = os.fsencode(_fname)
fnames = [_fname, os_helper.FakePath(_fname),
_bytes_fname, os_helper.FakePath(_bytes_fname)]
for path in fnames:
self.assertIsNone(self.dbm.whichdb(path))
def setUp(self):
self.addCleanup(cleanup_test_dir)
setup_test_dir()
self.dbm = import_helper.import_fresh_module('dbm')
for mod in dbm_iterator():
assert mod.__name__.startswith('dbm.')
suffix = mod.__name__[4:]
testname = f'TestCase_{suffix}'
globals()[testname] = type(testname,
(AnyDBMTestCase, unittest.TestCase),
{'module': mod})
if __name__ == "__main__":
unittest.main()
|
4,267 |
url parameters
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"network route-filter rule delete",
is_preview=True,
)
class Delete(AAZCommand):
"""Delete a rule from a route filter.
:example: Delete a rule from a route filter.
az network route-filter rule delete -g MyResourceGroup --filter-name MyRouteFilter -n MyRouteFilterRule
"""
_aaz_info = {
"version": "2021-08-01",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.network/routefilters/{}/routefilterrules/{}", "2021-08-01"],
]
}
AZ_SUPPORT_NO_WAIT = True
def _handler(self, command_args):
super()._handler(command_args)
return self.build_lro_poller(self._execute_operations, None)
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
_args_schema.filter_name = AAZStrArg(
options=["--filter-name"],
help="Name of the route filter.",
required=True,
id_part="name",
)
_args_schema.name = AAZStrArg(
options=["-n", "--name"],
help="Name of the route filter rule.",
required=True,
id_part="child_name_1",
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
yield self.RouteFilterRulesDelete(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
class RouteFilterRulesDelete(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [202]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_200,
self.on_error,
lro_options={"final-state-via": "location"},
path_format_arguments=self.METHOD_NAME,
)
if session.http_response.status_code in [200]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_200,
self.on_error,
lro_options={"final-state-via": "location"},
path_format_arguments=self.METHOD_NAME,
)
if session.http_response.status_code in [204]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_204,
self.on_error,
lro_options={"final-state-via": "location"},
path_format_arguments=self.METHOD_NAME,
)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}",
**self.METHOD_NAME
)
@property
def method(self):
return "DELETE"
@property
def error_format(self):
return "ODataV4Format"
@property
def METHOD_NAME(self):
parameters = {
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"routeFilterName", self.ctx.args.filter_name,
required=True,
),
**self.serialize_url_param(
"ruleName", self.ctx.args.name,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2021-08-01",
required=True,
),
}
return parameters
def on_200(self, session):
pass
def on_204(self, session):
pass
class _DeleteHelper:
"""Helper class for Delete"""
__all__ = ["Delete"]
|
4,268 |
test html repr scalar
|
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2023 Scipp contributors (https://github.com/scipp)
# @file
# @author Neil Vaytet
import numpy as np
import pytest
import scipp as sc
from ..factory import (
make_binned_data_array,
make_dense_data_array,
make_dense_datagroup,
make_dense_dataset,
make_scalar,
make_scalar_array,
make_simple_datagroup,
make_variable,
)
# TODO:
# For now, we are just checking that creating the repr does not throw.
def maybe_variances(variances, dtype):
if dtype in [sc.DType.float64, sc.DType.float32]:
return variances
else:
return False
@pytest.mark.parametrize("variance", [False, True])
@pytest.mark.parametrize(
"dtype", [sc.DType.float64, sc.DType.float32, sc.DType.int64, sc.DType.int32]
)
@pytest.mark.parametrize("unit", ['dimensionless', 'counts', 's', 'us'])
def METHOD_NAME(variance, dtype, unit):
var = make_scalar(
with_variance=maybe_variances(variance, dtype), dtype=dtype, unit=unit
)
sc.make_html(var)
@pytest.mark.parametrize("variance", [False, True])
@pytest.mark.parametrize("label", [False, True])
@pytest.mark.parametrize("attr", [False, True])
@pytest.mark.parametrize("mask", [False, True])
@pytest.mark.parametrize(
"dtype", [sc.DType.float64, sc.DType.float32, sc.DType.int64, sc.DType.int32]
)
@pytest.mark.parametrize("unit", ['dimensionless', 'counts', 's'])
def test_html_repr_scalar_array(variance, label, attr, mask, dtype, unit):
da = make_scalar_array(
with_variance=maybe_variances(variance, dtype),
label=label,
attr=attr,
mask=mask,
dtype=dtype,
unit=unit,
)
sc.make_html(da)
@pytest.mark.parametrize("ndim", [1, 2, 3, 4])
@pytest.mark.parametrize("variances", [False, True])
@pytest.mark.parametrize("dtype", [sc.DType.float64, sc.DType.int64])
@pytest.mark.parametrize("unit", ['dimensionless', 'counts', 's'])
def test_html_repr_variable(ndim, variances, dtype, unit):
var = make_variable(
ndim=ndim,
with_variance=maybe_variances(variances, dtype),
dtype=dtype,
unit=unit,
)
sc.make_html(var)
sc.make_html(var['xx', 1:10])
def test_html_repr_variable_strings():
sc.make_html(sc.array(dims=['x'], values=list(map(chr, range(97, 123)))))
def test_html_repr_variable_vector():
sc.make_html(sc.vectors(dims=['x'], values=np.arange(30.0).reshape(10, 3)))
@pytest.mark.parametrize("ndim", [1, 2, 3, 4])
@pytest.mark.parametrize("with_all", [True, False])
@pytest.mark.parametrize("dtype", [sc.DType.float64, sc.DType.int64])
@pytest.mark.parametrize("unit", ['dimensionless', 'counts', 's'])
def test_html_repr_data_array(ndim, with_all, dtype, unit):
da = make_dense_data_array(
ndim=ndim,
with_variance=maybe_variances(with_all, dtype),
binedges=with_all,
labels=with_all,
attrs=with_all,
masks=with_all,
ragged=with_all,
dtype=dtype,
unit=unit,
)
sc.make_html(da)
sc.make_html(da['xx', 1:10])
@pytest.mark.parametrize("ndim", [1, 2, 3, 4])
@pytest.mark.parametrize("variances", [False, True])
@pytest.mark.parametrize("masks", [False, True])
def test_html_repr_binned_data_array(ndim, variances, masks):
da = make_binned_data_array(ndim=ndim, with_variance=variances, masks=masks)
sc.make_html(da)
sc.make_html(da['xx', 1:10])
def test_html_repr_binned_scalar_data_array():
da = make_binned_data_array(ndim=1)
sc.make_html(da['xx', 1])
def test_html_repr_binned_scalar_data_array_variable_buffer():
da = make_binned_data_array(ndim=1)
sc.make_html(da['xx', 1].bins.data)
@pytest.mark.parametrize("ndim", [1, 2, 3, 4])
@pytest.mark.parametrize("with_all", [True, False])
@pytest.mark.parametrize("dtype", [sc.DType.float64, sc.DType.int64])
@pytest.mark.parametrize("unit", ['dimensionless', 'counts', 's'])
def test_html_repr_dataset(ndim, with_all, dtype, unit):
da = make_dense_dataset(
ndim=ndim,
with_variance=maybe_variances(with_all, dtype),
binedges=with_all,
labels=with_all,
attrs=with_all,
masks=with_all,
ragged=with_all,
dtype=dtype,
unit=unit,
)
sc.make_html(da)
sc.make_html(da['xx', 1:10])
def test_html_repr_dense_datagroup():
with_all = True
dtype = sc.DType.float64
dg = make_dense_datagroup(
maxdepth=2,
ndim=3,
with_variance=maybe_variances(True, dtype),
binedges=with_all,
labels=with_all,
attrs=with_all,
masks=with_all,
ragged=with_all,
dtype=dtype,
unit='dimensionless',
)
sc.make_html(dg)
@pytest.mark.parametrize("ndepth", [1, 2, 10])
def test_html_repr_simple_datagroup(ndepth):
dg = make_simple_datagroup(maxdepth=ndepth)
dg_repr_html = sc.make_html(dg)
from bs4 import BeautifulSoup
html_parser = BeautifulSoup(dg_repr_html, "html.parser")
assert (type(dg).__name__) in html_parser.find('div', class_='sc-obj-type').text
assert bool(html_parser.find('div', class_='dg-root'))
assert bool(html_parser.find('div', class_='dg-detail-box'))
|
4,269 |
set default user
|
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""`verdi user` command."""
from functools import partial
import click
from aiida.cmdline.commands.cmd_verdi import verdi
from aiida.cmdline.params import arguments, options, types
from aiida.cmdline.utils import decorators, echo
def METHOD_NAME(profile, user):
"""Set the user as the default user for the given profile.
:param profile: the profile
:param user: the user
"""
from aiida.manage.configuration import get_config
config = get_config()
profile.default_user_email = user.email
config.update_profile(profile)
config.store()
def get_user_attribute_default(attribute, ctx):
"""Return the default value for the given attribute of the user passed in the context.
:param attribute: attribute for which to get the current value
:param ctx: click context which should contain the selected user
:return: user attribute default value if set, or None
"""
default = getattr(ctx.params['user'], attribute)
# None or empty string means there is no default
if not default:
return None
return default
@verdi.group('user')
def verdi_user():
"""Inspect and manage users."""
@verdi_user.command('list')
@decorators.with_dbenv()
def user_list():
"""Show a list of all users."""
from aiida.orm import User
default_user = User.collection.get_default()
if default_user is None:
echo.echo_warning('no default user has been configured')
attributes = ['email', 'first_name', 'last_name']
sort = lambda user: user.email
highlight = lambda x: x.email == default_user.email if default_user else None
echo.echo_formatted_list(User.collection.all(), attributes, sort=sort, highlight=highlight)
@verdi_user.command('configure')
@click.option(
'--email',
'user',
prompt='User email',
help='Email address that serves as the user name and a way to identify data created by it.',
type=types.UserParamType(create=True),
cls=options.interactive.InteractiveOption
)
@click.option(
'--first-name',
prompt='First name',
help='First name of the user.',
type=click.STRING,
contextual_default=partial(get_user_attribute_default, 'first_name'),
cls=options.interactive.InteractiveOption
)
@click.option(
'--last-name',
prompt='Last name',
help='Last name of the user.',
type=click.STRING,
contextual_default=partial(get_user_attribute_default, 'last_name'),
cls=options.interactive.InteractiveOption
)
@click.option(
'--institution',
prompt='Institution',
help='Institution of the user.',
type=click.STRING,
contextual_default=partial(get_user_attribute_default, 'institution'),
cls=options.interactive.InteractiveOption
)
@click.option(
'--set-default',
prompt='Set as default?',
help='Set the user as the default user for the current profile.',
is_flag=True,
cls=options.interactive.InteractiveOption
)
@click.pass_context
@decorators.with_dbenv()
def user_configure(ctx, user, first_name, last_name, institution, set_default):
"""Configure a new or existing user.
An e-mail address is used as the user name.
"""
# pylint: disable=too-many-arguments
if first_name is not None:
user.first_name = first_name
if last_name is not None:
user.last_name = last_name
if institution is not None:
user.institution = institution
action = 'updated' if user.is_stored else 'created'
user.store()
echo.echo_success(f'{user.email} successfully {action}')
if set_default:
ctx.invoke(user_set_default, user=user)
@verdi_user.command('set-default')
@arguments.USER()
@click.pass_context
@decorators.with_dbenv()
def user_set_default(ctx, user):
"""Set a user as the default user for the profile."""
METHOD_NAME(ctx.obj.profile, user)
echo.echo_success(f'set `{user.email}` as the new default user for profile `{ctx.obj.profile.name}`')
|
4,270 |
hash text
|
# ----------------------------------------------------------------------------
# Copyright (C) 2021-2023 Deepchecks (https://www.deepchecks.com)
#
# This file is part of Deepchecks.
# Deepchecks is distributed under the terms of the GNU Affero General
# Public License (version 3 or later).
# You should have received a copy of the GNU Affero General Public License
# along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.
# ----------------------------------------------------------------------------
#
"""Module of text utils for NLP package."""
import re
import string
import typing as t
import unicodedata
import warnings
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
__all__ = [
'break_to_lines_and_trim',
'normalize_text',
'hash_text',
'normalize_samples',
'hash_samples'
]
def break_to_lines_and_trim(s, max_lines: int = 10, min_line_length: int = 50, max_line_length: int = 60):
"""Break a string to lines and trim it to a maximum number of lines.
Parameters
----------
s : str
The string to break.
max_lines : int, default 10
The maximum number of lines to return.
min_line_length : int, default 50
The minimum length of a line.
max_line_length : int, default 60
The maximum length of a line.
"""
separating_delimiters = [' ', '\t', '\n', '\r']
lines = []
for i in range(max_lines): # pylint: disable=unused-variable
if len(s) < max_line_length: # if remaining string is short enough, add it and break
lines.append(s.strip())
break
else: # find the first delimiter from the end of the line
max_line_length = min(max_line_length, len(s)-1)
for j in range(max_line_length, min_line_length-1, -1):
if s[j] in separating_delimiters:
lines.append(s[:j])
s = s[j:].strip()
break
else: # if no delimiter was found, break in the middle of the line
# Check if breaking in the middle of an HTML tag
tag_start = re.search(r'<[^>]*$', s[:max_line_length])
if tag_start:
max_line_length = tag_start.start()
lines.append(s[:max_line_length].strip() + '-')
s = s[max_line_length:].strip()
else: # if the loop ended without breaking, and there is still text left, add an ellipsis
if len(s) > 0:
lines[-1] = lines[-1] + '...'
return '<br>'.join(lines)
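# A minimal sketch of the expected behaviour (illustrative input, not a test):
#
#   break_to_lines_and_trim('word ' * 40, max_lines=2)
#
# returns two '<br>'-joined lines of roughly 50-60 characters each, broken at
# spaces, with the second line ending in '...' because text remains after
# max_lines lines have been emitted.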
def remove_punctuation(text: str) -> str:
"""Remove punctuation characters from a string."""
return text.translate(str.maketrans('', '', string.punctuation))
def normalize_unicode(text: str) -> str:
"""Normalize unicode characters."""
return unicodedata.normalize('NFKC', text)
def remove_stopwords(text: str) -> str:
"""Remove stop words from a string."""
if nltk.download('stopwords', quiet=True):
stop_words = set(stopwords.words('english'))
else:
warnings.warn('nltk stopwords not found, stopwords won\'t be ignored when considering text duplicates.'
' Please check your internet connection.')
return text
if nltk.download('punkt', quiet=True):
tokenize = word_tokenize
else:
tokenize = str.split
words = tokenize(text)
return ' '.join([word for word in words if word.lower() not in stop_words])
def normalize_text(
text_sample: str,
*,
ignore_case: bool = True,
remove_punct: bool = True,
normalize_uni: bool = True,
remove_stops: bool = True,
ignore_whitespace: bool = False
) -> str:
"""Normalize given text sample."""
if ignore_case:
text_sample = text_sample.lower()
if remove_punct:
text_sample = remove_punctuation(text_sample)
if normalize_uni:
text_sample = normalize_unicode(text_sample)
if remove_stops:
text_sample = remove_stopwords(text_sample)
if ignore_whitespace:
text_sample = ''.join(text_sample.split())
return text_sample
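# A minimal sketch of what the default normalization does (illustrative; assumes
# the nltk stopwords/punkt data can be downloaded so stop words are removed):
#
#   normalize_text('The Quick, Brown Fox!')  ->  'quick brown fox'
#
# With ignore_whitespace=True the remaining spaces are dropped as well before
# samples are compared or hashed.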
def cut_string(input_str: str, cut_length: int = 200) -> str:
"""Cut a string to 200 characters, but cut only at whitespaces."""
if len(input_str) > cut_length:
index = input_str.find(' ', cut_length)
if index != -1:
return input_str[:index]
return input_str
def normalize_samples(
text_samples: t.Sequence[str],
*,
ignore_case: bool = True,
remove_punct: bool = True,
normalize_uni: bool = True,
remove_stops: bool = True,
ignore_whitespace: bool = False
) -> t.List[str]:
"""Normalize given sequence of text samples."""
return [
normalize_text(
it,
ignore_case=ignore_case,
remove_punct=remove_punct,
normalize_uni=normalize_uni,
remove_stops=remove_stops,
ignore_whitespace=ignore_whitespace
)
for it in text_samples
]
def METHOD_NAME(text: str) -> int:
"""Hash a text sample."""
assert isinstance(text, str)
return hash(text)
def hash_samples(text: t.Sequence[str]) -> t.List[int]:
"""Hash a sequence of text samples."""
assert not isinstance(text, str)
return [METHOD_NAME(it) for it in text]
|
4,271 |
test w backend options
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019, 2023.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test Skip Qobj Validation"""
import unittest
from test.python.algorithms import QiskitAlgorithmsTestCase
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer
from qiskit.utils import QuantumInstance
from qiskit.exceptions import QiskitError
def _compare_dict(dict1, dict2):
equal = True
for key1, value1 in dict1.items():
if key1 not in dict2:
equal = False
break
if value1 != dict2[key1]:
equal = False
break
return equal
class TestSkipQobjValidation(QiskitAlgorithmsTestCase):
"""Test Skip Qobj Validation"""
def setUp(self):
super().setUp()
self.random_seed = 10598
# ┌───┐ ░ ┌─┐ ░
# q0_0: ┤ H ├──■───░─┤M├─░────
# └───┘┌─┴─┐ ░ └╥┘ ░ ┌─┐
# q0_1: ─────┤ X ├─░──╫──░─┤M├
# └───┘ ░ ║ ░ └╥┘
# c0: 2/══════════════╩═════╩═
# 0 1
qr = QuantumRegister(2)
cr = ClassicalRegister(2)
qc = QuantumCircuit(qr, cr)
qc.h(qr[0])
qc.cx(qr[0], qr[1])
# Ensure qubit 0 is measured before qubit 1
qc.barrier(qr)
qc.measure(qr[0], cr[0])
qc.barrier(qr)
qc.measure(qr[1], cr[1])
self.qc = qc
self.backend = BasicAer.get_backend("qasm_simulator")
def test_wo_backend_options(self):
"""without backend options test"""
with self.assertWarns(DeprecationWarning):
quantum_instance = QuantumInstance(
self.backend,
seed_transpiler=self.random_seed,
seed_simulator=self.random_seed,
shots=1024,
)
# run without backend_options and without noise
res_wo_bo = quantum_instance.execute(self.qc).get_counts(self.qc)
self.assertGreaterEqual(quantum_instance.time_taken, 0.0)
quantum_instance.reset_execution_results()
quantum_instance.skip_qobj_validation = True
res_wo_bo_skip_validation = quantum_instance.execute(self.qc).get_counts(self.qc)
self.assertGreaterEqual(quantum_instance.time_taken, 0.0)
quantum_instance.reset_execution_results()
self.assertTrue(_compare_dict(res_wo_bo, res_wo_bo_skip_validation))
def METHOD_NAME(self):
"""with backend options test"""
# run with backend_options
with self.assertWarns(DeprecationWarning):
quantum_instance = QuantumInstance(
self.backend,
seed_transpiler=self.random_seed,
seed_simulator=self.random_seed,
shots=1024,
backend_options={"initial_statevector": [0.5, 0.5, 0.5, 0.5]},
)
res_w_bo = quantum_instance.execute(self.qc).get_counts(self.qc)
self.assertGreaterEqual(quantum_instance.time_taken, 0.0)
quantum_instance.reset_execution_results()
quantum_instance.skip_qobj_validation = True
res_w_bo_skip_validation = quantum_instance.execute(self.qc).get_counts(self.qc)
self.assertGreaterEqual(quantum_instance.time_taken, 0.0)
quantum_instance.reset_execution_results()
self.assertTrue(_compare_dict(res_w_bo, res_w_bo_skip_validation))
def test_w_noise(self):
"""with noise test"""
# build noise model
# Asymmetric readout error on qubit-0 only
try:
from qiskit.providers.aer.noise import NoiseModel
from qiskit import Aer
self.backend = Aer.get_backend("qasm_simulator")
except ImportError as ex:
self.skipTest(f"Aer doesn't appear to be installed. Error: '{str(ex)}'")
return
probs_given0 = [0.9, 0.1]
probs_given1 = [0.3, 0.7]
noise_model = NoiseModel()
noise_model.add_readout_error([probs_given0, probs_given1], [0])
with self.assertWarns(DeprecationWarning):
quantum_instance = QuantumInstance(
self.backend,
seed_transpiler=self.random_seed,
seed_simulator=self.random_seed,
shots=1024,
noise_model=noise_model,
)
res_w_noise = quantum_instance.execute(self.qc).get_counts(self.qc)
quantum_instance.skip_qobj_validation = True
res_w_noise_skip_validation = quantum_instance.execute(self.qc).get_counts(self.qc)
self.assertTrue(_compare_dict(res_w_noise, res_w_noise_skip_validation))
with self.assertWarns(DeprecationWarning):
# BasicAer should fail:
with self.assertRaises(QiskitError):
_ = QuantumInstance(BasicAer.get_backend("qasm_simulator"), noise_model=noise_model)
with self.assertRaises(QiskitError):
quantum_instance = QuantumInstance(BasicAer.get_backend("qasm_simulator"))
quantum_instance.set_config(noise_model=noise_model)
if __name__ == "__main__":
unittest.main()
|
4,272 |
extract data
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteServiceProvidersOperations:
"""ExpressRouteServiceProvidersOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_05_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
**kwargs
) -> AsyncIterable["_models.ExpressRouteServiceProviderListResult"]:
"""Gets all the available express route service providers.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ExpressRouteServiceProviderListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_05_01.models.ExpressRouteServiceProviderListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteServiceProviderListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def METHOD_NAME(pipeline_response):
deserialized = self._deserialize('ExpressRouteServiceProviderListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, METHOD_NAME
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/expressRouteServiceProviders'} # type: ignore
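# A minimal usage sketch (illustrative; assumes an authenticated async
# NetworkManagementClient for api-version 2020-05-01 and that this operation
# group is exposed as `express_route_service_providers`):
#
#   async def print_providers(client):
#       async for provider in client.express_route_service_providers.list():
#           print(provider.name)
#
# AsyncItemPaged drives prepare_request/get_next and the data-extraction
# coroutine above, following next_link until the service stops returning one.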
|
4,273 |
rootfn
|
#
# Solver class using scikits.odes DAE solvers
#
import casadi
import pybamm
import numpy as np
import importlib
import scipy.sparse as sparse
scikits_odes_spec = importlib.util.find_spec("scikits")
if scikits_odes_spec is not None:
scikits_odes_spec = importlib.util.find_spec("scikits.odes")
if scikits_odes_spec is not None:
scikits_odes = importlib.util.module_from_spec(scikits_odes_spec)
scikits_odes_spec.loader.exec_module(scikits_odes)
class ScikitsDaeSolver(pybamm.BaseSolver):
"""Solve a discretised model, using scikits.odes.
Parameters
----------
method : str, optional
The scikits.odes method to use (default is "ida")
rtol : float, optional
The relative tolerance for the solver (default is 1e-6).
atol : float, optional
The absolute tolerance for the solver (default is 1e-6).
root_method : str or pybamm algebraic solver class, optional
The method to use to find initial conditions (for DAE solvers).
If a solver class, must be an algebraic solver class.
If "casadi",
the solver uses casadi's Newton rootfinding algorithm to find initial
conditions. Otherwise, the solver uses 'scipy.optimize.root' with method
specified by 'root_method' (e.g. "lm", "hybr", ...)
root_tol : float, optional
The tolerance for the initial-condition solver (default is 1e-6).
extrap_tol : float, optional
The tolerance to assert whether extrapolation occurs or not (default is 0).
extra_options : dict, optional
Any options to pass to the solver.
Please consult `scikits.odes documentation
<https://bmcage.github.io/odes/dev/index.html>`_ for details.
Some common keys:
- 'max_steps': maximum (int) number of steps the solver can take
"""
def __init__(
self,
method="ida",
rtol=1e-6,
atol=1e-6,
root_method="casadi",
root_tol=1e-6,
extrap_tol=None,
extra_options=None,
):
if scikits_odes_spec is None:
raise ImportError("scikits.odes is not installed")
super().__init__(method, rtol, atol, root_method, root_tol, extrap_tol)
self.name = "Scikits DAE solver ({})".format(method)
self.extra_options = extra_options or {}
pybamm.citations.register("Malengier2018")
pybamm.citations.register("Hindmarsh2000")
pybamm.citations.register("Hindmarsh2005")
def _integrate(self, model, t_eval, inputs_dict=None):
"""
Solve a model defined by dydt with initial conditions y0.
Parameters
----------
model : :class:`pybamm.BaseModel`
The model whose solution to calculate.
t_eval : numeric type
The times at which to compute the solution
inputs_dict : dict, optional
Any input parameters to pass to the model when solving
"""
inputs_dict = inputs_dict or {}
if model.convert_to_format == "casadi":
inputs = casadi.vertcat(*[x for x in inputs_dict.values()])
else:
inputs = inputs_dict
y0 = model.y0
if isinstance(y0, casadi.DM):
y0 = y0.full()
y0 = y0.flatten()
rhs_algebraic_eval = model.rhs_algebraic_eval
events = model.terminate_events_eval
jacobian = model.jac_rhs_algebraic_eval
if model.convert_to_format == "jax":
mass_matrix = model.mass_matrix.entries.toarray()
else:
mass_matrix = model.mass_matrix.entries
if model.convert_to_format == "casadi":
def eqsres(t, y, ydot, return_residuals):
return_residuals[:] = (
rhs_algebraic_eval(t, y, inputs).full().flatten()
- mass_matrix @ ydot
)
else:
def eqsres(t, y, ydot, return_residuals):
return_residuals[:] = (
rhs_algebraic_eval(t, y, inputs).flatten() - mass_matrix @ ydot
)
def METHOD_NAME(t, y, ydot, return_root):
return_root[:] = [float(event(t, y, inputs)) for event in events]
extra_options = {
**self.extra_options,
"old_api": False,
"rtol": self.rtol,
"atol": self.atol,
}
if jacobian:
jac_y0_t0 = jacobian(t_eval[0], y0, inputs)
if sparse.issparse(jac_y0_t0):
def jacfn(t, y, ydot, residuals, cj, J):
jac_eval = jacobian(t, y, inputs) - cj * mass_matrix
J[:][:] = jac_eval.toarray()
else:
def jacfn(t, y, ydot, residuals, cj, J):
jac_eval = jacobian(t, y, inputs) - cj * mass_matrix
J[:][:] = jac_eval
extra_options.update({"jacfn": jacfn})
if events:
extra_options.update({"rootfn": METHOD_NAME, "nr_rootfns": len(events)})
# solver works with ydot0 set to zero
ydot0 = np.zeros_like(y0)
# set up and solve
dae_solver = scikits_odes.dae(self.method, eqsres, **extra_options)
timer = pybamm.Timer()
sol = dae_solver.solve(t_eval, y0, ydot0)
integration_time = timer.time()
# return solution, we need to transpose y to match scipy's interface
if sol.flag in [0, 2]:
# 0 = solved for all t_eval
if sol.flag == 0:
termination = "final time"
# 2 = found root(s)
elif sol.flag == 2:
termination = "event"
if sol.roots.t is None:
t_root = None
else:
t_root = sol.roots.t
sol = pybamm.Solution(
sol.values.t,
np.transpose(sol.values.y),
model,
inputs_dict,
t_root,
np.transpose(sol.roots.y),
termination,
)
sol.integration_time = integration_time
return sol
else:
raise pybamm.SolverError(sol.message)
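# A minimal usage sketch (illustrative; assumes scikits.odes is installed and
# `model` is an already discretised pybamm model):
#
#   solver = ScikitsDaeSolver(rtol=1e-8, atol=1e-8,
#                             extra_options={"max_steps": 10000})
#   solution = solver.solve(model, t_eval=np.linspace(0, 3600, 100))
#
# solve() is inherited from pybamm.BaseSolver and ends up calling the
# _integrate method defined above.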
|
4,274 |
generate mars
|
import argparse
import json
import os
import shutil
import subprocess
import sys
import urllib.request
REPO_ROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..")
sys.path.append(REPO_ROOT)
MODEL_STORE_DIR = os.path.join(REPO_ROOT, "model_store_gen")
os.makedirs(MODEL_STORE_DIR, exist_ok=True)
MAR_CONFIG_FILE_PATH = os.path.join(REPO_ROOT, "ts_scripts", "mar_config.json")
def delete_model_store_gen_dir():
print(f"## Deleting model_store_gen_dir: {MODEL_STORE_DIR}\n")
mar_set.clear()
if os.path.exists(MODEL_STORE_DIR):
try:
shutil.rmtree(MODEL_STORE_DIR)
except OSError as e:
print("Error: %s : %s" % (MODEL_STORE_DIR, e.strerror))
mar_set = set()
def gen_mar(model_store=None):
print(f"## Starting gen_mar: {model_store}\n")
if len(mar_set) == 0:
METHOD_NAME(mar_config=MAR_CONFIG_FILE_PATH, model_store_dir=MODEL_STORE_DIR)
if model_store is not None and os.path.exists(model_store):
print("## Create symlink for mar files\n")
for mar_file in mar_set:
src = f"{MODEL_STORE_DIR}/{mar_file}"
dst = f"{model_store}/{mar_file}"
if os.path.exists(dst):
print(f"## {dst} already exists.\n")
else:
os.symlink(src, dst)
print(f"## Symlink {src}, {dst} successfully.")
def METHOD_NAME(mar_config=MAR_CONFIG_FILE_PATH, model_store_dir=MODEL_STORE_DIR):
"""
By default generate_mars reads ts_scripts/mar_config.json and writes the generated mar files to the model_store_gen directory
- mar_config.json defines a list of models' mar file parameters. They are:
- "model_name": model name
- "version": model version
- "model_file": the path of file model.py
- "serialized_file_remote": the url of file .pth or .pt
- "serialized_file_local": the path of file .pth or .pt
- "gen_scripted_file_path": the python script path of building .pt file
- "handler": handler can be either default handler or handler path
- "extra_files": the paths of extra files
Note: to generate a .pt file, both "serialized_file_remote" and "gen_scripted_file_path" must be provided
"""
print(
f"## Starting generate_mars, mar_config:{mar_config}, model_store_dir:{model_store_dir}\n"
)
mar_set.clear()
cwd = os.getcwd()
os.chdir(REPO_ROOT)
with open(mar_config) as f:
models = json.loads(f.read())
for model in models:
serialized_file_path = None
if model.get("serialized_file_remote") and model["serialized_file_remote"]:
if (
model.get("gen_scripted_file_path")
and model["gen_scripted_file_path"]
):
subprocess.run(["python", model["gen_scripted_file_path"]])
else:
serialized_model_file_url = (
"https://download.pytorch.org/models/{}".format(
model["serialized_file_remote"]
)
)
urllib.request.urlretrieve(
serialized_model_file_url,
f'{model_store_dir}/{model["serialized_file_remote"]}',
)
serialized_file_path = os.path.join(
model_store_dir, model["serialized_file_remote"]
)
elif model.get("serialized_file_local") and model["serialized_file_local"]:
serialized_file_path = model["serialized_file_local"]
handler = model.get("handler", None)
extra_files = model.get("extra_files", None)
runtime = model.get("runtime", None)
archive_format = model.get("archive_format", "zip-store")
requirements_file = model.get("requirements_file", None)
export_path = model.get("export_path", model_store_dir)
cmd = model_archiver_command_builder(
model["model_name"],
model["version"],
model["model_file"],
serialized_file_path,
handler,
extra_files,
runtime,
archive_format,
requirements_file,
export_path,
)
print(f"## In directory: {os.getcwd()} | Executing command: {cmd}\n")
try:
subprocess.check_call(cmd, shell=True)
marfile = "{}.mar".format(model["model_name"])
print("## {} is generated.\n".format(marfile))
mar_set.add(marfile)
except subprocess.CalledProcessError as exc:
print(
"## {} creation failed !, error: {}\n".format(
model["model_name"], exc
)
)
if (
model.get("serialized_file_remote")
and model["serialized_file_remote"]
and os.path.exists(serialized_file_path)
):
os.remove(serialized_file_path)
os.chdir(cwd)
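# An illustrative mar_config.json entry using the fields documented above (all
# paths and names are hypothetical):
#
# [
#   {
#     "model_name": "mnist",
#     "version": "1.0",
#     "model_file": "examples/image_classifier/mnist/mnist.py",
#     "serialized_file_local": "examples/image_classifier/mnist/mnist_cnn.pt",
#     "handler": "image_classifier"
#   }
# ]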
def model_archiver_command_builder(
model_name=None,
version=None,
model_file=None,
serialized_file=None,
handler=None,
extra_files=None,
runtime=None,
archive_format=None,
requirements_file=None,
export_path=None,
force=True,
):
cmd = "torch-model-archiver"
if model_name:
cmd += " --model-name {0}".format(model_name)
if version:
cmd += " --version {0}".format(version)
if model_file:
cmd += " --model-file {0}".format(model_file)
if serialized_file:
cmd += " --serialized-file {0}".format(serialized_file)
if handler:
cmd += " --handler {0}".format(handler)
if extra_files:
cmd += " --extra-files {0}".format(extra_files)
if runtime:
cmd += " --runtime {0}".format(runtime)
if archive_format:
cmd += " --archive-format {0}".format(archive_format)
if requirements_file:
cmd += " --requirements-file {0}".format(requirements_file)
if export_path:
cmd += " --export-path {0}".format(export_path)
if force:
cmd += " --force"
return cmd
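# For the illustrative entry above, model_archiver_command_builder produces a
# single command string roughly like (wrapped here for readability):
#
#   torch-model-archiver --model-name mnist --version 1.0
#       --model-file examples/image_classifier/mnist/mnist.py
#       --serialized-file examples/image_classifier/mnist/mnist_cnn.pt
#       --handler image_classifier --archive-format zip-store
#       --export-path <model_store_dir> --force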
if __name__ == "__main__":
# cmd:
# python ts_scripts/marsgen.py
# python ts_scripts/marsgen.py --config my_mar_config.json
parser = argparse.ArgumentParser(description="Generate model mar files")
parser.add_argument(
"--config",
default=MAR_CONFIG_FILE_PATH,
help="mar file configuration json file",
)
parser.add_argument(
"--model-store", default=MODEL_STORE_DIR, help="model store dir"
)
args = parser.parse_args()
METHOD_NAME(args.config, args.model_store)
|
4,275 |
resize sample
|
# Part of the implementation is borrowed and modified from PackNet-SfM,
# made publicly available under the MIT License at https://github.com/TRI-ML/packnet-sfm
import random
import cv2
import numpy as np
import torchvision.transforms as transforms
from PIL import Image
from modelscope.models.cv.video_depth_estimation.utils.misc import filter_dict
########################################################################################################################
def resize_image(image, shape, interpolation=Image.Resampling.LANCZOS):
"""
Resizes input image.
Parameters
----------
image : Image.PIL
Input image
shape : tuple [H,W]
Output shape
interpolation : int
Interpolation mode
Returns
-------
image : Image.PIL
Resized image
"""
transform = transforms.Resize(shape, interpolation=interpolation)
return transform(image)
def resize_depth(depth, shape):
"""
Resizes depth map.
Parameters
----------
depth : np.array [h,w]
Depth map
shape : tuple (H,W)
Output shape
Returns
-------
depth : np.array [H,W]
Resized depth map
"""
depth = cv2.resize(
depth, dsize=shape[::-1], interpolation=cv2.INTER_NEAREST)
return np.expand_dims(depth, axis=2)
def resize_sample_image_and_intrinsics(sample, shape, image_interpolation=Image.Resampling.LANCZOS):
"""
Resizes the image and intrinsics of a sample
Parameters
----------
sample : dict
Dictionary with sample values
shape : tuple (H,W)
Output shape
image_interpolation : int
Interpolation mode
Returns
-------
sample : dict
Resized sample
"""
# Resize image and corresponding intrinsics
image_transform = transforms.Resize(
shape, interpolation=image_interpolation)
(orig_w, orig_h) = sample['rgb'].size
(out_h, out_w) = shape
# Scale intrinsics
for key in filter_dict(sample, ['intrinsics']):
intrinsics = np.copy(sample[key])
intrinsics[0] *= out_w / orig_w
intrinsics[1] *= out_h / orig_h
sample[key] = intrinsics
# Scale images
for key in filter_dict(sample, [
'rgb',
'rgb_original',
]):
sample[key] = image_transform(sample[key])
# Scale context images
for key in filter_dict(sample, [
'rgb_context',
'rgb_context_original',
]):
sample[key] = [image_transform(k) for k in sample[key]]
# Return resized sample
return sample
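# Worked example of the intrinsics scaling above (illustrative numbers): when a
# 1242x375 image is resized to shape (192, 640), row 0 of the 3x3 intrinsics
# (fx, skew, cx) is multiplied by 640/1242 and row 1 (fy, cy) by 192/375, so the
# pinhole camera model stays consistent with the resized pixels.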
def METHOD_NAME(sample, shape, image_interpolation=Image.Resampling.LANCZOS):
"""
Resizes a sample, including image, intrinsics and depth maps.
Parameters
----------
sample : dict
Dictionary with sample values
shape : tuple (H,W)
Output shape
image_interpolation : int
Interpolation mode
Returns
-------
sample : dict
Resized sample
"""
# Resize image and intrinsics
sample = resize_sample_image_and_intrinsics(sample, shape,
image_interpolation)
# Resize depth maps
for key in filter_dict(sample, [
'depth',
]):
sample[key] = resize_depth(sample[key], shape)
# Resize depth contexts
for key in filter_dict(sample, [
'depth_context',
]):
sample[key] = [resize_depth(k, shape) for k in sample[key]]
# Return resized sample
return sample
########################################################################################################################
def to_tensor(image, tensor_type='torch.FloatTensor'):
"""Casts an image to a torch.Tensor"""
transform = transforms.ToTensor()
return transform(image).type(tensor_type)
def to_tensor_sample(sample, tensor_type='torch.FloatTensor'):
"""
Casts the keys of sample to tensors.
Parameters
----------
sample : dict
Input sample
tensor_type : str
Type of tensor we are casting to
Returns
-------
sample : dict
Sample with keys cast as tensors
"""
transform = transforms.ToTensor()
# Convert single items
for key in filter_dict(sample, [
'rgb',
'rgb_original',
'depth',
]):
sample[key] = transform(sample[key]).type(tensor_type)
# Convert lists
for key in filter_dict(
sample, ['rgb_context', 'rgb_context_original', 'depth_context']):
sample[key] = [transform(k).type(tensor_type) for k in sample[key]]
# Return converted sample
return sample
########################################################################################################################
def duplicate_sample(sample):
"""
Duplicates sample images and contexts to preserve their unaugmented versions.
Parameters
----------
sample : dict
Input sample
Returns
-------
sample : dict
Sample including [+"_original"] keys with copies of images and contexts.
"""
# Duplicate single items
for key in filter_dict(sample, ['rgb']):
sample['{}_original'.format(key)] = sample[key].copy()
# Duplicate lists
for key in filter_dict(sample, ['rgb_context']):
sample['{}_original'.format(key)] = [k.copy() for k in sample[key]]
# Return duplicated sample
return sample
def colorjitter_sample(sample, parameters, prob=1.0):
"""
Jitters input images as data augmentation.
Parameters
----------
sample : dict
Input sample
parameters : tuple (brightness, contrast, saturation, hue)
Color jittering parameters
prob : float
Jittering probability
Returns
-------
sample : dict
Jittered sample
"""
if random.random() < prob:
# Prepare transformation
color_augmentation = transforms.ColorJitter()
brightness, contrast, saturation, hue = parameters
augment_image = color_augmentation.get_params(
brightness=[max(0, 1 - brightness), 1 + brightness],
contrast=[max(0, 1 - contrast), 1 + contrast],
saturation=[max(0, 1 - saturation), 1 + saturation],
hue=[-hue, hue])
# Jitter single items
for key in filter_dict(sample, ['rgb']):
sample[key] = augment_image(sample[key])
# Jitter lists
for key in filter_dict(sample, ['rgb_context']):
sample[key] = [augment_image(k) for k in sample[key]]
# Return jittered sample
return sample
########################################################################################################################
|
4,276 |
test no desktop section
|
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2019 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from textwrap import dedent
import pytest
from snapcraft_legacy.internal.meta import errors
from snapcraft_legacy.internal.meta.desktop import DesktopFile
class TestDesktopExec:
scenarios = (
(
"snap name != app name",
dict(app_name="bar", app_args="", expected_exec="foo.bar"),
),
(
"snap name != app name",
dict(app_name="bar", app_args="--arg", expected_exec="foo.bar --arg"),
),
(
"snap name == app name",
dict(app_name="foo", app_args="", expected_exec="foo"),
),
(
"snap name == app name",
dict(app_name="foo", app_args="--arg", expected_exec="foo --arg"),
),
(
"snap name == app name",
dict(app_name="foo", app_args="--arg %U", expected_exec="foo --arg %U"),
),
(
"snap name == app name",
dict(app_name="foo", app_args="%U", expected_exec="foo %U"),
),
)
def test_generate_desktop_file(
self, tmp_work_path, app_name, app_args, expected_exec
):
snap_name = "foo"
desktop_file_path = tmp_work_path / "app.desktop"
with desktop_file_path.open("w") as desktop_file:
print("[Desktop Entry]", file=desktop_file)
print(
"Exec={}".format(" ".join(["in-snap-exe", app_args])), file=desktop_file
)
d = DesktopFile(
snap_name=snap_name,
app_name=app_name,
filename=desktop_file_path,
prime_dir=tmp_work_path.as_posix(),
)
d.write(gui_dir=".")
expected_desktop_file = tmp_work_path / f"{app_name}.desktop"
assert expected_desktop_file.exists()
with expected_desktop_file.open() as desktop_file:
assert (
desktop_file.read()
== dedent(
"""\
[Desktop Entry]
Exec={}
"""
).format(expected_exec)
)
class TestDesktopIcon:
scenarios = (
(
"icon_path preferred",
dict(
icon="other.png", icon_path="foo.png", expected_icon="${SNAP}/foo.png"
),
),
(
"icon_path with / preferred",
dict(icon="/foo.png", icon_path="foo.png", expected_icon="${SNAP}/foo.png"),
),
(
"icon path with ${SNAP}",
dict(
icon="${SNAP}/foo.png", icon_path=None, expected_icon="${SNAP}/foo.png"
),
),
("icon name", dict(icon="foo", icon_path=None, expected_icon="foo")),
)
def test_generate_desktop_file(self, tmp_work_path, icon, icon_path, expected_icon):
snap_name = app_name = "foo"
desktop_file_path = tmp_work_path / "app.desktop"
with desktop_file_path.open("w") as desktop_file:
print("[Desktop Entry]", file=desktop_file)
print("Exec=in-snap-exe", file=desktop_file)
print("Icon={}".format(icon), file=desktop_file)
if icon_path is not None:
(tmp_work_path / icon_path).touch()
d = DesktopFile(
snap_name=snap_name,
app_name=app_name,
filename=desktop_file_path,
prime_dir=tmp_work_path.as_posix(),
)
d.write(gui_dir=".")
if icon_path is not None:
d.write(icon_path=icon_path, gui_dir=".")
else:
d.write(gui_dir=".")
expected_desktop_file = tmp_work_path / f"{app_name}.desktop"
assert expected_desktop_file.exists()
with expected_desktop_file.open() as desktop_file:
assert (
desktop_file.read()
== dedent(
"""\
[Desktop Entry]
Exec=foo
Icon={}
"""
).format(expected_icon)
)
def test_generate_desktop_file_multisection(
self, tmp_work_path, icon, icon_path, expected_icon
):
snap_name = app_name = "foo"
desktop_file_path = tmp_work_path / "app.desktop"
with desktop_file_path.open("w") as desktop_file:
print("[Desktop Entry]", file=desktop_file)
print("Exec=in-snap-exe", file=desktop_file)
print("Icon={}".format(icon), file=desktop_file)
print("[Desktop Entry Two]", file=desktop_file)
print("Exec=in-snap-exe2", file=desktop_file)
print("Icon={}".format(icon), file=desktop_file)
if icon_path is not None:
(tmp_work_path / icon_path).touch()
d = DesktopFile(
snap_name=snap_name,
app_name=app_name,
filename=desktop_file_path,
prime_dir=tmp_work_path.as_posix(),
)
if icon_path is not None:
d.write(icon_path=icon_path, gui_dir=".")
else:
d.write(gui_dir=".")
expected_desktop_file = tmp_work_path / f"{app_name}.desktop"
assert expected_desktop_file.exists()
with expected_desktop_file.open() as desktop_file:
assert (
desktop_file.read()
== dedent(
"""\
[Desktop Entry]
Exec=foo
Icon={0}
[Desktop Entry Two]
Exec=foo
Icon={0}
"""
).format(expected_icon)
)
def test_not_found(tmp_path):
with pytest.raises(errors.InvalidDesktopFileError):
DesktopFile(
snap_name="foo",
app_name="foo",
filename="desktop-file-not-found",
prime_dir=tmp_path.as_posix(),
)
def METHOD_NAME(tmp_work_path):
with open("foo.desktop", "w") as desktop_file:
print("[Random Entry]", file=desktop_file)
print("Exec=foo", file=desktop_file)
print("Icon=foo", file=desktop_file)
d = DesktopFile(
snap_name="foo",
app_name="foo",
filename="foo.desktop",
prime_dir=tmp_work_path.as_posix(),
)
with pytest.raises(errors.InvalidDesktopFileError):
d.write(gui_dir=tmp_work_path.as_posix())
def test_missing_exec_entry(tmp_work_path):
with open("foo.desktop", "w") as desktop_file:
print("[Desktop Entry]", file=desktop_file)
print("Icon=foo", file=desktop_file)
d = DesktopFile(
snap_name="foo",
app_name="foo",
filename="foo.desktop",
prime_dir=tmp_work_path.as_posix(),
)
with pytest.raises(errors.InvalidDesktopFileError):
d.write(gui_dir=tmp_work_path.as_posix())
|
4,277 |
check new session
|
import importlib.resources
import logging
import platform
import sys
import textwrap
import time
import tempfile
import cairo
import gi
from gi.repository import Adw, Gdk, Gio, GLib, Gtk, GtkSource, Pango
from gaphor.abc import Service
from gaphor.application import Application, distribution
from gaphor.core import Transaction
from gaphor.core.modeling import Diagram
try:
import pygit2
except ImportError:
pass
log = logging.getLogger(__name__)
class Status:
def __init__(self, name):
self.name = name
self.status = "in progress"
def complete(self):
self.status = "completed"
def skip(self):
self.status = "skipped"
@property
def in_progress(self):
return self.status == "in progress"
@property
def completed(self):
return self.status in ("completed", "skipped")
def __repr__(self):
return f"{self.name}: {self.status}"
def test(func):
"""A test function."""
def wrapper(self):
status = Status(func.__name__)
self.statuses.append(status)
try:
return func(self, status)
except BaseException:
log.exception("Test %s failed", func.__name__)
status.status = "failed"
return wrapper
class SelfTest(Service):
def __init__(self, application: Application):
self.application = application
self.statuses: list[Status] = []
def shutdown(self):
pass
def init(self, gtk_app):
windows_console_output_workaround()
self.init_timer(gtk_app, timeout=30)
self.test_library_versions()
self.test_gsettings_schemas()
self.test_new_session()
self.test_auto_layout()
self.test_git_support()
def init_timer(self, gtk_app, timeout):
start = time.time()
def callback():
if time.time() > start + timeout:
log.error("Tests timed out")
gtk_app.exit_code = 1
elif any(status.in_progress for status in self.statuses):
return GLib.SOURCE_CONTINUE
elif all(status.completed for status in self.statuses):
log.info(
"All tests have been completed in %.1fs",
time.time() - start,
)
else:
log.error("Not all tests have passed")
gtk_app.exit_code = 1
for status in self.statuses:
log.info(status)
gtk_app.quit()
return GLib.SOURCE_REMOVE
GLib.timeout_add(priority=GLib.PRIORITY_LOW, interval=100, function=callback)
@test
def test_library_versions(self, status):
log.info(
"System information:\n\n%s", textwrap.indent(system_information(), "\t")
)
status.complete()
@test
def test_new_session(self, status):
with (importlib.resources.files("gaphor") / "templates" / "uml.gaphor").open(
encoding="utf-8"
) as f:
session = self.application.new_session(template=f)
def METHOD_NAME(session):
main_window = session.get_service("main_window")
if main_window.window and main_window.window.get_visible():
status.complete()
return GLib.SOURCE_REMOVE
else:
return GLib.SOURCE_CONTINUE
GLib.idle_add(METHOD_NAME, session, priority=GLib.PRIORITY_LOW)
@test
def test_gsettings_schemas(self, status):
source = Gio.settings_schema_source_get_default()
if source.lookup("org.gtk.gtk4.Settings.FileChooser", recursive=True):
log.info(
"Schemas found in data dirs: %s",
":".join(GLib.get_system_data_dirs()),
)
status.complete()
else:
log.error(
"Could not find schemas in data dirs: %s",
":".join(GLib.get_system_data_dirs()),
)
log.info("Schemas found: %s %s", *source.list_schemas(True))
@test
def test_auto_layout(self, status):
session = self.application.new_session()
event_manager = session.get_service("event_manager")
element_factory = session.get_service("element_factory")
auto_layout = session.get_service("auto_layout")
with Transaction(event_manager):
diagram = element_factory.create(Diagram)
auto_layout.layout(diagram)
status.complete()
@test
def test_git_support(self, status):
if "pygit2" not in globals():
status.skip()
return
with tempfile.TemporaryDirectory() as temp_dir:
pygit2.init_repository(temp_dir)
status.complete()
def system_information():
return textwrap.dedent(
f"""\
Gaphor version: {distribution().version}
Operating System: {platform.system()} ({platform.release()})
Display: {display_type()}
Python version: {platform.python_version()}
GTK version: {Gtk.get_major_version()}.{Gtk.get_minor_version()}.{Gtk.get_micro_version()}
Adwaita version: {Adw.get_major_version()}.{Adw.get_minor_version()}.{Adw.get_micro_version()}
GtkSourceView version: {gtk_source_view_version()}
Cairo version: {cairo.cairo_version_string()}
Pango version: {Pango.version_string()}
PyGObject version: {gi.__version__}
Pycairo version: {cairo.version}
pygit2/libgit2 version: {"pygit2" in globals() and f"{pygit2.__version__} / {pygit2.LIBGIT2_VERSION}" or "-NONE-"}
"""
)
def display_type():
dm = Gdk.DisplayManager.get()
display = dm.get_default_display()
return display.__class__.__name__ if display else "none"
def gtk_source_view_version():
if hasattr(GtkSource, "get_major_version"):
return f"{GtkSource.get_major_version()}.{GtkSource.get_minor_version()}.{GtkSource.get_micro_version()}"
else:
return "-"
def windows_console_output_workaround():
if sys.platform == "win32":
from gaphor.main import LOG_FORMAT
logging.basicConfig(
level=logging.INFO,
format=LOG_FORMAT,
filename="gaphor-self-test.txt",
filemode="w",
force=True,
encoding="utf-8",
)
|
4,278 |
test out file
|
#!/usr/bin/env python
# Copyright 2018, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_json_output module."""
import json
import os
from googletest.test import gtest_json_test_utils
from googletest.test import gtest_test_utils
GTEST_OUTPUT_SUBDIR = 'json_outfiles'
GTEST_OUTPUT_1_TEST = 'gtest_xml_outfile1_test_'
GTEST_OUTPUT_2_TEST = 'gtest_xml_outfile2_test_'
EXPECTED_1 = {
u'tests':
1,
u'failures':
0,
u'disabled':
0,
u'errors':
0,
u'time':
u'*',
u'timestamp':
u'*',
u'name':
u'AllTests',
u'testsuites': [{
u'name':
u'PropertyOne',
u'tests':
1,
u'failures':
0,
u'disabled':
0,
u'errors':
0,
u'time':
u'*',
u'timestamp':
u'*',
u'testsuite': [{
u'name': u'TestSomeProperties',
u'file': u'gtest_xml_outfile1_test_.cc',
u'line': 41,
u'status': u'RUN',
u'result': u'COMPLETED',
u'time': u'*',
u'timestamp': u'*',
u'classname': u'PropertyOne',
u'SetUpProp': u'1',
u'TestSomeProperty': u'1',
u'TearDownProp': u'1',
}],
}],
}
EXPECTED_2 = {
u'tests':
1,
u'failures':
0,
u'disabled':
0,
u'errors':
0,
u'time':
u'*',
u'timestamp':
u'*',
u'name':
u'AllTests',
u'testsuites': [{
u'name':
u'PropertyTwo',
u'tests':
1,
u'failures':
0,
u'disabled':
0,
u'errors':
0,
u'time':
u'*',
u'timestamp':
u'*',
u'testsuite': [{
u'name': u'TestSomeProperties',
u'file': u'gtest_xml_outfile2_test_.cc',
u'line': 41,
u'status': u'RUN',
u'result': u'COMPLETED',
u'timestamp': u'*',
u'time': u'*',
u'classname': u'PropertyTwo',
u'SetUpProp': u'2',
u'TestSomeProperty': u'2',
u'TearDownProp': u'2',
}],
}],
}
class GTestJsonOutFilesTest(gtest_test_utils.TestCase):
"""Unit test for Google Test's JSON output functionality."""
def setUp(self):
# We want the trailing '/' that the last "" provides in os.path.join, for
# telling Google Test to create an output directory instead of a single file
# for JSON output.
self.output_dir_ = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_OUTPUT_SUBDIR, '')
self.DeleteFilesAndDir()
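# Simple illustration of the trailing separator mentioned above (POSIX paths):
#
#   os.path.join('/tmp/work', GTEST_OUTPUT_SUBDIR, '') == '/tmp/work/json_outfiles/'
#
# Passing that directory to --gtest_output=json: makes each test binary write
# its own <binary name>.json file into the directory.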
def tearDown(self):
self.DeleteFilesAndDir()
def DeleteFilesAndDir(self):
try:
os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_1_TEST + '.json'))
except os.error:
pass
try:
os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_2_TEST + '.json'))
except os.error:
pass
try:
os.rmdir(self.output_dir_)
except os.error:
pass
def testOutfile1(self):
self.METHOD_NAME(GTEST_OUTPUT_1_TEST, EXPECTED_1)
def testOutfile2(self):
self.METHOD_NAME(GTEST_OUTPUT_2_TEST, EXPECTED_2)
def METHOD_NAME(self, test_name, expected):
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(test_name)
command = [gtest_prog_path, '--gtest_output=json:%s' % self.output_dir_]
p = gtest_test_utils.Subprocess(command,
working_dir=gtest_test_utils.GetTempDir())
self.assert_(p.exited)
self.assertEquals(0, p.exit_code)
output_file_name1 = test_name + '.json'
output_file1 = os.path.join(self.output_dir_, output_file_name1)
output_file_name2 = 'lt-' + output_file_name1
output_file2 = os.path.join(self.output_dir_, output_file_name2)
self.assert_(os.path.isfile(output_file1) or os.path.isfile(output_file2),
output_file1)
if os.path.isfile(output_file1):
with open(output_file1) as f:
actual = json.load(f)
else:
with open(output_file2) as f:
actual = json.load(f)
self.assertEqual(expected, gtest_json_test_utils.normalize(actual))
if __name__ == '__main__':
os.environ['GTEST_STACK_TRACE_DEPTH'] = '0'
gtest_test_utils.Main()
|
4,279 |
find line style index
|
from qtpy.QtWidgets import QComboBox, QDoubleSpinBox, QHBoxLayout, QLabel, QWidget
from ert.gui.plottery import PlotStyle
STYLE_OFF = ("Off", None)
STYLE_AREA = ("Area", "#")
STYLE_SOLID = ("Solid", "-")
STYLE_DASHED = ("Dashed", "--")
STYLE_DOTTED = ("Dotted", ":")
STYLE_DASH_DOTTED = ("Dash dotted", "-.")
STYLESET_DEFAULT = "default"
STYLESET_AREA = "area"
STYLESET_TOGGLE = "toggle_only"
STYLES = {
STYLESET_DEFAULT: [
STYLE_OFF,
STYLE_SOLID,
STYLE_DASHED,
STYLE_DOTTED,
STYLE_DASH_DOTTED,
],
STYLESET_AREA: [
STYLE_OFF,
STYLE_AREA,
STYLE_SOLID,
STYLE_DASHED,
STYLE_DOTTED,
STYLE_DASH_DOTTED,
],
STYLESET_TOGGLE: [STYLE_OFF, STYLE_SOLID],
}
MARKER_OFF = ("Off", None)
MARKER_X = ("X", "x")
MARKER_CIRCLE = ("Circle", "o")
MARKER_POINT = ("Point", ".")
MARKER_PIXEL = ("Pixel", ",")
MARKER_PLUS = ("Plus", "+")
MARKER_STAR = ("Star", "*")
MARKER_DIAMOND = ("Diamond", "D")
MARKER_PENTAGON = ("Pentagon", "p")
MARKER_SQUARE = ("Square", "s")
MARKER_HLINE = ("H Line", "_")
MARKER_VLINE = ("V Line", "|")
MARKER_OCTAGON = ("Octagon", "8")
MARKER_HEXAGON1 = ("Hexagon 1", "h")
MARKER_HEXAGON2 = ("Hexagon 2", "H")
MARKERS = [
MARKER_OFF,
MARKER_X,
MARKER_CIRCLE,
MARKER_POINT,
MARKER_STAR,
MARKER_DIAMOND,
MARKER_PLUS,
MARKER_PENTAGON,
MARKER_SQUARE,
MARKER_OCTAGON,
MARKER_HEXAGON1,
MARKER_HEXAGON2,
]
class StyleChooser(QWidget):
def __init__(self, line_style_set=STYLESET_DEFAULT):
QWidget.__init__(self)
self._style = PlotStyle("StyleChooser internal style")
self._styles = (
STYLES["default"]
if line_style_set not in STYLES
else STYLES[line_style_set]
)
self.setMinimumWidth(140)
self.setMaximumHeight(25)
layout = QHBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
layout.setSpacing(2)
self.line_chooser = QComboBox()
self.line_chooser.setToolTip("Select line style.")
for style in self._styles:
self.line_chooser.addItem(*style)
self.marker_chooser = QComboBox()
self.marker_chooser.setToolTip("Select marker style.")
for marker in MARKERS:
self.marker_chooser.addItem(*marker)
self.thickness_spinner = QDoubleSpinBox()
self.thickness_spinner.setToolTip("Line thickness")
self.thickness_spinner.setMinimum(0.1)
self.thickness_spinner.setDecimals(1)
self.thickness_spinner.setSingleStep(0.1)
self.size_spinner = QDoubleSpinBox()
self.size_spinner.setToolTip("Marker size")
self.size_spinner.setMinimum(0.1)
self.size_spinner.setDecimals(1)
self.size_spinner.setSingleStep(0.1)
# the text content of the spinner varies, but shouldn't push the control
# out of boundaries
self.line_chooser.setMinimumWidth(110)
layout.addWidget(self.line_chooser)
layout.addWidget(self.thickness_spinner)
layout.addWidget(self.marker_chooser)
layout.addWidget(self.size_spinner)
self.setLayout(layout)
self.line_chooser.currentIndexChanged.connect(self._updateStyle)
self.marker_chooser.currentIndexChanged.connect(self._updateStyle)
self.thickness_spinner.valueChanged.connect(self._updateStyle)
self.size_spinner.valueChanged.connect(self._updateStyle)
self._updateLineStyleAndMarker(
self._style.line_style,
self._style.marker,
self._style.width,
self._style.size,
)
self._layout = layout
def getItemSizes(self):
line_style_combo_width = self._layout.itemAt(0).sizeHint().width()
thickness_spinner_width = self._layout.itemAt(1).sizeHint().width()
marker_combo_width = self._layout.itemAt(2).sizeHint().width()
size_spinner_width = self._layout.itemAt(3).sizeHint().width()
return (
line_style_combo_width,
thickness_spinner_width,
marker_combo_width,
size_spinner_width,
)
def METHOD_NAME(self, line_style):
for index, style in enumerate(self._styles):
if (style[1] == line_style) or (style[1] is None and line_style == ""):
return index
return -1
def _findMarkerStyleIndex(self, marker):
for index, style in enumerate(MARKERS):
if (style[1] == marker) or (style[1] is None and marker == ""):
return index
return -1
def _updateLineStyleAndMarker(self, line_style, marker, thickness, size):
self.line_chooser.setCurrentIndex(self.METHOD_NAME(line_style))
self.marker_chooser.setCurrentIndex(self._findMarkerStyleIndex(marker))
self.thickness_spinner.setValue(thickness)
self.size_spinner.setValue(size)
def _updateStyle(self):
self.marker_chooser.setEnabled(self.line_chooser.currentText() != "Area")
line_style = self.line_chooser.itemData(self.line_chooser.currentIndex())
marker_style = self.marker_chooser.itemData(self.marker_chooser.currentIndex())
thickness = float(self.thickness_spinner.value())
size = float(self.size_spinner.value())
self._style.line_style = line_style
self._style.marker = marker_style
self._style.width = thickness
self._style.size = size
def setStyle(self, style: PlotStyle):
self._style.copyStyleFrom(style)
self._updateLineStyleAndMarker(
style.line_style, style.marker, style.width, style.size
)
def getStyle(self) -> PlotStyle:
style = PlotStyle("Generated style from StyleChooser")
style.copyStyleFrom(self._style)
return style
def createLabelLayout(self, layout=None):
if layout is None:
layout = QHBoxLayout()
titles = ["Line style", "Width", "Marker style", "Size"]
sizes = self.getItemSizes()
for title, size in zip(titles, sizes):
label = QLabel(title)
label.setFixedWidth(size)
layout.addWidget(label)
return layout
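# A minimal usage sketch (illustrative; assumes a running Qt application, as any
# QWidget requires, and an existing PlotStyle instance `initial_style`):
#
#   chooser = StyleChooser(line_style_set=STYLESET_AREA)
#   chooser.setStyle(initial_style)
#   ...user edits line style, marker, thickness and size...
#   edited_style = chooser.getStyle()  # a copy of the internal PlotStyle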
|
4,280 |
get level
|
#!/usr/bin/env python
# datetime:2020/5/22 18:29
from django.utils.translation import gettext_lazy as _
from rest_framework import serializers
from dongtai_common.models.agent import IastAgent
from dongtai_common.models.header_vulnerablity import IastHeaderVulnerability
from dongtai_common.models.hook_type import HookType
from dongtai_common.models.strategy import IastStrategyModel
from dongtai_common.models.vul_level import IastVulLevel
from dongtai_common.models.vulnerablity import (
IastVulnerabilityModel,
IastVulnerabilityStatus,
)
from dongtai_web.header_vul.base import HeaderVulSerializer
class HeaderVulUrlSerializer(HeaderVulSerializer):
class Meta:
model = IastHeaderVulnerability
fields = ("url",)
class VulSerializer(serializers.ModelSerializer):
language = serializers.SerializerMethodField()
type = serializers.SerializerMethodField()
AGENT_LANGUAGE_MAP: dict[int, str] = {}
status = serializers.SerializerMethodField()
is_header_vul = serializers.SerializerMethodField()
header_vul_urls = serializers.SerializerMethodField()
class Meta:
model = IastVulnerabilityModel
fields = [
"id",
"type",
"hook_type_id",
"url",
"uri",
"agent_id",
"level_id",
"http_method",
"top_stack",
"bottom_stack",
"taint_position",
"latest_time",
"first_time",
"language",
"status",
"header_vul_urls",
"is_header_vul",
]
@staticmethod
def split_container_name(name):
if name is None:
return ""
if "/" in name:
return name.split("/")[0].lower().strip()
if " " in name:
names = name.split(" ")[:-1]
return " ".join(names).lower().strip()
return name
@staticmethod
def judge_is_header_vul(strategy_id: int):
if strategy_id in (28, 29, 30, 31, 32):
return True
return False
@staticmethod
def find_all_urls(pk: int):
"""
Only for header vulnerability.
"""
return HeaderVulUrlSerializer(IastHeaderVulnerability.objects.filter(vul_id=pk).all(), many=True).data
def get_language(self, obj):
if obj["agent_id"] not in self.AGENT_LANGUAGE_MAP:
agent_model = IastAgent.objects.filter(id=obj["agent_id"]).first()
if agent_model:
self.AGENT_LANGUAGE_MAP[obj["agent_id"]] = agent_model.language
return self.AGENT_LANGUAGE_MAP[obj["agent_id"]]
def get_type(self, obj):
hook_type = HookType.objects.filter(pk=obj["hook_type_id"]).first()
hook_type_name = hook_type.name if hook_type else None
strategy = IastStrategyModel.objects.filter(pk=obj["strategy_id"]).first()
strategy_name = strategy.vul_name if strategy else None
type_ = list(filter(lambda x: x is not None, [strategy_name, hook_type_name]))
return type_[0] if type_ else ""
def get_status(self, obj):
status = IastVulnerabilityStatus.objects.filter(pk=obj["status_id"]).first()
return status.name if status else ""
def get_is_header_vul(self, obj):
return VulSerializer.judge_is_header_vul(obj["strategy_id"])
def get_header_vul_urls(self, obj):
if VulSerializer.judge_is_header_vul(obj["strategy_id"]):
return VulSerializer.find_all_urls(obj["id"])
return []
class VulForPluginSerializer(serializers.ModelSerializer):
type = serializers.SerializerMethodField()
level = serializers.SerializerMethodField(help_text=_("The level name of the vulnerability"))
class Meta:
model = IastVulnerabilityModel
fields = [
"id",
"type",
"level_id",
"url",
"http_method",
"top_stack",
"bottom_stack",
"hook_type_id",
"level",
]
def get_type(self, obj):
hook_type = HookType.objects.filter(pk=obj["hook_type_id"]).first()
hook_type_name = hook_type.name if hook_type else None
strategy = IastStrategyModel.objects.filter(pk=obj["strategy_id"]).first()
strategy_name = strategy.vul_name if strategy else None
type_ = list(filter(lambda x: x is not None, [strategy_name, hook_type_name]))
return type_[0] if type_ else ""
def METHOD_NAME(self, obj):
level = IastVulLevel.objects.filter(pk=obj["level_id"]).first()
return level.name_value if level else ""
class VulSummaryLanguageSerializer(serializers.Serializer):
language = serializers.CharField(help_text=_("programming language"))
count = serializers.IntegerField(
help_text=_("The number of vulnerabilities corresponding to the programming language")
)
class VulSummaryLevelSerializer(serializers.Serializer):
level = serializers.CharField(help_text=_("The name of vulnerability level"))
count = serializers.IntegerField(help_text=_("The number of vulnerabilities corresponding to the level"))
level_id = serializers.IntegerField(help_text=_("The id of vulnerability level"))
class VulSummaryTypeSerializer(serializers.Serializer):
type = serializers.CharField(help_text=_("The name of vulnerability type"))
count = serializers.IntegerField(
help_text=_("The number of vulnerabilities corresponding to the vulnerablity type")
)
class VulSummaryProjectSerializer(serializers.Serializer):
project_name = serializers.CharField(help_text=_("The name of the project"))
count = serializers.IntegerField(help_text=_("The number of vulnerabilities corresponding to the project"))
id = serializers.IntegerField(help_text=_("The id of the project"))
class VulSummaryResponseDataSerializer(serializers.Serializer):
language = VulSummaryLanguageSerializer(many=True)
level = VulSummaryLevelSerializer(many=True)
type = VulSummaryTypeSerializer(many=True)
projects = VulSummaryProjectSerializer(many=True)
|
4,281 |
do node
|
# Copyright (C) 2008-2011 Dejan Muhamedagic <[email protected]>
# Copyright (C) 2013 Kristoffer Gronlund <[email protected]>
# See COPYING for license information.
# Revised UI structure for crmsh
#
# Goals:
#
# - Modularity
# - Reduced global state
# - Separate static hierarchy from current context
# - Fix completion
# - Implement bash completion
# - Retain all previous functionality
# - Have per-level pre-requirements:
# def requires(self): <- raise error if prereqs are not met
# This is so that crmsh can be installed with minimal prereqs,
# and use cluster sublevel to install all requirements
from . import command
from . import completers as compl
from . import cmd_status
from . import ui_cib
from . import ui_cibstatus
from . import ui_cluster
from . import ui_configure
from . import ui_corosync
from . import ui_history
from . import ui_maintenance
from . import ui_node
from . import ui_options
from . import ui_ra
from . import ui_resource
from . import ui_script
from . import ui_site
class Root(command.UI):
"""
Root of the UI hierarchy.
"""
# name is the user-visible name of this CLI level.
name = 'root'
@command.level(ui_cib.CibShadow)
@command.help('''manage shadow CIBs
A shadow CIB is a regular cluster configuration which is kept in
a file. The CRM and the CRM tools may manage a shadow CIB in the
same way as the live CIB (i.e. the current cluster configuration).
A shadow CIB may be applied to the cluster in one step.
''')
def do_cib(self):
pass
@command.level(ui_cibstatus.CibStatusUI)
@command.help('''CIB status management and editing
Enter edit and manage the CIB status section level.
''')
def do_cibstatus(self):
pass
@command.level(ui_cluster.Cluster)
@command.help('''Cluster setup and management
Commands at this level enable low-level cluster configuration
management with HA awareness.
''')
def do_cluster(self):
pass
@command.level(ui_configure.CibConfig)
@command.help('''CRM cluster configuration
The configuration level.
Note that you can change the working CIB at the cib level. It is
advisable to configure shadow CIBs and then commit them to the
cluster.
''')
def do_configure(self):
pass
@command.level(ui_corosync.Corosync)
@command.help('''Corosync configuration management
Corosync is the underlying messaging layer for most HA clusters.
This level provides commands for editing and managing the corosync
configuration.
''')
def do_corosync(self):
pass
@command.level(ui_history.History)
@command.help('''CRM cluster history
The history level.
Examine Pacemaker's history: node and resource events, logs.
''')
def do_history(self):
pass
@command.level(ui_maintenance.Maintenance)
@command.help('''maintenance
Commands that should only be executed while in
maintenance mode.
''')
def do_maintenance(self):
pass
@command.level(ui_node.NodeMgmt)
@command.help('''nodes management
A few node related tasks such as node standby are implemented
here.
''')
def METHOD_NAME(self):
pass
@command.level(ui_options.CliOptions)
@command.help('''user preferences
Several user preferences are available. Note that it is possible
to save the preferences to a startup file.
''')
def do_options(self):
pass
@command.level(ui_ra.RA)
@command.help('''resource agents information center
This level contains commands which show various information about
the installed resource agents. It is available both at the top
level and at the `configure` level.
''')
def do_ra(self):
pass
@command.help('''Utility to collect logs and other information
`report` is a utility to collect all information (logs,
configuration files, system information, etc) relevant to
crmsh over the given period of time.
''')
def do_report(self, context, *args):
import sys
from crmsh.report import core
sys.argv[1:] = args
core.run()
@command.level(ui_resource.RscMgmt)
@command.help('''resources management
Everything related to resources management is available at this
level. Most commands are implemented using the crm_resource(8)
program.
''')
def do_resource(self):
pass
@command.level(ui_script.Script)
@command.help('''Cluster scripts
Cluster scripts can perform cluster-wide configuration,
validation and management. See the `list` command for
an overview of available scripts.
''')
def do_script(self):
pass
@command.level(ui_site.Site)
@command.help('''Geo-cluster support
The site level.
Geo-cluster related management.
''')
def do_site(self):
pass
@command.completers(compl.choice(compl.status_option))
@command.help('''show cluster status
Show cluster status. The status is displayed by `crm_mon`. Supply
additional arguments for more information or different format.
See `crm_mon(8)` for more details.
Usage:
...............
status [<option> ...]
option :: bynode | inactive | ops | timing | failcounts
...............
''')
def do_status(self, context, *args):
return cmd_status.cmd_status(args)
@command.help('''Verify cluster state
Performs basic checks for the cluster configuration and
current status, reporting potential issues.
Usage:
.................
verify [scores]
.................
''')
def do_verify(self, context, *args):
return cmd_status.cmd_verify(args)
# this will initialize _children for all levels under the root
Root.init_ui()
# vim:ts=4:sw=4:et:
|
4,282 |
kill
|
# Copyright (c) 2021 Matt Colligan
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import annotations
from typing import TYPE_CHECKING, cast
from pywayland.server import Listener
from wlroots.wlr_types import Output as WlrOutput
from wlroots.wlr_types import SceneTree
from wlroots.wlr_types.layer_shell_v1 import LayerShellV1Layer, LayerSurfaceV1
from libqtile import hook
from libqtile.backend.wayland.output import Output
from libqtile.backend.wayland.window import Static
from libqtile.command.base import expose_command
from libqtile.log_utils import logger
try:
# Continue if ffi not built, so that docs can be built without wayland deps.
from libqtile.backend.wayland._ffi import ffi
except ModuleNotFoundError:
pass
if TYPE_CHECKING:
from typing import Any
from wlroots.wlr_types.scene import SceneLayerSurfaceV1
from libqtile.backend.wayland.core import Core
from libqtile.core.manager import Qtile
from libqtile.utils import ColorsType
class LayerStatic(Static[LayerSurfaceV1]):
"""A static window belonging to the layer shell."""
def __init__(
self,
core: Core,
qtile: Qtile,
surface: LayerSurfaceV1,
wid: int,
):
Static.__init__(self, core, qtile, surface, wid)
self.desired_width = 0
self.desired_height = 0
self.data_handle = ffi.new_handle(self)
surface.data = self.data_handle
# Determine which output this window is to appear on
if wlr_output := surface.output:
logger.debug("Layer surface requested output: %s", wlr_output.name)
else:
wlr_output = cast(
WlrOutput, core.output_layout.output_at(core.cursor.x, core.cursor.y)
)
logger.debug("Layer surface given output: %s", wlr_output.name)
surface.output = wlr_output
output = cast(Output, wlr_output.data)
self.output = output
self.screen = output.screen
# Add the window to the scene graph
parent_tree = core.layer_trees[surface.pending.layer]
self.scene_layer: SceneLayerSurfaceV1 = core.scene.layer_surface_v1_create(
parent_tree, surface
)
self.tree: SceneTree = self.scene_layer.tree
self.tree.node.data = self.data_handle
self.popup_tree = SceneTree.create(parent_tree) # Popups get their own tree
self.popup_tree.node.data = self.data_handle
# Set up listeners
self.add_listener(surface.map_event, self._on_map)
self.add_listener(surface.unmap_event, self._on_unmap)
self.add_listener(surface.destroy_event, self._on_destroy)
self.add_listener(surface.surface.commit_event, self._on_commit)
# Temporarily set the layer's current state to pending so that we can easily
# arrange it. TODO: how much of this is needed?
self._layer = surface.pending.layer
old_state = surface.current
surface.current = surface.pending
self.unhide()
self.output.organise_layers()
surface.current = old_state
self._move_to_layer(old_state.layer)
def _on_commit(self, _listener: Listener, _data: Any) -> None:
if self.surface.output and self.surface.output.data:
output = self.surface.output.data
if output != self.output:
# The window wants to move to a different output.
if self.tree.node.enabled:
self.output.layers[self._layer].remove(self)
output.layers[self._layer].append(self)
self.output = output
pending = self.surface.pending
if (
self._layer != pending.layer
or self._width != pending.desired_width
or self._height != pending.desired_height
):
# The window has changed its desired layer or dimensions.
self._move_to_layer(pending.layer)
def _move_to_layer(self, layer: LayerShellV1Layer) -> None:
new_parent = self.core.layer_trees[layer]
self.tree.node.reparent(new_parent)
self.popup_tree.node.reparent(new_parent)
if self.tree.node.enabled:
# If we're mapped, we also need to update the lists on the output.
self.output.layers[self._layer].remove(self)
self.output.layers[layer].append(self)
self.output.organise_layers()
self._layer = layer
def finalize(self) -> None:
super().finalize()
self.popup_tree.node.destroy()
def METHOD_NAME(self) -> None:
self.surface.destroy()
def hide(self) -> None:
if self.core.exclusive_layer is self:
self.core.exclusive_layer = None
if self.reserved_space:
self.qtile.free_reserved_space(self.reserved_space, self.screen)
self.reserved_space = None
if self.surface.surface == self.core.seat.keyboard_state.focused_surface:
group = self.qtile.current_screen.group
if group.current_window:
group.focus(group.current_window, warp=self.qtile.config.cursor_warp)
else:
self.core.seat.keyboard_clear_focus()
if self in self.output.layers[self._layer]:
self.tree.node.set_enabled(enabled=False)
# TODO also toggle popup_tree
self.output.layers[self._layer].remove(self)
self.output.organise_layers()
def unhide(self) -> None:
if self not in self.output.layers[self._layer]:
self.tree.node.set_enabled(enabled=True)
self.output.layers[self._layer].append(self)
self.output.organise_layers()
def focus(self, _warp: bool = True) -> None:
self.core.focus_window(self)
hook.fire("client_focus", self)
def place(
self,
x: int,
y: int,
width: int,
height: int,
borderwidth: int,
bordercolor: ColorsType | None,
above: bool = False,
margin: int | list[int] | None = None,
respect_hints: bool = False,
) -> None:
self.x = x
self.y = y
self.tree.node.set_position(x, y)
self.popup_tree.node.set_position(x, y)
# The actual resizing is done by `Output`.
self._width = width
self._height = height
@expose_command()
def bring_to_front(self) -> None:
self.tree.node.raise_to_top()
|
4,283 |
test file with missing final nl
|
"""
Test script for the 'cmd' module
Original by Michael Schneider
"""
import cmd
import sys
from test import test_support
import re
import unittest
import StringIO
class samplecmdclass(cmd.Cmd):
"""
Instantiate the sample class:
>>> mycmd = samplecmdclass()
Test for the function parseline():
>>> mycmd.parseline("")
(None, None, '')
>>> mycmd.parseline("?")
('help', '', 'help ')
>>> mycmd.parseline("?help")
('help', 'help', 'help help')
>>> mycmd.parseline("!")
('shell', '', 'shell ')
>>> mycmd.parseline("!command")
('shell', 'command', 'shell command')
>>> mycmd.parseline("func")
('func', '', 'func')
>>> mycmd.parseline("func arg1")
('func', 'arg1', 'func arg1')
Test for the function onecmd():
>>> mycmd.onecmd("")
>>> mycmd.onecmd("add 4 5")
9
>>> mycmd.onecmd("")
9
>>> mycmd.onecmd("test")
*** Unknown syntax: test
Test for the function emptyline():
>>> mycmd.emptyline()
*** Unknown syntax: test
Test for the function default():
>>> mycmd.default("default")
*** Unknown syntax: default
Test for the function completedefault():
>>> mycmd.completedefault()
This is the completedefault method
>>> mycmd.completenames("a")
['add']
Test for the function completenames():
>>> mycmd.completenames("12")
[]
>>> mycmd.completenames("help")
['help']
Test for the function complete_help():
>>> mycmd.complete_help("a")
['add']
>>> mycmd.complete_help("he")
['help']
>>> mycmd.complete_help("12")
[]
>>> sorted(mycmd.complete_help(""))
['add', 'exit', 'help', 'shell']
Test for the function do_help():
>>> mycmd.do_help("testet")
*** No help on testet
>>> mycmd.do_help("add")
help text for add
>>> mycmd.onecmd("help add")
help text for add
>>> mycmd.do_help("")
<BLANKLINE>
Documented commands (type help <topic>):
========================================
add help
<BLANKLINE>
Undocumented commands:
======================
exit shell
<BLANKLINE>
Test for the function print_topics():
>>> mycmd.print_topics("header", ["command1", "command2"], 2 ,10)
header
======
command1
command2
<BLANKLINE>
Test for the function columnize():
>>> mycmd.columnize([str(i) for i in xrange(20)])
0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19
>>> mycmd.columnize([str(i) for i in xrange(20)], 10)
0 7 14
1 8 15
2 9 16
3 10 17
4 11 18
5 12 19
6 13
This is an interactive test, put some commands in the cmdqueue attribute
and let it execute
This test includes the preloop(), postloop(), default(), emptyline(),
parseline(), do_help() functions
>>> mycmd.use_rawinput=0
>>> mycmd.cmdqueue=["", "add", "add 4 5", "help", "help add","exit"]
>>> mycmd.cmdloop()
Hello from preloop
help text for add
*** invalid number of arguments
9
<BLANKLINE>
Documented commands (type help <topic>):
========================================
add help
<BLANKLINE>
Undocumented commands:
======================
exit shell
<BLANKLINE>
help text for add
Hello from postloop
"""
def preloop(self):
print "Hello from preloop"
def postloop(self):
print "Hello from postloop"
def completedefault(self, *ignored):
print "This is the completedefault methode"
return
def complete_command(self):
print "complete command"
return
def do_shell(self, s):
pass
def do_add(self, s):
l = s.split()
if len(l) != 2:
print "*** invalid number of arguments"
return
try:
l = [int(i) for i in l]
except ValueError:
print "*** arguments should be numbers"
return
print l[0]+l[1]
def help_add(self):
print "help text for add"
return
def do_exit(self, arg):
return True
class TestAlternateInput(unittest.TestCase):
class simplecmd(cmd.Cmd):
def do_print(self, args):
print >>self.stdout, args
def do_EOF(self, args):
return True
class simplecmd2(simplecmd):
def do_EOF(self, args):
print >>self.stdout, '*** Unknown syntax: EOF'
return True
def METHOD_NAME(self):
input = StringIO.StringIO("print test\nprint test2")
output = StringIO.StringIO()
cmd = self.simplecmd(stdin=input, stdout=output)
cmd.use_rawinput = False
cmd.cmdloop()
self.assertMultiLineEqual(output.getvalue(),
("(Cmd) test\n"
"(Cmd) test2\n"
"(Cmd) "))
def test_input_reset_at_EOF(self):
input = StringIO.StringIO("print test\nprint test2")
output = StringIO.StringIO()
cmd = self.simplecmd2(stdin=input, stdout=output)
cmd.use_rawinput = False
cmd.cmdloop()
self.assertMultiLineEqual(output.getvalue(),
("(Cmd) test\n"
"(Cmd) test2\n"
"(Cmd) *** Unknown syntax: EOF\n"))
input = StringIO.StringIO("print \n\n")
output = StringIO.StringIO()
cmd.stdin = input
cmd.stdout = output
cmd.cmdloop()
self.assertMultiLineEqual(output.getvalue(),
("(Cmd) \n"
"(Cmd) \n"
"(Cmd) *** Unknown syntax: EOF\n"))
def test_main(verbose=None):
from test import test_cmd
test_support.run_doctest(test_cmd, verbose)
test_support.run_unittest(TestAlternateInput)
def test_coverage(coverdir):
trace = test_support.import_module('trace')
tracer=trace.Trace(ignoredirs=[sys.prefix, sys.exec_prefix,],
trace=0, count=1)
tracer.run('reload(cmd);test_main()')
r=tracer.results()
print "Writing coverage results..."
r.write_results(show_missing=True, summary=True, coverdir=coverdir)
if __name__ == "__main__":
if "-c" in sys.argv:
test_coverage('/tmp/cmd.cover')
elif "-i" in sys.argv:
samplecmdclass().cmdloop()
else:
test_main()
|
4,284 |
get subvolume output
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetSubvolumeResult',
'AwaitableGetSubvolumeResult',
'get_subvolume',
'get_subvolume_output',
]
@pulumi.output_type
class GetSubvolumeResult:
"""
Subvolume Information properties
"""
def __init__(__self__, id=None, name=None, parent_path=None, path=None, provisioning_state=None, system_data=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if parent_path and not isinstance(parent_path, str):
raise TypeError("Expected argument 'parent_path' to be a str")
pulumi.set(__self__, "parent_path", parent_path)
if path and not isinstance(path, str):
raise TypeError("Expected argument 'path' to be a str")
pulumi.set(__self__, "path", path)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="parentPath")
def parent_path(self) -> Optional[str]:
"""
parent path to the subvolume
"""
return pulumi.get(self, "parent_path")
@property
@pulumi.getter
def path(self) -> Optional[str]:
"""
Path to the subvolume
"""
return pulumi.get(self, "path")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
Azure lifecycle management
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Azure Resource Manager metadata containing createdBy and modifiedBy information.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetSubvolumeResult(GetSubvolumeResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetSubvolumeResult(
id=self.id,
name=self.name,
parent_path=self.parent_path,
path=self.path,
provisioning_state=self.provisioning_state,
system_data=self.system_data,
type=self.type)
def get_subvolume(account_name: Optional[str] = None,
pool_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
subvolume_name: Optional[str] = None,
volume_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSubvolumeResult:
"""
Returns the path associated with the subvolumeName provided
:param str account_name: The name of the NetApp account
:param str pool_name: The name of the capacity pool
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str subvolume_name: The name of the subvolume.
:param str volume_name: The name of the volume
"""
__args__ = dict()
__args__['accountName'] = account_name
__args__['poolName'] = pool_name
__args__['resourceGroupName'] = resource_group_name
__args__['subvolumeName'] = subvolume_name
__args__['volumeName'] = volume_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:netapp/v20230501:getSubvolume', __args__, opts=opts, typ=GetSubvolumeResult).value
return AwaitableGetSubvolumeResult(
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
parent_path=pulumi.get(__ret__, 'parent_path'),
path=pulumi.get(__ret__, 'path'),
provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
system_data=pulumi.get(__ret__, 'system_data'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_subvolume)
def METHOD_NAME(account_name: Optional[pulumi.Input[str]] = None,
pool_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
subvolume_name: Optional[pulumi.Input[str]] = None,
volume_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetSubvolumeResult]:
"""
Returns the path associated with the subvolumeName provided
:param str account_name: The name of the NetApp account
:param str pool_name: The name of the capacity pool
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str subvolume_name: The name of the subvolume.
:param str volume_name: The name of the volume
"""
...
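# Illustrative usage (editor-added; the resource names below are hypothetical):
# the plain form returns an awaitable result, while the `_output` form returns a
# `pulumi.Output` wrapping the same fields.
#
#   result = get_subvolume(account_name="netappacct",
#                          pool_name="pool1",
#                          resource_group_name="my-rg",
#                          subvolume_name="subvol1",
#                          volume_name="vol1")
#   pulumi.export("subvolumePath", result.path)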
|
4,285 |
match case block
|
"""Match hook."""
from typing import Union, Optional, Any
import re
from tackle.models import BaseHook, Context, Field
from tackle.parser import walk_element
from tackle.render import render_string
from tackle.exceptions import HookCallException
class MatchHook(BaseHook):
"""
Hook for match / case statements. Takes a dict whose keys are matched against a
value. If a case key has an arrow in it (ie key->: ... ) the arrow is stripped
away. All matched values are run as hooks.
"""
hook_type: str = 'match'
value: str = Field(
..., render_by_default=True, description="The value to match against."
)
case: dict = Field(
...,
description="A dictionary where the keys are cases to be matched. Runs hooks "
"if present.",
)
args: list = ['value']
skip_output: bool = True
_render_exclude = {'case'}
_docs_order = 3
def run_key(self, value):
if self.temporary_context is None:
self.temporary_context = {}
tmp_context = Context(
input_context=value,
key_path=self.key_path.copy(),
key_path_block=self.key_path.copy(),
public_hooks=self.public_hooks,
private_hooks=self.private_hooks,
public_context=self.public_context,
private_context=self.private_context,
temporary_context=self.temporary_context,
existing_context=self.existing_context,
no_input=self.no_input,
calling_directory=self.calling_directory,
calling_file=self.calling_file,
verbose=self.verbose,
override_context=self.override_context,
)
walk_element(context=tmp_context, element=value.copy())
return tmp_context.public_context
def block_macro(self, key: str, val: dict) -> dict:
"""Take matched input dict and create a `block` hook to parse."""
# Remove the merge which will be inserted into the parsed block hook.
merge = self.merge if 'merge' not in val else val['merge']
if merge:
# Do this because arrows can stack up and mess up merge
self.key_path = self.key_path[:-1]
# We now don't want the hook to be merged
self.merge = False
output = {
key[-2:]: 'block',
'merge': merge,
'items': {},
}
# Have a collection of fields that are part of the base.
aliases = [v.alias for _, v in BaseHook.__fields__.items()] + ['->', '_>']
for k, v in val.items():
if k not in aliases:
# Set the keys under the `items` key per the block hook's input
output['items'].update({k: v})
else:
output.update({k: v})
return output
def match_case(self, v: Any):
# Normal dicts
if isinstance(v, dict) and not ('->' in v or '_>' in v):
# TODO: Determine if `match` hook should parse dictionaries by default
# https://github.com/sudoblockio/tackle/issues/160
# This will change to something like this, but k will have a `->` suffix:
# return self.run_key(self.block_macro(k, v))
self.skip_output = False
return v
# Dicts that are expanded hooks
elif isinstance(v, dict):
return self.run_key(v)
elif isinstance(v, (str, int)):
self.skip_output = False
return render_string(self, v)
self.skip_output = False
return v
def METHOD_NAME(self, k: str, v: Any):
# Return the value indexed without arrow
if isinstance(v, str):
return self.run_key({k[:-2]: {k[-2:]: v + ' --merge'}})
elif isinstance(v, dict):
# We are in a block
return self.run_key(self.block_macro(k, v))
else:
raise HookCallException(
f"Matched value must be of type string or dict, not {v}.",
context=self,
) from None
def exec(self) -> Optional[Union[dict, list]]:
default_value = None
default_key = None
# Condition catches everything except expanded hook calls and blocks (ie key->)
for k, v in self.case.items():
if k in ['_', '_->']:
default_value = v
default_key = k
# Save this value for later in case nothing is matched
continue
try:
_match = re.fullmatch(k, self.value)
except re.error as e:
raise HookCallException(
f"Error in match hook case '{k}'\n{e}\nMalformed regex. Must "
f"with python's `re` module syntax.",
context=self,
) from None
if _match:
return self.match_case(v=v)
# TODO: This regex needs to be modified to not match empty hooks
# ie - `->`: x - should not match everything
# Case where we have an arrow in a key - ie `key->: ...`
elif re.fullmatch(k[:-2], self.value) and k[-2:] in ('->', '_>'):
return self.METHOD_NAME(k, v)
if default_key is not None:
if '->' in default_key or '_>' in default_key:
return self.METHOD_NAME(k=default_key, v=default_value)
return self.match_case(v=default_value)
raise HookCallException(
f"Value `{self.value}` not found in "
f"{' ,'.join([i for i in list(self.case)])}",
context=self,
) from None
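# Editor-added sketch (not part of tackle): a minimal illustration of the dispatch
# described in the class docstring, using only the standard library. Keys are regexes
# matched with `re.fullmatch`; a key ending in an arrow is stripped of the arrow before
# matching, and `_` acts as the default case.
#
#   import re
#
#   def toy_match(value, case):
#       for k, v in case.items():
#           key = k[:-2] if k.endswith(('->', '_>')) else k
#           if key != '_' and re.fullmatch(key, value):
#               return v
#       return case.get('_', case.get('_->'))
#
#   toy_match('dev-east', {'dev.*': 'development', '_': 'fallback'})  # -> 'development'
#   toy_match('qa', {'dev.*': 'development', '_': 'fallback'})        # -> 'fallback'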
|
4,286 |
commands for
|
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""Manages the details on the images used in the various stages."""
import json
import os.path
import shlex
import sys
import llnl.util.filesystem as fs
import llnl.util.tty as tty
import spack.util.git
#: Global variable used to cache in memory the content of images.json
_data = None
def data():
"""Returns a dictionary with the static data on the images.
The dictionary is read from a JSON file lazily the first time
this function is called.
"""
global _data
if not _data:
json_dir = os.path.abspath(os.path.dirname(__file__))
json_file = os.path.join(json_dir, "images.json")
with open(json_file) as f:
_data = json.load(f)
return _data
def build_info(image, spack_version):
"""Returns the name of the build image and its tag.
Args:
image (str): image to be used at run-time. Should be of the form
<image_name>:<image_tag> e.g. "ubuntu:18.04"
spack_version (str): version of Spack that we want to use to build
Returns:
A tuple with (image_name, image_tag) for the build image
"""
# Don't handle error here, as a wrong image should have been
# caught by the JSON schema
image_data = data()["images"][image]
build_image = image_data.get("build", None)
if not build_image:
return None, None
# Translate version from git to docker if necessary
build_tag = image_data["build_tags"].get(spack_version, spack_version)
return build_image, build_tag
def os_package_manager_for(image):
"""Returns the name of the OS package manager for the image
passed as argument.
Args:
image (str): image to be used at run-time. Should be of the form
<image_name>:<image_tag> e.g. "ubuntu:18.04"
Returns:
Name of the package manager, e.g. "apt" or "yum"
"""
name = data()["images"][image]["os_package_manager"]
return name
def all_bootstrap_os():
"""Return a list of all the OS that can be used to bootstrap Spack"""
return list(data()["images"])
def METHOD_NAME(package_manager):
"""Returns the commands used to update system repositories, install
system packages and clean afterwards.
Args:
package_manager (str): package manager to be used
Returns:
A tuple of (update, install, clean) commands.
"""
info = data()["os_package_managers"][package_manager]
return info["update"], info["install"], info["clean"]
def bootstrap_template_for(image):
return data()["images"][image]["bootstrap"]["template"]
def _verify_ref(url, ref, enforce_sha):
# Do a checkout in a temporary directory
msg = 'Cloning "{0}" to verify ref "{1}"'.format(url, ref)
tty.info(msg, stream=sys.stderr)
git = spack.util.git.git(required=True)
with fs.temporary_dir():
git("clone", "-q", url, ".")
sha = git(
"rev-parse", "-q", ref + "^{commit}", output=str, error=os.devnull, fail_on_error=False
)
if git.returncode:
msg = '"{0}" is not a valid reference for "{1}"'
raise RuntimeError(msg.format(sha, url))
if enforce_sha:
ref = sha.strip()
return ref
def checkout_command(url, ref, enforce_sha, verify):
"""Return the checkout command to be used in the bootstrap phase.
Args:
url (str): url of the Spack repository
ref (str): either a branch name, a tag or a commit sha
enforce_sha (bool): if true, resolve the ref to its commit sha and check out that sha
verify (bool): if true, verify that the ref exists in the repository before generating the command
"""
url = url or "https://github.com/spack/spack.git"
ref = ref or "develop"
enforce_sha, verify = bool(enforce_sha), bool(verify)
# If we want to enforce a sha or verify the ref we need
# to checkout the repository locally
if enforce_sha or verify:
ref = _verify_ref(url, ref, enforce_sha)
return " && ".join(
[
"git init --quiet",
f"git remote add origin {shlex.quote(url)}",
f"git fetch --depth=1 origin {shlex.quote(ref)}",
"git checkout --detach FETCH_HEAD",
]
)
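# Example (editor-added): with the defaults and neither sha enforcement nor verification
# requested, no temporary clone is made and the generated bootstrap command is simply:
#
#   checkout_command(None, None, enforce_sha=False, verify=False)
#   # -> "git init --quiet && git remote add origin https://github.com/spack/spack.git
#   #     && git fetch --depth=1 origin develop && git checkout --detach FETCH_HEAD"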
|
4,287 |
load image
|
# Copyright (c) 2022 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import argparse
import onnx
import yaml
from pycocotools.coco import COCO
from pycocotools.mask import iou, encode
import numpy as np
from torchvision import transforms
from PIL import Image
from onnx import numpy_helper
import os
import onnxruntime
logger = logging.getLogger(__name__)
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.WARN)
logger.info("Evaluating ONNXRuntime full precision accuracy and performance:")
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
'--model_path',
type=str,
help="Pre-trained model on onnx file"
)
parser.add_argument(
'--label_path',
type=str,
help="Annotation file path"
)
parser.add_argument(
'--data_path',
type=str,
help="Path to val2017 of COCO"
)
parser.add_argument(
'--benchmark',
action='store_true', \
default=False
)
parser.add_argument(
'--tune',
action='store_true', \
default=False,
help="whether quantize the model"
)
parser.add_argument(
'--config',
type=str,
help="config yaml path"
)
parser.add_argument(
'--output_model',
type=str,
help="output model path"
)
parser.add_argument(
'--mode',
type=str,
help="benchmark mode of performance or accuracy"
)
args = parser.parse_args()
# key = COCO id, value = Pascal VOC id
COCO_TO_VOC = {
1: 15, # person
2: 2, # bicycle
3: 7, # car
4: 14, # motorbike
5: 1, # airplane
6: 6, # bus
7: 19, # train
9: 4, # boat
16: 3, # bird
17: 8, # cat
18: 12, # dog
19: 13, # horse
20: 17, # sheep
21: 10, # cow
44: 5, # bottle
62: 9, # chair
63: 18, # couch/sofa
64: 16, # potted plant
67: 11, # dining table
72: 20, # tv
}
VOC_CAT_IDS = list(COCO_TO_VOC.keys())
cocoGt = COCO(str(args.label_path))
preprocess = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
class Dataset:
def __init__(self):
imgIds = self.getImgIdsUnion(cocoGt, VOC_CAT_IDS)
self.data = []
for imgId in imgIds:
img_path = os.path.join(args.data_path, cocoGt.imgs[imgId]['file_name'])
if os.path.exists(img_path):
input_tensor = self.METHOD_NAME(img_path)
_, height, width = input_tensor.shape
output_tensor = np.zeros((21, height, width), dtype=np.uint8)
annIds = cocoGt.getAnnIds(imgId, VOC_CAT_IDS)
for ann in cocoGt.loadAnns(annIds):
mask = cocoGt.annToMask(ann)
output_tensor[COCO_TO_VOC[ann['category_id']]] |= mask
# Set everything not labeled to be background
output_tensor[0] = 1 - np.max(output_tensor, axis=0)
self.data.append((input_tensor, output_tensor))
def __len__(self):
return len(self.data)
def __getitem__(self, index):
return self.data[index]
def getImgIdsUnion(self, gt, catIds):
"""
Returns all the images that have *any* of the categories in `catIds`,
unlike the built-in `gt.getImgIds` which returns all the images containing
*all* of the categories in `catIds`.
"""
imgIds = set()
for catId in catIds:
imgIds |= set(gt.catToImgs[catId])
return list(imgIds)
def METHOD_NAME(self, img_path):
input_image = Image.open(img_path).convert('RGB')
input_tensor = preprocess(input_image)
input_tensor = input_tensor.detach().cpu().numpy()
return input_tensor
def iou(model_tensor, target_tensor):
# Don't include the background when summing
model_tensor = model_tensor[:, 1:, :, :]
target_tensor = target_tensor[:, 1:, :, :]
intersection = np.sum(np.logical_and(model_tensor, target_tensor))
union = np.sum(np.logical_or(model_tensor, target_tensor))
if union == 0:
# Can only happen if nothing was there and nothing was predicted,
# which is a perfect score
return 1
else:
return intersection / union
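# Worked example (editor-added): with one-hot masks of shape (N, C, H, W) the background
# channel 0 is dropped before computing the ratio. For a single 2x2 image with one
# foreground class, where the prediction covers 3 pixels and the target covers 2 of them:
#   intersection = 2, union = 3, iou = 2/3 ≈ 0.667
# When both tensors are empty the function returns 1 by convention.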
def evaluate(model, dataloader):
totalIoU = 0
sess = onnxruntime.InferenceSession(model.SerializeToString(), None)
idx = 1
for input_tensor, target_tensor in dataloader:
input_tensor = input_tensor[np.newaxis, ...]
target_tensor = target_tensor[np.newaxis, ...]
model_tensor = sess.run(["out"], {"input": input_tensor})[0]
batch_size, nclasses, height, width = model_tensor.shape
raw_labels = np.argmax(model_tensor, axis=1).astype(np.uint8)
output_tensor = np.zeros((nclasses, batch_size, height, width), dtype=np.uint8)
for c in range(nclasses):
output_tensor[c][raw_labels==c] = 1
output_tensor = np.transpose(output_tensor, [1, 0, 2, 3])
totalIoU += iou(output_tensor, target_tensor)
idx += 1
return totalIoU / idx
if __name__ == "__main__":
from neural_compressor.experimental import common
ds = Dataset()
dataloader = common.DataLoader(ds)
model = onnx.load(args.model_path)
def eval(model):
return evaluate(model, ds)
if args.benchmark and args.mode == "accuracy":
results = eval(model)
print("Batch size = 1")
print("Accuracy: %.5f" % results)
if args.benchmark and args.mode == "performance":
from neural_compressor.experimental import Benchmark, common
evaluator = Benchmark(args.config)
evaluator.model = common.Model(model)
evaluator.b_dataloader = common.DataLoader(ds)
evaluator(args.mode)
|
4,288 |
which
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2018-2022 F4PGA Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
import time
import os
class Timed:
def __init__(self, t, name, unprinted_runtime=False):
self.t = t
self.name = name
self.start = None
self.unprinted_runtime = unprinted_runtime
def __enter__(self):
self.start = time.time()
def __exit__(self, type, value, traceback):
end = time.time()
self.t.add_runtime(
self.name,
end - self.start,
unprinted_runtime=self.unprinted_runtime
)
def get_vivado_max_freq(report_file):
processing = False
group = ""
delay = ""
requirement = ""
freq = 0
freqs = dict()
path_type = None
with open(report_file, 'r') as fp:
for l in fp:
if l.startswith("Slack"):
if '(MET)' in l:
violation = 0.0
else:
violation = float(
l.split(':')[1].split()[0].strip().strip('ns')
)
processing = True
if processing is True:
fields = l.split()
if len(fields) > 1 and fields[1].startswith('----'):
processing = False
# check if this is a timing we want
if group not in requirement.split():
continue
if group not in freqs:
freqs[group] = dict()
freqs[group]['actual'] = freq
freqs[group]['requested'] = requested_freq
freqs[group]['met'] = freq >= requested_freq
freqs[group]['{}_violation'.format(path_type.lower())
] = violation
path_type = None
if path_type is not None:
freqs[group]['{}_violation'.format(path_type.lower())
] = violation
data = l.split(':')
if len(data) > 1:
if data[0].strip() == 'Data Path Delay':
delay = data[1].split()[0].strip('ns')
freq = 1e9 / float(delay)
if data[0].strip() == 'Path Group':
group = data[1].strip()
if data[0].strip() == 'Requirement':
requirement = data[1].strip()
r = float(requirement.split()[0].strip('ns'))
if r != 0.0:
requested_freq = 1e9 / r
if data[0].strip() == 'Path Type':
ptype = data[1].strip()
if path_type != ptype.split()[0]:
path_type = ptype.split()[0]
for cd in freqs:
freqs[cd]['actual'] = float("{:.3f}".format(freqs[cd]['actual'] / 1e6))
freqs[cd]['requested'] = float(
"{:.3f}".format(freqs[cd]['requested'] / 1e6)
)
return freqs
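# Worked example (editor-added): a path group whose "Data Path Delay" is 8.000 ns yields
# an actual frequency of 1e9 / 8.0 = 125 MHz; a "Requirement" of 10.000 ns yields a
# requested frequency of 100 MHz, so that group is reported with actual=125.0,
# requested=100.0 and met=True.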
def get_yosys_resources(yosys_log):
with open(yosys_log, "r") as f:
data = f.readlines()
resources = dict()
print_stats = False
proc_cells = False
for line in data:
print_stats = "Printing statistics" in line or print_stats
if not print_stats:
continue
if proc_cells and line.strip():
cell, count = line.split()
resources[cell] = count
proc_cells = ("Number of cells" in line or proc_cells) and line.strip()
return resources
def have_exec(mybin):
return METHOD_NAME(mybin) != None
# https://stackoverflow.com/questions/377017/test-if-executable-exists-in-python
def METHOD_NAME(program, get_dir=False):
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
if get_dir:
return path
else:
return exe_file
return None
def safe_get_dict_value(dict, key, default):
if key in dict:
return dict[key]
else:
return default
def get_file_dict(file_name, file_type):
return dict(name=os.path.realpath(file_name), file_type=file_type)
def removeprefix(string, prefix):
if string.startswith(prefix):
return string[len(prefix):]
else:
return string[:]
def removesuffix(string, suffix):
if suffix and string.endswith(suffix):
return string[:-len(suffix)]
else:
return string[:]
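# Examples (editor-added) for the string helpers above:
#   removeprefix("top_module", "top_")    # -> "module"
#   removesuffix("design.json", ".json")  # -> "design"
#   removesuffix("design", "")            # -> "design"  (empty suffix is a no-op)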
|
4,289 |
get blocks
|
import logging
import re
import numpy as np
import sympy
from pysb.bng import generate_equations
from pysb.core import Expression
from pysb.logging import setup_logger
from pysb.simulator.base import Simulator, SimulatorException
class SSABase(Simulator):
_supports = {'multi_initials': True, 'multi_param_values': True}
def __init__(self, model, verbose=False, tspan=None, **kwargs):
super(SSABase, self).__init__(model, verbose, **kwargs)
generate_equations(self._model)
self.tspan = tspan
self.verbose = verbose
# private attribute
self._parameter_number = len(self._model.parameters)
self._n_species = len(self._model.species)
self._n_reactions = len(self._model.reactions)
self._step_0 = True
self.num_sim = None
if verbose:
setup_logger(logging.INFO)
def _get_template_args(self):
""" converts pysb reactions to pycuda/pyopencl format """
p = re.compile('\s')
stoich_matrix = self.model.stoichiometry_matrix.toarray()
params_names = [g.name for g in self._model.parameters]
_reaction_number = len(self._model.reactions)
stoich_string = ''
l_lim = self._n_species - 1
r_lim = self._n_reactions - 1
for i in range(0, self._n_reactions):
for j in range(0, len(stoich_matrix)):
stoich_string += "%s" % repr(stoich_matrix[j][i])
if not (i == l_lim and j == r_lim):
stoich_string += ','
stoich_string += '\n'
output_string = ''
expr_strings = {
e.name: '(%s)' % sympy.ccode(
e.expand_expr(expand_observables=True)
) for e in self.model.expressions}
for n, rxn in enumerate(self._model.reactions):
output_string += "\th[%s] = " % repr(n)
rate = sympy.fcode(rxn["rate"])
rate = re.sub('d0', '', rate)
rate = p.sub('', rate)
# Create expression strings with observables
# expand only expressions used in the rate eqn
for e in {sym for sym in rxn["rate"].atoms()
if isinstance(sym, Expression)}:
rate = re.sub(r'\b%s\b' % e.name,
expr_strings[e.name],
rate)
# replace x**2 with (x-1)*x
pattern = "(_{2}s\d+)\*\*(\d+)"
matches = re.findall(pattern, rate)
for m in matches:
repl = m[0]
for i in range(1, int(m[1])):
repl += "*(%s-%d)" % (m[0], i)
rate = re.sub(pattern, repl, rate)
# replace species string with matrix index (`_si` with `y[i]`)
rate = re.sub(r'_{2}s(\d+)', lambda m: 'y[%s]' % (int(m.group(1))),
rate)
# replace param names with vector notation
for q, prm in enumerate(params_names):
rate = re.sub(r'\b(%s)\b' % prm, 'param_vec[%s]' % q, rate)
# Use the faster approximate powf for better performance on GPUs
rate = rate.replace('pow', 'powf')
# If a parameter is a float and appears first, the result output
# will lose precision. Casting to double ensures precision
rate = '(double)' + rate
output_string += rate + ";\n"
return dict(n_species=self._n_species, n_params=self._parameter_number,
n_reactions=_reaction_number, propensities=output_string,
stoch=stoich_string)
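# Illustrative example (editor-added, hypothetical species/parameter indices): a PySB
# rate such as  kf*__s3**2  is rewritten by the substitutions above to roughly
#   (double)param_vec[0]*y[3]*(y[3]-1)
# i.e. the squared species term becomes the combinatorial x*(x-1) form, species symbols
# become state-vector lookups and parameter names become param_vec entries.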
def run(self, tspan=None, param_values=None, initials=None, number_sim=0):
num_sim = int(number_sim)
# check for proper arguments
if param_values is None and initials is None and not num_sim:
raise SimulatorException("Please provide a multi-dimension set of "
"parameters, initials, or number_sim>0")
elif param_values is None and not num_sim:
self.initials = initials
num_sim = self.initials.shape[0]
elif initials is None and not num_sim:
self.param_values = param_values
num_sim = self.param_values.shape[0]
if param_values is None and initials is None:
# Run simulation using same param_values
# initials will be taken care of on SimulatorBase side
param_values = np.repeat(self.param_values, num_sim, axis=0)
elif isinstance(param_values, np.ndarray) and len(
param_values.shape) == 1:
# initials taken care of on SimulatorBase side
param_values = np.repeat([param_values], num_sim, axis=0)
elif isinstance(initials, np.ndarray) and len(initials.shape) == 1:
# parameters taken care of on SimulatorBase side
initials = np.repeat([initials], num_sim, axis=0)
self.num_sim = num_sim
super(SSABase, self).run(tspan=tspan, initials=initials,
param_values=param_values,
_run_kwargs=locals())
@staticmethod
def METHOD_NAME(n_simulations, threads_per_block):
# Choosing the number of blocks and threads per block depends on the
# hardware warpsize (32 for NVIDIA, 64 for AMD), and number of
# simulations.
# CUDA CURAND limits it to 256, so that's the hard limit we set.
# We want the number of blocks to be a multiple of the
# threads_per_block, so we saturate the GPU equally.
# If the number of simulations isn't a multiple of it,
# we just make the number of blocks slightly bigger, then fill
# the rest of the space with zeros, which instantly finishes.
max_tpb = 256
if threads_per_block > max_tpb:
# Limit of 256 threads per block from curand
threads_per_block = max_tpb
if n_simulations < max_tpb:
block_count = 1
threads_per_block = max_tpb
elif n_simulations % threads_per_block == 0:
block_count = int(n_simulations // threads_per_block)
else:
block_count = int(n_simulations // threads_per_block + 1)
return block_count, threads_per_block
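# Worked examples (editor-added) for the launch-configuration helper above, which
# returns (block_count, threads_per_block):
#   n_simulations=100,  threads_per_block=32   -> (1, 256)  fewer sims than one full block
#   n_simulations=512,  threads_per_block=128  -> (4, 128)  exact multiple
#   n_simulations=1000, threads_per_block=128  -> (8, 128)  rounded up; extra threads finish instantly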
|
4,290 |
build legacy tx
|
from __future__ import annotations
from typing import Sequence, Optional, Union, Dict, Any, Set, List, NewType
import abc
import solders.hash
import solders.keypair
import solders.pubkey
import solders.instruction
import solders.signature
import solders.transaction
from .errors import SolTxSizeError
SolTxIx = solders.instruction.Instruction
SolAccountMeta = solders.instruction.AccountMeta
SolBlockHash = solders.hash.Hash
SolAccount = solders.keypair.Keypair
SolSig = solders.signature.Signature
SolPubKey = solders.pubkey.Pubkey
SolTxReceipt = Dict[str, Any]
_SoldersLegacyTx = solders.transaction.Transaction
_SoldersLegacyMsg = solders.message.Message
_SolPktDataSize = 1280 - 40 - 8
class SolCommit:
Type = NewType('SolCommit', str)
NotProcessed = Type('not-processed')
Processed = Type('processed')
Confirmed = Type('confirmed')
Safe = Type('safe') # optimistic-finalized => 2/3 of validators
Finalized = Type('finalized')
Order = [NotProcessed, Processed, Confirmed, Safe, Finalized]
@staticmethod
def level(commitment: Type) -> int:
for index, value in enumerate(SolCommit.Order):
if value == commitment:
return index
assert False, 'Wrong commitment'
@staticmethod
def upper_set(commitment: Type) -> Set[Type]:
level = SolCommit.level(commitment)
return set(SolCommit.Order[level:])
@staticmethod
def lower_set(commitment: Type) -> Set[Type]:
level = SolCommit.level(commitment)
return set(SolCommit.Order[:level])
@staticmethod
def to_solana(commitment: Type) -> Type:
if commitment == SolCommit.NotProcessed:
return SolCommit.Processed
elif commitment == SolCommit.Safe:
return SolCommit.Confirmed
elif commitment in {SolCommit.Processed, SolCommit.Confirmed, SolCommit.Finalized}:
return commitment
assert False, 'Wrong commitment'
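# Example (editor-added): commitment levels are ordered weakest to strongest, so
#   SolCommit.level(SolCommit.Confirmed)      # -> 2
#   SolCommit.upper_set(SolCommit.Confirmed)  # -> {'confirmed', 'safe', 'finalized'}
#   SolCommit.to_solana(SolCommit.Safe)       # -> 'confirmed' (Solana has no 'safe' level)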
class SolTx(abc.ABC):
_empty_block_hash = SolBlockHash.default()
def __init__(self, name: str, ix_list: Optional[Sequence[SolTxIx]]) -> None:
self._name = name
self._is_signed = False
self._is_cloned = False
self._solders_legacy_tx = self.METHOD_NAME(recent_block_hash=None, ix_list=ix_list)
@property
def name(self) -> str:
return self._name
def is_empty(self) -> bool:
return len(self._solders_legacy_tx.message.instructions) == 0
def is_cloned(self) -> bool:
return self._is_cloned
@property
def recent_block_hash(self) -> Optional[SolBlockHash]:
block_hash = self._solders_legacy_tx.message.recent_blockhash
if block_hash == self._empty_block_hash:
return None
return block_hash
@recent_block_hash.setter
def recent_block_hash(self, value: Optional[SolBlockHash]) -> None:
ix_list = self._decode_ix_list()
self._solders_legacy_tx = self.METHOD_NAME(recent_block_hash=value, ix_list=ix_list)
@property
def ix_list(self) -> List[SolTxIx]:
return self._decode_ix_list()
@property
def fee_payer(self) -> Optional[SolPubKey]:
acct_key_list = self._solders_legacy_tx.message.account_keys
return acct_key_list[0] if acct_key_list else None
@fee_payer.setter
def fee_payer(self, value: SolPubKey) -> None:
block_hash = self.recent_block_hash
ix_list = self._decode_ix_list(value)
self._solders_legacy_tx = self.METHOD_NAME(recent_block_hash=block_hash, ix_list=ix_list)
def add(self, *args: Union[SolTx, SolTxIx]) -> SolTx:
ix_list = self._decode_ix_list()
for arg in args:
if isinstance(arg, SolTxIx):
ix_list.append(arg)
elif isinstance(arg, SolTx):
ix_list.extend(arg._decode_ix_list())
else:
raise ValueError('invalid instruction:', arg)
block_hash = self.recent_block_hash
self._solders_legacy_tx = self.METHOD_NAME(recent_block_hash=block_hash, ix_list=ix_list)
return self
def serialize(self) -> bytes:
assert self._is_signed, 'transaction has not been signed'
result = self._serialize()
if len(result) > _SolPktDataSize:
raise SolTxSizeError(len(result), _SolPktDataSize)
return result
def sign(self, signer: SolAccount) -> None:
if signer.pubkey() != self.fee_payer:
self.fee_payer = signer.pubkey()
self._sign(signer)
self._is_signed = True
def validate(self, signer: SolAccount):
tx = self._clone()
tx.recent_block_hash = SolBlockHash.from_string('4NCYB3kRT8sCNodPNuCZo8VUh4xqpBQxsxed2wd9xaD4')
tx.sign(signer)
tx.serialize()  # <- raises SolTxSizeError if the serialized tx exceeds the packet size
def clone(self) -> SolTx:
tx = self._clone()
self._is_cloned = True
return tx
def METHOD_NAME(self, recent_block_hash: Optional[SolBlockHash],
ix_list: Optional[Sequence[SolTxIx]]) -> _SoldersLegacyTx:
self._is_signed = False
if recent_block_hash is None:
recent_block_hash = SolBlockHash.default()
if ix_list is None:
ix_list: List[SolTxIx] = list()
fee_payer: Optional[SolPubKey] = None
for ix in ix_list:
for acct_meta in ix.accounts:
if acct_meta.is_signer:
fee_payer = acct_meta.pubkey
break
msg = _SoldersLegacyMsg.new_with_blockhash(ix_list, fee_payer, recent_block_hash)
return _SoldersLegacyTx.new_unsigned(msg)
def _decode_ix_list(self, signer: Optional[SolPubKey] = None) -> List[SolTxIx]:
msg = self._solders_legacy_tx.message
acct_key_list = msg.account_keys
ix_list: List[SolTxIx] = list()
for compiled_ix in msg.instructions:
ix_data = compiled_ix.data
program_id = acct_key_list[compiled_ix.program_id_index]
acct_meta_list: List[SolAccountMeta] = list()
for idx in compiled_ix.accounts:
is_signer = msg.is_signer(idx)
if (signer is not None) and is_signer:
acct_meta = SolAccountMeta(signer, True, msg.is_writable(idx))
else:
acct_meta = SolAccountMeta(acct_key_list[idx], is_signer, msg.is_writable(idx))
acct_meta_list.append(acct_meta)
ix_list.append(SolTxIx(program_id, ix_data, acct_meta_list))
return ix_list
@property
def is_signed(self) -> bool:
return self._is_signed
@property
def sig(self) -> SolSig:
assert self._is_signed, 'Transaction has not been signed'
return self._sig()
@abc.abstractmethod
def _serialize(self) -> bytes:
pass
@abc.abstractmethod
def _sign(self, signer: SolAccount) -> None:
pass
@abc.abstractmethod
def _sig(self) -> SolSig:
pass
@abc.abstractmethod
def _clone(self) -> SolTx:
pass
|
4,291 |
test context is lazy
|
import unittest.mock as mock
import pytest
import sqlalchemy as sa
import sqlalchemy.dialects.mysql
import sqlalchemy.dialects.oracle
import sqlalchemy.dialects.postgresql
import sqlalchemy.dialects.sqlite
from sqlalchemy import inspect
from sqlalchemy_utils import Password, PasswordType, types # noqa
from sqlalchemy_utils.compat import _select_args
@pytest.fixture
def extra_kwargs():
"""PasswordType extra keyword arguments."""
return {}
@pytest.fixture
def User(Base, extra_kwargs):
class User(Base):
__tablename__ = 'user'
id = sa.Column(sa.Integer, primary_key=True)
password = sa.Column(PasswordType(
schemes=[
'pbkdf2_sha512',
'pbkdf2_sha256',
'md5_crypt',
'hex_md5'
],
deprecated=['md5_crypt', 'hex_md5'],
**extra_kwargs
))
def __repr__(self):
return 'User(%r)' % self.id
return User
@pytest.fixture
def init_models(User):
pass
def onload_callback(schemes, deprecated):
"""
Get onload callback that takes the PasswordType arguments from the config.
"""
def onload(**kwargs):
kwargs['schemes'] = schemes
kwargs['deprecated'] = deprecated
return kwargs
return onload
@pytest.mark.skipif('types.password.passlib is None')
class TestPasswordType:
@pytest.mark.parametrize('dialect_module,impl', [
(sqlalchemy.dialects.sqlite, sa.dialects.sqlite.BLOB),
(sqlalchemy.dialects.postgresql, sa.dialects.postgresql.BYTEA),
(sqlalchemy.dialects.oracle, sa.dialects.oracle.RAW),
(sqlalchemy.dialects.mysql, sa.VARBINARY),
])
def test_load_dialect_impl(self, dialect_module, impl):
"""
Should produce the same impl type as Alembic would expect after
inspecting a database
"""
password_type = PasswordType()
assert isinstance(
password_type.load_dialect_impl(dialect_module.dialect()),
impl
)
def test_encrypt(self, User):
"""Should encrypt the password on setting the attribute."""
obj = User()
obj.password = b'b'
assert obj.password.hash != 'b'
assert obj.password.hash.startswith(b'$pbkdf2-sha512$')
def test_check(self, session, User):
"""
Should be able to compare the plaintext against the
encrypted form.
"""
obj = User()
obj.password = 'b'
assert obj.password == 'b'
assert obj.password != 'a'
session.add(obj)
session.commit()
try:
obj = session.get(User, obj.id)
except AttributeError:
# sqlalchemy 1.3
obj = session.query(User).get(obj.id)
assert obj.password == b'b'
assert obj.password != 'a'
def test_check_and_update(self, User):
"""
Should be able to compare the plaintext against a deprecated
encrypted form and have it auto-update to the preferred version.
"""
from passlib.hash import md5_crypt
obj = User()
obj.password = Password(md5_crypt.hash('b'))
assert obj.password.hash.decode('utf8').startswith('$1$')
assert obj.password == 'b'
assert obj.password.hash.decode('utf8').startswith('$pbkdf2-sha512$')
def test_auto_column_length(self, User):
"""Should derive the correct column length from the specified schemes.
"""
from passlib.hash import pbkdf2_sha512
kind = inspect(User).c.password.type
# name + rounds + salt + hash + ($ * 4) of largest hash
expected_length = len(pbkdf2_sha512.name)
expected_length += len(str(pbkdf2_sha512.max_rounds))
expected_length += pbkdf2_sha512.max_salt_size
expected_length += pbkdf2_sha512.encoded_checksum_size
expected_length += 4
assert kind.length == expected_length
def test_without_schemes(self):
assert PasswordType(schemes=[]).length == 1024
def test_compare(self, User):
from passlib.hash import md5_crypt
obj = User()
obj.password = Password(md5_crypt.hash('b'))
other = User()
other.password = Password(md5_crypt.hash('b'))
# Not sure what to assert here; the test raised an error before.
assert obj.password != other.password
def test_set_none(self, session, User):
obj = User()
obj.password = None
assert obj.password is None
session.add(obj)
session.commit()
try:
obj = session.get(User, obj.id)
except AttributeError:
# sqlalchemy 1.3
obj = session.query(User).get(obj.id)
assert obj.password is None
def test_update_none(self, session, User):
"""
Should be able to change a password from ``None`` to a valid
password.
"""
obj = User()
obj.password = None
session.add(obj)
session.commit()
try:
obj = session.get(User, obj.id)
except AttributeError:
# sqlalchemy 1.3
obj = session.query(User).get(obj.id)
obj.password = 'b'
session.commit()
def test_compare_none(self, User):
"""
Should be able to compare a password of ``None``.
"""
obj = User()
obj.password = None
assert obj.password is None
assert obj.password == None # noqa
obj.password = 'b'
assert obj.password is not None
assert obj.password != None # noqa
def test_check_and_update_persist(self, session, User):
"""
When a password is compared, the hash should be updated if needed to
change the algorithm, and the change should be committed to the database.
"""
from passlib.hash import md5_crypt
obj = User()
obj.password = Password(md5_crypt.hash('b'))
session.add(obj)
session.commit()
assert obj.password.hash.decode('utf8').startswith('$1$')
assert obj.password == 'b'
session.commit()
try:
obj = session.get(User, obj.id)
except AttributeError:
# sqlalchemy 1.3
obj = session.query(User).get(obj.id)
assert obj.password.hash.decode('utf8').startswith('$pbkdf2-sha512$')
assert obj.password == 'b'
@pytest.mark.parametrize(
'extra_kwargs',
[
dict(
onload=onload_callback(
schemes=['pbkdf2_sha256'],
deprecated=[],
)
)
]
)
def test_lazy_configuration(self, User):
"""
Field should be able to read the passlib attributes lazily from the
config (e.g. Flask config).
"""
schemes = User.password.type.context.schemes()
assert tuple(schemes) == ('pbkdf2_sha256',)
obj = User()
obj.password = b'b'
assert obj.password.hash.decode('utf8').startswith('$pbkdf2-sha256$')
@pytest.mark.parametrize('max_length', [1, 103])
def test_constant_length(self, max_length):
"""
Test that constant max_length is applied.
"""
typ = PasswordType(max_length=max_length)
assert typ.length == max_length
def METHOD_NAME(self):
"""
Make sure the init doesn't evaluate the lazy context.
"""
onload = mock.Mock(return_value={})
PasswordType(onload=onload)
assert not onload.called
def test_compilation(self, User, session):
query = sa.select(*_select_args(User.password))
# the type should be cacheable and not throw exception
session.execute(query)
|
4,292 |
check parameters match
|
# TODO inspect for Cython (see sagenb.misc.sageinspect)
import re
import inspect
import pytest
import vispy.scene.cameras.magnify
from vispy.testing import run_tests_if_main, requires_numpydoc
public_modules = [
# the list of modules users need to access for all functionality
'vispy',
'vispy.color',
'vispy.geometry',
'vispy.gloo',
'vispy.io',
'vispy.plot',
'vispy.scene',
'vispy.util',
'vispy.visuals',
]
def _func_name(func, cls=None):
"""Get the name."""
parts = []
if cls is not None:
module = inspect.getmodule(cls)
else:
module = inspect.getmodule(func)
if module:
parts.append(module.__name__)
if cls is not None:
parts.append(cls.__name__)
parts.append(func.__name__)
return '.'.join(parts)
# functions to ignore
docstring_ignores = [
'vispy.scene.visuals', # not parsed properly by this func, copies anyway
]
error_ignores = {
# These we do not live by:
'GL01', # Docstring should start in the line immediately after the quotes
'EX01', 'EX02', # examples failed (we test them separately)
'ES01', # no extended summary
'SA01', # no see also
'YD01', # no yields section
'SA04', # no description in See Also
'PR04', # Parameter "shape (n_channels" has no type
'RT02', # The first line of the Returns section should contain only the type, unless multiple values are being returned # noqa
# XXX should also verify that | is used rather than , to separate params
# XXX should maybe also restore the parameter-desc-length < 800 char check
}
error_ignores_specific = {}
subclass_name_ignores = (
(vispy.scene.cameras.magnify.MagnifyCamera, {'MagnifyTransform', 'Magnify1DTransform'}),
)
def METHOD_NAME(func, cls=None):
"""Check docstring, return list of incorrect results."""
from numpydoc.validate import validate
name = _func_name(func, cls)
skip = (not name.startswith('vispy.') or
any(re.match(d, name) for d in docstring_ignores) or
'deprecation_wrapped' in getattr(
getattr(func, '__code__', None), 'co_name', ''))
if skip:
return list()
if cls is not None:
for subclass, ignores in subclass_name_ignores:
if issubclass(cls, subclass) and name.split('.')[-1] in ignores:
return list()
incorrect = ['%s : %s : %s' % (name, err[0], err[1])
for err in validate(name)['errors']
if err[0] not in error_ignores and
(name.split('.')[-1], err[0]) not in error_ignores_specific]
return incorrect
@pytest.mark.xfail
@requires_numpydoc()
def test_docstring_parameters():
"""Test module docstring formatting."""
from numpydoc import docscrape
incorrect = []
for name in public_modules:
# Assert that by default we import all public names with `import vispy`
# if name not in ('vispy'):
# extra = name.split('.')[1]
# assert hasattr(vispy, extra)
with pytest.warns(None): # traits warnings
module = __import__(name, globals())
for submod in name.split('.')[1:]:
module = getattr(module, submod)
classes = inspect.getmembers(module, inspect.isclass)
for cname, cls in classes:
if cname.startswith('_'):
continue
incorrect += METHOD_NAME(cls)
cdoc = docscrape.ClassDoc(cls)
for method_name in cdoc.methods:
method = getattr(cls, method_name)
incorrect += METHOD_NAME(method, cls=cls)
if hasattr(cls, '__call__') and \
'of type object' not in str(cls.__call__):
incorrect += METHOD_NAME(cls.__call__, cls)
functions = inspect.getmembers(module, inspect.isfunction)
for fname, func in functions:
if fname.startswith('_'):
continue
incorrect += METHOD_NAME(func)
incorrect = sorted(list(set(incorrect)))
msg = '\n' + '\n'.join(incorrect)
msg += '\n%d error%s' % (len(incorrect), 's' if len(incorrect) != 1 else '')
if len(incorrect) > 0:
raise AssertionError(msg)
run_tests_if_main()
|
4,293 |
test ascension day
|
# python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Authors: dr-prodigy <[email protected]> (c) 2017-2023
# ryanss <[email protected]> (c) 2014-2017
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
import warnings
from holidays.constants import BANK
from holidays.countries.austria import Austria, AT, AUT
from tests.common import TestCase
class TestAustria(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass(Austria, years=range(1900, 2050))
def test_country_aliases(self):
self.assertCountryAliases(Austria, AT, AUT)
def test_new_years(self):
self.assertHolidayName("Neujahr", (f"{year}-01-01" for year in range(1900, 2050)))
def test_epiphany(self):
self.assertHolidayName(
"Heilige Drei Könige", (f"{year}-01-06" for year in range(1900, 2050))
)
def test_easter_monday(self):
self.assertHolidayName(
"Ostermontag",
"1900-04-16",
"1901-04-08",
"1902-03-31",
"1999-04-05",
"2000-04-24",
"2010-04-05",
"2018-04-02",
"2019-04-22",
"2020-04-13",
"2021-04-05",
"2022-04-18",
)
def test_labour_day(self):
self.assertHolidayName("Staatsfeiertag", (f"{year}-05-01" for year in range(1900, 2050)))
def METHOD_NAME(self):
self.assertHolidayName(
"Christi Himmelfahrt",
"1900-05-24",
"1901-05-16",
"1902-05-08",
"1999-05-13",
"2000-06-01",
"2010-05-13",
"2018-05-10",
"2019-05-30",
"2020-05-21",
"2021-05-13",
"2022-05-26",
)
def test_whit_monday(self):
self.assertHolidayName(
"Pfingstmontag",
"1900-06-04",
"1901-05-27",
"1902-05-19",
"1999-05-24",
"2000-06-12",
"2010-05-24",
"2018-05-21",
"2019-06-10",
"2020-06-01",
"2021-05-24",
"2022-06-06",
)
def test_corpus_christi(self):
self.assertHolidayName(
"Fronleichnam",
"1900-06-14",
"1901-06-06",
"1902-05-29",
"1999-06-03",
"2000-06-22",
"2010-06-03",
"2018-05-31",
"2019-06-20",
"2020-06-11",
"2021-06-03",
"2022-06-16",
)
def test_assumption_day(self):
self.assertHolidayName(
"Mariä Himmelfahrt", (f"{year}-08-15" for year in range(1900, 2050))
)
def test_national_day(self):
self.assertHolidayName(
"Nationalfeiertag",
(f"{year}-11-12" for year in range(1919, 1935)),
(f"{year}-10-26" for year in range(1967, 2050)),
)
self.assertNoHoliday("1918-11-12", "1935-11-12", "1966-10-26")
self.assertNoHolidayName("Nationalfeiertag", range(1900, 1919), range(1935, 1967))
def test_all_saints_day(self):
self.assertHolidayName("Allerheiligen", (f"{year}-11-01" for year in range(1900, 2050)))
def test_immaculate_conception_day(self):
self.assertHolidayName("Mariä Empfängnis", (f"{year}-12-08" for year in range(1900, 2050)))
def test_christmas_day(self):
self.assertHolidayName("Christtag", (f"{year}-12-25" for year in range(1900, 2050)))
def test_st_stephens_day(self):
self.assertHolidayName("Stefanitag", (f"{year}-12-26" for year in range(1900, 2050)))
def test_2022(self):
self.assertHolidays(
Austria(years=2022),
("2022-01-01", "Neujahr"),
("2022-01-06", "Heilige Drei Könige"),
("2022-04-18", "Ostermontag"),
("2022-05-01", "Staatsfeiertag"),
("2022-05-26", "Christi Himmelfahrt"),
("2022-06-06", "Pfingstmontag"),
("2022-06-16", "Fronleichnam"),
("2022-08-15", "Mariä Himmelfahrt"),
("2022-10-26", "Nationalfeiertag"),
("2022-11-01", "Allerheiligen"),
("2022-12-08", "Mariä Empfängnis"),
("2022-12-25", "Christtag"),
("2022-12-26", "Stefanitag"),
)
def test_bank_2022(self):
self.assertHolidays(
Austria(categories=(BANK,), years=2022),
("2022-04-15", "Karfreitag"),
("2022-12-24", "Heiliger Abend"),
("2022-12-31", "Silvester"),
)
def test_subdivisions(self):
warnings.simplefilter("ignore", category=DeprecationWarning)
for code in (9, "9", "", None):
self.assertEqual(AT(prov=code).subdiv, "9")
self.assertEqual(AT(state=code).subdiv, "9")
self.assertEqual(AT(subdiv=code).subdiv, "9")
def test_l10n_default(self):
self.assertLocalizedHolidays(
("2022-01-01", "Neujahr"),
("2022-01-06", "Heilige Drei Könige"),
("2022-04-15", "Karfreitag"),
("2022-04-18", "Ostermontag"),
("2022-05-01", "Staatsfeiertag"),
("2022-05-26", "Christi Himmelfahrt"),
("2022-06-06", "Pfingstmontag"),
("2022-06-16", "Fronleichnam"),
("2022-08-15", "Mariä Himmelfahrt"),
("2022-10-26", "Nationalfeiertag"),
("2022-11-01", "Allerheiligen"),
("2022-12-08", "Mariä Empfängnis"),
("2022-12-24", "Heiliger Abend"),
("2022-12-25", "Christtag"),
("2022-12-26", "Stefanitag"),
("2022-12-31", "Silvester"),
)
def test_l10n_en_us(self):
self.assertLocalizedHolidays(
"en_US",
("2022-01-01", "New Year's Day"),
("2022-01-06", "Epiphany"),
("2022-04-15", "Good Friday"),
("2022-04-18", "Easter Monday"),
("2022-05-01", "Labor Day"),
("2022-05-26", "Ascension Day"),
("2022-06-06", "Whit Monday"),
("2022-06-16", "Corpus Christi"),
("2022-08-15", "Assumption Day"),
("2022-10-26", "National Day"),
("2022-11-01", "All Saints' Day"),
("2022-12-08", "Immaculate Conception"),
("2022-12-24", "Christmas Eve"),
("2022-12-25", "Christmas Day"),
("2022-12-26", "St. Stephen's Day"),
("2022-12-31", "New Year's Eve"),
)
def test_l10n_uk(self):
self.assertLocalizedHolidays(
"uk",
("2022-01-01", "Новий рік"),
("2022-01-06", "Богоявлення"),
("2022-04-15", "Страсна пʼятниця"),
("2022-04-18", "Великодній понеділок"),
("2022-05-01", "День праці"),
("2022-05-26", "Вознесіння Господнє"),
("2022-06-06", "День Святого Духа"),
("2022-06-16", "Свято Тіла і Крові Христових"),
("2022-08-15", "Внебовзяття Пресвятої Діви Марії"),
("2022-10-26", "Національне свято"),
("2022-11-01", "День усіх святих"),
("2022-12-08", "Непорочне зачаття Діви Марії"),
("2022-12-24", "Святий вечір"),
("2022-12-25", "Різдво Христове"),
("2022-12-26", "День Святого Стефана"),
("2022-12-31", "Переддень Нового року"),
)
|
4,294 |
test constant dunder
|
import math
import pytest
from jinja2.exceptions import UndefinedError
from jinja2.nativetypes import NativeEnvironment
from jinja2.nativetypes import NativeTemplate
from jinja2.runtime import Undefined
@pytest.fixture
def env():
return NativeEnvironment()
def test_is_defined_native_return(env):
t = env.from_string("{{ missing is defined }}")
assert not t.render()
def test_undefined_native_return(env):
t = env.from_string("{{ missing }}")
assert isinstance(t.render(), Undefined)
def test_adding_undefined_native_return(env):
t = env.from_string("{{ 3 + missing }}")
with pytest.raises(UndefinedError):
t.render()
def test_cast_int(env):
t = env.from_string("{{ value|int }}")
result = t.render(value="3")
assert isinstance(result, int)
assert result == 3
def test_list_add(env):
t = env.from_string("{{ a + b }}")
result = t.render(a=["a", "b"], b=["c", "d"])
assert isinstance(result, list)
assert result == ["a", "b", "c", "d"]
def test_multi_expression_add(env):
t = env.from_string("{{ a }} + {{ b }}")
result = t.render(a=["a", "b"], b=["c", "d"])
assert not isinstance(result, list)
assert result == "['a', 'b'] + ['c', 'd']"
def test_loops(env):
t = env.from_string("{% for x in value %}{{ x }}{% endfor %}")
result = t.render(value=["a", "b", "c", "d"])
assert isinstance(result, str)
assert result == "abcd"
def test_loops_with_ints(env):
t = env.from_string("{% for x in value %}{{ x }}{% endfor %}")
result = t.render(value=[1, 2, 3, 4])
assert isinstance(result, int)
assert result == 1234
def test_loop_look_alike(env):
t = env.from_string("{% for x in value %}{{ x }}{% endfor %}")
result = t.render(value=[1])
assert isinstance(result, int)
assert result == 1
@pytest.mark.parametrize(
("source", "expect"),
(
("{{ value }}", True),
("{{ value }}", False),
("{{ 1 == 1 }}", True),
("{{ 2 + 2 == 5 }}", False),
("{{ None is none }}", True),
("{{ '' == None }}", False),
),
)
def test_booleans(env, source, expect):
t = env.from_string(source)
result = t.render(value=expect)
assert isinstance(result, bool)
assert result is expect
def test_variable_dunder(env):
t = env.from_string("{{ x.__class__ }}")
result = t.render(x=True)
assert isinstance(result, type)
def METHOD_NAME(env):
t = env.from_string("{{ true.__class__ }}")
result = t.render()
assert isinstance(result, type)
def test_constant_dunder_to_string(env):
t = env.from_string("{{ true.__class__|string }}")
result = t.render()
assert not isinstance(result, type)
assert result in {"<type 'bool'>", "<class 'bool'>"}
def test_string_literal_var(env):
t = env.from_string("[{{ 'all' }}]")
result = t.render()
assert isinstance(result, str)
assert result == "[all]"
def test_string_top_level(env):
t = env.from_string("'Jinja'")
result = t.render()
assert result == "Jinja"
def test_tuple_of_variable_strings(env):
t = env.from_string("'{{ a }}', 'data', '{{ b }}', b'{{ c }}'")
result = t.render(a=1, b=2, c="bytes")
assert isinstance(result, tuple)
assert result == ("1", "data", "2", b"bytes")
def test_concat_strings_with_quotes(env):
t = env.from_string("--host='{{ host }}' --user \"{{ user }}\"")
result = t.render(host="localhost", user="Jinja")
assert result == "--host='localhost' --user \"Jinja\""
def test_no_intermediate_eval(env):
t = env.from_string("0.000{{ a }}")
result = t.render(a=7)
assert isinstance(result, float)
# If intermediate eval happened, 0.000 would render 0.0, then 7
# would be appended, resulting in 0.07.
assert math.isclose(result, 0.0007)
def test_spontaneous_env():
t = NativeTemplate("{{ true }}")
assert isinstance(t.environment, NativeEnvironment)
def test_leading_spaces(env):
t = env.from_string(" {{ True }}")
result = t.render()
assert result == " True"
def test_macro(env):
t = env.from_string("{%- macro x() -%}{{- [1,2] -}}{%- endmacro -%}{{- x()[1] -}}")
result = t.render()
assert result == 2
assert isinstance(result, int)
|
4,295 |
mul
|
# Copyright (c) 2016, Hubert Kario
#
# See the LICENSE file for legal information regarding use of this file.
# compatibility with Python 2.6, for that we need unittest2 package,
# which is not available on 3.3 or 3.4
from __future__ import division
try:
import unittest2 as unittest
except ImportError:
import unittest
import tlslite.utils.rijndael as rijndael
class TestConstants(unittest.TestCase):
def setUp(self):
A = [[1, 1, 1, 1, 1, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 1, 1, 1, 1, 1],
[1, 0, 0, 0, 1, 1, 1, 1],
[1, 1, 0, 0, 0, 1, 1, 1],
[1, 1, 1, 0, 0, 0, 1, 1],
[1, 1, 1, 1, 0, 0, 0, 1]]
# produce log and alog tables, needed for multiplying in the
# field GF(2^m) (generator = 3)
alog = [1]
for i in range(255):
j = (alog[-1] << 1) ^ alog[-1]
if j & 0x100 != 0:
j ^= 0x11B
alog.append(j)
log = [0] * 256
for i in range(1, 255):
log[alog[i]] = i
# multiply two elements of GF(2^m)
def METHOD_NAME(a, b):
if a == 0 or b == 0:
return 0
return alog[(log[a & 0xFF] + log[b & 0xFF]) % 255]
# substitution box based on F^{-1}(x)
box = [[0] * 8 for i in range(256)]
box[1][7] = 1
for i in range(2, 256):
j = alog[255 - log[i]]
for t in range(8):
box[i][t] = (j >> (7 - t)) & 0x01
B = [0, 1, 1, 0, 0, 0, 1, 1]
# affine transform: box[i] <- B + A*box[i]
cox = [[0] * 8 for i in range(256)]
for i in range(256):
for t in range(8):
cox[i][t] = B[t]
for j in range(8):
cox[i][t] ^= A[t][j] * box[i][j]
# S-boxes and inverse S-boxes
S = [0] * 256
Si = [0] * 256
for i in range(256):
S[i] = cox[i][0] << 7
for t in range(1, 8):
S[i] ^= cox[i][t] << (7-t)
Si[S[i] & 0xFF] = i
# T-boxes
G = [[2, 1, 1, 3],
[3, 2, 1, 1],
[1, 3, 2, 1],
[1, 1, 3, 2]]
AA = [[0] * 8 for i in range(4)]
for i in range(4):
for j in range(4):
AA[i][j] = G[i][j]
AA[i][i+4] = 1
for i in range(4):
pivot = AA[i][i]
if pivot == 0:
t = i + 1
while t < 4 and AA[t][i] == 0:  # check bounds first to avoid IndexError on a singular matrix
t += 1
assert t != 4, 'G matrix must be invertible'
for j in range(8):
AA[i][j], AA[t][j] = AA[t][j], AA[i][j]
pivot = AA[i][i]
for j in range(8):
if AA[i][j] != 0:
AA[i][j] = alog[(255 + log[AA[i][j] & 0xFF] -
log[pivot & 0xFF]) % 255]
for t in range(4):
if i != t:
for j in range(i+1, 8):
AA[t][j] ^= METHOD_NAME(AA[i][j], AA[t][i])
AA[t][i] = 0
iG = [[0] * 4 for i in range(4)]
for i in range(4):
for j in range(4):
iG[i][j] = AA[i][j + 4]
def mul4(a, bs):
if a == 0:
return 0
r = 0
for b in bs:
r <<= 8
if b != 0:
r = r | METHOD_NAME(a, b)
return r
T1 = []
T2 = []
T3 = []
T4 = []
T5 = []
T6 = []
T7 = []
T8 = []
U1 = []
U2 = []
U3 = []
U4 = []
for t in range(256):
s = S[t]
T1.append(mul4(s, G[0]))
T2.append(mul4(s, G[1]))
T3.append(mul4(s, G[2]))
T4.append(mul4(s, G[3]))
s = Si[t]
T5.append(mul4(s, iG[0]))
T6.append(mul4(s, iG[1]))
T7.append(mul4(s, iG[2]))
T8.append(mul4(s, iG[3]))
U1.append(mul4(t, iG[0]))
U2.append(mul4(t, iG[1]))
U3.append(mul4(t, iG[2]))
U4.append(mul4(t, iG[3]))
# round constants
rcon = [1]
r = 1
for t in range(1, 30):
r = METHOD_NAME(2, r)
rcon.append(r)
self.S = tuple(S)
self.Si = tuple(Si)
self.T1 = tuple(T1)
self.T2 = tuple(T2)
self.T3 = tuple(T3)
self.T4 = tuple(T4)
self.T5 = tuple(T5)
self.T6 = tuple(T6)
self.T7 = tuple(T7)
self.T8 = tuple(T8)
self.U1 = tuple(U1)
self.U2 = tuple(U2)
self.U3 = tuple(U3)
self.U4 = tuple(U4)
self.rcon = tuple(rcon)
def test_S_box(self):
self.assertEqual(rijndael.S, self.S)
def test_Si_box(self):
self.assertEqual(rijndael.Si, self.Si)
def test_T1(self):
self.assertEqual(rijndael.T1, self.T1)
def test_T2(self):
self.assertEqual(rijndael.T2, self.T2)
def test_T3(self):
self.assertEqual(rijndael.T3, self.T3)
def test_T4(self):
self.assertEqual(rijndael.T4, self.T4)
def test_T5(self):
self.assertEqual(rijndael.T5, self.T5)
def test_T6(self):
self.assertEqual(rijndael.T6, self.T6)
def test_T7(self):
self.assertEqual(rijndael.T7, self.T7)
def test_T8(self):
self.assertEqual(rijndael.T8, self.T8)
def test_U1(self):
self.assertEqual(rijndael.U1, self.U1)
def test_U2(self):
self.assertEqual(rijndael.U2, self.U2)
def test_U3(self):
self.assertEqual(rijndael.U3, self.U3)
def test_U4(self):
self.assertEqual(rijndael.U4, self.U4)
def test_rcon(self):
self.assertEqual(rijndael.rcon, self.rcon)
class TestSelfDecryptEncrypt(unittest.TestCase):
def enc_dec(self, k_len, b_len):
plaintext = bytearray(b'b' * b_len)
cipher = rijndael.Rijndael(bytearray(b'a' * k_len), b_len)
self.assertEqual(plaintext,
cipher.decrypt(cipher.encrypt(plaintext)))
def test_16_16(self):
self.enc_dec(16, 16)
def test_16_24(self):
self.enc_dec(16, 24)
def test_16_32(self):
self.enc_dec(16, 32)
def test_24_16(self):
self.enc_dec(24, 16)
def test_24_24(self):
self.enc_dec(24, 24)
def test_24_32(self):
self.enc_dec(24, 32)
def test_32_16(self):
self.enc_dec(32, 16)
def test_32_24(self):
self.enc_dec(32, 24)
def test_32_32(self):
self.enc_dec(32, 32)
|
4,296 |
test hernquist mass normalization
|
__author__ = "sibirrer"
import numpy as np
import numpy.testing as npt
import pytest
from lenstronomy.Cosmo.lens_cosmo import LensCosmo
from lenstronomy.Util import util
class TestLensCosmo(object):
"""Tests the UnitManager class routines."""
def setup_method(self):
z_L = 0.8
z_S = 3.0
from astropy.cosmology import FlatLambdaCDM
cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Ob0=0.05)
self.lensCosmo = LensCosmo(z_L, z_S, cosmo=cosmo)
def test_ang_dist(self):
npt.assert_almost_equal(self.lensCosmo.ds, 1588.9213590743666, decimal=8)
npt.assert_almost_equal(self.lensCosmo.dd, 1548.7055203661785, decimal=8)
npt.assert_almost_equal(self.lensCosmo.dds, 892.0038749095863, decimal=8)
def test_epsilon_crit(self):
npt.assert_almost_equal(self.lensCosmo.sigma_crit / 1.9121e15, 1, decimal=3)
def test_arcsec2phys(self):
arcsec = np.array([1, 2]) # pixel coordinate from center
physcoord = self.lensCosmo.arcsec2phys_lens(arcsec)
npt.assert_almost_equal(physcoord[0], 0.0075083362428338641, decimal=8)
npt.assert_almost_equal(physcoord[1], 0.015016672485667728, decimal=8)
physcoord = self.lensCosmo.arcsec2phys_source(arcsec)
npt.assert_almost_equal(physcoord[0], 0.007703308130864105, decimal=8)
npt.assert_almost_equal(physcoord[1], 0.01540661626172821, decimal=8)
def test_phys2arcsec_lens(self):
phys = 1.0
arc_sec = self.lensCosmo.phys2arcsec_lens(phys)
phys_new = self.lensCosmo.arcsec2phys_lens(arc_sec)
npt.assert_almost_equal(phys_new, phys, decimal=8)
def test_mass_in_phi_E(self):
phi_E = 1.5
mass = self.lensCosmo.mass_in_theta_E(phi_E)
npt.assert_almost_equal(mass, 761967261292.6725, decimal=2)
def test_kappa2proj_mass(self):
kappa = 0.5
mass = self.lensCosmo.kappa2proj_mass(kappa)
npt.assert_almost_equal(mass, kappa * self.lensCosmo.sigma_crit, decimal=3)
def test_mass_in_coin(self):
theta_E = 1.0
m_coin = self.lensCosmo.mass_in_coin(theta_E)
npt.assert_almost_equal(m_coin, 165279526936.52194, decimal=0)
def test_D_dt_model(self):
D_dt = self.lensCosmo.ddt
npt.assert_almost_equal(D_dt, 4965.660384441859, decimal=8)
def test_nfw_angle2physical(self):
Rs_angle = 6.0
alpha_Rs = 1.0
rho0, Rs, c, r200, M200 = self.lensCosmo.nfw_angle2physical(Rs_angle, alpha_Rs)
assert Rs * c == r200
def test_nfw_physical2angle(self):
M = 10.0**13.5
c = 4
Rs_angle, alpha_Rs = self.lensCosmo.nfw_physical2angle(M, c)
rho0, Rs, c_out, r200, M200 = self.lensCosmo.nfw_angle2physical(
Rs_angle, alpha_Rs
)
npt.assert_almost_equal(c_out, c, decimal=3)
npt.assert_almost_equal(np.log10(M200), np.log10(M), decimal=4)
def test_sis_theta_E2sigma_v(self):
theta_E = 2.0
sigma_v = self.lensCosmo.sis_theta_E2sigma_v(theta_E)
theta_E_out = self.lensCosmo.sis_sigma_v2theta_E(sigma_v)
npt.assert_almost_equal(theta_E_out, theta_E, decimal=5)
def test_fermat2delays(self):
fermat_pot = 0.5
dt_days = self.lensCosmo.time_delay_units(fermat_pot)
fermat_pot_out = self.lensCosmo.time_delay2fermat_pot(dt_days)
npt.assert_almost_equal(fermat_pot, fermat_pot_out, decimal=10)
def test_uldm_angular2phys(self):
kappa_0, theta_c = 0.1, 3
mlog10, Mlog10 = self.lensCosmo.uldm_angular2phys(kappa_0, theta_c)
npt.assert_almost_equal(mlog10, -24.3610006, decimal=5)
npt.assert_almost_equal(Mlog10, 11.7195843, decimal=5)
def test_uldm_mphys2angular(self):
m_log10, M_log10 = -24, 11
kappa_0, theta_c = self.lensCosmo.uldm_mphys2angular(m_log10, M_log10)
mcheck, Mcheck = self.lensCosmo.uldm_angular2phys(kappa_0, theta_c)
npt.assert_almost_equal(mcheck, m_log10, decimal=4)
npt.assert_almost_equal(Mcheck, M_log10, decimal=4)
def test_a_z(self):
a = self.lensCosmo.background.a_z(z=1)
npt.assert_almost_equal(a, 0.5)
def test_sersic_m_star2k_eff(self):
m_star = 10**11.5
R_sersic = 1
n_sersic = 4
k_eff = self.lensCosmo.sersic_m_star2k_eff(m_star, R_sersic, n_sersic)
npt.assert_almost_equal(k_eff, 0.1294327891669961, decimal=5)
m_star_out = self.lensCosmo.sersic_k_eff2m_star(k_eff, R_sersic, n_sersic)
npt.assert_almost_equal(m_star_out, m_star, decimal=6)
def test_hernquist_angular2phys(self):
m_star = 10**10 # in M_sun
rs = 0.01 # in Mpc
# test bijective transformation
sigma0, rs_angle = self.lensCosmo.hernquist_phys2angular(mass=m_star, rs=rs)
m_star_new, rs_new = self.lensCosmo.hernquist_angular2phys(
sigma0=sigma0, rs_angle=rs_angle
)
npt.assert_almost_equal(m_star_new, m_star, decimal=1)
npt.assert_almost_equal(rs_new, rs, decimal=8)
def METHOD_NAME(self):
m_star = 10**10 # in M_sun
rs = 0.01 # in Mpc
# test bijective transformation
sigma0, rs_angle = self.lensCosmo.hernquist_phys2angular(mass=m_star, rs=rs)
# test mass integrals
# make large grid
delta_pix = rs_angle / 30.0
x, y = util.make_grid(numPix=501, deltapix=delta_pix)
# compute convergence
from lenstronomy.LensModel.lens_model import LensModel
lens_model = LensModel(lens_model_list=["HERNQUIST"])
kwargs = [{"sigma0": sigma0, "Rs": rs_angle, "center_x": 0, "center_y": 0}]
kappa = lens_model.kappa(x, y, kwargs)
# sum up convergence
kappa_tot = np.sum(kappa) * delta_pix**2
# transform to mass
mass_tot = kappa_tot * self.lensCosmo.sigma_crit_angle
# compare
npt.assert_almost_equal(mass_tot / m_star, 1, decimal=1)
if __name__ == "__main__":
pytest.main()
|
4,297 |
default bpe
|
# Mikel Broström 🔥 Yolo Tracking 🧾 AGPL-3.0 license
import gzip
import html
from functools import lru_cache
import ftfy
import regex as re
from boxmot.utils import BOXMOT
@lru_cache()
def METHOD_NAME():
return BOXMOT / "appearance/backbones/clip/clip/bpe_simple_vocab_16e6.txt.gz"
@lru_cache()
def bytes_to_unicode():
"""
Returns a list of utf-8 bytes and a corresponding list of unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
This is a significant percentage of your normal, say, 32K bpe vocab.
To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
This also avoids mapping to whitespace/control characters that the bpe code barfs on.
"""
bs = list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2 ** 8 + n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
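def _bytes_to_unicode_roundtrip_demo():
    # Illustrative sketch added for this writeup, not part of the original module:
    # it demonstrates the property the docstring above describes — every utf-8
    # byte maps to a printable unicode character and the mapping is reversible,
    # so BPE never has to deal with raw whitespace/control bytes.
    byte_encoder = bytes_to_unicode()
    byte_decoder = {v: k for k, v in byte_encoder.items()}
    token_bytes = "café".encode("utf-8")
    visible = ''.join(byte_encoder[b] for b in token_bytes)
    assert bytes(byte_decoder[c] for c in visible).decode("utf-8") == "café"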
def get_pairs(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
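def _get_pairs_demo():
    # Hypothetical example, not in the original file: the adjacent-symbol pairs
    # for the word ('l', 'o', 'w', '</w>'), which is the structure bpe() merges over.
    assert get_pairs(('l', 'o', 'w', '</w>')) == {('l', 'o'), ('o', 'w'), ('w', '</w>')}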
def basic_clean(text):
text = ftfy.fix_text(text)
text = html.unescape(html.unescape(text))
return text.strip()
def whitespace_clean(text):
text = re.sub(r'\s+', ' ', text)
text = text.strip()
return text
class SimpleTokenizer(object):
def __init__(self, bpe_path: str = METHOD_NAME()):
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
merges = gzip.open(bpe_path).read().decode("utf-8").split('\n')
merges = merges[1:49152 - 256 - 2 + 1]
merges = [tuple(merge.split()) for merge in merges]
vocab = list(bytes_to_unicode().values())
vocab = vocab + [v + '</w>' for v in vocab]
for merge in merges:
vocab.append(''.join(merge))
vocab.extend(['<|startoftext|>', '<|endoftext|>'])
self.encoder = dict(zip(vocab, range(len(vocab))))
self.decoder = {v: k for k, v in self.encoder.items()}
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
self.pat = re.compile(r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE) # noqa: E501
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token[:-1]) + (token[-1] + '</w>',)
pairs = get_pairs(word)
if not pairs:
return token + '</w>'
while True:
bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
except Exception:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
text = whitespace_clean(basic_clean(text)).lower()
for token in re.findall(self.pat, text):
token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
return bpe_tokens
def decode(self, tokens):
text = ''.join([self.decoder[token] for token in tokens])
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
return text
|
4,298 |
list
|
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(subscription_id: str, **kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-09-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2022-09-01-preview")
)
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/providers/Microsoft.AppPlatform/skus")
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class SkusOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.appplatform.v2022_09_01_preview.AppPlatformManagementClient`'s
:attr:`skus` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = METHOD_NAME(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def METHOD_NAME(self, **kwargs: Any) -> Iterable["_models.ResourceSku"]:
"""Lists all of the available skus of the Microsoft.AppPlatform provider.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ResourceSku or the result of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.appplatform.v2022_09_01_preview.models.ResourceSku]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-09-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2022-09-01-preview")
)
cls: ClsType[_models.ResourceSkuCollection] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.METHOD_NAME.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("ResourceSkuCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
METHOD_NAME.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.AppPlatform/skus"}
|
4,299 |
parse
|
#!/usr/bin/python3
# coding: utf-8
# Small BBCode parser for reading Godot's XML doc format, so it can be translated into different formats (not just
# Markdown, HTML etc).
class NodeText:
def __init__(self, text):
self.text = text
class NodeTag:
def __init__(self):
self.name = ""
self.value = ""
self.options = {}
self.closing = False
def to_string(self):
out = ""
out += "["
if self.closing:
out += '/'
out += self.name
if self.value != "":
out += "=" + self.value
for key, value in self.options.items():
out += " " + key
if value != "":
out += "=" + value
out += "]"
return out
def is_opening(self):
return not self.closing
def get_first_option_key(self):
for key in self.options:
return key
raise Exception("The tag has no options")
def find_next_unescaped_bracket(text, from_pos):
pos = from_pos
while pos < len(text):
pos = text.find("[", pos)
if pos == -1:
return -1
if pos > 0 and text[pos - 1] == '\\':
pos += 1
continue
return pos
return -1
def is_name_start(c):
return c.isalpha() or c == '_'
def is_name_part(c):
return c.isalnum() or c == '_' or c == '.'
def parse_name(text, begin_pos): # -> name
pos = begin_pos
name = ""
while pos < len(text):
c = text[pos]
if is_name_part(c):
pos += 1
continue
break
return text[begin_pos : pos]
def parse_tag_option_value(text, begin_pos): # -> value
pos = begin_pos
while pos < len(text):
c = text[pos]
if c == ']':
break
if c == ' ':
break
pos += 1
return text[begin_pos : pos]
def parse_tag_option(text, begin_pos): # -> name?, value?, pos
if not is_name_start(text[begin_pos]):
return None, None, begin_pos
pos = begin_pos
name = parse_name(text, pos)
pos += len(name)
if pos >= len(text):
return None, None, pos
value = ""
c = text[pos]
if c == '=':
pos += 1
if pos >= len(text):
return None, None, pos
value = parse_tag_option_value(text, pos)
pos += len(value)
return name, value, pos
def parse_tag(text, pos, nodes): # -> success, end_pos
if text[pos] == '/':
# End tag
pos += 1
if pos >= len(text):
return False, pos
tag = NodeTag()
tag.name = parse_name(text, pos)
tag.closing = True
pos += len(tag.name)
if pos >= len(text):
return False, pos
if text[pos] != ']':
return False, pos
nodes.append(tag)
pos += 1
return True, pos
# Tag name and optional value
key, value, pos = parse_tag_option(text, pos)
if key is None:
return False, pos + 1
tag = NodeTag()
tag.name = key
tag.value = value
while pos < len(text):
c = text[pos]
if c == ' ':
pos += 1
continue
if is_name_start(c):
key, value, pos = parse_tag_option(text, pos)
if key is None:
# Option without value
return False, pos + 1
tag.options[key] = value
continue
if c == ']':
nodes.append(tag)
return True, pos + 1
raise Exception("Unexpected character '" + c + "'")
return False, pos
def unescape_text(text):
return text.replace("\\[", '[')
def parse_text(text, begin_pos, nodes): # -> pos
pos = begin_pos
while pos < len(text):
open_bracket_pos = find_next_unescaped_bracket(text, pos)
if open_bracket_pos != -1:
if open_bracket_pos > pos:
nodes.append(NodeText(unescape_text(text[pos : open_bracket_pos])))
success, end_pos = parse_tag(text, open_bracket_pos + 1, nodes)
pos = end_pos
if not success:
# print("ERROR, parse_tag didn't succeed")
# Append failed tag as text
nodes.append(NodeText(text[open_bracket_pos : end_pos]))
else:
# No remaining tags
nodes.append(NodeText(unescape_text(text[pos:])))
break
return pos
# Parses text into a list of nodes, which can either be text or tags.
def METHOD_NAME(text): # -> nodes
nodes = []
parse_text(text, 0, nodes)
return nodes
# Debug
def print_nodes_as_list(nodes):
for node in nodes:
if isinstance(node, NodeText):
print("Text:`" + node.text + "`")
elif isinstance(node, NodeTag):
print("Tag:`" + node.name + "`")
else:
print("<error>", node)
# Debug
def get_nodes_as_bbcode(nodes):
out = ""
for node in nodes:
if isinstance(node, NodeText):
out += node.text
elif isinstance(node, NodeTag):
out += node.to_string()
return out
# Testing
if __name__ == "__main__":
text = "Outputs values from the custom input having the same name as the node. May be used in [VoxelGraphFunction]. It won't be used in [VoxelGeneratorGraph]."
nodes = METHOD_NAME(text)
print_nodes_as_list(nodes)
# text = ("Hello World, [i]Text[/i] is [b][i]Fun[/i][/b], with [ClassName], "
# "[color=#123456aa]Our [member Yolo.jesus][/color] and "
# "[url=xxx]link[/url], interval [0..1] in code is [code][0..1][/code]")
# nodes = parse(text)
# bb = get_nodes_as_bbcode(nodes)
# print("Equals: ", bb == text)
# print(bb)
# print(text)
|