id (int64, 0-300k) | label (string, 1-74 chars, ⌀ = null) | text (string, 4k-8k chars) |
---|---|---|
2,400 |
create or update
|
from __future__ import annotations
import itertools
from functools import reduce
from typing import Any, Tuple, Type
from django.db import IntegrityError, router, transaction
from django.db.models import Model, Q
from django.db.models.expressions import CombinedExpression
from django.db.models.signals import post_save
from .utils import resolve_combined_expression
__all__ = (
"create_or_update",
"update",
"update_or_create",
)
def _handle_value(instance: Model, value: Any) -> Any:
if isinstance(value, CombinedExpression):
return resolve_combined_expression(instance, value)
return value
def _handle_key(model: Type[Model], key: str, value: Any) -> str:
# XXX(dcramer): we want to support column shortcut on create so we can do
# create_or_update(..., {'project': 1})
if not isinstance(value, Model):
key_: str = model._meta.get_field(key).attname
return key_
return key
def update(instance: Model, using: str | None = None, **kwargs: Any) -> int:
"""
Updates specified attributes on the current instance.
"""
assert instance.pk, "Cannot update an instance that has not yet been created."
using = using or router.db_for_write(instance.__class__, instance=instance)
for field in instance._meta.fields:
if getattr(field, "auto_now", False) and field.name not in kwargs:
kwargs[field.name] = field.pre_save(instance, False)
affected = instance.__class__._base_manager.using(using).filter(pk=instance.pk).update(**kwargs)
for k, v in kwargs.items():
setattr(instance, k, _handle_value(instance, v))
if affected == 1:
post_save.send(sender=instance.__class__, instance=instance, created=False)
return affected
elif affected == 0:
return affected
elif affected < 0:
raise ValueError(
"Somehow we have updated a negative number of rows. You seem to have a problem with your db backend."
)
else:
raise ValueError("Somehow we have updated multiple rows. This is very, very bad.")
update.alters_data = True # type: ignore
def update_or_create(
model: Type[Model],
using: str | None = None,
**kwargs: Any,
) -> tuple[Model, bool]:
"""
Similar to `get_or_create()`, either updates a row or creates it.
In order to determine if the row exists, this searches on all of the kwargs
besides `defaults`. If the row exists, it is updated with the data in
`defaults`. If it doesn't, it is created with the data in `defaults` and the
remaining kwargs.
Returns a tuple of (object, created). When a new row is inserted, object is the
created instance and created is True; when an existing row is updated instead,
the first element is the number of affected rows and created is False.
"""
defaults = kwargs.pop("defaults", {})
if not using:
using = router.db_for_write(model)
objects = model.objects.using(using)
affected = objects.filter(**kwargs).update(**defaults)
if affected:
return affected, False
instance = objects.model()
create_kwargs = kwargs.copy()
create_kwargs.update(
{_handle_key(model, k, v): _handle_value(instance, v) for k, v in defaults.items()}
)
try:
with transaction.atomic(using=using):
return objects.create(**create_kwargs), True
except IntegrityError:
pass
# Retrying the update() here to preserve behavior in a race condition with a concurrent create().
return objects.filter(**kwargs).update(**defaults), False
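# Illustrative sketch, not part of the original module: assuming a model
# `Project` with a unique `slug` field, the call below first attempts an
# UPDATE filtered on the non-`defaults` kwargs and only falls back to an
# atomic INSERT (retrying the UPDATE on IntegrityError):
#
#     >>> update_or_create(Project, slug="internal", defaults={"name": "Internal"})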
def METHOD_NAME(
model: Type[Model], using: str | None = None, **kwargs: Any
) -> Tuple[int, bool]:
"""
Similar to get_or_create, either updates a row or creates it.
In order to determine if the row exists, this searches on all of the kwargs
besides `values` and `defaults`.
If the row exists, it is updated with the data in `values`. If it
doesn't, it is created with the data in `values`, `defaults`, and the remaining
kwargs.
The result will be (rows affected, False) if the row was not created,
or (instance, True) if the object is new.
>>> create_or_update(MyModel, key='value', values={
>>> 'col_name': F('col_name') + 1,
>>> }, defaults={'created_at': timezone.now()})
"""
values = kwargs.pop("values", {})
defaults = kwargs.pop("defaults", {})
if not using:
using = router.db_for_write(model)
objects = model.objects.using(using)
affected = objects.filter(**kwargs).update(**values)
if affected:
return affected, False
create_kwargs = kwargs.copy()
inst = objects.model()
for k, v in itertools.chain(values.items(), defaults.items()):
# XXX(dcramer): we want to support column shortcut on create so
# we can do create_or_update(..., {'project': 1})
if not isinstance(v, Model):
k = model._meta.get_field(k).attname
if isinstance(v, CombinedExpression):
create_kwargs[k] = resolve_combined_expression(inst, v)
else:
create_kwargs[k] = v
try:
with transaction.atomic(using=using):
return objects.create(**create_kwargs), True
except IntegrityError:
affected = objects.filter(**kwargs).update(**values)
return affected, False
def in_iexact(column: str, values: Any) -> Q:
"""Operator to test if any of the given values are (case-insensitive)
matching to values in the given column."""
from operator import or_
query = f"{column}__iexact"
# if values is empty, seed the reduce call with a default Q that resolves to `column__in=[]` (matches nothing)
query_in = f"{column}__in"
return reduce(or_, [Q(**{query: v}) for v in values], Q(**{query_in: []}))
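# Example sketch (hypothetical `User` model): in_iexact("username", ["Alice", "bob"])
# builds Q(username__iexact="Alice") | Q(username__iexact="bob"); the
# Q(username__in=[]) seed means an empty `values` list matches no rows.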
def in_icontains(column: str, values: Any) -> Q:
"""Operator to test if any of the given values are (case-insensitively)
contained within values in the given column."""
from operator import or_
query = f"{column}__icontains"
return reduce(or_, [Q(**{query: v}) for v in values])
|
2,401 |
get word rep
|
"""stimuli utility funcs for the stroop experiment
assume red is the "dominant color"
- which should be okay since the stroop task is symmetric w.r.t. color
"""
import numpy as np
# constants
COLORS = ['red', 'green']
TASKS = ['color naming', 'word reading']
CONDITIONS = ['control', 'conflict', 'congruent']
# input check
n_tasks = len(TASKS)
n_colors = len(COLORS)
assert n_colors == 2
assert n_tasks == 2
def get_color_rep(color):
if color == 'red':
return [1, 0]
elif color == 'green':
return [0, 1]
elif color is None:
return [0, 0]
else:
raise ValueError(f'Unrecognizable color: {color}')
def METHOD_NAME(word):
if word == 'red':
return [1, 0]
elif word == 'green':
return [0, 1]
elif word is None:
return [0, 0]
else:
raise ValueError(f'Unrecognizable word: {word}')
def get_task_rep(task, demand=1):
assert demand >= 0
if task == 'color naming':
return [demand, 0]
elif task == 'word reading':
return [0, demand]
elif task is None:
return [0, 0]
else:
raise ValueError(f'Unrecognizable task: {task}')
def compute_delays(SOA):
""" calculate the delay time for color/word input
positive SOA => color is presented earlier, v.v.
Parameters
----------
SOA : int
stimulus onset asynchrony == word onset - color onset
Returns
-------
int,int
the delay time for color/word input, respectively
"""
color_delay = max(0, -SOA)
word_delay = max(0, SOA)
return color_delay, word_delay
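# For example (assuming integer time steps): compute_delays(2) -> (0, 2), the
# word channel is zeroed for the first 2 steps so the color appears first;
# compute_delays(-2) -> (2, 0), the color channel is delayed instead.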
def get_stimulus(
color_input_layer, color,
word_input_layer, word,
task_input_layer, task,
n_time_steps=1, SOA=0, demand=1,
):
"""get a stroop stimulus
Parameters
----------
color/word/task_input_layer: pnl.TransferMechanism
the input layer PNL object
color/word/task : str
an element in COLORS/COLORS/TASKS
n_time_steps: int
the stimuli sequence length
SOA: int
stimulus onset asynchrony; see compute_delays()
demand: positive float
the level of activity for the active task unit
Returns
-------
dict, as requested by PNL composition
a representation of the input stimuli sequence
"""
assert abs(SOA) <= n_time_steps
# set up the stimuli
color_stimulus = np.tile(get_color_rep(color), (n_time_steps, 1))
word_stimulus = np.tile(METHOD_NAME(word), (n_time_steps, 1))
task_stimulus = np.tile(get_task_rep(task, demand), (n_time_steps, 1))
# onset delay
if SOA != 0:
color_delay, word_delay = compute_delays(SOA)
color_stimulus[:color_delay, :] = 0
word_stimulus[:word_delay, :] = 0
# form the input dict
input_dict = {
color_input_layer: color_stimulus,
word_input_layer: word_stimulus,
task_input_layer: task_stimulus
}
return input_dict
def get_stimulus_set(
inp_color, inp_word, inp_task,
n_time_steps=1, SOA=0, demand=1
):
"""get stimuli for all task x condition combination with some SOA
Parameters
----------
color/word/task_input_layer: pnl.TransferMechanism
the input layer PNL object
n_time_steps: int
the stimuli sequence length
SOA: int
stimulus onset asynchrony; see compute_delays()
demand: positive float
the level of activity for the active task unit
Returns
-------
hierarchical dict
- level 1: key: tasks val: stimuli for all conditions
- level 2: key: condition val: a stimulus
"""
# color naming - congruent
inputs_cn_con = get_stimulus(
inp_color, 'red', inp_word, 'red', inp_task, 'color naming',
n_time_steps=n_time_steps, SOA=SOA, demand=demand,
)
# color naming - incongruent
inputs_cn_cfl = get_stimulus(
inp_color, 'red', inp_word, 'green', inp_task, 'color naming',
n_time_steps=n_time_steps, SOA=SOA, demand=demand,
)
# color naming - control
inputs_cn_ctr = get_stimulus(
inp_color, 'red', inp_word, None, inp_task, 'color naming',
n_time_steps=n_time_steps, SOA=SOA, demand=demand,
)
# word reading - congruent
inputs_wr_con = get_stimulus(
inp_color, 'red', inp_word, 'red', inp_task, 'word reading',
n_time_steps=n_time_steps, SOA=SOA, demand=demand,
)
# word reading - incongruent
inputs_wr_cfl = get_stimulus(
inp_color, 'green', inp_word, 'red', inp_task, 'word reading',
n_time_steps=n_time_steps, SOA=SOA, demand=demand,
)
# word reading - control
inputs_wr_ctr = get_stimulus(
inp_color, None, inp_word, 'red', inp_task, 'word reading',
n_time_steps=n_time_steps, SOA=SOA, demand=demand,
)
# combine the stimuli to lists
color_naming_input_list = [inputs_cn_ctr, inputs_cn_cfl, inputs_cn_con]
word_reading_input_list = [inputs_wr_ctr, inputs_wr_cfl, inputs_wr_con]
# for each task, pack all conditions to dictionaries
color_naming_input_dict = dict(zip(CONDITIONS, color_naming_input_list))
word_reading_input_dict = dict(zip(CONDITIONS, word_reading_input_list))
# pack both tasks to a dict
all_input_dict = dict(
zip(TASKS, [color_naming_input_dict, word_reading_input_dict])
)
return all_input_dict
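# Hypothetical access pattern (the layer arguments are pnl.TransferMechanism
# objects from the composition being run):
#     stim_set = get_stimulus_set(inp_color, inp_word, inp_task, n_time_steps=10)
#     conflict_trial = stim_set['color naming']['conflict']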
def get_stimulus_train(
color_input_layer, color,
word_input_layer, word,
n_time_steps=1,
):
"""get a stroop stimulus
Parameters
----------
color/word/task_input_layer: pnl.TransferMechanism
the input layer PNL object
color/word/task : str
an element in COLORS/COLORS/TASKS
n_time_steps: int
the stimuli sequence length
SOA: int
stimulus onset asynchrony; see compute_delays()
demand: positive float
the level of activity for the active task unit
Returns
-------
dict, as requested by PNL composition
a representation of the input sitmuli sequence
"""
# set up the stimuli
bias = [4, 4]
color_stimulus = np.tile(get_color_rep(color), (n_time_steps, 1))
word_stimulus = np.tile(METHOD_NAME(word), (n_time_steps, 1))
#
color_stimulus += bias
word_stimulus += bias
# form the input dict
input_dict = {
color_input_layer: color_stimulus,
word_input_layer: word_stimulus,
}
return input_dict
def get_stimulus_set_train(
inp_color, inp_word,
n_time_steps=1,
):
"""get stimuli for all task x condition combination with some SOA
Parameters
----------
color/word/task_input_layer: pnl.TransferMechanism
the input layer PNL object
n_time_steps: int
the stimuli sequence length
SOA: int
stimulus onset asynchrony; see compute_delays()
demand: positive float
the level of activity for the active task unit
Returns
-------
hierarchical dict
- level 1: key: tasks val: stimuli for all conditions
- level 2: key: condition val: a stimulus
"""
# combine the stimuli to lists
color_naming_input_list = [
get_stimulus_train(
inp_color, color_, inp_word, None, n_time_steps=n_time_steps,
) for color_ in COLORS
]
word_reading_input_list = [
get_stimulus_train(
inp_color, None, inp_word, color_, n_time_steps=n_time_steps,
) for color_ in COLORS
]
# for each task, pack all conditions to dictionaries
color_naming_input_dict = dict(zip(COLORS, color_naming_input_list))
word_reading_input_dict = dict(zip(COLORS, word_reading_input_list))
# pack both tasks to a dict
all_input_dict = dict(
zip(TASKS, [color_naming_input_dict, word_reading_input_dict])
)
return all_input_dict
|
2,402 |
test set spikes
|
# Copyright (c) 2017 The University of Manchester
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from spinn_front_end_common.interface.ds import DataType
from spynnaker.pyNN.models.common import NeuronRecorder
import pyNN.spiNNaker as sim
from spinnaker_testbase import BaseTestCase
class TestSetRecord(BaseTestCase):
# NO unittest_setup() as sim.setup is called
def METHOD_NAME(self):
sim.setup(timestep=1)
if_curr = sim.Population(1, sim.IF_curr_exp())
self.assertCountEqual(
[], if_curr._vertex.get_recording_variables())
ssa = sim.Population(
1, sim.SpikeSourceArray(spike_times=[0]))
ssp = sim.Population(2, sim.SpikeSourcePoisson(rate=100.0),
additional_parameters={"seed": 1})
if_curr.record("spikes")
self.assertCountEqual(
["spikes"], if_curr._vertex.get_recording_variables())
ssa.record("spikes")
ssp.record("spikes")
sim.end()
def test_set_v(self):
sim.setup(timestep=1)
if_curr = sim.Population(1, sim.IF_curr_exp())
ssa = sim.Population(
1, sim.SpikeSourceArray(spike_times=[0]))
ssp = sim.Population(2, sim.SpikeSourcePoisson(rate=100.0),
additional_parameters={"seed": 1})
if_curr.record("v")
# SpikeSourceArray must throw if asked to record voltage
with self.assertRaises(Exception):
ssa.record("v")
# SpikeSourcePoisson must throw if asked to record voltage
with self.assertRaises(Exception):
ssp.record("v")
sim.end()
def test_set_all(self):
sim.setup(timestep=1)
if_curr = sim.Population(1, sim.IF_curr_exp())
ssa = sim.Population(
1, sim.SpikeSourceArray(spike_times=[0]))
ssp = sim.Population(2, sim.SpikeSourcePoisson(rate=100.0),
additional_parameters={"seed": 1})
if_curr.record("all")
self.assertCountEqual(
["spikes", "v", "gsyn_inh", "gsyn_exc", "packets-per-timestep",
"rewiring"],
if_curr._vertex.get_recording_variables())
ssa.record("all")
self.assertCountEqual(
["spikes"], ssa._vertex.get_recording_variables())
ssp.record("all")
self.assertCountEqual(
["spikes"], ssp._vertex.get_recording_variables())
sim.end()
def test_set_spikes_interval(self):
sim.setup(timestep=1)
if_curr = sim.Population(1, sim.IF_curr_exp())
recorder = if_curr._vertex.neuron_recorder
self.assertCountEqual(
[], if_curr._vertex.get_recording_variables())
ssa = sim.Population(
1, sim.SpikeSourceArray(spike_times=[0]))
ssp = sim.Population(2, sim.SpikeSourcePoisson(rate=100.0),
additional_parameters={"seed": 1})
if_curr.record("spikes", sampling_interval=2)
ssa.record("spikes", sampling_interval=2)
ssp.record("spikes", sampling_interval=2)
self.assertCountEqual(
["spikes"], if_curr._vertex.get_recording_variables())
assert recorder.get_sampling_interval_ms("spikes") == 2
def test_set_spikes_interval2(self):
sim.setup(timestep=0.5)
if_curr = sim.Population(1, sim.IF_curr_exp())
recorder = if_curr._vertex.neuron_recorder
self.assertCountEqual(
[], if_curr._vertex.get_recording_variables())
if_curr.record("spikes", sampling_interval=2.5)
self.assertCountEqual(
["spikes"], if_curr._vertex.get_recording_variables())
assert recorder.get_sampling_interval_ms("spikes") == 2.5
def test_set_spikes_indexes(self):
sim.setup(timestep=1)
if_curr = sim.Population(5, sim.IF_curr_exp())
recorder = if_curr._vertex.neuron_recorder
ssa = sim.Population(
5, sim.SpikeSourceArray(spike_times=[0]))
ssp = sim.Population(5, sim.SpikeSourcePoisson(rate=100.0),
additional_parameters={"seed": 1})
if_curr[1, 2, 4].record("spikes")
ssa[1, 2, 4].record("spikes")
ssp[1, 2, 4].record("spikes")
self.assertCountEqual(
["spikes"], if_curr._vertex.get_recording_variables())
assert recorder._indexes["spikes"] == [1, 2, 4]
def test_set_spikes_indexes2(self):
sim.setup(timestep=1)
if_curr = sim.Population(5, sim.IF_curr_exp())
recorder = if_curr._vertex.neuron_recorder
if_curr[1, 2, 4].record("spikes")
if_curr[1, 3].record("spikes")
self.assertCountEqual(
["spikes"], if_curr._vertex.get_recording_variables())
assert recorder._indexes["spikes"] == [1, 2, 3, 4]
def test_turn_off_spikes_indexes(self):
sim.setup(timestep=1)
if_curr = sim.Population(5, sim.IF_curr_exp())
if_curr.record("spikes")
if_curr.record(None)
self.assertCountEqual(
[], if_curr._vertex.get_recording_variables())
def test_set_spikes_indexes3(self):
sim.setup(timestep=1)
if_curr = sim.Population(5, sim.IF_curr_exp())
if_curr.record("spikes")
self.assertCountEqual(
["spikes"], if_curr._vertex.get_recording_variables())
# These test are currently directly on NeuronRecorder as no pynn way
# to do this
def test_turn_off_some_indexes(self):
data_types = {
"v": DataType.S1615,
"gsyn_exc": DataType.S1615,
"gsyn_inh": DataType.S1615}
recorder = NeuronRecorder(
["v", "gsyn_exc", "gsyn_inh"], data_types, ["spikes"], 5, [], [],
[], [])
recorder.set_recording("spikes", True)
self.assertCountEqual(["spikes"], recorder.recording_variables)
recorder.set_recording("spikes", False, indexes=[2, 4])
self.assertCountEqual([0, 1, 3], recorder._indexes["spikes"])
|
2,403 |
subtest convergence order
|
import numpy as np
import unittest
import simsoptpp as sopp
from numpy.testing import assert_raises
def get_random_polynomial(dim, degree):
coeffsx = np.random.standard_normal(size=(degree+1, dim))
coeffsy = np.random.standard_normal(size=(degree+1, dim))
coeffsz = np.random.standard_normal(size=(degree+1, dim))
def fun(x, y, z, flatten=True):
x = np.asarray(x)
y = np.asarray(y)
z = np.asarray(z)
px = sum([coeffsx[i, :] * x[:, None]**i for i in range(degree+1)])
py = sum([coeffsy[i, :] * y[:, None]**i for i in range(degree+1)])
pz = sum([coeffsz[i, :] * z[:, None]**i for i in range(degree+1)])
res = px*py*pz
if flatten:
return (np.ascontiguousarray(res)).flatten()
else:
return res
return fun
class Testing(unittest.TestCase):
def subtest_regular_grid_interpolant_exact(self, dim, degree):
"""
Build a random, vector valued polynomial of a specific degree and check
that it is interpolated exactly.
"""
np.random.seed(0)
xran = (1.0, 4.0, 20)
yran = (1.1, 3.9, 10)
zran = (1.2, 3.8, 15)
fun = get_random_polynomial(dim, degree)
rule = sopp.UniformInterpolationRule(degree)
interpolant = sopp.RegularGridInterpolant3D(rule, xran, yran, zran, dim, True)
interpolant.interpolate_batch(fun)
nsamples = 100
xpoints = np.random.uniform(low=xran[0], high=xran[1], size=(nsamples, ))
ypoints = np.random.uniform(low=yran[0], high=yran[1], size=(nsamples, ))
zpoints = np.random.uniform(low=zran[0], high=zran[1], size=(nsamples, ))
xyz = np.asarray([xpoints, ypoints, zpoints]).T.copy()
fhxyz = np.zeros((nsamples, dim))
fxyz = fun(xyz[:, 0], xyz[:, 1], xyz[:, 2], flatten=False)
interpolant.evaluate_batch(xyz, fhxyz)
assert np.allclose(fxyz, fhxyz, atol=1e-12, rtol=1e-12)
print(np.max(np.abs((fxyz-fhxyz)/fhxyz)))
def test_regular_grid_interpolant_exact(self):
for dim in [1, 3, 4, 6]:
for degree in [1, 2, 3, 4]:
with self.subTest(dim=dim, degree=degree):
self.subtest_regular_grid_interpolant_exact(dim, degree)
def test_out_of_bounds(self):
"""
Check that the interpolant behaves correctly when evaluated outside of
the defined domain. If created with out_of_bounds_ok=True, then
nothing should happen, but if out_of_bounds_ok=False, then a runtime
error should be raised.
"""
np.random.seed(0)
xran = (1.0, 4.0, 20)
yran = (1.1, 3.9, 10)
zran = (1.2, 3.8, 15)
dim = 3
degree = 2
fun = get_random_polynomial(dim, degree)
rule = sopp.UniformInterpolationRule(degree)
nsamples = 100
xpoints = np.random.uniform(low=xran[1]+0.1, high=xran[1]+0.3, size=(nsamples, ))
ypoints = np.random.uniform(low=yran[1]+0.1, high=yran[1]+0.3, size=(nsamples, ))
zpoints = np.random.uniform(low=zran[1]+0.1, high=zran[1]+0.3, size=(nsamples, ))
xyz = np.asarray([xpoints, ypoints, zpoints]).T.copy()
fhxyz = np.ones((nsamples, dim))
interpolant = sopp.RegularGridInterpolant3D(rule, xran, yran, zran, dim, True)
interpolant.interpolate_batch(fun)
interpolant.evaluate_batch(xyz, fhxyz)
assert np.allclose(fhxyz, 1., atol=1e-14, rtol=1e-14)
interpolant = sopp.RegularGridInterpolant3D(rule, xran, yran, zran, dim, False)
interpolant.interpolate_batch(fun)
with assert_raises(RuntimeError):
interpolant.evaluate_batch(xyz, fhxyz)
def test_skip(self):
"""
Check that the interpolant correctly identifies which regions in the
domain to skip
"""
np.random.seed(0)
xran = (1.0, 4.0, 30)
yran = (1.1, 3.9, 30)
zran = (1.2, 3.8, 30)
xkeep = (2.0, 3.0)
ykeep = (2.0, 3.0)
zkeep = (2.0, 3.0)
def skip(xs, ys, zs):
xs = np.asarray(xs)
ys = np.asarray(ys)
zs = np.asarray(zs)
keep = (xkeep[0] < xs) * (xs < xkeep[1]) * (ykeep[0] < ys) * (ys < ykeep[1]) * (zkeep[0] < zs) * (zs < zkeep[1])
return np.invert(keep)
dim = 3
degree = 2
fun = get_random_polynomial(dim, degree)
rule = sopp.UniformInterpolationRule(degree)
interpolant = sopp.RegularGridInterpolant3D(rule, xran, yran, zran, dim, True, skip)
interpolant.interpolate_batch(fun)
xyz = np.asarray([
[2.4, 2.6, 2.8], # keep
[2.1, 2.1, 2.9], # keep
[2.8, 2.8, 2.1], # keep
[1.3, 1.3, 1.3], # do not keep
[1.3, 2.9, 3.5], # do not keep
[3.5, 1.3, 1.3], # do not keep
])
fhxyz = 100*np.ones((xyz.shape[0], dim))
interpolant.evaluate_batch(xyz, fhxyz)
print("fhxyz %s" % (fhxyz))
fxyz = fun(xyz[:, 0], xyz[:, 1], xyz[:, 2], flatten=False)
assert np.allclose(fhxyz[:3, :], fxyz[:3, :], atol=1e-12, rtol=1e-12)
assert np.allclose(fhxyz[3:, :], 100, atol=1e-12, rtol=1e-12)
def test_convergence_order(self):
for dim in [1, 4, 6]:
for degree in [1, 3]:
with self.subTest(dim=dim, degree=degree):
self.METHOD_NAME(dim, degree)
def METHOD_NAME(self, dim, degree):
"""
Check that the interpolant converges at the correct order
"""
np.random.seed(0)
fun = get_random_polynomial(dim, degree+1)
rule = sopp.UniformInterpolationRule(degree)
nsamples = 1000
xran = [1.0, 4.0, 10]
yran = [1.1, 3.9, 10]
zran = [1.2, 3.8, 10]
xpoints = np.random.uniform(low=xran[0], high=xran[1], size=(nsamples, ))
ypoints = np.random.uniform(low=yran[0], high=yran[1], size=(nsamples, ))
zpoints = np.random.uniform(low=zran[0], high=zran[1], size=(nsamples, ))
xyz = np.asarray([xpoints, ypoints, zpoints]).T.copy()
fhxyz = np.zeros((nsamples, dim))
fxyz = fun(xyz[:, 0], xyz[:, 1], xyz[:, 2], flatten=False)
err = 1e6
for n in [5, 10, 20, 40]:
xran[2] = n
yran[2] = n
zran[2] = n
interpolant = sopp.RegularGridInterpolant3D(rule, xran, yran, zran, dim, True)
interpolant.interpolate_batch(fun)
interpolant.evaluate_batch(xyz, fhxyz)
err_new = np.mean(np.linalg.norm(fxyz-fhxyz, axis=1))
print(err_new/err)
assert err_new/err < 0.6**(degree+1)
err = err_new
|
2,404 |
dummy callback
|
# Standard library
from __future__ import division, print_function, absolute_import, unicode_literals
# On some systems mpi4py is available but broken; we avoid crashes by importing
# it only when an MPI Pool is explicitly created.
# Still make it a global to avoid messing up other things.
MPI = None
# Project
from . import log, _VERBOSE
from .pool import BasePool
__all__ = ['MPIPool']
def METHOD_NAME(x):
return True
def _import_mpi(quiet=False):
global MPI
try:
import mpi4py.MPI
MPI = mpi4py.MPI
except ImportError:
if not quiet:
# Re-raise with a more user-friendly error:
raise ImportError("Please install mpi4py")
class MPIPool(BasePool):
"""A processing pool that distributes tasks using MPI.
With this pool class, the master process distributes tasks to worker
processes using an MPI communicator. This pool therefore supports parallel
processing on large compute clusters and in environments with multiple
nodes or computers that each have many processor cores.
This implementation is inspired by @juliohm in `this module
<https://github.com/juliohm/HUM/blob/master/pyhum/utils.py#L24>`_
Parameters
----------
comm : :class:`mpi4py.MPI.Comm`, optional
An MPI communicator to distribute tasks with. If ``None``, this uses
``MPI.COMM_WORLD`` by default.
"""
def __init__(self, comm=None, callback=None):
_import_mpi()
if comm is None:
comm = MPI.COMM_WORLD
self.comm = comm
self.master = 0
self.rank = self.comm.Get_rank()
self.workers = set(range(self.comm.size))
self.workers.discard(self.master)
self.callback = callback
self.size = self.comm.Get_size() - 1
if self.size == 0:
raise ValueError("Tried to create an MPI pool, but there "
"was only one MPI process available. "
"Need at least two.")
@staticmethod
def enabled():
if MPI is None:
_import_mpi(quiet=True)
if MPI is not None:
if MPI.COMM_WORLD.size > 1:
return True
return False
def wait(self, callback=None):
"""Tell the workers to wait and listen for the master process. This is
called automatically when using :meth:`MPIPool.map` and doesn't need to
be called by the user.
"""
if self.is_master():
return
worker = self.comm.rank
status = MPI.Status()
while True:
log.log(_VERBOSE, "Worker {0} waiting for task".format(worker))
task = self.comm.recv(source=self.master, tag=MPI.ANY_TAG,
status=status)
if task is None:
log.log(_VERBOSE, "Worker {0} told to quit work".format(worker))
break
func, arg = task
log.log(_VERBOSE, "Worker {0} got task {1} with tag {2}"
.format(worker, arg, status.tag))
result = func(arg)
log.log(_VERBOSE, "Worker {0} sending answer {1} with tag {2}"
.format(worker, result, status.tag))
self.comm.ssend(result, self.master, status.tag)
if callback is not None:
callback()
def map(self, worker, tasks, callback=None):
"""Evaluate a function or callable on each task in parallel using MPI.
The callable, ``worker``, is called on each element of the ``tasks``
iterable. The results are returned in the expected order (symmetric with
``tasks``).
Parameters
----------
worker : callable
A function or callable object that is executed on each element of
the specified ``tasks`` iterable. This object must be picklable
(i.e. it can't be a function scoped within a function or a
``lambda`` function). This should accept a single positional
argument and return a single object.
tasks : iterable
A list or iterable of tasks. Each task can be itself an iterable
(e.g., tuple) of values or data to pass in to the worker function.
callback : callable, optional
An optional callback function (or callable) that is called with the
result from each worker run and is executed on the master process.
This is useful for, e.g., saving results to a file, since the
callback is only called on the master thread.
Returns
-------
results : list
A list of results from the output of each ``worker()`` call.
"""
# If not the master just wait for instructions.
if not self.is_master():
self.wait()
return
if callback is None:
callback = METHOD_NAME
workerset = self.workers.copy()
tasklist = [(tid, (worker, arg)) for tid, arg in enumerate(tasks)]
resultlist = [None] * len(tasklist)
pending = len(tasklist)
while pending:
if workerset and tasklist:
worker = workerset.pop()
taskid, task = tasklist.pop()
log.log(_VERBOSE, "Sent task %s to worker %s with tag %s",
task[1], worker, taskid)
self.comm.send(task, dest=worker, tag=taskid)
if tasklist:
flag = self.comm.Iprobe(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG)
if not flag:
continue
else:
self.comm.Probe(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG)
status = MPI.Status()
result = self.comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG,
status=status)
worker = status.source
taskid = status.tag
log.log(_VERBOSE, "Master received from worker %s with tag %s",
worker, taskid)
callback(result)
if self.callback is not None:
callback_ret = self.callback(result)
else:
callback_ret = True
if callback_ret is not False:
# then the result is from a completed task
workerset.add(worker)
resultlist[taskid] = result
pending -= 1
return resultlist
def close(self):
""" Tell all the workers to quit."""
if self.is_worker():
return
for worker in self.workers:
self.comm.send(None, worker, 0)
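# Minimal usage sketch (assumes the script is launched with `mpiexec -n 2`
# or more, and that `worker` is a picklable top-level function):
#
#     def worker(x):
#         return x ** 2
#
#     pool = MPIPool()
#     if not pool.is_master():
#         pool.wait()
#         sys.exit(0)
#     results = pool.map(worker, range(10))
#     pool.close()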
|
2,405 |
get model params
|
import torch
import torchvision.transforms as transforms
from torchvision.datasets import CIFAR10
import warnings
warnings.filterwarnings("ignore")
# DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def load_data():
"""Load CIFAR-10 (training and test set)."""
transform = transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
)
trainset = CIFAR10("./dataset", train=True, download=True, transform=transform)
testset = CIFAR10("./dataset", train=False, download=True, transform=transform)
num_examples = {"trainset": len(trainset), "testset": len(testset)}
return trainset, testset, num_examples
def load_partition(idx: int):
"""Load 1/10th of the training and test data to simulate a partition."""
assert idx in range(10)
trainset, testset, num_examples = load_data()
n_train = int(num_examples["trainset"] / 10)
n_test = int(num_examples["testset"] / 10)
train_partition = torch.utils.data.Subset(
trainset, range(idx * n_train, (idx + 1) * n_train)
)
test_partition = torch.utils.data.Subset(
testset, range(idx * n_test, (idx + 1) * n_test)
)
return (train_partition, test_partition)
def train(net, trainloader, valloader, epochs, device: str = "cpu"):
"""Train the network on the training set."""
print("Starting training...")
net.to(device) # move model to GPU if available
criterion = torch.nn.CrossEntropyLoss().to(device)
optimizer = torch.optim.SGD(
net.parameters(), lr=0.1, momentum=0.9, weight_decay=1e-4
)
net.train()
for _ in range(epochs):
for images, labels in trainloader:
images, labels = images.to(device), labels.to(device)
optimizer.zero_grad()
loss = criterion(net(images), labels)
loss.backward()
optimizer.step()
net.to("cpu") # move model back to CPU
train_loss, train_acc = test(net, trainloader)
val_loss, val_acc = test(net, valloader)
results = {
"train_loss": train_loss,
"train_accuracy": train_acc,
"val_loss": val_loss,
"val_accuracy": val_acc,
}
return results
def test(net, testloader, steps: int = None, device: str = "cpu"):
"""Validate the network on the entire test set."""
print("Starting evalutation...")
net.to(device) # move model to GPU if available
criterion = torch.nn.CrossEntropyLoss()
correct, loss = 0, 0.0
net.eval()
with torch.no_grad():
for batch_idx, (images, labels) in enumerate(testloader):
images, labels = images.to(device), labels.to(device)
outputs = net(images)
loss += criterion(outputs, labels).item()
_, predicted = torch.max(outputs.data, 1)
correct += (predicted == labels).sum().item()
if steps is not None and batch_idx == steps:
break
accuracy = correct / len(testloader.dataset)
net.to("cpu") # move model back to CPU
return loss, accuracy
def replace_classifying_layer(efficientnet_model, num_classes: int = 10):
"""Replaces the final layer of the classifier."""
num_features = efficientnet_model.classifier.fc.in_features
efficientnet_model.classifier.fc = torch.nn.Linear(num_features, num_classes)
def load_efficientnet(entrypoint: str = "nvidia_efficientnet_b0", classes: int = None):
"""Loads pretrained efficientnet model from torch hub. Replaces final classifying
layer if classes is specified.
Args:
entrypoint: EfficientNet model to download.
For supported entrypoints, please refer
https://pytorch.org/hub/nvidia_deeplearningexamples_efficientnet/
classes: Number of classes in final classifying layer. Leave as None to get the downloaded
model untouched.
Returns:
EfficientNet Model
Note: One alternative implementation can be found at https://github.com/lukemelas/EfficientNet-PyTorch
"""
efficientnet = torch.hub.load(
"NVIDIA/DeepLearningExamples:torchhub", entrypoint, pretrained=True
)
if classes is not None:
replace_classifying_layer(efficientnet, classes)
return efficientnet
def METHOD_NAME(model):
"""Returns a model's parameters."""
return [val.cpu().numpy() for _, val in model.state_dict().items()]
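# Illustrative sketch (batch size and epoch count are arbitrary): load a data
# partition, fine-tune the hub model, then extract NumPy weights, e.g. for a
# federated-learning client:
#
#     net = load_efficientnet(classes=10)
#     trainset, testset = load_partition(0)
#     trainloader = torch.utils.data.DataLoader(trainset, batch_size=32, shuffle=True)
#     valloader = torch.utils.data.DataLoader(testset, batch_size=32)
#     metrics = train(net, trainloader, valloader, epochs=1)
#     weights = METHOD_NAME(net)  # list of np.ndarray, one per state_dict entry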
|
2,406 |
test reduce slice sum2 d
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.contrib.reduce_slice_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.reduce_slice_ops.python.ops import reduce_slice_ops
from tensorflow.python.framework.test_util import TensorFlowTestCase
from tensorflow.python.platform import googletest
class ReduceSliceTest(TensorFlowTestCase):
def testReduceSliceSum1D(self):
x = np.array([1, 40, 700], dtype=np.int32)
indices = np.array([[0, 1], [0, 3], [1, 2], [1, 3], [0, 2]], dtype=np.int32)
result = np.array([1, 741, 40, 740, 41], dtype=np.int32)
with self.test_session(use_gpu=True):
y_tf = reduce_slice_ops.reduce_slice_sum(x, indices, 0).eval()
self.assertAllEqual(y_tf, result)
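# Descriptive note: each output row i of reduce_slice_sum is the sum of
# x[indices[i, 0]:indices[i, 1]] along the reduced axis, so [0, 3] above
# yields 1 + 40 + 700 = 741, [1, 2] yields just 40, and an empty slice such
# as [0, 0] would yield the additive identity 0.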
def METHOD_NAME(self):
x = np.array([[1, 2, 3], [40, 50, 60], [700, 800, 900]], dtype=np.int32)
indices = np.array([[0, 1], [0, 3], [1, 2], [1, 3], [0, 2]], dtype=np.int32)
result = np.array([[1, 2, 3], [741, 852, 963], [40, 50, 60],
[740, 850, 960], [41, 52, 63]], dtype=np.int32)
with self.test_session(use_gpu=True):
y_tf = reduce_slice_ops.reduce_slice_sum(x, indices, 0).eval()
self.assertAllEqual(y_tf, result)
def testReduceSliceSum3D(self):
x = np.array([[[1, 2], [3, 4]], [[50, 60], [70, 80]],
[[600, 700], [800, 900]]], dtype=np.int32)
indices = np.array([[0, 1], [0, 3], [1, 2], [1, 3], [0, 2]], dtype=np.int32)
result = np.array([[[1, 2], [3, 4]],
[[651, 762], [873, 984]],
[[50, 60], [70, 80]],
[[650, 760], [870, 980]],
[[51, 62], [73, 84]]], dtype=np.int32)
with self.test_session(use_gpu=True):
y_tf = reduce_slice_ops.reduce_slice_sum(x, indices, 0).eval()
self.assertAllEqual(y_tf, result)
def testReduceSliceSumAxis1(self):
x = np.transpose(np.array([[1, 2, 3], [40, 50, 60],
[700, 800, 900]], dtype=np.int32))
indices = np.array([[0, 1], [0, 3], [1, 2], [1, 3], [0, 2]], dtype=np.int32)
result = np.transpose(np.array([[1, 2, 3],
[741, 852, 963],
[40, 50, 60],
[740, 850, 960],
[41, 52, 63]], dtype=np.int32))
with self.test_session(use_gpu=True):
y_tf = reduce_slice_ops.reduce_slice_sum(x, indices, 1).eval()
self.assertAllEqual(y_tf, result)
def testReduceSliceSum1DIndices(self):
x = np.array([[1, 2, 3], [40, 50, 60], [700, 800, 900],
[1000, 2000, 3000], [40000, 50000, 60000]], dtype=np.int32)
indices = np.array([0, 0, 2, 5], dtype=np.int32)
result = np.array([[0, 0, 0], [41, 52, 63],
[41700, 52800, 63900]], dtype=np.int32)
with self.test_session(use_gpu=True):
y_tf = reduce_slice_ops.reduce_slice_sum(x, indices, 0).eval()
self.assertAllEqual(y_tf, result)
def testReduceSliceProd(self):
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32)
indices = np.array([[0, 1], [0, 3], [1, 2], [1, 3], [0, 2]], dtype=np.int32)
result = np.array([[1, 2, 3], [28, 80, 162], [4, 5, 6],
[28, 40, 54], [4, 10, 18]], dtype=np.int32)
with self.test_session(use_gpu=True):
y_tf = reduce_slice_ops.reduce_slice_prod(x, indices, 0).eval()
self.assertAllEqual(y_tf, result)
def testReduceSliceMax(self):
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32)
indices = np.array([[0, 1], [0, 3], [1, 2], [1, 3], [0, 2]], dtype=np.int32)
result = np.array([[1, 2, 3], [7, 8, 9], [4, 5, 6],
[7, 8, 9], [4, 5, 6]], dtype=np.int32)
with self.test_session(use_gpu=True):
y_tf = reduce_slice_ops.reduce_slice_max(x, indices, 0).eval()
self.assertAllEqual(y_tf, result)
def testReduceSliceMin(self):
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32)
indices = np.array([[0, 1], [0, 3], [1, 2], [1, 3], [0, 2]], dtype=np.int32)
result = np.array([[1, 2, 3], [1, 2, 3], [4, 5, 6],
[4, 5, 6], [1, 2, 3]], dtype=np.int32)
with self.test_session(use_gpu=True):
y_tf = reduce_slice_ops.reduce_slice_min(x, indices, 0).eval()
self.assertAllEqual(y_tf, result)
def testReduceSliceEmptyDataRows(self):
x = np.empty((0, 1, 2, 3, 4, 5, 6), dtype=np.int32)
indices = np.array([[0, 1], [0, 3], [1, 2], [1, 3], [0, 2]], dtype=np.int32)
result = np.zeros((5, 1, 2, 3, 4, 5, 6), dtype=np.int32)
with self.test_session(use_gpu=True):
y_tf = reduce_slice_ops.reduce_slice_sum(x, indices, 0).eval()
self.assertAllEqual(y_tf, result)
def testReduceSliceEmptyDataCols(self):
x = np.empty((100, 0, 2, 3, 4, 5, 6), dtype=np.int32)
indices = np.array([[0, 1], [0, 3], [1, 2], [1, 3], [0, 2]], dtype=np.int32)
result = np.empty((5, 0, 2, 3, 4, 5, 6), dtype=np.int32)
with self.test_session(use_gpu=True):
y_tf = reduce_slice_ops.reduce_slice_sum(x, indices, 0).eval()
self.assertAllEqual(y_tf, result)
def testReduceSliceEmptyIndicesRows(self):
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32)
indices = np.empty((0, 2), dtype=np.int32)
result = np.empty((0, 3), dtype=np.int32)
with self.test_session(use_gpu=True):
y_tf = reduce_slice_ops.reduce_slice_sum(x, indices, 0).eval()
self.assertAllEqual(y_tf, result)
def testReduceSliceEmpty0Indices1D(self):
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32)
indices = np.empty((0,), dtype=np.int32)
result = np.empty((0, 3), dtype=np.int32)
with self.test_session(use_gpu=True):
y_tf = reduce_slice_ops.reduce_slice_sum(x, indices, 0).eval()
self.assertAllEqual(y_tf, result)
def testReduceSliceEmpty1Indices1D(self):
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32)
indices = np.array([0], dtype=np.int32)
result = np.empty((0, 3), dtype=np.int32)
with self.test_session(use_gpu=True):
y_tf = reduce_slice_ops.reduce_slice_sum(x, indices, 0).eval()
self.assertAllEqual(y_tf, result)
if __name__ == "__main__":
googletest.main()
|
2,407 |
do down
|
import signal
import sys
from bdb import Bdb
from cmd import Cmd
from collections.abc import Callable, Iterable, Mapping, Sequence
from inspect import _SourceObjectType
from types import CodeType, FrameType, TracebackType
from typing import IO, Any, ClassVar, TypeVar
from typing_extensions import ParamSpec, Self
__all__ = ["run", "pm", "Pdb", "runeval", "runctx", "runcall", "set_trace", "post_mortem", "help"]
_T = TypeVar("_T")
_P = ParamSpec("_P")
line_prefix: str # undocumented
class Restart(Exception): ...
def run(statement: str, globals: dict[str, Any] | None = None, locals: Mapping[str, Any] | None = None) -> None: ...
def runeval(expression: str, globals: dict[str, Any] | None = None, locals: Mapping[str, Any] | None = None) -> Any: ...
def runctx(statement: str, globals: dict[str, Any], locals: Mapping[str, Any]) -> None: ...
def runcall(func: Callable[_P, _T], *args: _P.args, **kwds: _P.kwargs) -> _T | None: ...
def set_trace(*, header: str | None = None) -> None: ...
def post_mortem(t: TracebackType | None = None) -> None: ...
def pm() -> None: ...
class Pdb(Bdb, Cmd):
# Everything here is undocumented, except for __init__
commands_resuming: ClassVar[list[str]]
aliases: dict[str, str]
mainpyfile: str
_wait_for_mainpyfile: bool
rcLines: list[str]
commands: dict[int, list[str]]
commands_doprompt: dict[int, bool]
commands_silent: dict[int, bool]
commands_defining: bool
commands_bnum: int | None
lineno: int | None
stack: list[tuple[FrameType, int]]
curindex: int
curframe: FrameType | None
curframe_locals: Mapping[str, Any]
def __init__(
self,
completekey: str = "tab",
stdin: IO[str] | None = None,
stdout: IO[str] | None = None,
skip: Iterable[str] | None = None,
nosigint: bool = False,
readrc: bool = True,
) -> None: ...
def forget(self) -> None: ...
def setup(self, f: FrameType | None, tb: TracebackType | None) -> None: ...
def execRcLines(self) -> None: ...
def bp_commands(self, frame: FrameType) -> bool: ...
def interaction(self, frame: FrameType | None, traceback: TracebackType | None) -> None: ...
def displayhook(self, obj: object) -> None: ...
def handle_command_def(self, line: str) -> bool: ...
def defaultFile(self) -> str: ...
def lineinfo(self, identifier: str) -> tuple[None, None, None] | tuple[str, str, int]: ...
def checkline(self, filename: str, lineno: int) -> int: ...
def _getval(self, arg: str) -> object: ...
def print_stack_trace(self) -> None: ...
def print_stack_entry(self, frame_lineno: tuple[FrameType, int], prompt_prefix: str = "\n-> ") -> None: ...
def lookupmodule(self, filename: str) -> str | None: ...
if sys.version_info < (3, 11):
def _runscript(self, filename: str) -> None: ...
def do_commands(self, arg: str) -> bool | None: ...
def do_break(self, arg: str, temporary: bool = ...) -> bool | None: ...
def do_tbreak(self, arg: str) -> bool | None: ...
def do_enable(self, arg: str) -> bool | None: ...
def do_disable(self, arg: str) -> bool | None: ...
def do_condition(self, arg: str) -> bool | None: ...
def do_ignore(self, arg: str) -> bool | None: ...
def do_clear(self, arg: str) -> bool | None: ...
def do_where(self, arg: str) -> bool | None: ...
def do_up(self, arg: str) -> bool | None: ...
def METHOD_NAME(self, arg: str) -> bool | None: ...
def do_until(self, arg: str) -> bool | None: ...
def do_step(self, arg: str) -> bool | None: ...
def do_next(self, arg: str) -> bool | None: ...
def do_run(self, arg: str) -> bool | None: ...
def do_return(self, arg: str) -> bool | None: ...
def do_continue(self, arg: str) -> bool | None: ...
def do_jump(self, arg: str) -> bool | None: ...
def do_debug(self, arg: str) -> bool | None: ...
def do_quit(self, arg: str) -> bool | None: ...
def do_EOF(self, arg: str) -> bool | None: ...
def do_args(self, arg: str) -> bool | None: ...
def do_retval(self, arg: str) -> bool | None: ...
def do_p(self, arg: str) -> bool | None: ...
def do_pp(self, arg: str) -> bool | None: ...
def do_list(self, arg: str) -> bool | None: ...
def do_whatis(self, arg: str) -> bool | None: ...
def do_alias(self, arg: str) -> bool | None: ...
def do_unalias(self, arg: str) -> bool | None: ...
def do_help(self, arg: str) -> bool | None: ...
do_b = do_break
do_cl = do_clear
do_w = do_where
do_bt = do_where
do_u = do_up
do_d = METHOD_NAME
do_unt = do_until
do_s = do_step
do_n = do_next
do_restart = do_run
do_r = do_return
do_c = do_continue
do_cont = do_continue
do_j = do_jump
do_q = do_quit
do_exit = do_quit
do_a = do_args
do_rv = do_retval
do_l = do_list
do_h = do_help
def help_exec(self) -> None: ...
def help_pdb(self) -> None: ...
def sigint_handler(self, signum: signal.Signals, frame: FrameType) -> None: ...
def message(self, msg: str) -> None: ...
def error(self, msg: str) -> None: ...
if sys.version_info >= (3, 12):
def set_convenience_variable(self, frame: FrameType, name: str, value: Any) -> None: ...
def _select_frame(self, number: int) -> None: ...
def _getval_except(self, arg: str, frame: FrameType | None = None) -> object: ...
def _print_lines(
self, lines: Sequence[str], start: int, breaks: Sequence[int] = (), frame: FrameType | None = None
) -> None: ...
def _cmdloop(self) -> None: ...
def do_display(self, arg: str) -> bool | None: ...
def do_interact(self, arg: str) -> bool | None: ...
def do_longlist(self, arg: str) -> bool | None: ...
def do_source(self, arg: str) -> bool | None: ...
def do_undisplay(self, arg: str) -> bool | None: ...
do_ll = do_longlist
def _complete_location(self, text: str, line: str, begidx: int, endidx: int) -> list[str]: ...
def _complete_bpnumber(self, text: str, line: str, begidx: int, endidx: int) -> list[str]: ...
def _complete_expression(self, text: str, line: str, begidx: int, endidx: int) -> list[str]: ...
def complete_undisplay(self, text: str, line: str, begidx: int, endidx: int) -> list[str]: ...
def complete_unalias(self, text: str, line: str, begidx: int, endidx: int) -> list[str]: ...
complete_commands = _complete_bpnumber
complete_break = _complete_location
complete_b = _complete_location
complete_tbreak = _complete_location
complete_enable = _complete_bpnumber
complete_disable = _complete_bpnumber
complete_condition = _complete_bpnumber
complete_ignore = _complete_bpnumber
complete_clear = _complete_location
complete_cl = _complete_location
complete_debug = _complete_expression
complete_print = _complete_expression
complete_p = _complete_expression
complete_pp = _complete_expression
complete_source = _complete_expression
complete_whatis = _complete_expression
complete_display = _complete_expression
if sys.version_info < (3, 11):
def _runmodule(self, module_name: str) -> None: ...
# undocumented
def find_function(funcname: str, filename: str) -> tuple[str, str, int] | None: ...
def main() -> None: ...
def help() -> None: ...
if sys.version_info < (3, 10):
def getsourcelines(obj: _SourceObjectType) -> tuple[list[str], int]: ...
def lasti2lineno(code: CodeType, lasti: int) -> int: ...
class _rstr(str):
def __repr__(self) -> Self: ...
|
2,408 |
gen flow
|
from typing import Any, Dict, List, Literal, Union
import pandas as pd
from prefect import Flow, Task, apply_map
from viadot.task_utils import (
add_ingestion_metadata_task,
credentials_loader,
df_to_csv,
df_to_parquet,
union_dfs_task,
)
from viadot.tasks import AzureDataLakeUpload, OutlookToDF
class OutlookToADLS(Flow):
def __init__(
self,
mailbox_list: List[str],
name: str = None,
start_date: str = None,
end_date: str = None,
outbox_list: List[str] = ["Sent Items"],
local_file_path: str = None,
output_file_extension: str = ".parquet",
adls_file_path: str = None,
overwrite_adls: bool = True,
adls_sp_credentials_secret: str = None,
limit: int = 10000,
timeout: int = 3600,
if_exists: Literal["append", "replace", "skip"] = "append",
outlook_credentials_secret: str = "OUTLOOK",
*args: List[Any],
**kwargs: Dict[str, Any],
):
"""
Flow for downloading data from Outlook source to Azure Data Lake in parquet format by default.
Args:
mailbox_list (List[str]): Mailbox name.
name (str, optional): The name of the flow. Defaults to None.
start_date (str, optional): A filtering start date parameter e.g. "2022-01-01". Defaults to None.
end_date (str, optional): A filtering end date parameter e.g. "2022-01-02". Defaults to None.
outbox_list (List[str], optional): List of outbox folders to differentiate between
Inboxes and Outboxes. Defaults to ["Sent Items"].
local_file_path (str, optional): Local destination path. Defaults to None.
output_file_extension (str, optional): Output file extension. Defaults to ".parquet".
adls_file_path (str, optional): Azure Data Lake destination file path. Defaults to None.
overwrite_adls (bool, optional): Whether to overwrite the file in ADLS. Defaults to True.
adls_sp_credentials_secret (str, optional): The name of the Azure Key Vault secret containing a dictionary with
ACCOUNT_NAME and Service Principal credentials (TENANT_ID, CLIENT_ID, CLIENT_SECRET) for the Azure Data Lake. Defaults to None.
outlook_credentials_secret (str, optional): The name of the Azure Key Vault secret containing a dictionary with outlook credentials.
limit (int, optional): Number of fetched top messages. Defaults to 10000.
timeout(int, optional): The amount of time (in seconds) to wait while running this task before
a timeout occurs. Defaults to 3600.
if_exists (Literal['append', 'replace', 'skip'], optional): What to do if the local file already exists. Defaults to "append".
"""
self.mailbox_list = mailbox_list
self.start_date = start_date
self.end_date = end_date
self.outbox_list = outbox_list
self.limit = limit
self.timeout = timeout
self.local_file_path = local_file_path
self.if_exists = if_exists
# AzureDataLakeUpload
self.adls_file_path = adls_file_path
self.output_file_extension = output_file_extension
self.overwrite_adls = overwrite_adls
self.adls_sp_credentials_secret = adls_sp_credentials_secret
self.outlook_credentials_secret = outlook_credentials_secret
super().__init__(*args, name=name, **kwargs)
self.METHOD_NAME()
def gen_outlook_df(
self, mailbox_list: Union[str, List[str]], flow: Flow = None
) -> Task:
credentials = credentials_loader.run(
credentials_secret=self.outlook_credentials_secret
)
outlook_to_df = OutlookToDF(timeout=self.timeout, credentials=credentials)
df = outlook_to_df.bind(
mailbox_name=mailbox_list,
start_date=self.start_date,
end_date=self.end_date,
outbox_list=self.outbox_list,
limit=self.limit,
flow=flow,
)
return df
def METHOD_NAME(self) -> Flow:
dfs = apply_map(self.gen_outlook_df, self.mailbox_list, flow=self)
df = union_dfs_task.bind(dfs, flow=self)
df_with_metadata = add_ingestion_metadata_task.bind(df, flow=self)
if self.output_file_extension == ".parquet":
df_to_file = df_to_parquet.bind(
df=df_with_metadata,
path=self.local_file_path,
if_exists=self.if_exists,
flow=self,
)
else:
df_to_file = df_to_csv.bind(
df=df_with_metadata,
path=self.local_file_path,
if_exists=self.if_exists,
flow=self,
)
file_to_adls_task = AzureDataLakeUpload(timeout=self.timeout)
file_to_adls_task.bind(
from_path=self.local_file_path,
to_path=self.adls_file_path,
overwrite=self.overwrite_adls,
sp_credentials_secret=self.adls_sp_credentials_secret,
flow=self,
)
df_with_metadata.set_upstream(df, flow=self)
df_to_file.set_upstream(df_with_metadata, flow=self)
file_to_adls_task.set_upstream(df_to_file, flow=self)
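# Hypothetical invocation sketch (mailbox, paths and secret names are
# placeholders):
#
#     flow = OutlookToADLS(
#         name="outlook_to_adls",
#         mailbox_list=["shared.mailbox@example.com"],
#         local_file_path="outlook.parquet",
#         adls_file_path="raw/outlook/outlook.parquet",
#         adls_sp_credentials_secret="ADLS-SP",
#     )
#     flow.run()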
|
2,409 |
load db
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Author: badz & pipiche38
#
import logging
import Classes.ZigpyTransport.AppGeneric
import zigpy.config as zigpy_conf
import zigpy.device
import zigpy.profiles
import zigpy.zdo.types as zdo_types
import zigpy_znp.commands.util
import zigpy_znp.config as znp_conf
import zigpy_znp.types as t
import zigpy_znp.zigbee.application
from Classes.ZigpyTransport.firmwareversionHelper import \
znp_extract_versioning_for_plugin
from Classes.ZigpyTransport.plugin_encoders import (
build_plugin_8010_frame_content, build_plugin_8015_frame_content)
from Modules.zigbeeVersionTable import ZNP_MODEL
from zigpy.zcl import clusters
LOGGER = logging.getLogger(__name__)
class App_znp(zigpy_znp.zigbee.application.ControllerApplication):
@classmethod
async def new(cls, config: dict, auto_form: bool = False, start_radio: bool = True) -> zigpy.application.ControllerApplication:
LOGGER.debug("new")
async def METHOD_NAME(self) -> None:
await Classes.ZigpyTransport.AppGeneric.METHOD_NAME(self)
async def initialize(self, *, auto_form: bool = False, force_form: bool = False):
await Classes.ZigpyTransport.AppGeneric.initialize(self, auto_form=auto_form, force_form=force_form)
LOGGER.info("ZNP Configuration: %s", self.config)
async def startup(self, HardwareID, pluginconf, callBackHandleMessage, callBackUpdDevice=None, callBackGetDevice=None, callBackBackup=None, captureRxFrame=None, auto_form=False, force_form=False, log=None, permit_to_join_timer=None):
# If set to != 0 (default) the extended PanId will be used when forming the network.
# If set to != 0 (default) the channel will be used when forming the network.
self.log = log
self.pluginconf = pluginconf
self.permit_to_join_timer = permit_to_join_timer
self.callBackFunction = callBackHandleMessage
self.callBackUpdDevice = callBackUpdDevice
self.callBackGetDevice = callBackGetDevice
self.callBackBackup = callBackBackup
self.HardwareID = HardwareID
self.captureRxFrame = captureRxFrame
# Pipiche : 24-Oct-2022 Disabling CONF_MAX_CONCURRENT_REQUESTS so the default will be used ( 16 )
# self.znp_config[znp_conf.CONF_MAX_CONCURRENT_REQUESTS] = 2
"""
Starts a network, optionally forming one with random settings if necessary.
"""
try:
await self.connect()
await self.initialize(auto_form=True, force_form=force_form)
except Exception as e:
LOGGER.error("Couldn't start application", exc_info=e)
await self.shutdown()
raise
self.log.logging("TransportZigpy", "Log", "ZNP Configuration %s" %self.config)
# Populate and get the list of active devices.
# This will allow the plugin if needed to update the IEEE -> NwkId
await self.load_network_info( load_devices=True )
network_info = self.state.network_info
self.callBackFunction(build_plugin_8015_frame_content( self, network_info))
# Trigger Version payload to plugin
znp_model = self.get_device(nwk=t.NWK(0x0000)).model
znp_manuf = self.get_device(nwk=t.NWK(0x0000)).manufacturer
self.log.logging("TransportZigpy", "Status", "ZNP Radio manufacturer: %s" %znp_manuf)
self.log.logging("TransportZigpy", "Status", "ZNP Radio board model: %s" %znp_model)
self.log.logging("TransportZigpy", "Status", "ZNP Radio version: %s" %self._znp.version)
FirmwareBranch, FirmwareMajorVersion, FirmwareVersion, build = znp_extract_versioning_for_plugin( self, znp_model, znp_manuf)
self.callBackFunction(build_plugin_8010_frame_content(FirmwareBranch, FirmwareMajorVersion, FirmwareVersion, build ))
async def shutdown(self) -> None:
"""Shutdown controller."""
if self.config[zigpy_conf.CONF_NWK_BACKUP_ENABLED]:
self.callBackBackup(await self.backups.create_backup(load_devices=True))
await self.disconnect()
async def register_endpoints(self):
self.log.logging("TransportZigpy", "Status", "ZNP Radio register default Ep")
await super().register_endpoints()
self.log.logging("TransportZigpy", "Status", "ZNP Radio register any additional/specific Ep")
await Classes.ZigpyTransport.AppGeneric.register_specific_endpoints(self)
def device_initialized(self, device):
self.log.logging("TransportZigpy", "Log","device_initialized (0x%04x %s)" %(device.nwk, device.ieee))
super().device_initialized(device)
def get_device(self, ieee=None, nwk=None):
return Classes.ZigpyTransport.AppGeneric.get_device(self, ieee, nwk)
def handle_join(self, nwk: t.NWK, ieee: t.EUI64, parent_nwk: t.NWK, *, handle_rejoin: bool = True,) -> None:
return Classes.ZigpyTransport.AppGeneric.handle_join(self, nwk, ieee, parent_nwk)
def get_device_ieee(self, nwk):
return Classes.ZigpyTransport.AppGeneric.get_device_ieee(self, nwk)
def handle_leave(self, nwk, ieee):
Classes.ZigpyTransport.AppGeneric.handle_leave(self, nwk, ieee)
def get_zigpy_version(self):
return Classes.ZigpyTransport.AppGeneric.get_zigpy_version(self)
def handle_message(
self,
sender: zigpy.device.Device,
profile: int,
cluster: int,
src_ep: int,
dst_ep: int,
message: bytes,
dst_addressing=None,
) -> None:
return Classes.ZigpyTransport.AppGeneric.handle_message(self,sender,profile,cluster,src_ep,dst_ep,message, dst_addressing=dst_addressing)
async def set_zigpy_tx_power(self, power):
self.log.logging("TransportZigpy", "Debug", "set_tx_power %s" %power)
await self.set_tx_power(dbm=power)
async def set_led(self, mode):
if mode == 1:
await self._set_led_mode(led=0xFF, mode=zigpy_znp.commands.util.LEDMode.ON)
else:
await self._set_led_mode(led=0xFF, mode=zigpy_znp.commands.util.LEDMode.OFF)
async def set_certification(self, mode):
self.log.logging("TransportZigpy", "Debug", "set_certification not implemented yet")
async def get_time_server(self):
self.log.logging("TransportZigpy", "Debug", "get_time_server not implemented yet")
async def set_time_server(self, newtime):
self.log.logging("TransportZigpy", "Debug", "set_time_server not implemented yet")
async def get_firmware_version(self):
return self._znp.version
async def erase_pdm(self):
pass
async def set_extended_pan_id(self,extended_pan_ip):
self.config[znp_conf.CONF_NWK][znp_conf.CONF_NWK_EXTENDED_PAN_ID] = extended_pan_ip
self.startup(self.callBackFunction,self.callBackGetDevice,auto_form=True,force_form=True,log=self.log)
async def set_channel(self,channel):
self.config[znp_conf.CONF_NWK][znp_conf.CONF_NWK_EXTENDED_PAN_ID] = channel
self.startup(self.callBackFunction,self.callBackGetDevice,auto_form=True,force_form=True,log=self.log)
async def remove_ieee(self, ieee):
await self.remove( ieee )
async def coordinator_backup( self ):
if self.config[zigpy_conf.CONF_NWK_BACKUP_ENABLED]:
self.callBackBackup(await self.backups.create_backup(load_devices=self.pluginconf.pluginConf["BackupFullDevices"]))
def is_bellows(self):
return False
def is_znp(self):
return True
def is_deconz(self):
return False
|
2,410 |
last alive time
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetSyncAgentResult',
'AwaitableGetSyncAgentResult',
'get_sync_agent',
'get_sync_agent_output',
]
@pulumi.output_type
class GetSyncAgentResult:
"""
An Azure SQL Database sync agent.
"""
def __init__(__self__, expiry_time=None, id=None, is_up_to_date=None, METHOD_NAME=None, name=None, state=None, sync_database_id=None, type=None, version=None):
if expiry_time and not isinstance(expiry_time, str):
raise TypeError("Expected argument 'expiry_time' to be a str")
pulumi.set(__self__, "expiry_time", expiry_time)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if is_up_to_date and not isinstance(is_up_to_date, bool):
raise TypeError("Expected argument 'is_up_to_date' to be a bool")
pulumi.set(__self__, "is_up_to_date", is_up_to_date)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'last_alive_time' to be a str")
pulumi.set(__self__, "last_alive_time", METHOD_NAME)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if state and not isinstance(state, str):
raise TypeError("Expected argument 'state' to be a str")
pulumi.set(__self__, "state", state)
if sync_database_id and not isinstance(sync_database_id, str):
raise TypeError("Expected argument 'sync_database_id' to be a str")
pulumi.set(__self__, "sync_database_id", sync_database_id)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if version and not isinstance(version, str):
raise TypeError("Expected argument 'version' to be a str")
pulumi.set(__self__, "version", version)
@property
@pulumi.getter(name="expiryTime")
def expiry_time(self) -> str:
"""
Expiration time of the sync agent version.
"""
return pulumi.get(self, "expiry_time")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="isUpToDate")
def is_up_to_date(self) -> bool:
"""
If the sync agent version is up to date.
"""
return pulumi.get(self, "is_up_to_date")
@property
@pulumi.getter(name="lastAliveTime")
def METHOD_NAME(self) -> str:
"""
Last alive time of the sync agent.
"""
return pulumi.get(self, "last_alive_time")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def state(self) -> str:
"""
State of the sync agent.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter(name="syncDatabaseId")
def sync_database_id(self) -> Optional[str]:
"""
ARM resource id of the sync database in the sync agent.
"""
return pulumi.get(self, "sync_database_id")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def version(self) -> str:
"""
Version of the sync agent.
"""
return pulumi.get(self, "version")
class AwaitableGetSyncAgentResult(GetSyncAgentResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetSyncAgentResult(
expiry_time=self.expiry_time,
id=self.id,
is_up_to_date=self.is_up_to_date,
METHOD_NAME=self.METHOD_NAME,
name=self.name,
state=self.state,
sync_database_id=self.sync_database_id,
type=self.type,
version=self.version)
def get_sync_agent(resource_group_name: Optional[str] = None,
server_name: Optional[str] = None,
sync_agent_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSyncAgentResult:
"""
Gets a sync agent.
:param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
:param str server_name: The name of the server on which the sync agent is hosted.
:param str sync_agent_name: The name of the sync agent.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['serverName'] = server_name
__args__['syncAgentName'] = sync_agent_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:sql/v20221101preview:getSyncAgent', __args__, opts=opts, typ=GetSyncAgentResult).value
return AwaitableGetSyncAgentResult(
expiry_time=pulumi.get(__ret__, 'expiry_time'),
id=pulumi.get(__ret__, 'id'),
is_up_to_date=pulumi.get(__ret__, 'is_up_to_date'),
METHOD_NAME=pulumi.get(__ret__, 'last_alive_time'),
name=pulumi.get(__ret__, 'name'),
state=pulumi.get(__ret__, 'state'),
sync_database_id=pulumi.get(__ret__, 'sync_database_id'),
type=pulumi.get(__ret__, 'type'),
version=pulumi.get(__ret__, 'version'))
@_utilities.lift_output_func(get_sync_agent)
def get_sync_agent_output(resource_group_name: Optional[pulumi.Input[str]] = None,
server_name: Optional[pulumi.Input[str]] = None,
sync_agent_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetSyncAgentResult]:
"""
Gets a sync agent.
:param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
:param str server_name: The name of the server on which the sync agent is hosted.
:param str sync_agent_name: The name of the sync agent.
"""
...
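# --- Hedged usage sketch (not part of the generated SDK; the resource names below are
# made-up placeholders). Inside a Pulumi program the two helpers are typically used
# like this:
#
#   agent = get_sync_agent(resource_group_name="my-rg",
#                          server_name="my-server",
#                          sync_agent_name="my-agent")
#   pulumi.export("syncAgentState", agent.state)
#
#   # The *_output variant accepts pulumi Inputs/Outputs and returns an Output:
#   agent_output = get_sync_agent_output(resource_group_name="my-rg",
#                                        server_name="my-server",
#                                        sync_agent_name="my-agent")
#   pulumi.export("syncAgentVersion", agent_output.version)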
|
2,411 |
dcuboid
|
import numpy as np
from .geoalg import project
def dcircle(p, cxy=[0, 0], r=1):
x = p[..., 0]
y = p[..., 1]
return np.sqrt((x - cxy[0])**2 + (y - cxy[1])**2) - r
def drectangle(p, box):
x = p[..., 0]
y = p[..., 1]
d = dmin(y - box[2], box[3] - y)
d = dmin(d, x - box[0])
d = dmin(d, box[1] - x)
return -d
def dsine(p, cxy, r):
x = p[..., 0]
y = p[..., 1]
return (y - cxy[1]) - r*np.sin(x-cxy[0])
def dparabolic(p, cxy, r):
x = p[..., 0]
y = p[..., 1]
return (y - cxy[1])**2 - 2*r*x
def dcurve(p, curve, maxit=200, tol=1e-12):
    """
    Notes
    -----
    Given a set of points and a curve described by a level-set function,
    compute the signed distance from each point to the level set.
    """
    _, d, _ = project(curve, p, maxit=maxit, tol=tol, returngrad=True, returnd=True)
    return d
def dpoly(p, poly):
pass
def ddiff(d0, d1):
return np.maximum(d0, -d1)
def dunion(*args):
d = np.array(args)
return np.min(d, axis=0)
def dmin(*args):
d = np.array(args)
return np.min(d, axis=0)
def dmax(*args):
d = np.array(args)
return np.max(d, axis=0)
def METHOD_NAME(p, domain=[0, 1, 0, 1, 0, 1]):
"""
    @brief Signed distance function of an axis-aligned cuboid (box)
"""
x = p[..., 0]
y = p[..., 1]
z = p[..., 2]
d = -dmin(
z - domain[4], domain[5] - z,
y - domain[2], domain[3] - y,
x - domain[0], domain[1] - x)
# (0, 1)
val0 = domain[2] - y
val1 = domain[4] - z
flag = (val0 > 0) & (val1 > 0)
d[flag] = np.sqrt(val0[flag]**2 + val1[flag]**2)
# (1, 2)
val0 = x - domain[1]
val1 = domain[4] - z
flag = (val0 > 0) & (val1 > 0)
d[flag] = np.sqrt(val0[flag]**2 + val1[flag]**2)
# (2, 3)
val0 = y - domain[3]
val1 = domain[4] - z
flag = (val0 > 0) & (val1 > 0)
d[flag] = np.sqrt(val0[flag]**2 + val1[flag]**2)
# (0, 3)
val0 = domain[0] - x
val1 = domain[4] - z
flag = (val0 > 0) & (val1 > 0)
d[flag] = np.sqrt(val0[flag]**2 + val1[flag]**2)
# (0, 4)
val0 = domain[0] - x
val1 = domain[2] - y
flag = (val0 > 0) & (val1 > 0)
d[flag] = np.sqrt(val0[flag]**2 + val1[flag]**2)
# (1, 5)
val0 = x - domain[1]
val1 = domain[2] - y
flag = (val0 > 0) & (val1 > 0)
d[flag] = np.sqrt(val0[flag]**2 + val1[flag]**2)
# (2, 6)
val0 = x - domain[1]
val1 = y - domain[3]
flag = (val0 > 0) & (val1 > 0)
d[flag] = np.sqrt(val0[flag]**2 + val1[flag]**2)
# (3, 7)
val0 = domain[0] - x
val1 = y - domain[3]
flag = (val0 > 0) & (val1 > 0)
d[flag] = np.sqrt(val0[flag]**2 + val1[flag]**2)
# (4, 5)
val0 = domain[2] - y
val1 = z - domain[5]
flag = (val0 > 0) & (val1 > 0)
d[flag] = np.sqrt(val0[flag]**2 + val1[flag]**2)
# (5, 6)
val0 = x - domain[1]
val1 = z - domain[5]
flag = (val0 > 0) & (val1 > 0)
d[flag] = np.sqrt(val0[flag]**2 + val1[flag]**2)
# (6, 7)
val0 = y - domain[3]
val1 = z - domain[5]
flag = (val0 > 0) & (val1 > 0)
d[flag] = np.sqrt(val0[flag]**2 + val1[flag]**2)
# (4, 7)
val0 = domain[0] - x
val1 = z - domain[5]
flag = (val0 > 0) & (val1 > 0)
d[flag] = np.sqrt(val0[flag]**2 + val1[flag]**2)
return d
def dcylinder(p,
center=np.array([0.0, 0.0, 0.0]),
height=2,
radius=1,
direction=np.array([0.0, 0.0, 1.0])):
"""
@brief 圆柱体的符号距离函数
@param[in] p numpy 数组
@param[in] c 圆柱体中心
@param[in] h 圆柱体高度
@param[in] r 圆柱体半径
@param[in] d 圆柱体方向
"""
v = p - center
    d = np.sum(v*direction, axis=-1) # signed distance along the axis from the center
    v -= d[..., None]*direction # component of v perpendicular to the axis
    shape = p.shape[:-1] + (3, )
    val = np.zeros(shape, dtype=p.dtype)
    val[..., 0] = np.sqrt(np.sum(v**2, axis=-1)) - radius # distance to the lateral surface
    val[..., 1] = d - height/2 # distance to the top cap
    val[..., 2] = -d - height/2 # distance to the bottom cap
d = np.max(val, axis=-1)
flag = (val[..., 0] > 0) & (val[..., 1] > 0)
d[flag] = np.sqrt(val[flag, 0]**2 + val[flag, 1]**2)
flag = (val[..., 0] > 0) & (val[..., 2] > 0)
d[flag] = np.sqrt(val[flag, 0]**2 + val[flag, 2]**2)
return d
def dsphere(p,
center=np.array([0.0, 0.0, 0.0]),
radius=1.0):
return np.sqrt(np.sum((p-center)**2, axis=-1)) - radius
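# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# A signed distance function is negative inside the geometry, positive outside and
# (approximately) zero on the boundary.
def _sdf_demo():
    pts = np.array([
        [0.5, 0.5, 0.5],   # centre of the unit cube -> negative distance
        [1.5, 0.5, 0.5],   # outside along x         -> positive distance
        [0.0, 0.0, 0.0],   # on a corner             -> ~0
    ])
    print(METHOD_NAME(pts, domain=[0, 1, 0, 1, 0, 1]))
    print(dcylinder(pts, center=np.array([0.5, 0.5, 0.5]), height=1.0, radius=0.5))
    print(dsphere(pts, center=np.array([0.5, 0.5, 0.5]), radius=0.5))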
|
2,412 |
split namespace
|
import ctypes
import struct
# 3p
import bson
from bson.codec_options import CodecOptions
from bson.son import SON
# project
from ...ext import net as netx
from ...internal.compat import to_unicode
from ...internal.logger import get_logger
log = get_logger(__name__)
# MongoDB wire protocol commands
# http://docs.mongodb.com/manual/reference/mongodb-wire-protocol
OP_CODES = {
1: "reply",
1000: "msg", # DEV: 1000 was deprecated at some point, use 2013 instead
2001: "update",
2002: "insert",
2003: "reserved",
2004: "query",
2005: "get_more",
2006: "delete",
2007: "kill_cursors",
2010: "command",
2011: "command_reply",
2013: "msg",
}
# The maximum message length we'll try to parse
MAX_MSG_PARSE_LEN = 1024 * 1024
header_struct = struct.Struct("<iiii")
class Command(object):
"""Command stores information about a pymongo network command,"""
__slots__ = ["name", "coll", "db", "tags", "metrics", "query"]
def __init__(self, name, db, coll):
self.name = name
self.coll = coll
self.db = db
self.tags = {}
self.metrics = {}
self.query = None
def __repr__(self):
return ("Command(" "name=%s," "db=%s," "coll=%s)") % (self.name, self.db, self.coll)
def parse_msg(msg_bytes):
"""Return a command from a binary mongo db message or None if we shouldn't
trace it. The protocol is documented here:
http://docs.mongodb.com/manual/reference/mongodb-wire-protocol
"""
# NOTE[matt] this is used for queries in pymongo <= 3.0.0 and for inserts
# in up to date versions.
msg_len = len(msg_bytes)
if msg_len <= 0:
return None
header = header_struct.unpack_from(msg_bytes, 0)
(length, req_id, response_to, op_code) = header
op = OP_CODES.get(op_code)
if not op:
log.debug("unknown op code: %s", op_code)
return None
db = None
coll = None
offset = header_struct.size
cmd = None
if op == "query":
# NOTE[matt] inserts, updates and queries can all use this opcode
offset += 4 # skip flags
ns = _cstring(msg_bytes[offset:])
offset += len(ns) + 1 # include null terminator
# note: here coll could be '$cmd' because it can be overridden in the
# query itself (like {'insert':'songs'})
db, coll = METHOD_NAME(ns)
offset += 8 # skip numberToSkip & numberToReturn
if msg_len <= MAX_MSG_PARSE_LEN:
# FIXME[matt] don't try to parse large messages for performance
# reasons. ideally we'd just peek at the first bytes to get
# the critical info (op type, collection, query, # of docs)
# rather than parse the whole thing. i suspect only massive
# inserts will be affected.
codec = CodecOptions(SON)
spec = next(bson.decode_iter(msg_bytes[offset:], codec_options=codec))
cmd = parse_spec(spec, db)
else:
# let's still note that a command happened.
cmd = Command("command", db, "untraced_message_too_large")
# If the command didn't contain namespace info, set it here.
if not cmd.coll:
cmd.coll = coll
elif op == "msg":
# Skip header and flag bits
offset += 4
# Parse the msg kind
kind = ord(msg_bytes[offset : offset + 1])
offset += 1
# Kinds: https://docs.mongodb.com/manual/reference/mongodb-wire-protocol/#sections
# - 0: BSON Object
# - 1: Document Sequence
if kind == 0:
if msg_len <= MAX_MSG_PARSE_LEN:
codec = CodecOptions(SON)
spec = next(bson.decode_iter(msg_bytes[offset:], codec_options=codec))
cmd = parse_spec(spec, db)
else:
# let's still note that a command happened.
cmd = Command("command", db, "untraced_message_too_large")
else:
# let's still note that a command happened.
cmd = Command("command", db, "unsupported_msg_kind")
if cmd:
cmd.metrics[netx.BYTES_OUT] = msg_len
return cmd
def parse_query(query):
"""Return a command parsed from the given mongo db query."""
db, coll = None, None
ns = getattr(query, "ns", None)
if ns:
# version < 3.1 stores the full namespace
db, coll = METHOD_NAME(ns)
else:
# version >= 3.1 stores the db and coll separately
coll = getattr(query, "coll", None)
db = getattr(query, "db", None)
# pymongo < 3.1 _Query does not have a name field, so default to 'query'
cmd = Command(getattr(query, "name", "query"), db, coll)
cmd.query = query.spec
return cmd
def parse_spec(spec, db=None):
"""Return a Command that has parsed the relevant detail for the given
pymongo SON spec.
"""
# the first element is the command and collection
items = list(spec.items())
if not items:
return None
name, coll = items[0]
cmd = Command(name, db or spec.get("$db"), coll)
if "ordered" in spec: # in insert and update
cmd.tags["mongodb.ordered"] = spec["ordered"]
if cmd.name == "insert":
if "documents" in spec:
cmd.metrics["mongodb.documents"] = len(spec["documents"])
elif cmd.name == "update":
updates = spec.get("updates")
if updates:
# FIXME[matt] is there ever more than one here?
cmd.query = updates[0].get("q")
elif cmd.name == "delete":
dels = spec.get("deletes")
if dels:
# FIXME[matt] is there ever more than one here?
cmd.query = dels[0].get("q")
return cmd
def _cstring(raw):
"""Return the first null terminated cstring from the buffer."""
return ctypes.create_string_buffer(raw).value
def METHOD_NAME(ns):
"""Return a tuple of (db, collection) from the 'db.coll' string."""
if ns:
# NOTE[matt] ns is unicode or bytes depending on the client version
# so force cast to unicode
split = to_unicode(ns).split(".", 1)
if len(split) == 1:
raise Exception("namespace doesn't contain period: %s" % ns)
return split
return (None, None)
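# --- Hedged usage sketch (illustrative only; the namespace and command document are
# made up). Build a minimal OP_QUERY wire message by hand and feed it to parse_msg();
# in production the bytes come straight off the pymongo socket.
def _parse_msg_demo():
    body = b"\x00\x00\x00\x00"                # flags
    body += b"test.songs\x00"                 # full namespace, null terminated
    body += struct.pack("<ii", 0, 0)          # numberToSkip, numberToReturn
    body += bson.encode({"insert": "songs"})  # the command document
    header = header_struct.pack(header_struct.size + len(body), 1, 0, 2004)  # 2004 = query
    cmd = parse_msg(header + body)
    print(cmd)  # Command(name=insert,db=test,coll=songs)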
|
2,413 |
test by pid dwarf
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# SPDX-License-Identifier: LGPL-2.1-or-later
import os
import unittest
from drgn import Object, Program
from tests import assertReprPrettyEqualsStr
from tests.linux_kernel import (
LinuxKernelTestCase,
fork_and_sigwait,
setenv,
skip_unless_have_stack_tracing,
skip_unless_have_test_kmod,
)
from util import NORMALIZED_MACHINE_NAME
@skip_unless_have_stack_tracing
class TestStackTrace(LinuxKernelTestCase):
def _test_drgn_test_kthread_trace(self, trace):
for i, frame in enumerate(trace):
if frame.name == "drgn_test_kthread_fn3":
break
else:
self.fail("Couldn't find drgn_test_kthread_fn3 frame")
self.assertEqual(trace[i + 1].name, "drgn_test_kthread_fn2")
self.assertEqual(trace[i + 2].name, "drgn_test_kthread_fn")
@skip_unless_have_test_kmod
def test_by_task_struct(self):
self._test_drgn_test_kthread_trace(
self.prog.stack_trace(self.prog["drgn_test_kthread"])
)
def _test_by_pid(self, orc):
old_orc = int(os.environ.get("DRGN_PREFER_ORC_UNWINDER", "0")) != 0
with setenv("DRGN_PREFER_ORC_UNWINDER", "1" if orc else "0"):
if orc == old_orc:
prog = self.prog
else:
prog = Program()
prog.set_kernel()
self._load_debug_info(prog)
self._test_drgn_test_kthread_trace(
prog.stack_trace(prog["drgn_test_kthread"].pid)
)
@skip_unless_have_test_kmod
def METHOD_NAME(self):
self._test_by_pid(False)
@unittest.skipUnless(
NORMALIZED_MACHINE_NAME == "x86_64",
f"{NORMALIZED_MACHINE_NAME} does not use ORC",
)
@skip_unless_have_test_kmod
def test_by_pid_orc(self):
self._test_by_pid(True)
@skip_unless_have_test_kmod
def test_by_pt_regs(self):
pt_regs = self.prog["drgn_test_kthread_pt_regs"]
self._test_drgn_test_kthread_trace(self.prog.stack_trace(pt_regs))
self._test_drgn_test_kthread_trace(self.prog.stack_trace(pt_regs.address_of_()))
@skip_unless_have_test_kmod
def test_local_variable(self):
for frame in self.prog.stack_trace(self.prog["drgn_test_kthread"]):
if frame.name == "drgn_test_kthread_fn3":
break
else:
self.fail("Couldn't find drgn_test_kthread_fn3 frame")
self.assertEqual(frame["a"], 1)
self.assertEqual(frame["b"], 2)
self.assertEqual(frame["c"], 3)
@skip_unless_have_test_kmod
def test_locals(self):
task = self.prog["drgn_test_kthread"]
stack_trace = self.prog.stack_trace(task)
for frame in stack_trace:
if frame.name == "drgn_test_kthread_fn3":
self.assertSetEqual(set(frame.locals()), {"a", "b", "c"})
break
else:
self.fail("Couldn't find drgn_test_kthread_fn3 frame")
def test_registers(self):
# Smoke test that we get at least one register and that
# StackFrame.registers() agrees with StackFrame.register().
with fork_and_sigwait() as pid:
trace = self.prog.stack_trace(pid)
have_registers = False
for frame in trace:
for name, value in frame.registers().items():
self.assertEqual(frame.register(name), value)
have_registers = True
self.assertTrue(have_registers)
def test_sp(self):
# Smoke test that the stack pointer register shows up in
# StackFrame.registers().
with fork_and_sigwait() as pid:
trace = self.prog.stack_trace(pid)
self.assertIn(trace[0].sp, trace[0].registers().values())
def test_prog(self):
self.assertEqual(
self.prog.stack_trace(Object(self.prog, "struct pt_regs", value={})).prog,
self.prog,
)
def test_stack__repr_pretty_(self):
with fork_and_sigwait() as pid:
trace = self.prog.stack_trace(pid)
assertReprPrettyEqualsStr(trace)
for frame in trace:
assertReprPrettyEqualsStr(frame)
|
2,414 |
verify captcha
|
import secrets
from typing import Optional
from aiohttp import ClientSession
from fastapi import HTTPException
from fastapi.security import OAuth2PasswordBearer, SecurityScopes
from passlib.context import CryptContext
from starlette.requests import Request
from starlette.status import HTTP_401_UNAUTHORIZED, HTTP_403_FORBIDDEN
from api import models, schemes, utils
from api.constants import TFA_RECOVERY_ALPHABET, TFA_RECOVERY_LENGTH
from api.plugins import run_hook
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
def verify_password(plain_password, hashed_password):
return pwd_context.verify(plain_password, hashed_password)
def get_password_hash(password):
return pwd_context.hash(password)
def generate_tfa_recovery_code():
return (
"".join(secrets.choice(TFA_RECOVERY_ALPHABET) for i in range(TFA_RECOVERY_LENGTH))
+ "-"
+ "".join(secrets.choice(TFA_RECOVERY_ALPHABET) for i in range(TFA_RECOVERY_LENGTH))
)
async def authenticate_user(email: str, password: str):
user = await utils.database.get_object(
models.User, custom_query=models.User.query.where(models.User.email == email), raise_exception=False
)
if not user:
return False, 404
if not verify_password(password, user.hashed_password):
return False, 401
return user, 200
oauth_kwargs = {
"tokenUrl": "/token/oauth2",
"scopes": {
"server_management": "Edit server settings",
"token_management": "Create, list or edit tokens",
"wallet_management": "Create, list or edit wallets",
"store_management": "Create, list or edit stores",
"discount_management": "Create, list or edit discounts",
"product_management": "Create, list or edit products",
"invoice_management": "Create, list or edit invoices",
"payout_management": "Create, list or edit payouts",
"notification_management": "Create, list or edit notification providers",
"template_management": "Create, list or edit templates",
"file_management": "Create, list or edit files",
"full_control": "Full control over what current user has",
},
}
bearer_description = """Token authorization. Get a token by sending a POST request to `/token` endpoint (JSON-mode, preferred)
or `/token/oauth2` OAuth2-compatible endpoint.
Request only the permissions your app actually needs; `full_control` grants access to all permissions of the user.
To authorize, send an `Authorization` header with value of `Bearer <token>` (replace `<token>` with your token)
"""
optional_bearer_description = "Same as Bearer, but not required. Logic for unauthorized users depends on current endpoint"
def check_selective_scopes(request, scope, token):
model_id = request.path_params.get("model_id", None)
if model_id is None:
return False
return f"{scope}:{model_id}" in token.permissions
class AuthDependency(OAuth2PasswordBearer):
def __init__(self, enabled: bool = True, token_required: bool = True, token: Optional[str] = None, return_token=False):
self.enabled = enabled
self.return_token = return_token
self.token = token
super().__init__(
**oauth_kwargs,
auto_error=token_required,
scheme_name="Bearer" if token_required else "BearerOptional",
description=bearer_description if token_required else optional_bearer_description,
)
async def _process_request(self, request: Request, security_scopes: SecurityScopes):
if not self.enabled:
if self.return_token: # pragma: no cover
return None, None
return None
if security_scopes.scopes:
authenticate_value = f'Bearer scope="{security_scopes.scope_str}"'
else:
authenticate_value = "Bearer"
token: str = self.token if self.token else await super().__call__(request)
exc = HTTPException(
status_code=HTTP_401_UNAUTHORIZED,
detail="Could not validate credentials",
headers={"WWW-Authenticate": authenticate_value},
)
if not token:
raise exc
data = (
await models.User.join(models.Token)
.select(models.Token.id == token)
.gino.load((models.User, models.Token))
.first()
)
if data is None:
raise exc
user, token = data # first validate data, then unpack
await user.load_data()
if not user.is_enabled:
raise HTTPException(403, "Account is disabled")
forbidden_exception = HTTPException(
status_code=HTTP_403_FORBIDDEN,
detail="Not enough permissions",
headers={"WWW-Authenticate": authenticate_value},
)
if "full_control" not in token.permissions:
for scope in security_scopes.scopes:
if scope not in token.permissions and not check_selective_scopes(request, scope, token):
await run_hook("permission_denied", user, token, scope)
raise forbidden_exception
if "server_management" in security_scopes.scopes and not user.is_superuser:
await run_hook("permission_denied", user, token, "server_management")
raise forbidden_exception
await run_hook("permission_granted", user, token, security_scopes.scopes)
if self.return_token:
return user, token
return user
async def __call__(self, request: Request, security_scopes: SecurityScopes):
try:
return await self._process_request(request, security_scopes)
except HTTPException:
if self.auto_error:
raise
if self.return_token:
return None, None
return None
auth_dependency = AuthDependency()
optional_auth_dependency = AuthDependency(token_required=False)
# TODO: add tests for captcha
async def METHOD_NAME(code, secret): # pragma: no cover
try:
async with ClientSession() as session:
async with session.post(
"https://hcaptcha.com/siteverify",
data={"response": code, "secret": secret},
) as resp:
return (await resp.json())["success"]
except Exception:
return False
async def captcha_flow(code):
policies = await utils.policies.get_setting(schemes.Policy)
if policies.enable_captcha: # pragma: no cover
if not await METHOD_NAME(code, policies.captcha_secretkey):
await run_hook("captcha_failed")
raise HTTPException(401, {"message": "Unauthorized", "status": 403})
await run_hook("captcha_passed")
|
2,415 |
run forever
|
"""From https://github.com/erdewit/nest_asyncio"""
import asyncio
import asyncio.events as events
import os
import sys
import threading
from contextlib import contextmanager, suppress
from heapq import heappop
def apply(loop=None):
"""Patch asyncio to make its event loop reentrant."""
_patch_asyncio()
_patch_task()
_patch_tornado()
loop = loop or asyncio.get_event_loop()
_patch_loop(loop)
def _patch_asyncio():
"""
Patch asyncio module to use pure Python tasks and futures,
use module level _current_tasks, all_tasks and patch run method.
"""
def run(main, *, debug=False):
try:
loop = asyncio.get_event_loop()
except RuntimeError:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
_patch_loop(loop)
loop.set_debug(debug)
task = asyncio.ensure_future(main)
try:
return loop.run_until_complete(task)
finally:
if not task.done():
task.cancel()
with suppress(asyncio.CancelledError):
loop.run_until_complete(task)
def _get_event_loop(stacklevel=3):
loop = events._get_running_loop()
if loop is None:
loop = events.get_event_loop_policy().get_event_loop()
return loop
if hasattr(asyncio, "_nest_patched"):
return
if sys.version_info >= (3, 6, 0):
asyncio.Task = asyncio.tasks._CTask = asyncio.tasks.Task = asyncio.tasks._PyTask
asyncio.Future = (
asyncio.futures._CFuture
) = asyncio.futures.Future = asyncio.futures._PyFuture
if sys.version_info < (3, 7, 0):
asyncio.tasks._current_tasks = asyncio.tasks.Task._current_tasks
asyncio.all_tasks = asyncio.tasks.Task.all_tasks
if sys.version_info >= (3, 9, 0):
events._get_event_loop = (
events.get_event_loop
) = asyncio.get_event_loop = _get_event_loop
asyncio.run = run
asyncio._nest_patched = True
def _patch_loop(loop):
"""Patch loop to make it reentrant."""
def METHOD_NAME(self):
with manage_run(self), manage_asyncgens(self):
while True:
self._run_once()
if self._stopping:
break
self._stopping = False
def run_until_complete(self, future):
with manage_run(self):
f = asyncio.ensure_future(future, loop=self)
if f is not future:
f._log_destroy_pending = False
while not f.done():
self._run_once()
if self._stopping:
break
if not f.done():
raise RuntimeError("Event loop stopped before Future completed.")
return f.result()
def _run_once(self):
"""
Simplified re-implementation of asyncio's _run_once that
runs handles as they become ready.
"""
ready = self._ready
scheduled = self._scheduled
while scheduled and scheduled[0]._cancelled:
heappop(scheduled)
timeout = (
0
if ready or self._stopping
else min(max(scheduled[0]._when - self.time(), 0), 86400)
if scheduled
else None
)
event_list = self._selector.select(timeout)
self._process_events(event_list)
end_time = self.time() + self._clock_resolution
while scheduled and scheduled[0]._when < end_time:
handle = heappop(scheduled)
ready.append(handle)
for _ in range(len(ready)):
if not ready:
break
handle = ready.popleft()
if not handle._cancelled:
handle._run()
handle = None
@contextmanager
def manage_run(self):
"""Set up the loop for running."""
self._check_closed()
old_thread_id = self._thread_id
old_running_loop = events._get_running_loop()
try:
self._thread_id = threading.get_ident()
events._set_running_loop(self)
self._num_runs_pending += 1
if self._is_proactorloop:
if self._self_reading_future is None:
self.call_soon(self._loop_self_reading)
yield
finally:
self._thread_id = old_thread_id
events._set_running_loop(old_running_loop)
self._num_runs_pending -= 1
if self._is_proactorloop:
if (
self._num_runs_pending == 0
and self._self_reading_future is not None
):
ov = self._self_reading_future._ov
self._self_reading_future.cancel()
if ov is not None:
self._proactor._unregister(ov)
self._self_reading_future = None
@contextmanager
def manage_asyncgens(self):
if not hasattr(sys, "get_asyncgen_hooks"):
# Python version is too old.
return
old_agen_hooks = sys.get_asyncgen_hooks()
try:
self._set_coroutine_origin_tracking(self._debug)
if self._asyncgens is not None:
sys.set_asyncgen_hooks(
firstiter=self._asyncgen_firstiter_hook,
finalizer=self._asyncgen_finalizer_hook,
)
yield
finally:
self._set_coroutine_origin_tracking(False)
if self._asyncgens is not None:
sys.set_asyncgen_hooks(*old_agen_hooks)
def _check_running(self):
"""Do not throw exception if loop is already running."""
pass
if hasattr(loop, "_nest_patched"):
return
if not isinstance(loop, asyncio.BaseEventLoop):
raise ValueError("Can't patch loop of type %s" % type(loop))
cls = loop.__class__
cls.METHOD_NAME = METHOD_NAME
cls.run_until_complete = run_until_complete
cls._run_once = _run_once
cls._check_running = _check_running
cls._check_runnung = _check_running # typo in Python 3.7 source
cls._num_runs_pending = 0
cls._is_proactorloop = os.name == "nt" and issubclass(
cls, asyncio.ProactorEventLoop
)
if sys.version_info < (3, 7, 0):
cls._set_coroutine_origin_tracking = cls._set_coroutine_wrapper
cls._nest_patched = True
def _patch_task():
"""Patch the Task's step and enter/leave methods to make it reentrant."""
def step(task, exc=None):
curr_task = curr_tasks.get(task._loop)
try:
step_orig(task, exc)
finally:
if curr_task is None:
curr_tasks.pop(task._loop, None)
else:
curr_tasks[task._loop] = curr_task
Task = asyncio.Task
if hasattr(Task, "_nest_patched"):
return
if sys.version_info >= (3, 7, 0):
def enter_task(loop, task):
curr_tasks[loop] = task
def leave_task(loop, task):
curr_tasks.pop(loop, None)
asyncio.tasks._enter_task = enter_task
asyncio.tasks._leave_task = leave_task
curr_tasks = asyncio.tasks._current_tasks
step_orig = Task._Task__step
Task._Task__step = step
else:
curr_tasks = Task._current_tasks
step_orig = Task._step
Task._step = step
Task._nest_patched = True
def _patch_tornado():
"""
If tornado is imported before nest_asyncio, make tornado aware of
the pure-Python asyncio Future.
"""
if "tornado" in sys.modules:
import tornado.concurrent as tc
tc.Future = asyncio.Future
if asyncio.Future not in tc.FUTURES:
tc.FUTURES += (asyncio.Future,)
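# --- Hedged usage sketch (illustrative) ---
# Applying the patch makes run_until_complete() reentrant, so a blocking helper can be
# called even from code that is already executing inside the event loop:
#
#   apply()                                  # patch the running (or default) loop
#   loop = asyncio.get_event_loop()
#
#   async def outer():
#       # Without the patch this raises "This event loop is already running".
#       return loop.run_until_complete(asyncio.sleep(0, result=42))
#
#   assert loop.run_until_complete(outer()) == 42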
|
2,416 |
extract sdk
|
#
# Copyright 2018 by Garmin Ltd. or its subsidiaries
#
# SPDX-License-Identifier: MIT
#
import os
from oeqa.sdk.context import OESDKTestContext, OESDKTestContextExecutor
class TestSDKBase(object):
@staticmethod
def get_sdk_configuration(d, test_type):
import platform
import oe.lsb
from oeqa.utils.metadata import get_layers
configuration = {'TEST_TYPE': test_type,
'MACHINE': d.getVar("MACHINE"),
'SDKMACHINE': d.getVar("SDKMACHINE"),
'IMAGE_BASENAME': d.getVar("IMAGE_BASENAME"),
'IMAGE_PKGTYPE': d.getVar("IMAGE_PKGTYPE"),
'STARTTIME': d.getVar("DATETIME"),
'HOST_DISTRO': oe.lsb.distro_identifier().replace(' ', '-'),
'LAYERS': get_layers(d.getVar("BBLAYERS"))}
return configuration
@staticmethod
def get_sdk_json_result_dir(d):
json_result_dir = os.path.join(d.getVar("LOG_DIR"), 'oeqa')
custom_json_result_dir = d.getVar("OEQA_JSON_RESULT_DIR")
if custom_json_result_dir:
json_result_dir = custom_json_result_dir
return json_result_dir
@staticmethod
def get_sdk_result_id(configuration):
return '%s_%s_%s_%s_%s' % (configuration['TEST_TYPE'], configuration['IMAGE_BASENAME'], configuration['SDKMACHINE'], configuration['MACHINE'], configuration['STARTTIME'])
class TestSDK(TestSDKBase):
context_executor_class = OESDKTestContextExecutor
context_class = OESDKTestContext
test_type = 'sdk'
def get_tcname(self, d):
"""
Get the name of the SDK file
"""
return d.expand("${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.sh")
def METHOD_NAME(self, tcname, sdk_dir, d):
"""
Extract the SDK to the specified location
"""
import subprocess
try:
subprocess.check_output("cd %s; %s <<EOF\n./\nY\nEOF" % (sdk_dir, tcname), shell=True)
except subprocess.CalledProcessError as e:
bb.fatal("Couldn't install the SDK:\n%s" % e.output.decode("utf-8"))
def setup_context(self, d):
"""
Return a dictionary of additional arguments that should be passed to
the context_class on construction
"""
return dict()
def run(self, d):
import os
import subprocess
import json
import logging
from bb.utils import export_proxies
from oeqa.utils import make_logger_bitbake_compatible
pn = d.getVar("PN")
logger = make_logger_bitbake_compatible(logging.getLogger("BitBake"))
# sdk use network for download projects for build
export_proxies(d)
tcname = self.get_tcname(d)
if not os.path.exists(tcname):
bb.fatal("The toolchain %s is not built. Build it before running the tests: 'bitbake <image> -c populate_sdk' ." % tcname)
tdname = d.expand("${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.testdata.json")
test_data = json.load(open(tdname, "r"))
target_pkg_manifest = self.context_executor_class._load_manifest(
d.expand("${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.target.manifest"))
host_pkg_manifest = self.context_executor_class._load_manifest(
d.expand("${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.host.manifest"))
processes = d.getVar("TESTIMAGE_NUMBER_THREADS") or d.getVar("BB_NUMBER_THREADS")
if processes:
try:
import testtools, subunit
except ImportError:
bb.warn("Failed to import testtools or subunit, the testcases will run serially")
processes = None
sdk_dir = d.expand("${WORKDIR}/testimage-sdk/")
bb.utils.remove(sdk_dir, True)
bb.utils.mkdirhier(sdk_dir)
context_args = self.setup_context(d)
self.METHOD_NAME(tcname, sdk_dir, d)
fail = False
sdk_envs = self.context_executor_class._get_sdk_environs(sdk_dir)
for s in sdk_envs:
sdk_env = sdk_envs[s]
bb.plain("SDK testing environment: %s" % s)
tc = self.context_class(td=test_data, logger=logger, sdk_dir=sdk_dir,
sdk_env=sdk_env, target_pkg_manifest=target_pkg_manifest,
host_pkg_manifest=host_pkg_manifest, **context_args)
try:
tc.loadTests(self.context_executor_class.default_cases)
except Exception as e:
import traceback
bb.fatal("Loading tests failed:\n%s" % traceback.format_exc())
if processes:
result = tc.runTests(processes=int(processes))
else:
result = tc.runTests()
component = "%s %s" % (pn, self.context_executor_class.name)
context_msg = "%s:%s" % (os.path.basename(tcname), os.path.basename(sdk_env))
configuration = self.get_sdk_configuration(d, self.test_type)
result.logDetails(self.get_sdk_json_result_dir(d),
configuration,
self.get_sdk_result_id(configuration))
result.logSummary(component, context_msg)
if not result.wasSuccessful():
fail = True
if fail:
bb.fatal("%s - FAILED - check the task log and the commands log" % pn)
|
2,417 |
create prepared statement
|
"""Amazon Athena Module gathering all functions related to prepared statements."""
import logging
from typing import TYPE_CHECKING, Any, Dict, List, Literal, Optional, cast
import boto3
from botocore.exceptions import ClientError
from awswrangler import _utils, exceptions
from awswrangler._config import apply_configs
if TYPE_CHECKING:
from mypy_boto3_athena.client import AthenaClient
_logger: logging.Logger = logging.getLogger(__name__)
def _does_statement_exist(
statement_name: str,
workgroup: str,
athena_client: "AthenaClient",
) -> bool:
try:
athena_client.get_prepared_statement(StatementName=statement_name, WorkGroup=workgroup)
except ClientError as e:
if e.response["Error"]["Code"] == "ResourceNotFoundException":
return False
raise e
return True
@apply_configs
def METHOD_NAME(
sql: str,
statement_name: str,
workgroup: Optional[str] = None,
mode: Literal["update", "error"] = "update",
boto3_session: Optional[boto3.Session] = None,
) -> None:
"""
Create a SQL statement with the name statement_name to be run at a later time. The statement can include parameters represented by question marks.
https://docs.aws.amazon.com/athena/latest/ug/sql-prepare.html
Parameters
----------
sql : str
The query string for the prepared statement.
statement_name : str
The name of the prepared statement.
workgroup : str, optional
The name of the workgroup to which the prepared statement belongs.
mode: str
Determines the behaviour if the prepared statement already exists:
- ``update`` - updates statement if already exists
- ``error`` - throws an error if table exists
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receive None.
Examples
--------
>>> import awswrangler as wr
>>> wr.athena.create_prepared_statement(
... sql="SELECT * FROM my_table WHERE name = ?",
... statement_name="statement",
... )
"""
if mode not in ["update", "error"]:
raise exceptions.InvalidArgumentValue("`mode` must be one of 'update' or 'error'.")
athena_client = _utils.client("athena", session=boto3_session)
workgroup = workgroup if workgroup else "primary"
already_exists = _does_statement_exist(statement_name, workgroup, athena_client)
if already_exists and mode == "error":
raise exceptions.AlreadyExists(f"Prepared statement {statement_name} already exists.")
if already_exists:
_logger.info(f"Updating prepared statement {statement_name}")
athena_client.update_prepared_statement(
StatementName=statement_name,
WorkGroup=workgroup,
QueryStatement=sql,
)
else:
_logger.info(f"Creating prepared statement {statement_name}")
athena_client.METHOD_NAME(
StatementName=statement_name,
WorkGroup=workgroup,
QueryStatement=sql,
)
@apply_configs
def list_prepared_statements(
workgroup: Optional[str] = None, boto3_session: Optional[boto3.Session] = None
) -> List[Dict[str, Any]]:
"""
List the prepared statements in the specified workgroup.
Parameters
----------
workgroup: str, optional
The name of the workgroup to which the prepared statement belongs.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receive None.
Returns
-------
List[Dict[str, Any]]
List of prepared statements in the workgroup.
Each item is a dictionary with the keys ``StatementName`` and ``LastModifiedTime``.
"""
athena_client = _utils.client("athena", session=boto3_session)
workgroup = workgroup if workgroup else "primary"
response = athena_client.list_prepared_statements(WorkGroup=workgroup)
statements = response["PreparedStatements"]
while "NextToken" in response:
response = athena_client.list_prepared_statements(WorkGroup=workgroup, NextToken=response["NextToken"])
statements += response["PreparedStatements"]
return cast(List[Dict[str, Any]], statements)
@apply_configs
def delete_prepared_statement(
statement_name: str,
workgroup: Optional[str] = None,
boto3_session: Optional[boto3.Session] = None,
) -> None:
"""
Delete the prepared statement with the specified name from the specified workgroup.
https://docs.aws.amazon.com/athena/latest/ug/sql-deallocate-prepare.html
Parameters
----------
statement_name : str
The name of the prepared statement.
workgroup : str, optional
The name of the workgroup to which the prepared statement belongs.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receive None.
Examples
--------
>>> import awswrangler as wr
>>> wr.athena.delete_prepared_statement(
... statement_name="statement",
... )
"""
athena_client = _utils.client("athena", session=boto3_session)
workgroup = workgroup if workgroup else "primary"
_logger.info(f"Deallocating prepared statement {statement_name}")
athena_client.delete_prepared_statement(
StatementName=statement_name,
WorkGroup=workgroup,
)
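# --- Hedged usage sketch (illustrative; table and statement names are made up) ---
# Typical lifecycle of a prepared statement with this module:
#
#   import awswrangler as wr
#   wr.athena.create_prepared_statement(
#       sql="SELECT * FROM my_table WHERE name = ?",
#       statement_name="statement",
#   )
#   wr.athena.list_prepared_statements()  # -> [{"StatementName": "statement", ...}]
#   wr.athena.delete_prepared_statement(statement_name="statement")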
|
2,418 |
features
|
"""Host function like audio, D-Bus or systemd."""
from contextlib import suppress
from functools import lru_cache
import logging
from awesomeversion import AwesomeVersion
from ..const import BusEvent
from ..coresys import CoreSys, CoreSysAttributes
from ..exceptions import HassioError, HostLogError, PulseAudioError
from ..hardware.const import PolicyGroup
from ..hardware.data import Device
from .apparmor import AppArmorControl
from .const import HostFeature
from .control import SystemControl
from .info import InfoCenter
from .logs import LogsControl
from .network import NetworkManager
from .services import ServiceManager
from .sound import SoundControl
_LOGGER: logging.Logger = logging.getLogger(__name__)
class HostManager(CoreSysAttributes):
"""Manage supported function from host."""
def __init__(self, coresys: CoreSys):
"""Initialize Host manager."""
self.coresys: CoreSys = coresys
self._apparmor: AppArmorControl = AppArmorControl(coresys)
self._control: SystemControl = SystemControl(coresys)
self._info: InfoCenter = InfoCenter(coresys)
self._services: ServiceManager = ServiceManager(coresys)
self._network: NetworkManager = NetworkManager(coresys)
self._sound: SoundControl = SoundControl(coresys)
self._logs: LogsControl = LogsControl(coresys)
@property
def apparmor(self) -> AppArmorControl:
"""Return host AppArmor handler."""
return self._apparmor
@property
def control(self) -> SystemControl:
"""Return host control handler."""
return self._control
@property
def info(self) -> InfoCenter:
"""Return host info handler."""
return self._info
@property
def services(self) -> ServiceManager:
"""Return host services handler."""
return self._services
@property
def network(self) -> NetworkManager:
"""Return host NetworkManager handler."""
return self._network
@property
def sound(self) -> SoundControl:
"""Return host PulseAudio control."""
return self._sound
@property
def logs(self) -> LogsControl:
"""Return host logs handler."""
return self._logs
@property
def METHOD_NAME(self) -> list[HostFeature]:
"""Return a list of host features."""
return self.supported_features()
@lru_cache(maxsize=128)
def supported_features(self) -> list[HostFeature]:
"""Return a list of supported host features."""
METHOD_NAME = []
if self.sys_dbus.systemd.is_connected:
METHOD_NAME.extend(
[HostFeature.REBOOT, HostFeature.SHUTDOWN, HostFeature.SERVICES]
)
if self.sys_dbus.network.is_connected and self.sys_dbus.network.interfaces:
METHOD_NAME.append(HostFeature.NETWORK)
if self.sys_dbus.hostname.is_connected:
METHOD_NAME.append(HostFeature.HOSTNAME)
if self.sys_dbus.timedate.is_connected:
METHOD_NAME.append(HostFeature.TIMEDATE)
if self.sys_dbus.agent.is_connected:
METHOD_NAME.append(HostFeature.OS_AGENT)
if self.sys_os.available:
METHOD_NAME.append(HostFeature.HAOS)
if self.sys_dbus.resolved.is_connected:
METHOD_NAME.append(HostFeature.RESOLVED)
if self.logs.available:
METHOD_NAME.append(HostFeature.JOURNAL)
if self.sys_dbus.udisks2.is_connected:
METHOD_NAME.append(HostFeature.DISK)
# Support added in OS10. Propagation mode changed on mount in 10.2 to support this
if (
self.sys_dbus.systemd.is_connected
and self.sys_supervisor.instance.host_mounts_available
and (
not self.sys_os.available
or self.sys_os.version >= AwesomeVersion("10.2")
)
):
METHOD_NAME.append(HostFeature.MOUNT)
return METHOD_NAME
async def reload(self):
"""Reload host functions."""
await self.info.update()
if self.sys_dbus.systemd.is_connected:
await self.services.update()
if self.sys_dbus.network.is_connected:
await self.network.update()
if self.sys_dbus.agent.is_connected:
await self.sys_dbus.agent.update()
if self.sys_dbus.udisks2.is_connected:
await self.sys_dbus.udisks2.update()
with suppress(PulseAudioError):
await self.sound.update()
_LOGGER.info("Host information reload completed")
self.supported_features.cache_clear() # pylint: disable=no-member
async def load(self):
"""Load host information."""
with suppress(HassioError):
if self.sys_dbus.systemd.is_connected:
await self.services.update()
with suppress(PulseAudioError):
await self.sound.update()
with suppress(HostLogError):
await self.logs.load()
await self.network.load()
# Register for events
self.sys_bus.register_event(BusEvent.HARDWARE_NEW_DEVICE, self._hardware_events)
self.sys_bus.register_event(
BusEvent.HARDWARE_REMOVE_DEVICE, self._hardware_events
)
# Load profile data
try:
await self.apparmor.load()
except HassioError as err:
_LOGGER.warning("Loading host AppArmor on start failed: %s", err)
async def _hardware_events(self, device: Device) -> None:
"""Process hardware requests."""
if self.sys_hardware.policy.is_match_cgroup(PolicyGroup.AUDIO, device):
await self.sound.update()
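# --- Hedged usage sketch (illustrative; assumes the Supervisor wiring described above) ---
# Other components typically consult the cached feature list before asking the host to
# do something:
#
#   if HostFeature.REBOOT in coresys.host.METHOD_NAME:
#       await coresys.host.control.reboot()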
|
2,419 |
get config dict
|
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import List, Dict, Any, Type, Union
from ....core import ChunkGraph, Chunk, TileContext
from ....core.operand.shuffle import ShuffleFetchType
from ....resource import Resource
from ....typing import BandType
from ....utils import merge_dict
from ...subtask import SubtaskGraph, SubtaskResult
class ExecutionConfig:
"""
The config for execution backends.
This class should ONLY provide the APIs for the parts other than
just the execution. Each backend may have a different implementation
of the API.
If some configuration is for a specific backend. They should be in
the backend config. e.g. `get_mars_special_config()` should be in
the `MarsExecutionConfig`.
"""
name = None
def __init__(self, config: Dict):
"""
An example of config:
{
"backend": "mars",
"mars": {
"n_worker": 1,
"n_cpu": 2,
...
},
}
"""
self._config = config
def merge_from(self, execution_config: "ExecutionConfig") -> "ExecutionConfig":
assert isinstance(execution_config, ExecutionConfig)
assert self.backend == execution_config.backend
merge_dict(
self._config,
execution_config.METHOD_NAME(),
)
return self
@property
def backend(self) -> str:
"""The backend from config."""
return self._config["backend"]
def METHOD_NAME(self) -> Dict:
"""Get the execution config dict."""
return self._config
@abstractmethod
def get_deploy_band_resources(self) -> List[Dict[str, Resource]]:
"""Get the band resources for deployment."""
@abstractmethod
def get_shuffle_fetch_type(self) -> ShuffleFetchType:
"""Get shuffle fetch type for shuffle execution"""
@classmethod
def from_config(cls, config: Dict, backend: str = None) -> "ExecutionConfig":
"""Construct an execution config instance from config."""
execution_config = config["task"]["execution_config"]
return cls.from_execution_config(execution_config, backend)
@classmethod
def from_execution_config(
cls, execution_config: Union[Dict, "ExecutionConfig"], backend: str = None
) -> "ExecutionConfig":
"""Construct an execution config instance from execution config."""
if isinstance(execution_config, ExecutionConfig):
assert backend is None
return execution_config
if backend is not None:
name = execution_config["backend"] = backend
else:
name = execution_config.setdefault("backend", "mars")
config_cls = _name_to_config_cls[name]
execution_config.setdefault(name, {})
return config_cls(execution_config)
@classmethod
def from_params(
cls,
backend: str,
n_worker: int,
n_cpu: int,
mem_bytes: int = 0,
cuda_devices: List[List[int]] = None,
**kwargs,
) -> "ExecutionConfig":
"""Construct an execution config instance from params."""
execution_config = {
"backend": backend,
backend: dict(
{
"n_worker": n_worker,
"n_cpu": n_cpu,
"mem_bytes": mem_bytes,
"cuda_devices": cuda_devices,
},
**kwargs,
),
}
return cls.from_execution_config(execution_config)
_name_to_config_cls: Dict[str, Type[ExecutionConfig]] = {}
def register_config_cls(config_cls: Type[ExecutionConfig]):
_name_to_config_cls[config_cls.name] = config_cls
return config_cls
@dataclass
class ExecutionChunkResult:
meta: Dict # The chunk meta for iterative tiling.
context: Any # The context info, e.g. ray.ObjectRef.
class TaskExecutor(ABC):
name = None
@classmethod
@abstractmethod
async def create(
cls,
config: Union[Dict, ExecutionConfig],
*,
session_id: str,
address: str,
task,
tile_context: TileContext,
**kwargs,
) -> "TaskExecutor":
backend_config = ExecutionConfig.from_execution_config(config)
executor_cls = _name_to_task_executor_cls[backend_config.backend]
if executor_cls.create.__func__ is TaskExecutor.create.__func__:
raise NotImplementedError(
f"The {executor_cls} should implement the abstract classmethod `create`."
)
return await executor_cls.create(
backend_config,
session_id=session_id,
address=address,
task=task,
tile_context=tile_context,
**kwargs,
)
@abstractmethod
def get_execution_config(self) -> ExecutionConfig:
"""Return execution config."""
def destroy(self):
"""Destroy the executor."""
async def __aenter__(self):
"""Called when begin to execute the task."""
@abstractmethod
async def execute_subtask_graph(
self,
stage_id: str,
subtask_graph: SubtaskGraph,
chunk_graph: ChunkGraph,
tile_context: TileContext,
context: Any = None,
) -> Dict[Chunk, ExecutionChunkResult]:
"""Execute a subtask graph and returns result."""
async def __aexit__(self, exc_type, exc_val, exc_tb):
"""Called when finish the task."""
@abstractmethod
async def get_available_band_resources(self) -> Dict[BandType, Resource]:
"""Get available band resources."""
@abstractmethod
async def get_progress(self) -> float:
"""Get the execution progress."""
@abstractmethod
async def cancel(self):
"""Cancel execution."""
# The following APIs are for compatible with mars backend, they
# will be removed as soon as possible.
async def set_subtask_result(self, subtask_result: SubtaskResult):
"""Set the subtask result."""
def get_stage_processors(self):
"""Get stage processors."""
_name_to_task_executor_cls: Dict[str, Type[TaskExecutor]] = {}
def register_executor_cls(executor_cls: Type[TaskExecutor]):
_name_to_task_executor_cls[executor_cls.name] = executor_cls
return executor_cls
class Fetcher:
"""The data fetcher for execution backends."""
name = None
required_meta_keys = () # The required meta keys.
@abstractmethod
def __init__(self, **kwargs):
pass
@abstractmethod
async def append(self, chunk_key: str, chunk_meta: Dict, conditions: List = None):
"""Append chunk key and related infos."""
@abstractmethod
async def get(self):
"""Get all the data of appended chunk keys."""
@classmethod
def create(cls, backend: str, **kwargs) -> "Fetcher":
fetcher_cls = _name_to_fetcher_cls[backend]
return fetcher_cls(**kwargs)
_name_to_fetcher_cls: Dict[str, Type[Fetcher]] = {}
def register_fetcher_cls(fetcher_cls: Type[Fetcher]):
_name_to_fetcher_cls[fetcher_cls.name] = fetcher_cls
return fetcher_cls
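# --- Hedged usage sketch (illustrative; assumes a "mars" backend config class has been
# registered elsewhere via register_config_cls) ---
#
#   config = ExecutionConfig.from_execution_config(
#       {"backend": "mars", "mars": {"n_worker": 1, "n_cpu": 2}}
#   )
#   same = ExecutionConfig.from_params(backend="mars", n_worker=1, n_cpu=2)
#   assert config.backend == same.backend == "mars"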
|
2,420 |
set image to pre view
|
###############################################################################
# ilastik: interactive learning and segmentation toolkit
#
# Copyright (C) 2011-2018, the ilastik developers
# <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# In addition, as a special exception, the copyright holders of
# ilastik give you permission to combine ilastik with applets,
# workflows and plugins which are not covered under the GNU
# General Public License.
#
# See the LICENSE file for details. License information is also available
# on the ilastik web site at:
# http://ilastik.org/license.html
###############################################################################
import sys
import os
import numpy
from . import preView
from PyQt5 import uic
from PyQt5.QtWidgets import QDialog
import qimage2ndarray
class FeatureDlg(QDialog):
def __init__(self, parent=None):
QDialog.__init__(self, parent)
# init
# ------------------------------------------------
localDir = os.path.split(os.path.abspath(__file__))[0]
uic.loadUi(localDir + "/featureDialog.ui", self)
# the preview is currently shown in a separate window
self.preView = preView.PreView()
self.cancel.clicked.connect(self.reject)
self.ok.clicked.connect(self.accept)
self.featureTableWidget.itemSelectionChanged.connect(self.updateOKButton)
# methods
# ------------------------------------------------
def get_scales(self):
"""Return the list of scale values that the user might have edited."""
return self.featureTableWidget.sigmas
def get_computeIn2d(self):
"""Return the list of scale values that the user might have edited."""
return self.featureTableWidget.computeIn2d
def get_selectionMatrix(self):
"""Return the bool matrix of features that the user selected."""
return numpy.asarray(self.featureTableWidget.featureMatrix)
def set_selectionMatrix(self, newMatrix):
"""Populate the table of selected features with the provided matrix."""
self.featureTableWidget.setFeatureMatrix(newMatrix)
def createFeatureTable(self, features, sigmas, computeIn2d, window_size):
self.featureTableWidget.setup(features, sigmas, computeIn2d, window_size)
def METHOD_NAME(self, image):
self.preView.setVisible(image is not None)
if image is not None:
self.preView.setPreviewImage(qimage2ndarray.array2qimage(image))
def updateOKButton(self):
num_features = numpy.sum(self.featureTableWidget.featureMatrix)
self.ok.setEnabled(num_features > 0)
def showEvent(self, event):
super().showEvent(event)
self.updateOKButton()
def setEnableItemMask(self, mask):
# See comments in FeatureTableWidget.setEnableItemMask()
self.featureTableWidget.setEnableItemMask(mask)
def setComputeIn2dHidden(self, hidden):
if hidden:
self.featureTableWidget.hideRow(0)
else:
self.featureTableWidget.showRow(0)
if __name__ == "__main__":
# make the program quit on Ctrl+C
import signal
signal.signal(signal.SIGINT, signal.SIG_DFL)
from PyQt5.QtWidgets import QApplication
from featureTableWidget import FeatureEntry
app = QApplication(sys.argv)
# app.setStyle("windows")
# app.setStyle("motif")
# app.setStyle("cde")
# app.setStyle("plastique")
# app.setStyle("macintosh")
# app.setStyle("cleanlooks")
ex = FeatureDlg()
ex.createFeatureTable(
[
("Color", [FeatureEntry("Banananananaana", minimum_scale=0.3)]),
("Edge", [FeatureEntry("Mango"), FeatureEntry("Cherry")]),
],
[0.3, 0.7, 1, 1.6, 3.5, 5.0, 10.0],
[False, False, False, False, True, True, True],
3.5,
)
ex.setWindowTitle("FeatureTest")
ex.METHOD_NAME(None)
def handle_accepted():
print("ACCEPTED")
        print(ex.get_selectionMatrix())
ex.accepted.connect(handle_accepted)
ex.exec_()
print("DONE")
# app.exec_()
|
2,421 |
test urls used by tornado client
|
"""
Unit test on client selection:
- By default: RPCClient should be used
- If we use Tornado service TornadoClient is used
Should work with
- 'Component/Service'
- URL
- List of URL
Mock Config:
- Service using HTTPS with Tornado
- Service using Diset
You don't need to setup anything, just run ``pytest TestClientSelection.py`` !
"""
import os
import re
from pytest import mark, fixture
from DIRAC.Core.Tornado.Client.ClientSelector import RPCClientSelector
from DIRAC.Core.Tornado.Client.TornadoClient import TornadoClient
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC.ConfigurationSystem.private.ConfigurationClient import ConfigurationClient
from DIRAC.ConfigurationSystem.Client.ConfigurationData import gConfigurationData
from diraccfg import CFG
from DIRAC.Core.Base.Client import Client
from DIRAC.Core.DISET.private.InnerRPCClient import InnerRPCClient
parametrize = mark.parametrize
testCfgFileName = "test.cfg"
@fixture(scope="function")
def config(request):
"""
fixture is the pytest way to declare initalization function.
Scope = module significate that this function will be called only time for this file.
If no scope precised it call config for each test.
This function can have a return value, it will be the value of 'config' argument for the tests
"""
cfgContent = """
DIRAC
{
Setup=TestSetup
Setups
{
TestSetup
{
WorkloadManagement=MyWM
}
}
}
Systems
{
WorkloadManagement
{
MyWM
{
URLs
{
ServiceDips = dips://$MAINSERVERS$:1234/WorkloadManagement/ServiceDips
ServiceHttps = https://$MAINSERVERS$:1234/WorkloadManagement/ServiceHttps
}
}
}
}
Operations{
Defaults
{
MainServers = server1, server2
}
}
"""
with open(testCfgFileName, "w") as f:
f.write(cfgContent)
gConfig = ConfigurationClient(fileToLoadList=[testCfgFileName]) # we replace the configuration by our own one.
    # Everything below the yield is executed as teardown at the end of each test.
    yield config
try:
os.remove(testCfgFileName)
except OSError:
pass
# SUPER UGLY: one must recreate the CFG objects of gConfigurationData
# not to conflict with other tests that might be using a local dirac.cfg
gConfigurationData.localCFG = CFG()
gConfigurationData.remoteCFG = CFG()
gConfigurationData.mergedCFG = CFG()
gConfigurationData.generateNewVersion()
print("TearDown")
# request is given by @fixture decorator, addfinalizer set the function who need to be called after the tests
# request.addfinalizer(tearDown)
# Tuple with (expectedClient, serviceName)
client_imp = (
(TornadoClient, "WorkloadManagement/ServiceHttps"),
(TornadoClient, "https://server1:1234/WorkloadManagement/ServiceHttps"),
(
TornadoClient,
"https://server1:1234/WorkloadManagement/ServiceHttps,https://server2:1234/WorkloadManagement/ServiceHttps",
),
(RPCClient, "WorkloadManagement/ServiceDips"),
(RPCClient, "dips://server1:1234/WorkloadManagement/ServiceDips"),
(
RPCClient,
"dips://server1:1234/WorkloadManagement/ServiceDips,dips://server2:1234/WorkloadManagement/ServiceDips",
),
)
@parametrize("client", client_imp)
def test_selection_when_using_RPCClientSelector(client, config):
"""
    Services can be called through either RPCClient or TornadoClient.
    If the service uses HTTPS, the selector must return the Tornado-based client (TornadoClient);
    otherwise it must return RPCClient.
"""
clientWanted = client[0]
component_service = client[1]
clientSelected = RPCClientSelector(component_service)
assert isinstance(clientSelected, clientWanted)
error_component = (
"Too/Many/Sections",
"JustAName",
"InexistantComponent/InexistantService",
"dummyProtocol://dummy/url",
)
@parametrize("component_service", error_component)
def test_error(component_service, config):
"""
    In all other cases (including error cases) it must return RPCClient by default.
    This test is NOT checking whether RPCClient handles the errors;
    it only checks that we get RPCClient and not TornadoClient.
"""
clientSelected = RPCClientSelector(component_service)
assert isinstance(clientSelected, RPCClient)
def test_interface():
"""
The interface of TornadoClient MUST contain at least the interface of RPCClient.
BUT a __getattr__ method extends this interface with the interface of InnerRPCClient.
"""
interfaceTornadoClient = dir(TornadoClient)
interfaceRPCClient = dir(RPCClient) + dir(InnerRPCClient)
for element in interfaceRPCClient:
# We don't need to test private methods / attributes
# Private methods/attributes start with __
# dir() also returns private methods named like _ClassName__PrivateMethodName
if not element.startswith("_"):
assert element in interfaceTornadoClient
client_imp = ((2, "WorkloadManagement/ServiceHttps"), (1, "https://server1:1234/WorkloadManagement/ServiceHttps"))
@parametrize("client", client_imp)
def METHOD_NAME(config, client):
# We can't get the URLs directly because they are randomized, but we can check that we have the right number of URLs
nbOfUrl = client[0]
component_service = client[1]
clientSelected = RPCClientSelector(component_service)
# Little hack to get the private attribute
assert nbOfUrl == clientSelected._TornadoBaseClient__nbOfUrls
|
2,422 |
symbolic bcast
|
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
import numpy as np
import dace
from common import compare_numpy_output
### Left, match first pos ######################################################
@compare_numpy_output()
def test_subl1(A: dace.float64[5, 3], B: dace.float64[3]):
return A - B
@compare_numpy_output()
def test_multl1(A: dace.int64[5, 3], B: dace.int64[3]):
return A * B
@compare_numpy_output()
def test_bitorl1(A: dace.int64[5, 3], B: dace.int64[3]):
return A | B
@compare_numpy_output()
def test_bitxorl1(A: dace.int64[5, 3], B: dace.int64[3]):
return A ^ B
@compare_numpy_output()
def test_noteql1(A: dace.int64[5, 3], B: dace.int64[3]):
return A != B
@compare_numpy_output()
def test_ltl1(A: dace.int64[5, 3], B: dace.int64[3]):
return A < B
### Right, match first pos #####################################################
@compare_numpy_output()
def test_subr1(A: dace.float64[5], B: dace.float64[3, 5]):
return A - B
@compare_numpy_output()
def test_multr1(A: dace.int64[5], B: dace.int64[3, 5]):
return A * B
@compare_numpy_output()
def test_bitorr1(A: dace.int64[5], B: dace.int64[3, 5]):
return A | B
@compare_numpy_output()
def test_bitxorr1(A: dace.int64[5], B: dace.int64[3, 5]):
return A ^ B
@compare_numpy_output()
def test_noteqr1(A: dace.int64[5], B: dace.int64[3, 5]):
return A != B
@compare_numpy_output()
def test_ltr1(A: dace.int64[5], B: dace.int64[3, 5]):
return A < B
### Left, first pos 1, match second pos ########################################
@compare_numpy_output()
def test_subl2(A: dace.float64[5, 3], B: dace.float64[5, 1]):
return A - B
@compare_numpy_output()
def test_multl2(A: dace.int64[5, 3], B: dace.int64[5, 1]):
return A * B
@compare_numpy_output()
def test_bitorl2(A: dace.int64[5, 3], B: dace.int64[5, 1]):
return A | B
@compare_numpy_output()
def test_bitxorl2(A: dace.int64[5, 3], B: dace.int64[5, 1]):
return A ^ B
@compare_numpy_output()
def test_noteql2(A: dace.int64[5, 3], B: dace.int64[5, 1]):
return A != B
@compare_numpy_output()
def test_ltl2(A: dace.int64[5, 3], B: dace.int64[5, 1]):
return A < B
### Right, first pos 1, match second ###########################################
@compare_numpy_output()
def test_subr2(A: dace.float64[3, 1], B: dace.float64[3, 5]):
return A - B
@compare_numpy_output()
def test_multr2(A: dace.int64[3, 1], B: dace.int64[3, 5]):
return A * B
@compare_numpy_output()
def test_bitorr2(A: dace.int64[3, 1], B: dace.int64[3, 5]):
return A | B
@compare_numpy_output()
def test_bitxorr2(A: dace.int64[3, 1], B: dace.int64[3, 5]):
return A ^ B
@compare_numpy_output()
def test_noteqr2(A: dace.int64[3, 1], B: dace.int64[3, 5]):
return A != B
@compare_numpy_output()
def test_ltr2(A: dace.int64[3, 1], B: dace.int64[3, 5]):
return A < B
### Left, first pos 1, match second pos, None last pos ########################
@compare_numpy_output()
def test_subl3(A: dace.float64[5, 3], B: dace.float64[2, 5, 1]):
return A - B
@compare_numpy_output()
def test_bitxorl3(A: dace.int64[5, 3], B: dace.int64[2, 5, 1]):
return A ^ B
@compare_numpy_output()
def test_ltl3(A: dace.int64[5, 3], B: dace.int64[2, 5, 1]):
return A < B
### Right, first pos 1, match second pos, None last pos #######################
@compare_numpy_output()
def test_multr3(A: dace.int64[4, 3, 1], B: dace.int64[3, 5]):
return A * B
@compare_numpy_output()
def test_bitorr3(A: dace.int64[4, 3, 1], B: dace.int64[3, 5]):
return A | B
@compare_numpy_output()
def test_noteqr3(A: dace.int64[4, 3, 1], B: dace.int64[3, 5]):
return A != B
### Left Errors ###############################################################
@compare_numpy_output()
def test_subl4(A: dace.float64[5, 3], B: dace.float64[2]):
return A - B
@compare_numpy_output()
def test_bitxorl4(A: dace.int64[5, 3], B: dace.int64[2, 3]):
return A ^ B
@compare_numpy_output()
def test_ltl4(A: dace.int64[5, 3], B: dace.int64[3, 2, 3]):
return A < B
### Right Errors ##############################################################
@compare_numpy_output()
def test_multr4(A: dace.int64[4], B: dace.int64[3, 5]):
return A * B
@compare_numpy_output()
def test_bitorr4(A: dace.int64[4, 1], B: dace.int64[3, 5]):
return A | B
# this is broken as of numpy 1.18: numpy doesn't raise an error
#
# >>> import numpy as np
# >>> a = np.random.rand(3, 2)
# >>> b = np.random.rand(2)
# >>> a == b # this works as expected
# array([[False, False],
# [False, False],
# [False, False]])
# >>> b = np.random.rand(3)
# >>> a == b # ?
# <stdin>:1: DeprecationWarning: elementwise comparison failed; this will raise an error in the future.
# False
#
# this test can be reenabled when this is fixed
#@compare_numpy_output()
#def test_noteqr4(A: dace.int64[3, 3, 2], B: dace.int64[3, 5]):
# return A != B
@compare_numpy_output()
def test_regression_result_none(A: dace.int32[1, 3], B: dace.int32[3]):
return A + B
@compare_numpy_output()
def test_both_match(A: dace.float64[5, 1], B: dace.float64[1, 3]):
return A + B
def test_symbolic_bcast_same():
N = dace.symbol("N")
I = dace.symbol("I")
@dace.program
def METHOD_NAME(A: dace.float64[N, 4], B: dace.float64[N * (I + 1) - N * I, 1]):
return A + B
A = np.arange(40).astype(np.float64).reshape(10, 4)
B = np.arange(10).astype(np.float64).reshape(10, 1)
result = METHOD_NAME(A.copy(), B.copy(), I=42, N=10)
expected = A + B
np.testing.assert_allclose(result, expected)
if __name__ == '__main__':
# generate this with
# cat binop_broadcasting_test.py | grep -oP '(?<=f ).*(?=\()' | awk '{print $0 "()"}'
test_subl1()
test_multl1()
test_bitorl1()
test_bitxorl1()
test_noteql1()
test_ltl1()
test_subr1()
test_multr1()
test_bitorr1()
test_bitxorr1()
test_noteqr1()
test_ltr1()
test_subl2()
test_multl2()
test_bitorl2()
test_bitxorl2()
test_noteql2()
test_ltl2()
test_subr2()
test_multr2()
test_bitorr2()
test_bitxorr2()
test_noteqr2()
test_ltr2()
test_subl3()
test_bitxorl3()
test_ltl3()
test_multr3()
test_bitorr3()
test_noteqr3()
test_subl4()
test_bitxorl4()
test_ltl4()
test_multr4()
test_bitorr4()
test_regression_result_none()
test_symbolic_bcast_same()
|
2,423 |
create network
|
from unittest import TestCase
from lamden.crypto.wallet import Wallet
from lamden.network import Network
from lamden.peer import Peer
import asyncio
import uvloop
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
GET_ALL_PEERS = "get_all_peers"
GET_LATEST_BLOCK = 'get_latest_block'
class TestMultiNode(TestCase):
def setUp(self):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
self.networks = []
def tearDown(self):
for network in self.networks:
if network.running:
network.stop()
del self.networks
loop = asyncio.get_event_loop()
loop.stop()
loop.close()
def METHOD_NAME(self, index=0):
network = Network(
wallet=Wallet(),
socket_ports=self.create_socket_ports(index),
)
network.ip = '127.0.0.1'
network.add_action(GET_ALL_PEERS, self.get_peer_list)
network.add_action(GET_LATEST_BLOCK, self.get_latest_block)
self.networks.append(network)
network.get_all_peers = self.get_peer_list
network.router.cred_provider.get_all_peers = self.get_peer_list
return network
def get_peer_list(self):
return [network.wallet.verifying_key for network in self.networks]
def get_latest_block(self):
return {}
def start_network(self, network):
tasks = asyncio.gather(
network.start()
)
loop = asyncio.get_event_loop()
res = loop.run_until_complete(tasks)
return res
def start_all_networks(self):
for network in self.networks:
self.start_network(network=network)
def ensure_async_process(self, process):
loop = asyncio.get_event_loop()
asyncio.set_event_loop(loop=loop)
asyncio.ensure_future(process())
def await_async_process(self, process):
tasks = asyncio.gather(
process()
)
loop = asyncio.get_event_loop()
res = loop.run_until_complete(tasks)
return res
def async_sleep(self, delay):
tasks = asyncio.gather(
asyncio.sleep(delay)
)
loop = asyncio.get_event_loop()
loop.run_until_complete(tasks)
def create_socket_ports(self, index=0):
return {
'router': 19000 + index,
'publisher': 19080 + index,
'webserver': 18080 + index
}
def test_connects_to_peer_network(self):
# Create two network instances
network_1 = self.METHOD_NAME()
self.start_network(network=network_1)
self.assertTrue(network_1.running)
network_2 = self.METHOD_NAME(index=1)
self.start_network(network=network_2)
self.assertTrue(network_2.running)
# connect networks to each other
network_1.connect(ip=network_2.external_address, vk=network_2.vk)
# await connections
self.async_sleep(delay=1)
# verify connections
peer_1 = network_1.get_peer(network_2.vk)
self.assertTrue(peer_1.running)
peer_2 = network_2.get_peer(network_1.vk)
self.assertTrue(peer_2.running)
def test_network_propagates_joined_peers(self):
# Create two network instances
network_1 = self.METHOD_NAME()
self.start_network(network=network_1)
self.assertTrue(network_1.running)
network_2 = self.METHOD_NAME(index=1)
self.start_network(network=network_2)
self.assertTrue(network_2.running)
# connect networks to each other
network_1.connect(ip=network_2.external_address, vk=network_2.vk)
# await connections
self.async_sleep(delay=1)
# verify connections
peer_1 = network_1.get_peer(network_2.vk)
self.assertTrue(peer_1.running)
peer_2 = network_2.get_peer(network_1.vk)
self.assertTrue(peer_2.running)
# Create new network
network_3 = self.METHOD_NAME(index=2)
self.start_network(network=network_3)
# Join to one peer on the network
network_3.connect(ip=network_1.external_address, vk=network_1.vk)
# await connect
self.async_sleep(1)
peer_3 = network_3.get_peer(vk=network_1.vk)
self.assertTrue(peer_3.running)
# await connect
self.async_sleep(1)
# All networks joined new peer
for network in self.networks:
self.assertEqual(2, len(network.peers))
for peer in network.peers.values():
self.assertTrue(peer.running)
def test_num_of_peers_zero(self):
network_1 = self.METHOD_NAME()
self.assertEqual(0, network_1.num_of_peers())
def test_num_of_peers(self):
network_1 = self.METHOD_NAME()
network_1.peers['node_2'] = {}
network_1.peers['node_3'] = {}
self.assertEqual(2, network_1.num_of_peers())
def test_num_of_peers_connected_zero(self):
network_1 = self.METHOD_NAME()
self.assertEqual(0, network_1.num_of_peers_connected())
def test_num_of_peers_connected(self):
network_1 = self.METHOD_NAME()
network_1.peers['node_2'] = Peer()
network_1.peers['node_3'] = Peer(dealer_running=False)
self.assertEqual(1, network_1.num_of_peers_connected())
def test_all_peers_connected_True(self):
network_1 = self.METHOD_NAME()
network_1.peers['node_2'] = Peer()
network_1.peers['node_3'] = Peer()
self.assertTrue(network_1.all_peers_connected())
def test_all_peers_connected_False(self):
network_1 = self.METHOD_NAME()
network_1.peers['node_2'] = Peer()
network_1.peers['node_3'] = Peer(subscriber_running=False)
self.assertFalse(network_1.all_peers_connected())
def test_reconnect_peer(self):
# Create two network instances
network_1 = self.METHOD_NAME()
self.start_network(network=network_1)
self.assertTrue(network_1.running)
network_2 = self.METHOD_NAME(index=1)
self.start_network(network=network_2)
self.assertTrue(network_2.running)
# connect networks to each other
network_1.connect(ip=network_2.external_address, vk=network_2.vk)
# await connections
self.async_sleep(delay=1)
# Disable Network 2
network_2.router.pause()
# Call reconnect loop on other network
peer = network_1.get_peer(vk=network_2.vk)
peer.dealer.check_connection()
self.async_sleep(delay=1)
self.assertFalse(peer.is_running)
self.assertTrue(peer.reconnecting)
# Enable Network 2
network_2.router.unpause()
# await Network 1 reconnects to network 2
self.async_sleep(delay=2.5)
net_1_all_connected = network_1.all_peers_connected()
net_2_all_connected = network_2.all_peers_connected()
self.assertTrue(net_1_all_connected)
self.assertTrue(net_2_all_connected)
def test_METHOD_set_to_local__ip_is_set_to_local(self):
network = Network(
wallet=Wallet(),
socket_ports=self.create_socket_ports(index=0),
)
network.set_to_local()
self.assertTrue(network.local)
self.assertEqual('127.0.0.1', network.external_ip)
|
2,424 |
test array dual
|
# ***************************************************************
# Copyright (c) 2023 Jittor. All Rights Reserved.
# Maintainers: Dun Liang <[email protected]>.
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************
import unittest
import jittor as jt
import numpy as np
from jittor import compile_extern
from jittor.test.test_core import expect_error
class TestArray(unittest.TestCase):
def test_data(self):
a = jt.array([1,2,3])
assert (a.data == [1,2,3]).all()
d = a.data
a.data[1] = -2
assert (a.data == [1,-2,3]).all()
assert (a.fetch_sync()==[1,-2,3]).all()
li = jt.liveness_info()
del a
assert li == jt.liveness_info()
del d
assert li != jt.liveness_info()
def test_set_data(self):
a = jt.array([1,2,3])
assert (a.fetch_sync()==[1,2,3]).all()
a.data = [4,5,6]
assert (a.fetch_sync()==[4,5,6]).all()
a.data = jt.array([7,8,9])
assert (a.fetch_sync()==[7,8,9]).all()
@unittest.skipIf(not jt.has_cuda, "Cuda not found")
@jt.flag_scope(use_cuda=1)
def test_memcopy_overlap(self):
import time
from jittor.models import resnet
im=np.random.rand(100,3,224,224).astype(np.float32)
net = resnet.Resnet34()
net.eval()
# warm up
x = jt.array(im).stop_grad()
for i in range(10):
a = net(x)
a.sync()
jt.sync(device_sync=True)
# pure compute
time_start=time.time()
x = jt.array(im).stop_grad()
for i in range(10):
a = net(x)
a.sync()
jt.sync(device_sync=True)
t1 = time.time() - time_start
# warm up
for i in range(3):
x = jt.array(im)
b = net(x)
b.fetch(lambda b: None)
b.sync()
jt.sync(device_sync=True)
# overlap
time_start=time.time()
results = []
for i in range(10):
x = jt.array(im)
b = net(x)
b.fetch(lambda b: results.append(b))
b.sync()
# del c
jt.sync(device_sync=True)
t2 = time.time() - time_start
assert t2-t1 < 0.010, (t2, t1, t2-t1)
assert np.allclose(a.data, b.data)
assert len(results) == 10
for v in results:
assert np.allclose(a.data, v), (v.shape, a.data.shape)
jt.LOG.v(f"pure compute: {t1}, overlap: {t2}")
def test_segfault(self):
a = jt.array([1.0,2.0,3.0])
b = (jt.maximum(a, 0)).sum() * 2.0
da = jt.grad(b, a)
jt.sync_all()
assert (a.data==[1,2,3]).all()
assert (da.data==[2,2,2]).all()
def test_segfault2(self):
assert (jt.array([1,2,3]).reshape((1,3)).data==[1,2,3]).all()
if jt.has_cuda:
with jt.flag_scope(use_cuda=1):
assert (jt.array([1,2,3]).reshape((1,3)).data==[1,2,3]).all()
@unittest.skipIf(not jt.has_cuda, "Cuda not found")
def METHOD_NAME(self):
with jt.flag_scope(use_cuda=1):
a = jt.array(np.float32([1,2,3]))
assert (a.data==[1,2,3]).all()
@unittest.skipIf(not jt.has_cuda, "Cuda not found")
def test_array_migrate(self):
with jt.flag_scope(use_cuda=1):
a = jt.array(np.float32([1,2,3]))
b = jt.code(a.shape, a.dtype, [a], cpu_src="""
for (int i=0; i<in0_shape0; i++)
@out(i) = @in0(i)*@in0(i)*2;
""")
assert (b.data==[2,8,18]).all()
def test_not_c_style(self):
a = np.array([1,2,3])
b = a[::-1]
x = jt.array(b)
x = x + b
assert (x.data == [6,4,2]).all()
def test_scalar(self):
assert jt.array(1).data == 1
assert jt.array(np.float64(1)).data == 1
assert jt.array(np.float32(1)).data == 1
assert jt.array(np.int32(1)).data == 1
assert jt.array(np.int64(1)).data == 1
def test_array_dtype(self):
a = jt.array([1,2,3], dtype=jt.NanoString("float32"))
a = jt.array([1,2,3], dtype=jt.float32)
def test_var(self):
a = jt.Var([1,2,3])
b = jt.Var([1,2,3], "float32")
assert a.dtype == "int32"
assert b.dtype == "float32"
assert (a.numpy() == [1,2,3]).all()
assert (b.numpy() == [1,2,3]).all()
def test_np_array(self):
a = jt.Var([1,2,3])
b = np.array(a)
assert (b==[1,2,3]).all()
def test_pickle(self):
import pickle
a = jt.Var([1,2,3,4])
s = pickle.dumps(a, pickle.HIGHEST_PROTOCOL)
b = pickle.loads(s)
assert isinstance(b, jt.Var)
assert (b.data == [1,2,3,4]).all()
def test_tuple_array(self):
a = jt.array((4,5))
expect_error(lambda : jt.array({}))
expect_error(lambda : jt.array("asdasd"))
expect_error(lambda : jt.array(jt))
def test_64_bit(self):
a = np.random.rand(10)
b = jt.array(a)
assert b.dtype == "float32"
with jt.flag_scope(auto_convert_64_to_32=0):
a = np.random.rand(10)
b = jt.array(a)
assert b.dtype == "float64"
a = np.random.rand(10)
b = jt.array64(a)
assert b.dtype == "float64"
def test_all_type(self):
with jt.flag_scope(auto_convert_64_to_32=0):
types = [
"bool",
"int8", "uint8",
"int16", "uint16",
"int32", "uint32",
"int64", "uint64",
"float32", "float64",
]
for t in types:
a = np.random.random(1000).astype(t)
b = jt.array(a)
assert str(b.dtype) == t
c = b.numpy()
assert str(c.dtype) == t
np.testing.assert_allclose(a, c)
def test_scalar_fuse_unary(self):
c = jt.ones(10)
jt.sync_all()
with jt.profile_scope() as rep:
b = c-1
assert b.data[1] == 0
assert len(rep) == 2
@unittest.skipIf(not jt.has_cuda, "Cuda not found")
def test_scalar_fuse_unary_cuda(self):
with jt.flag_scope(use_cuda=1):
self.test_scalar_fuse_unary()
if __name__ == "__main__":
unittest.main()
|
2,425 |
test
|
#-------------------------------------------------------------------------------
#
# Project: EOxServer <http://eoxserver.org>
# Authors: Fabian Schindler <[email protected]>
#
#-------------------------------------------------------------------------------
# Copyright (C) 2013 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#-------------------------------------------------------------------------------
from lxml import etree
from lxml.builder import E
from django.contrib.gis.geos import Polygon, MultiPolygon
from eoxserver.core.util.xmltools import parse
from eoxserver.core.util.timetools import isoformat
from eoxserver.core.util.iteratortools import pairwise
from eoxserver.core.util.timetools import parse_iso8601
from eoxserver.core.decoders import xml
from eoxserver.core import Component
class NativeFormat(Component):
formats = ("native", )
def METHOD_NAME(self, obj):
xml = parse(obj)
return xml is not None and xml.getroot().tag == "Metadata"
def get_format_name(self, obj):
return "native"
def read(self, obj):
tree = parse(obj)
if tree is not None:
decoder = NativeFormatDecoder(tree)
return {
"identifier": decoder.identifier,
"begin_time": decoder.begin_time,
"end_time": decoder.end_time,
"footprint": MultiPolygon(*decoder.polygons),
"format": "native"
}
raise Exception("Could not parse from obj '%s'." % repr(obj))
def write(self, values, file_obj, format=None, encoding=None, pretty=False):
def flip(point):
return point[1], point[0]
# ignore format
tree = E.Metadata(
E.EOID(values["identifier"]),
E.BeginTime(isoformat(values["begin_time"])),
E.EndTime(isoformat(values["end_time"])),
E.Footprint(
*map(lambda polygon:
E.Polygon(
E.Exterior(
" ".join([
"%f %f" % flip(point)
for point in polygon.exterior_ring
])
),
*[E.Interior(
" ".join([
"%f %f" % flip(point)
for point in interior
])
) for interior in polygon[1:]]
),
values["footprint"]
)
)
)
file_obj.write(
etree.tostring(tree, pretty_print=pretty, encoding=encoding)
)
def parse_polygon_xml(elem):
return Polygon(
parse_ring(elem.findtext("Exterior")),
*map(lambda e: parse_ring(e.text), elem.findall("Interior"))
)
def parse_ring(string):
raw_coords = map(float, string.split(" "))
return [(lon, lat) for lat, lon in pairwise(raw_coords)]
class NativeFormatDecoder(xml.Decoder):
identifier = xml.Parameter("EOID/text()")
begin_time = xml.Parameter("BeginTime/text()", type=parse_iso8601)
end_time = xml.Parameter("EndTime/text()", type=parse_iso8601)
polygons = xml.Parameter("Footprint/Polygon", type=parse_polygon_xml, num="+")
|
2,426 |
get available tables
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from io import StringIO, BytesIO
from astropy.io import votable
import astropy.units as u
from astropy.table import Table
from requests import HTTPError
from astroquery.query import BaseQuery
from astroquery.exceptions import InvalidQueryError
from astroquery.utils import url_helpers, prepend_docstr_nosections, async_to_sync
from . import conf
try:
from regions import CircleSkyRegion
except ImportError:
print('Could not import regions, which is required for some of the '
'functionalities of this module.')
@async_to_sync
class XMatchClass(BaseQuery):
URL = conf.url
TIMEOUT = conf.timeout
def query(self, cat1, cat2, max_distance, *,
colRA1=None, colDec1=None, colRA2=None, colDec2=None,
area='allsky', cache=True, get_query_payload=False, **kwargs):
"""
Query the `CDS cross-match service
<http://cdsxmatch.u-strasbg.fr/xmatch>`_ by finding matches between
two (potentially big) catalogues.
Parameters
----------
cat1 : str, file or `~astropy.table.Table`
Identifier of the first table. It can either be a URL, the
payload of a local file being uploaded, a CDS table
identifier (either *simbad* for a view of SIMBAD data / to
point out a given VizieR table) or a an AstroPy table.
If the table is uploaded or accessed through a URL, it must be
in VOTable or CSV format with the positions in J2000
equatorial frame and as decimal degrees numbers.
cat2 : str or file
Identifier of the second table. Follows the same rules as *cat1*.
max_distance : `~astropy.units.Quantity`
Maximum distance to look for counterparts.
Maximum allowed value is 180 arcsec.
colRA1 : str
Name of the column holding the right ascension. Only required
if ``cat1`` is an uploaded table or a pointer to a URL.
colDec1 : str
Name of the column holding the declination. Only required if
``cat1`` is an uploaded table or a pointer to a URL.
colRA2 : str
Name of the column holding the right ascension. Only required
if ``cat2`` is an uploaded table or a pointer to a URL.
colDec2 : str
Name of the column holding the declination. Only required if
``cat2`` is an uploaded table or a pointer to a URL.
area : ``regions.CircleSkyRegion`` or 'allsky' str
Restrict the area taken into account when performing the xmatch
Default value is 'allsky' (no restriction). If a
``regions.CircleSkyRegion`` object is given, only sources in
this region will be considered.
Returns
-------
table : `~astropy.table.Table`
Query results table
"""
response = self.query_async(cat1, cat2, max_distance, colRA1=colRA1, colDec1=colDec1,
colRA2=colRA2, colDec2=colDec2, area=area, cache=cache,
get_query_payload=get_query_payload,
**kwargs)
if get_query_payload:
return response
content = BytesIO(response.content)
return Table.read(content, format='votable', use_names_over_ids=True)
@prepend_docstr_nosections("\n" + query.__doc__)
def query_async(self, cat1, cat2, max_distance, *, colRA1=None, colDec1=None,
colRA2=None, colDec2=None, area='allsky', cache=True,
get_query_payload=False, **kwargs):
"""
Returns
-------
response : `~requests.Response`
The HTTP response returned from the service.
"""
if max_distance > 180 * u.arcsec:
raise ValueError('max_distance argument must not be greater than 180')
payload = {'request': 'xmatch',
'distMaxArcsec': max_distance.to(u.arcsec).value,
'RESPONSEFORMAT': 'votable',
**kwargs}
kwargs = {}
self._prepare_sending_table(1, payload, kwargs, cat1, colRA1, colDec1)
self._prepare_sending_table(2, payload, kwargs, cat2, colRA2, colDec2)
self._prepare_area(payload, area)
if get_query_payload:
return payload, kwargs
response = self._request(method='POST', url=self.URL, data=payload,
timeout=self.TIMEOUT, cache=cache, **kwargs)
try:
response.raise_for_status()
except HTTPError as err:
error_votable = votable.parse(BytesIO(response.content))
error_reason = error_votable.get_info_by_id('QUERY_STATUS').content
raise InvalidQueryError(error_reason) from err
return response
def _prepare_sending_table(self, cat_index, payload, kwargs, cat, colRA, colDec):
'''Check if table is a string, a `astropy.table.Table`, etc. and set
query parameters accordingly.
'''
catstr = 'cat{0}'.format(cat_index)
if isinstance(cat, str):
payload[catstr] = cat
elif isinstance(cat, Table):
# write the Table's content into a new, temporary CSV-file
# so that it can be pointed to via the `files` option
# file will be closed when garbage-collected
fp = StringIO()
cat.write(fp, format='ascii.csv')
fp.seek(0)
kwargs['files'] = {catstr: ('cat1.csv', fp.read())}
else:
# assume it's a file-like object, support duck-typing
kwargs['files'] = {catstr: ('cat1.csv', cat.read())}
if not self.is_table_available(cat):
if ((colRA is None) or (colDec is None)):
raise ValueError('Specify the name of the RA/Dec columns in the input table.')
# if `cat1` is not a VizieR table,
# it is assumed it's either a URL or an uploaded table
payload['colRA{0}'.format(cat_index)] = colRA
payload['colDec{0}'.format(cat_index)] = colDec
def _prepare_area(self, payload, area):
'''Set the area parameter in the payload'''
if area is None or area == 'allsky':
payload['area'] = 'allsky'
elif isinstance(area, CircleSkyRegion):
payload['area'] = 'cone'
cone_center = area.center
payload['coneRA'] = cone_center.icrs.ra.deg
payload['coneDec'] = cone_center.icrs.dec.deg
payload['coneRadiusDeg'] = area.radius.to_value(u.deg)
else:
raise ValueError('Unsupported area {}'.format(str(area)))
def is_table_available(self, table_id):
"""Return True if the passed CDS table identifier is one of the
available VizieR tables, otherwise False.
"""
# table_id can actually be a Table instance, there is no point in
# comparing those to strings
if not isinstance(table_id, str):
return False
if (table_id[:7] == 'vizier:'):
table_id = table_id[7:]
return table_id in self.METHOD_NAME()
def METHOD_NAME(self, *, cache=True):
"""Get the list of the VizieR tables which are available in the
xMatch service and return them as a list of strings.
"""
response = self._request(
'GET',
url_helpers.urljoin_keep_path(self.URL, 'tables'),
{'action': 'getVizieRTableNames', 'RESPONSEFORMAT': 'txt'},
cache=cache,
)
content = response.text
return content.splitlines()
XMatch = XMatchClass()
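# Usage sketch (assumes network access to the CDS xMatch service; the catalogue
# identifiers below are only examples):
#
#   import astropy.units as u
#   result = XMatch.query(cat1='vizier:II/246/out', cat2='vizier:I/259/tyc2',
#                         max_distance=5 * u.arcsec)
#
# To restrict the match to a region, pass area=CircleSkyRegion(center, radius)
# instead of the default 'allsky'.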
|
2,427 |
fix files
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import libcst as cst
import pathlib
import sys
from typing import (Any, Callable, Dict, List, Sequence, Tuple)
def partition(
predicate: Callable[[Any], bool],
iterator: Sequence[Any]
) -> Tuple[List[Any], List[Any]]:
"""A stable, out-of-place partition."""
results = ([], [])
for i in iterator:
results[int(predicate(i))].append(i)
# Returns trueList, falseList
return results[1], results[0]
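# Example (sketch): partition(lambda x: x > 2, [1, 3, 2, 4]) returns ([3, 4], [1, 2]);
# elements matching the predicate come first, the rest second, each in input order.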
class docsCallTransformer(cst.CSTTransformer):
CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata')
METHOD_TO_PARAMS: Dict[str, Tuple[str]] = {
}
def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
try:
key = original.func.attr.value
kword_params = self.METHOD_TO_PARAMS[key]
except (AttributeError, KeyError):
# Either not a method from the API or too convoluted to be sure.
return updated
# If the existing code is valid, keyword args come after positional args.
# Therefore, all positional args must map to the first parameters.
args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
if any(k.keyword.value == "request" for k in kwargs):
# We've already fixed this file, don't fix it again.
return updated
kwargs, ctrl_kwargs = partition(
lambda a: a.keyword.value not in self.CTRL_PARAMS,
kwargs
)
args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))
request_arg = cst.Arg(
value=cst.Dict([
cst.DictElement(
cst.SimpleString("'{}'".format(name)),
cst.Element(value=arg.value)
)
# Note: the args + kwargs looks silly, but keep in mind that
# the control parameters had to be stripped out, and that
# those could have been passed positionally or by keyword.
for name, arg in zip(kword_params, args + kwargs)]),
keyword=cst.Name("request")
)
return updated.with_changes(
args=[request_arg] + ctrl_kwargs
)
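# Illustrative sketch of the rewrite performed by leave_Call (the method name and its
# parameters are hypothetical, since METHOD_TO_PARAMS is empty in this copy):
#   client.create_document(name, body, retry=retry)
# becomes
#   client.create_document(request={'name': name, 'body': body}, retry=retry)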
def METHOD_NAME(
in_dir: pathlib.Path,
out_dir: pathlib.Path,
*,
transformer=docsCallTransformer(),
):
"""Duplicate the input dir to the output dir, fixing file method calls.
Preconditions:
* in_dir is a real directory
* out_dir is a real, empty directory
"""
pyfile_gen = (
pathlib.Path(os.path.join(root, f))
for root, _, files in os.walk(in_dir)
for f in files if os.path.splitext(f)[1] == ".py"
)
for fpath in pyfile_gen:
with open(fpath, 'r') as f:
src = f.read()
# Parse the code and insert method call fixes.
tree = cst.parse_module(src)
updated = tree.visit(transformer)
# Create the path and directory structure for the new file.
updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
updated_path.parent.mkdir(parents=True, exist_ok=True)
# Generate the updated source file at the corresponding path.
with open(updated_path, 'w') as f:
f.write(updated.code)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="""Fix up source that uses the docs client library.
The existing sources are NOT overwritten but are copied to output_dir with changes made.
Note: This tool operates at a best-effort level at converting positional
parameters in client method calls to keyword based parameters.
Cases where it WILL FAIL include
A) * or ** expansion in a method call.
B) Calls via function or method alias (includes free function calls)
C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
These all constitute false negatives. The tool will also detect false
positives when an API method shares a name with another method.
""")
parser.add_argument(
'-d',
'--input-directory',
required=True,
dest='input_dir',
help='the input directory to walk for python files to fix up',
)
parser.add_argument(
'-o',
'--output-directory',
required=True,
dest='output_dir',
help='the directory to output files fixed via un-flattening',
)
args = parser.parse_args()
input_dir = pathlib.Path(args.input_dir)
output_dir = pathlib.Path(args.output_dir)
if not input_dir.is_dir():
print(
f"input directory '{input_dir}' does not exist or is not a directory",
file=sys.stderr,
)
sys.exit(-1)
if not output_dir.is_dir():
print(
f"output directory '{output_dir}' does not exist or is not a directory",
file=sys.stderr,
)
sys.exit(-1)
if os.listdir(output_dir):
print(
f"output directory '{output_dir}' is not empty",
file=sys.stderr,
)
sys.exit(-1)
METHOD_NAME(input_dir, output_dir)
|
2,428 |
filter param
|
# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved.
import os.path as osp
from typing import Any, Dict
import numpy as np
import torch
from modelscope.metainfo import Models
from modelscope.models.base import TorchModel
from modelscope.models.builder import MODELS
from modelscope.models.cv.product_retrieval_embedding.item_detection import \
YOLOXONNX
from modelscope.models.cv.product_retrieval_embedding.item_embedding import (
preprocess, resnet50_embed)
from modelscope.outputs import OutputKeys
from modelscope.utils.constant import ModelFile, Tasks
from modelscope.utils.device import create_device
from modelscope.utils.logger import get_logger
logger = get_logger()
__all__ = ['ProductRetrievalEmbedding']
@MODELS.register_module(
Tasks.product_retrieval_embedding,
module_name=Models.product_retrieval_embedding)
class ProductRetrievalEmbedding(TorchModel):
def __init__(self, model_dir, device='cpu', **kwargs):
super().__init__(model_dir=model_dir, device=device, **kwargs)
def METHOD_NAME(src_params, own_state):
copied_keys = []
for name, param in src_params.items():
if 'module.' == name[0:7]:
name = name[7:]
if '.module.' not in list(own_state.keys())[0]:
name = name.replace('.module.', '.')
if (name in own_state) and (own_state[name].shape
== param.shape):
own_state[name].copy_(param)
copied_keys.append(name)
def load_pretrained(model, src_params):
if 'state_dict' in src_params:
src_params = src_params['state_dict']
own_state = model.state_dict()
METHOD_NAME(src_params, own_state)
model.load_state_dict(own_state)
self.device = create_device(
device) # device.type == "cpu" or device.type == "cuda"
self.use_gpu = self.device.type == 'cuda'
# config the model path
self.local_model_dir = model_dir
# init feat model
self.preprocess_for_embed = preprocess # input is cv2 bgr format
model_feat = resnet50_embed()
src_params = torch.load(
osp.join(self.local_model_dir, ModelFile.TORCH_MODEL_BIN_FILE),
'cpu')
load_pretrained(model_feat, src_params)
if self.use_gpu:
model_feat.to(self.device)
logger.info('Use GPU: {}'.format(self.device))
else:
logger.info('Use CPU for inference')
self.model_feat = model_feat
# init det model
self.model_det = YOLOXONNX(
onnx_path=osp.join(self.local_model_dir, 'onnx_detection.onnx'),
multi_detect=False)
logger.info('load model done')
def forward(self, input: Dict[str, Any]) -> Dict[str, Any]:
"""
detection and feature extraction for input product image
"""
# input should be cv2 bgr format
assert 'img' in input.keys()
def set_phase(model, is_train):
if is_train:
model.train()
else:
model.eval()
is_train = False
set_phase(self.model_feat, is_train)
img = input['img'] # for detection
cid = '3' # preprocess detection category bag
# transform img(tensor) to numpy array with bgr
if isinstance(img, torch.Tensor):
img = img.data.cpu().numpy()
res, crop_img = self.model_det.forward(img,
cid) # detect with bag category
crop_img = self.preprocess_for_embed(crop_img) # feat preprocess
input_tensor = torch.from_numpy(crop_img.astype(np.float32))
device = next(self.model_feat.parameters()).device
use_gpu = device.type == 'cuda'
with torch.no_grad():
if use_gpu:
input_tensor = input_tensor.to(device)
out_embedding = self.model_feat(input_tensor)
out_embedding = out_embedding.cpu().numpy()[
0, :] # feature array with 512 elements
output = {OutputKeys.IMG_EMBEDDING: None}
output[OutputKeys.IMG_EMBEDDING] = out_embedding
return output
|
2,429 |
discriminator
|
# coding: utf-8
"""
Paasta API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
import nulltype # noqa: F401
from paasta_tools.paastaapi.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from paasta_tools.paastaapi.model.task_tail_lines import TaskTailLines
globals()['TaskTailLines'] = TaskTailLines
class MarathonMesosNonrunningTask(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'deployed_timestamp': (float,), # noqa: E501
'hostname': (str,), # noqa: E501
'id': (str,), # noqa: E501
'state': (str,), # noqa: E501
'tail_lines': (TaskTailLines,), # noqa: E501
}
@cached_property
def METHOD_NAME():
return None
attribute_map = {
'deployed_timestamp': 'deployed_timestamp', # noqa: E501
'hostname': 'hostname', # noqa: E501
'id': 'id', # noqa: E501
'state': 'state', # noqa: E501
'tail_lines': 'tail_lines', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""MarathonMesosNonrunningTask - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
deployed_timestamp (float): The unix timestamp at which the task was deployed. [optional] # noqa: E501
hostname (str): Name of the Mesos agent on which this task is running. [optional] # noqa: E501
id (str): The ID of the task in Mesos. [optional] # noqa: E501
state (str): The current state of the task. [optional] # noqa: E501
tail_lines (TaskTailLines): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
|
2,430 |
test csv tsv parser with csv
|
"""
Tests of neo.rawio.phyrawio
Author: Regimantas Jurkus
"""
import unittest
from neo.rawio.phyrawio import PhyRawIO
from neo.test.rawiotest.common_rawio_test import BaseTestRawIO
import csv
import tempfile
from pathlib import Path
from collections import OrderedDict
import sys
class TestPhyRawIO(BaseTestRawIO, unittest.TestCase):
rawioclass = PhyRawIO
entities_to_download = [
'phy'
]
entities_to_test = [
'phy/phy_example_0'
]
def METHOD_NAME(self):
csv_tempfile = Path(tempfile.gettempdir()).joinpath('test.csv')
with open(csv_tempfile, 'w') as csv_file:
csv_writer = csv.writer(csv_file, delimiter=',')
csv_writer.writerow(['cluster_id', 'some_annotation', 'some_other_annotation'])
csv_writer.writerow([1, 'Good', 'Bad'])
csv_writer.writerow([2, 10, -2])
csv_writer.writerow([3, 1.23, -0.38])
# the parser in PhyRawIO runs csv.DictReader to parse the file
# csv.DictReader for python version 3.6+ returns list of OrderedDict
if (3, 6) <= sys.version_info < (3, 8):
target = [OrderedDict({'cluster_id': 1,
'some_annotation': 'Good',
'some_other_annotation': 'Bad'}),
OrderedDict({'cluster_id': 2,
'some_annotation': 10,
'some_other_annotation': -2}),
OrderedDict({'cluster_id': 3,
'some_annotation': 1.23,
'some_other_annotation': -0.38})]
# csv.DictReader for python version 3.8+ returns list of dict
elif sys.version_info >= (3, 8):
target = [{'cluster_id': 1,
'some_annotation': 'Good',
'some_other_annotation': 'Bad'},
{'cluster_id': 2,
'some_annotation': 10,
'some_other_annotation': -2},
{'cluster_id': 3,
'some_annotation': 1.23,
'some_other_annotation': -0.38}]
list_of_dict = PhyRawIO._parse_tsv_or_csv_to_list_of_dict(csv_tempfile)
self.assertEqual(target, list_of_dict)
def test_csv_tsv_parser_with_tsv(self):
tsv_tempfile = Path(tempfile.gettempdir()).joinpath('test.tsv')
with open(tsv_tempfile, 'w') as tsv_file:
tsv_writer = csv.writer(tsv_file, delimiter='\t')
tsv_writer.writerow(['cluster_id', 'some_annotation'])
tsv_writer.writerow([1, 'Good'])
tsv_writer.writerow([2, 10])
tsv_writer.writerow([3, 1.23])
# the parser in PhyRawIO runs csv.DictReader to parse the file
# csv.DictReader for python version 3.6+ returns list of OrderedDict
if (3, 6) <= sys.version_info < (3, 8):
target = [OrderedDict({'cluster_id': 1,
'some_annotation': 'Good'}),
OrderedDict({'cluster_id': 2,
'some_annotation': 10}),
OrderedDict({'cluster_id': 3,
'some_annotation': 1.23})]
# csv.DictReader for python version 3.8+ returns list of dict
elif sys.version_info >= (3, 8):
target = [{'cluster_id': 1, 'some_annotation': 'Good'},
{'cluster_id': 2, 'some_annotation': 10},
{'cluster_id': 3, 'some_annotation': 1.23}]
list_of_dict = PhyRawIO._parse_tsv_or_csv_to_list_of_dict(tsv_tempfile)
self.assertEqual(target, list_of_dict)
def test_csv_tsv_parser_error_raising(self):
txt_tempfile = Path(tempfile.gettempdir()).joinpath('test.txt')
with open(txt_tempfile, 'w') as txt_file:
txt_file.write('This is a test')
self.assertRaises(ValueError,
PhyRawIO._parse_tsv_or_csv_to_list_of_dict,
txt_tempfile)
if __name__ == "__main__":
unittest.main()
|
2,431 |
test pol list
|
#####################################################################
# Module for testing the functionality of the SNAP processing module
#####################################################################
import os
import pytest
from pyroSAR import identify
from pyroSAR.snap import geocode
from spatialist import bbox
from spatialist.ancillary import finder
from pyroSAR.snap.auxil import is_consistent, split, groupbyWorkers, Workflow, parse_recipe
from pyroSAR.examine import ExamineSnap
def test_installation():
reg = ExamineSnap()
assert os.path.isfile(reg.gpt)
def test_consistency():
with parse_recipe('base') as wf:
assert is_consistent(wf)
def test_geocode(tmpdir, testdata):
scene = testdata['s1']
geocode(scene, str(tmpdir), test=True)
xmlfile = finder(str(tmpdir), ['*.xml'])[0]
tree = Workflow(xmlfile)
assert is_consistent(tree) is True
groups = groupbyWorkers(xmlfile, 2)
assert len(groups) == 4
groups2 = groupbyWorkers(xmlfile, 100)
assert len(groups2) == 1
split(xmlfile, groups)
id = identify(scene)
basename = '{}_{}'.format(id.outname_base(), tree.suffix())
procdir = os.path.join(str(tmpdir), basename)
assert os.path.isdir(procdir)
tempdir = os.path.join(procdir, 'tmp')
assert os.path.isdir(tempdir)
parts = finder(tempdir, ['*.xml'])
assert len(parts) == 4
class Test_geocode_opts():
def test_infile_type(self, tmpdir, testdata):
scene = testdata['s1']
with pytest.raises(TypeError):
geocode(infile=123, outdir=str(tmpdir), test=True)
id = identify(scene)
geocode(infile=id, outdir=str(tmpdir), test=True)
def test_pol(self, tmpdir, testdata):
scene = testdata['s1']
with pytest.raises(RuntimeError):
geocode(scene, str(tmpdir), polarizations=1, test=True)
with pytest.raises(RuntimeError):
geocode(scene, str(tmpdir), polarizations='foobar', test=True)
geocode(scene, str(tmpdir), polarizations='VV', test=True)
def METHOD_NAME(self, tmpdir, testdata):
scene = testdata['s1']
geocode(scene, str(tmpdir), polarizations=['VV', 'VH'], test=True)
def test_geotype(self, tmpdir, testdata):
scene = testdata['s1']
with pytest.raises(RuntimeError):
geocode(scene, str(tmpdir), geocoding_type='foobar', test=True)
geocode(scene, str(tmpdir), test=True,
geocoding_type='SAR simulation cross correlation')
def test_srs(self, tmpdir, testdata):
scene = testdata['s1']
with pytest.raises(RuntimeError):
geocode(scene, str(tmpdir), t_srs='foobar', test=True)
geocode(scene, str(tmpdir), t_srs=32632, test=True)
def test_scaling(self, tmpdir, testdata):
scene = testdata['s1']
with pytest.raises(RuntimeError):
geocode(scene, str(tmpdir), scaling='foobar', test=True)
def test_shp(self, tmpdir, testdata):
scene = testdata['s1']
ext = {'xmin': 12, 'xmax': 13, 'ymin': 53, 'ymax': 54}
with bbox(ext, 4326) as new:
with pytest.raises(RuntimeError):
geocode(scene, str(tmpdir), shapefile=new, test=True)
with identify(scene).bbox() as box:
ext = box.extent
ext['xmax'] -= 1
with bbox(ext, 4326) as new:
geocode(scene, str(tmpdir), shapefile=new, test=True)
def test_offset(self, tmpdir, testdata):
scene = testdata['s1']
geocode(scene, str(tmpdir), offset=(100, 100, 0, 0), test=True)
def test_export_extra(self, tmpdir, testdata):
scene = testdata['s1']
with pytest.raises(RuntimeError):
geocode(scene, str(tmpdir), test=True,
export_extra=['foobar'])
geocode(scene, str(tmpdir), test=True,
export_extra=['localIncidenceAngle'])
def test_externalDEM(self, tmpdir, testdata):
scene = testdata['s1']
dem_dummy = testdata['tif']
with pytest.raises(RuntimeError):
geocode(scene, str(tmpdir), externalDEMFile='foobar', test=True)
geocode(scene, str(tmpdir), externalDEMFile=dem_dummy, test=True)
def test_speckleFilter(self, tmpdir, testdata):
scene = testdata['s1']
with pytest.raises(ValueError):
geocode(scene, str(tmpdir), speckleFilter='foobar', test=True)
geocode(scene, str(tmpdir), speckleFilter='Refined Lee', test=True)
def test_refarea(self, tmpdir, testdata):
scene = testdata['s1']
with pytest.raises(ValueError):
geocode(scene, str(tmpdir), terrainFlattening=False, refarea='foobar', test=True)
geocode(scene, str(tmpdir), terrainFlattening=True, refarea='gamma0', test=True)
def test_sliceassembly(self, tmpdir, testdata):
scene1 = testdata['s1']
scene2 = testdata['s1_2']
wf = geocode([scene1, scene2], str(tmpdir), test=True, returnWF=True)
for n in range(1, 4):
groups = groupbyWorkers(wf, n=n)
split(wf, groups)
|
2,432 |
fwd
|
# coding=utf-8
# Copyright 2022 The Pax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decorator to wrap any layer to add support for Ghost Norm."""
from typing import Any, Callable
import jax
import jax.numpy as jnp
from paxml.ghostnorm import base
from paxml.ghostnorm import embedding
from paxml.ghostnorm import linears
from praxis import base_layer
from praxis import pax_fiddle
from praxis import pytypes
from praxis.layers import attentions as praxis_attentions
from praxis.layers import embedding_softmax as praxis_embedding
from praxis.layers import linears as praxis_linears
from praxis.layers import normalizations as praxis_normalizations
from praxis.layers import transformers as praxis_transformers
template_field = base_layer.template_field
instantiate = base_layer.instantiate
LayerTpl = pax_fiddle.Config[base_layer.BaseLayer]
PARAMS = base_layer.PARAMS
LAYER = 'ghostnorm_wrapped_layer'
JTensor = pytypes.JTensor
NestedJTensor = pytypes.NestedJTensor
ResType = tuple[
Callable[..., tuple[JTensor, ...]],
NestedJTensor,
JTensor,
list[Any],
]
def _create_ghostnorm_fn(fn: Callable[..., JTensor]) -> Callable[..., JTensor]:
"""Adds a custom_vjp to a function to output per example gradient norms.
Args:
fn: A function that accepts input in the format (params, *args). The added
custom_vjp will add the per example gradient norms for params when using
jax.grad.
Returns:
A function with the custom_vjp added.
"""
@jax.custom_vjp
def f(params: NestedJTensor, *args: Any) -> JTensor:
return fn(base.get_param(params), *args)
def METHOD_NAME(params: NestedJTensor, *args: Any) -> tuple[JTensor, ResType]:
params, aux = base.get_param(params), base.get_aux(params)
out, vjp_fun = jax.vjp(fn, params, *args)
return out, (vjp_fun, params, aux, args)
def bwd(
res: ResType,
g: JTensor,
) -> tuple[JTensor, ...]:
vjp_fun, params, aux, args = res
if aux is None:
return vjp_fun(g)
# When aux is not None, it contains per-example scaling, and the
# back-propagation also returns per-example gradient square norms.
# Per-example scaling coefficient. Normally this is all ones. When
# computing the average of scaled (i.e. L2-norm clipped) per-example
# gradients, this contains a scaling coefficient for each example in the
# batch. Shape is (batch_size,).
scales = aux
# scaled gradients for parameters to achieve per-eg grad clipping
# scaled_g: (batch_size, ..., output_dim)
scaled_g = jax.tree_map(
lambda g_: jnp.einsum('i, i... -> i...', scales, g_), g
)
vjp_params, *vjp_args = vjp_fun(scaled_g)
def vmappable_vjp(g_, *args_):
_, vjp_fun = jax.vjp(fn, params, *args_)
return vjp_fun(g_)[0]
per_example_grad = jax.vmap(vmappable_vjp)(scaled_g, *args)
# -----------------------------------------------------------------------
# Compute per-example gradient square norms.
# The batch_size factor is needed when the loss is *averaged* over the
# mini-batch of examples (instead of summed over).
batch_size = args[0].shape[0]
batch_scaled_per_example_grad = jax.tree_map(
lambda x: x * batch_size, per_example_grad
)
per_example_grad_sq_norms = jax.tree_map(
jax.vmap(lambda x: (x**2).sum()), batch_scaled_per_example_grad
)
vjp_params = jax.tree_map(
base.ParamWithAux, vjp_params, per_example_grad_sq_norms
)
return vjp_params, *vjp_args
f.defvjp(METHOD_NAME, bwd)
return f
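# Minimal usage sketch (hypothetical function and shapes): wrapping a per-example
# apply function so that jax.grad also reports per-example gradient square norms.
#
#   ghost_fn = _create_ghostnorm_fn(apply_fn)   # apply_fn(params, x) -> output
#   params = jax.tree_map(lambda p: base.ParamWithAux(p, jnp.ones(batch_size)), raw_params)
#   grads = jax.grad(lambda p, x: ghost_fn(p, x).sum())(params, x)
#   # each leaf of grads is a ParamWithAux whose aux field holds the per-example
#   # gradient square norms used for ghost norm clipping.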
class WrappedGhostNorm(base_layer.BaseLayer):
"""Wraps a pax layer to be compatible with ghost clipping.
Attributes:
layer_tpl: A PaxConfig defining the layer that should be wrapped.
"""
layer_tpl: LayerTpl | None = template_field(None)
def setup(self):
super().setup()
if self.layer_tpl is not None:
self.create_child(LAYER, self.layer_tpl.clone())
self.layer_fn = _create_ghostnorm_fn(self.ghostnorm_wrapped_layer.apply)
def __call__(self, *args: Any) -> JTensor:
# This is a special case that is used when the layer is being initialized.
if PARAMS not in self.variables:
return self.ghostnorm_wrapped_layer(*args)
return self.layer_fn({PARAMS: self.variables[PARAMS][LAYER]}, *args)
_SPECIAL_ATTRS = {'layer_tpl'}
class GhostNormPaxConfig(pax_fiddle.Config):
"""A special PaxFiddle config which applies attributes to its child."""
def set(self, *args, **kwargs):
self.layer_tpl.set(*args, **kwargs)
return self
def __getattr__(self, name: str) -> Any:
if name in _SPECIAL_ATTRS:
return super().__getattr__(name)
return self.layer_tpl.__getattr__(name)
def __setattr__(self, name: str, value: Any):
if name in _SPECIAL_ATTRS:
super().__setattr__(name, value)
else:
self.layer_tpl.__setattr__(name, value)
# Add layers to this list that should be wrapped with WrappedGhostNorm wrapper.
# Note that this list should be mutually exclusive with _REPLACE_MAP.
_WRAPPABLE_LAYERS = {
praxis_normalizations.LayerNorm,
praxis_normalizations.RmsNorm,
praxis_attentions.PerDimScale,
praxis_attentions.CausalDepthwiseConv1D,
praxis_attentions.AttentionProjection, # optimize most likely.
praxis_attentions.CombinedQKVProjectionLayer, # optimize most likely.
praxis_transformers.TransformerFeedForwardMoe, # optimize maybe.
}
# Add a mapping to replace layer with a custom implementation.
_REPLACE_MAP = {
praxis_embedding.Embedding: embedding.EmbeddingGhostNorm,
praxis_linears.Linear: linears.LinearGhostNorm,
praxis_linears.Bias: linears.BiasGhostNorm,
}
def _is_wrappable(model_or_layer_p: pax_fiddle.Config) -> bool:
return (
issubclass(model_or_layer_p.cls, base_layer.BaseLayer)
and model_or_layer_p.cls in _WRAPPABLE_LAYERS
)
def _is_replaceable(model_or_layer_p: pax_fiddle.Config) -> bool:
return model_or_layer_p.cls in _REPLACE_MAP.keys()
def _replace(model_or_layer_p: pax_fiddle.Config) -> pax_fiddle.Config:
model_or_layer_p.cls = _REPLACE_MAP[model_or_layer_p.cls]
return model_or_layer_p
def generate_wrapped_template(
model_or_layer_p: pax_fiddle.Config,
) -> pax_fiddle.Config:
"""Wraps a Pax Layer PaxFiddle template to be compatible with ghost clipping.
Note that it only replaces or wraps layers that we know are compatible. To be
compatible, all the parameteric layers (layers with trainable parameters) in
the model/layer need to either wrappable or replacable. Furthermore, weight
sharing is not allowed and will cause an error even if all the others can be
wrapped or replaced.
Args:
model_or_layer_p: A PaxConfig describing the model or layer to wrap.
Returns:
A PaxConfig describing the wrapped model.
"""
assert isinstance(model_or_layer_p, pax_fiddle.Config)
for attr_name in model_or_layer_p.__dir__():
layer_p = model_or_layer_p.__getattr__(attr_name)
if isinstance(layer_p, pax_fiddle.Config) and issubclass(
layer_p.cls, base_layer.BaseLayer
):
wrapped_layer_p = generate_wrapped_template(layer_p)
model_or_layer_p.__setattr__(attr_name, wrapped_layer_p)
if _is_replaceable(model_or_layer_p):
model_or_layer_p = _replace(model_or_layer_p)
if _is_wrappable(model_or_layer_p):
model_or_layer_p = GhostNormPaxConfig(
WrappedGhostNorm, layer_tpl=model_or_layer_p
)
return model_or_layer_p
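# Example (sketch): wrap an existing layer template before instantiation. `linear_p`
# is an assumed pax_fiddle.Config for one of the supported layers above.
#
#   linear_p = pax_fiddle.Config(praxis_linears.Linear, input_dims=8, output_dims=4)
#   wrapped_p = generate_wrapped_template(linear_p)   # replaced by LinearGhostNorm
#   layer = instantiate(wrapped_p)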
|
2,433 |
filter accepts row
|
######################################################################################################################
# Copyright (C) 2017-2022 Spine project consortium
# This file is part of Spine Toolbox.
# Spine Toolbox is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option)
# any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General
# Public License for more details. You should have received a copy of the GNU Lesser General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
######################################################################################################################
"""
Contains a class for storing Tool specifications.
"""
import bisect
from PySide6.QtCore import Qt, QModelIndex, QAbstractListModel, QSortFilterProxyModel, Slot, Signal
class ProjectItemSpecificationModel(QAbstractListModel):
"""Class to store specs that are available in a project e.g. GAMS or Julia models."""
specification_replaced = Signal(str, str)
def __init__(self, icons):
super().__init__()
self._spec_names = list()
self._icons = icons
self._project = None
@Slot(str)
def add_specification(self, name):
"""Adds a specification to the model.
Args:
name (str): specification's name
"""
pos = bisect.bisect_left([x.lower() for x in self._spec_names], name.lower())
self.insertRow(name, pos)
@Slot(str)
def remove_specification(self, name):
"""Removes a specification from the model
Args:
name (str): specification's name
"""
for i, spec_name in enumerate(self._spec_names):
if spec_name == name:
self.removeRow(i)
break
@Slot(str, str)
def replace_specification(self, old_name, new_name):
"""Replaces a specification.
Args:
old_name (str): previous name
new_name (str): new name
"""
self.remove_specification(old_name)
self.add_specification(new_name)
self.specification_replaced.emit(old_name, new_name)
def connect_to_project(self, project):
"""Connects the model to a project.
Args:
project (SpineToolboxProject): project to connect to
"""
self.clear()
self._project = project
for spec in self._project.specifications():
self.insertRow(spec.name)
self._project.specification_added.connect(self.add_specification)
self._project.specification_about_to_be_removed.connect(self.remove_specification)
self._project.specification_replaced.connect(self.replace_specification)
def clear(self):
self.beginResetModel()
self._spec_names = list()
self.endResetModel()
def rowCount(self, parent=None):
"""Returns the number of specs in the model.
Args:
parent (QModelIndex): Not used (because this is a list)
Returns:
Number of rows (available specs) in the model
"""
return len(self._spec_names)
def data(self, index, role=None):
"""Must be reimplemented when subclassing.
Args:
index (QModelIndex): Requested index
role (int): Data role
Returns:
Data according to requested role
"""
if not index.isValid() or self.rowCount() == 0:
return None
row = index.row()
if role == Qt.ItemDataRole.DisplayRole:
return self._spec_names[row]
if role == Qt.ItemDataRole.ToolTipRole:
if row >= self.rowCount():
return ""
return (
"<p>Drag-and-drop this onto the Design View "
f"to create a new <b>{self._spec_names[row]}</b> item.</p>"
)
if role == Qt.ItemDataRole.DecorationRole:
spec = self.specification(row)
return self._icons[spec.item_type]
def flags(self, index):
"""Returns enabled flags for the given index.
Args:
index (QModelIndex): Index of spec
"""
return Qt.ItemIsEnabled | Qt.ItemIsSelectable
def insertRow(self, spec_name, row=None, parent=QModelIndex()):
"""Insert row (specification) into model.
Args:
spec_name (str): name of spec added to the model
row (int, optional): Row to insert spec to
parent (QModelIndex): Parent of child (not used)
Returns:
Void
"""
if row is None:
row = self.rowCount()
self.beginInsertRows(parent, row, row)
self._spec_names.insert(row, spec_name)
self.endInsertRows()
def removeRow(self, row, parent=QModelIndex()):
"""Remove row (spec) from model.
Args:
row (int): Row to remove the spec from
parent (QModelIndex): Parent of spec on row (not used)
Returns:
True if the row was removed, False otherwise
"""
if row < 0 or row >= self.rowCount():
# logging.error("Invalid row number")
return False
self.beginRemoveRows(parent, row, row)
self._spec_names.pop(row)
self.endRemoveRows()
return True
def specification(self, row):
"""Returns spec on given row.
Args:
row (int): Row of the specification
Returns:
ProjectItemSpecification from the specification list or None if the given row is out of range
"""
if row < 0 or row >= self.rowCount():
return None
return self._project.get_specification(self._spec_names[row])
def specification_row(self, name):
"""Returns the row on which the given specification is located or -1 if it is not found."""
for i, spec_name in enumerate(self._spec_names):
if name.lower() == spec_name.lower():
return i
return -1
def specification_index(self, name):
"""Returns the QModelIndex on which a specification with
the given name is located or invalid index if it is not found."""
row = self.specification_row(name)
if row == -1:
return QModelIndex()
return self.createIndex(row, 0)
class FilteredSpecificationModel(QSortFilterProxyModel):
def __init__(self, item_type):
super().__init__()
self.item_type = item_type
def METHOD_NAME(self, source_row, source_parent):
spec = self.sourceModel().specification(source_row)
return spec.item_type == self.item_type
def get_mime_data_text(self, index):
row = self.mapToSource(index).row()
return ",".join([self.item_type, self.sourceModel().specification(row).name])
def specifications(self):
"""Yields all specs."""
for row in range(self.rowCount()):
source_row = self.mapToSource(self.index(row, 0)).row()
yield self.sourceModel().specification(source_row)
def specification(self, row):
if row < 0 or row >= self.rowCount():
return None
index = self.index(row, 0)
source_index = self.mapToSource(index)
source_row = source_index.row()
return self.sourceModel().specification(source_row)
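# A minimal wiring sketch, assuming an item type such as "Tool" (the exact item types
# are defined elsewhere in the project):
#
#   source_model = ProjectItemSpecificationModel(icons)
#   proxy = FilteredSpecificationModel(item_type="Tool")
#   proxy.setSourceModel(source_model)
#   # Views attached to `proxy` only see specifications of that item type;
#   # Qt calls METHOD_NAME (filterAcceptsRow) above for every source row.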
|
2,434 |
fit func ex
|
import numpy as np
import scipy
import scipy.linalg.basic as slb
from scipy.optimize import leastsq
from . import functions
import qt
from qt import plot as plot
from lmfit import minimize, Parameters, Parameter, report_fit
def residuals_lmfit(pars, fit_func, data):
res = fit_func(pars)-data
#print type(data[0])
#print type(1.j)
if type(data[0]) == np.complex128:
res = np.append(res.real,res.imag)
#print res
return res
def fitxy(pars,data, fit_func, **kw):
'''
Fits pars to data.
Set pars[name].vary = False for independent (fixed) parameters.
'''
tol = kw.pop('xtol',1.e-10)
#print 'tol: ',tol
result = minimize(residuals_lmfit, pars, args=(fit_func,data),xtol=tol,ftol=tol)
#print result
#rep = report_fit(result)
if kw.pop('ret_fit',True):
return result, fit_func(pars)
else:
return result
def print_fitres(pars):
p = pars.copy()
for key in list(p.keys()):
if not p[key].vary:
p.pop(key)
report_fit(p)
def _residuals(p0, x, y, fit_func):
return y-fit_func(x,*p0)
def _residuals_m(p0, fit_func, *x):
'''
residual for functions y = f(u,v,w,...)
x = variable-length tuple with the dependent variable at the end
x = (u,v,w,y)
u,v,w,y all have the same length
'''
p0=tuple(p0)
print(p0)
qt.msleep()
return x[-1]-fit_func(*(x[:-1]+p0))
def fit_func_example(x1,x2,p1,p2,p3,p4):
y = (x1 + p1) * p2 ** p3 / p4  # toy example function; x2 is accepted but unused here
return y
#fit1D((x1,x2,p1,p2),y,(p3,p4))
def METHOD_NAME(x1,x2,p1,p2,p4,p3):
return fit_func_example(x1,x2,p1,p2,p3,p4)
def fit_func_examplep1(x,p1,p2,p3,p4):
y=fit_func_example(p1,x,p2,p3,p4)
return y
def _residuals2(p0, x1,x2, y, fit_func):
return y-fit_func(x1,x2,p0)
def fit1D(x,y,fit_func, **kw):
'''
parameters
x : tuple of indep. variables
y : dep. var. (data)
fit_func : function to be fitted, with structure fit_func(x, p0) where p0 are the parameters
known KW
init_guess : vector of initial estimates for p0
guess_func : function that guesses the initial parameters p0
full_output : True by default
onscreen : print results on screen
ret_fit : return an array fit_func(x, Pfit)
plot_results=False
'''
full_output = kw.pop('full_output',True)
onscreen = kw.pop('onscreen',True)
kw_c=kw.copy()
qt.mstart()
try:
p0 = kw_c.pop('init_guess')
except KeyError:
print('No initial guess given, trying guess func instead')
p0 = kw_c.pop('guess_func')(x,y)
if type(x)==type((0,)):
var = x+(y,)
else :
x=(x,)
var=x+(y,)
if type(p0)==type((0,)):
pass
else :
p0=tuple(p0)
plres=kw.pop('plot_results', False)
if plres:
pltr=plot(name='fit monitor')
pltr.clear()
init_guess_arr = fit_func(*(x+tuple(p0)))
plot(x[0], init_guess_arr, title = 'initial guess',name='fit monitor')
plot(x[0], y, title = 'data',name='fit monitor')
plsq,cov,info,mesg,success = leastsq(_residuals_m, \
p0, args=(fit_func,)+var, full_output = 1, maxfev=10000)#,xtol=kw.pop('xtol',1.e-10))
try :
len(plsq)
except TypeError :
plsq=[plsq]
if onscreen:
print('plsq',plsq)
print('mesg',mesg)
print(cov)
#print 'info: ',info
dof = max(np.shape(y)) - len(plsq)
errors = estimate_errors(plsq,info,cov,dof)
k=0
if plres:
fresarr = fit_func(*(x+tuple(plsq)))
plot(x[0],fresarr , title = 'fit',name='fit monitor')
if onscreen:
for p in plsq:
print('p%s = %s +/- %s'%(k,p,errors[k]))
k+=1
qt.mstop()
if kw.pop('ret_fit',False):
return plsq, errors, fit_func(*(x+tuple(plsq))),cov,info
else:
return plsq, errors
def estimate_errors(plsq, fit_info, cov, dof):
'''
plsq = fitparams
fit_info = full_output of leastsq
cov = covariance matrix
dof = degrees of freedom (or len(x_data) - len(plsq))
'''
error = len(plsq)*[0]
chisq=sum(fit_info["fvec"]*fit_info["fvec"])
for i in np.arange(len(plsq)):
print('cov: ',cov[i,i])
error[i] = np.sqrt(cov[i,i])*np.sqrt(chisq/dof)
return error
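# A minimal usage sketch with made-up data (the model and initial guess are assumptions):
#
#   x = np.linspace(0, 1, 100)
#   y = 2.0 * x + 0.5 + np.random.normal(scale=0.01, size=x.shape)
#   line = lambda x, a, b: a * x + b
#   plsq, errors = fit1D(x, y, line, init_guess=(1.0, 0.0), onscreen=False)
#   # plsq holds the fitted (a, b); errors are the 1-sigma estimates from the covariance.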
|
2,435 |
to json
|
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from json import dumps
from typing import Optional
from deprecated import deprecated
class InstrumentationInfo:
"""Immutable information about an instrumentation library module.
See `opentelemetry.trace.TracerProvider.get_tracer` for the meaning of these
properties.
"""
__slots__ = ("_name", "_version", "_schema_url")
@deprecated(version="1.11.1", reason="You should use InstrumentationScope")
def __init__(
self,
name: str,
version: Optional[str] = None,
schema_url: Optional[str] = None,
):
self._name = name
self._version = version
if schema_url is None:
schema_url = ""
self._schema_url = schema_url
def __repr__(self):
return f"{type(self).__name__}({self._name}, {self._version}, {self._schema_url})"
def __hash__(self):
return hash((self._name, self._version, self._schema_url))
def __eq__(self, value):
return type(value) is type(self) and (
self._name,
self._version,
self._schema_url,
) == (value._name, value._version, value._schema_url)
def __lt__(self, value):
if type(value) is not type(self):
return NotImplemented
return (self._name, self._version, self._schema_url) < (
value._name,
value._version,
value._schema_url,
)
@property
def schema_url(self) -> Optional[str]:
return self._schema_url
@property
def version(self) -> Optional[str]:
return self._version
@property
def name(self) -> str:
return self._name
class InstrumentationScope:
"""A logical unit of the application code with which the emitted telemetry can be
associated.
See `opentelemetry.trace.TracerProvider.get_tracer` for the meaning of these
properties.
"""
__slots__ = ("_name", "_version", "_schema_url")
def __init__(
self,
name: str,
version: Optional[str] = None,
schema_url: Optional[str] = None,
) -> None:
self._name = name
self._version = version
if schema_url is None:
schema_url = ""
self._schema_url = schema_url
def __repr__(self) -> str:
return f"{type(self).__name__}({self._name}, {self._version}, {self._schema_url})"
def __hash__(self) -> int:
return hash((self._name, self._version, self._schema_url))
def __eq__(self, value: object) -> bool:
if not isinstance(value, InstrumentationScope):
return NotImplemented
return (self._name, self._version, self._schema_url) == (
value._name,
value._version,
value._schema_url,
)
def __lt__(self, value: object) -> bool:
if not isinstance(value, InstrumentationScope):
return NotImplemented
return (self._name, self._version, self._schema_url) < (
value._name,
value._version,
value._schema_url,
)
@property
def schema_url(self) -> Optional[str]:
return self._schema_url
@property
def version(self) -> Optional[str]:
return self._version
@property
def name(self) -> str:
return self._name
def METHOD_NAME(self, indent=4) -> str:
return dumps(
{
"name": self._name,
"version": self._version,
"schema_url": self._schema_url,
},
indent=indent,
)
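# A minimal usage sketch with placeholder values:
#
#   scope = InstrumentationScope("my.library", "1.2.3", "https://example.com/schema")
#   print(scope.METHOD_NAME(indent=2))
#   # prints a JSON object with the "name", "version" and "schema_url" fields.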
|
2,436 |
policy name empty
|
# -*- coding: utf-8 -*-
#
# LinOTP - the open source solution for two factor authentication
# Copyright (C) 2010-2019 KeyIdentity GmbH
# Copyright (C) 2019- netgo software GmbH
#
# This file is part of LinOTP server.
#
# This program is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public
# License, version 3, as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the
# GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# E-mail: [email protected]
# Contact: www.linotp.org
# Support: www.linotp.de
#
"""Contains Policy class"""
from selenium.common.exceptions import (
NoSuchElementException,
StaleElementReferenceException,
)
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from .helper import fill_form_element, select
from .manage_elements import ManageTab
class PolicyManager(ManageTab):
policy_entries_css_selector = "table#policy_table > tbody > tr"
policy_delete_button_id = "button_policy_delete"
TAB_INDEX = 3
def clear_policies_via_api(self):
"""
Get all policies via API call
and delete all by policy name.
"""
# Get the policies in json format
policies = self.manage.admin_api_call("system/getPolicy")
if policies:
for curr_policy in policies:
self.manage.admin_api_call(
"system/delPolicy", {"name": policies[curr_policy]["name"]}
)
def clear_policies(self):
self.open_tab()
while True:
policies = self.driver.find_elements(
By.CSS_SELECTOR, self.policy_entries_css_selector
)
if not policies:
break
self.delete_policy(policies[0])
def delete_policy(self, p):
"""
Select and delete the given policy line
p: WebElement of policy line
"""
# Clear policy name field
policy_name_element = self.find_by_id("policy_name")
policy_name_element.clear()
def METHOD_NAME(_):
return policy_name_element.get_attribute("value") == ""
WebDriverWait(self.driver, self.testcase.backend_wait_time).until(
METHOD_NAME
)
# Select policy to delete
p.click()
WebDriverWait(self.driver, self.testcase.backend_wait_time).until_not(
METHOD_NAME
)
# Delete the policy
self.find_by_id(self.policy_delete_button_id).click()
self.wait_for_grid_loading()
info = self.manage.alert_box_handler
info.clear_messages()
def policy_still_visible(driver):
try:
return p.is_displayed()
except StaleElementReferenceException:
return False
WebDriverWait(self.driver, self.testcase.backend_wait_time).until_not(
policy_still_visible
)
assert info.check_last_message("Policy deleted.")
def set_new_policy(self, policy):
"""
Create a policy using the UI elements
"""
self.open_tab()
driver = self.driver
policy_active_cb = self.find_by_id("policy_active")
if not policy_active_cb.is_selected():
policy_active_cb.click()
fill_form_element(driver, "policy_name", policy.name)
scope_select = self.find_by_id("policy_scope_combo")
select(driver, scope_select, policy.scope)
fill_form_element(driver, "policy_action", policy.action)
fill_form_element(driver, "policy_realm", policy.realm)
fill_form_element(driver, "policy_name", policy.name)
fill_form_element(driver, "policy_user", policy.user)
self.find_by_id("button_policy_add").click()
self.wait_for_waiting_finished()
class Policy(object):
"""Creates a LinOTP Policy"""
def __init__(self, manage_ui, name, scope, action, realm, user="*"):
"""Opens the LinOTP manage interface and creates a Policy"""
self.name = name
self.scope = scope
self.action = action
self.realm = realm
self.user = user
manage_ui.policy_view.set_new_policy(self)
|
2,437 |
parse arguments
|
#!/usr/bin/env python
import argparse
import importlib
import logging
import re
import sys
from pathlib import Path
from typing import Iterator, List, Optional, Set, Type
from streamlink import Streamlink
from streamlink.logger import basicConfig
# add root dir to sys path, so the "tests" package can be imported
sys.path.append(str(Path(__file__).parent.parent))
from tests.plugins import PluginCanHandleUrl, TUrlOrNamedUrl # noqa: E402
def METHOD_NAME() -> argparse.Namespace:
parser = argparse.ArgumentParser()
parser.add_argument(
"plugin",
help="The plugin name",
)
parser.add_argument(
"-l",
"--loglevel",
choices=["debug", "info", "warning", "error"],
default="info",
metavar="LEVEL",
help="The log level",
)
parser.add_argument(
"--color",
choices=["auto", "always", "never"],
default="auto",
metavar="WHEN",
help="Display errors in red color",
)
parser.add_argument(
"-n",
"--dry-run",
action="store_true",
help="Only print the plugin's test URLs",
)
parser.add_argument(
"-i",
"--ignore",
action="append",
default=[],
metavar="REGEX",
help="A regex for ignoring specific URLs. Can be set multiple times",
)
return parser.parse_args()
COLOR_RESET = "\033[0m"
COLOR_RED = "\033[0;31m"
class LoggingFormatter(logging.Formatter):
def __init__(self, color="auto", *args, **kwargs):
super().__init__(*args, **kwargs)
self.color = color
def formatMessage(self, record: logging.LogRecord) -> str:
if record.levelno < logging.ERROR:
template = ":: {message}"
elif self.color == "never" or self.color == "auto" and not sys.stdout.isatty():
template = "!! {message}"
else:
template = f"{COLOR_RED}!! {{message}}{COLOR_RESET}"
return template.format(message=super().formatMessage(record))
class PluginUrlTester:
def __init__(self) -> None:
args = METHOD_NAME()
self.pluginname: str = args.plugin.lower()
self.dry_run: bool = args.dry_run
self.loglevel: str = str(args.loglevel).upper()
self.logcolor: str = args.color
self.logger: logging.Logger = self._get_logger()
self.ignorelist: List[str] = args.ignore or []
self.urls: Set[str] = set()
def _get_logger(self) -> logging.Logger:
logger = logging.getLogger(__name__)
logger.setLevel(self.loglevel)
handler = logging.StreamHandler(stream=sys.stdout)
formatter = LoggingFormatter(fmt="{message}", style="{", color=self.logcolor)
handler.setFormatter(formatter)
logger.addHandler(handler)
basicConfig(
stream=sys.stdout,
# indent output of the StreamlinkLogger
format=":::: {message}",
# set the StreamlinkLogger's level to the same level as our logger
level=self.loglevel,
capture_warnings=True,
)
return logger
def add_url(self, item: TUrlOrNamedUrl) -> None:
url: str = item[1] if isinstance(item, tuple) else item
if not any(re.search(ignore, url) for ignore in self.ignorelist):
self.urls.add(url)
def iter_urls(self) -> Iterator[TUrlOrNamedUrl]:
if not re.match(r"^\w+$", self.pluginname):
raise ValueError("Missing plugin name")
try:
module = importlib.import_module(f"tests.plugins.test_{self.pluginname}")
except Exception as err:
raise ImportError(f"Could not load test module of plugin {self.pluginname}: {err}") from err
PluginCanHandleUrlSubclass: Optional[Type[PluginCanHandleUrl]] = next(
(
item
for item in module.__dict__.values()
if type(item) is type and item is not PluginCanHandleUrl and issubclass(item, PluginCanHandleUrl)
),
None,
)
if not PluginCanHandleUrlSubclass:
raise RuntimeError("Could not find URL test class inheriting from PluginCanHandleURL")
yield from PluginCanHandleUrlSubclass.urls_all()
def run(self) -> int:
code = 0
session = Streamlink()
for url in sorted(self.urls):
self.logger.info(f"Finding streams for URL: {url}")
# noinspection PyBroadException
try:
pluginname, Pluginclass, resolved_url = session.resolve_url(url)
except Exception:
self.logger.error("Error while finding plugin")
code = 1
continue
if pluginname != self.pluginname:
self.logger.error("URL<->Plugin mismatch")
code = 1
continue
# noinspection PyBroadException
try:
plugininst = Pluginclass(session, url)
streams = plugininst.streams()
except Exception:
self.logger.error("Error while fetching streams")
code = 1
continue
if not streams:
self.logger.error("No streams found")
code = 1
else:
self.logger.info(f"Found streams: {', '.join(streams.keys())}")
return code
def main(self) -> int:
try:
for item in self.iter_urls():
self.add_url(item)
if self.dry_run:
for url in sorted(self.urls):
self.logger.info(url)
return 0
return self.run()
except KeyboardInterrupt:
return 1
except Exception as err:
self.logger.error(str(err))
return 1
if __name__ == "__main__":
sys.exit(PluginUrlTester().main())
|
2,438 |
get position
|
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
try:
from msvcrt import get_osfhandle
except ImportError:
def get_osfhandle(_):
raise OSError("This isn't windows!")
from . import win32
# from wincon.h
class WinColor(object):
BLACK = 0
BLUE = 1
GREEN = 2
CYAN = 3
RED = 4
MAGENTA = 5
YELLOW = 6
GREY = 7
# from wincon.h
class WinStyle(object):
NORMAL = 0x00 # dim text, dim background
BRIGHT = 0x08 # bright text, dim background
BRIGHT_BACKGROUND = 0x80 # dim text, bright background
class WinTerm(object):
def __init__(self):
self._default = win32.GetConsoleScreenBufferInfo(win32.STDOUT).wAttributes
self.set_attrs(self._default)
self._default_fore = self._fore
self._default_back = self._back
self._default_style = self._style
# In order to emulate LIGHT_EX in windows, we borrow the BRIGHT style.
# So that LIGHT_EX colors and BRIGHT style do not clobber each other,
# we track them separately, since LIGHT_EX is overwritten by Fore/Back
# and BRIGHT is overwritten by Style codes.
self._light = 0
def get_attrs(self):
return self._fore + self._back * 16 + (self._style | self._light)
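# Worked example: fore=WinColor.RED (4), back=WinColor.BLUE (1) and style=WinStyle.BRIGHT
# (0x08) combine to 4 + 1 * 16 + 0x08 == 0x1C.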
def set_attrs(self, value):
self._fore = value & 7
self._back = (value >> 4) & 7
self._style = value & (WinStyle.BRIGHT | WinStyle.BRIGHT_BACKGROUND)
def reset_all(self, on_stderr=None):
self.set_attrs(self._default)
self.set_console(attrs=self._default)
self._light = 0
def fore(self, fore=None, light=False, on_stderr=False):
if fore is None:
fore = self._default_fore
self._fore = fore
# Emulate LIGHT_EX with BRIGHT Style
if light:
self._light |= WinStyle.BRIGHT
else:
self._light &= ~WinStyle.BRIGHT
self.set_console(on_stderr=on_stderr)
def back(self, back=None, light=False, on_stderr=False):
if back is None:
back = self._default_back
self._back = back
# Emulate LIGHT_EX with BRIGHT_BACKGROUND Style
if light:
self._light |= WinStyle.BRIGHT_BACKGROUND
else:
self._light &= ~WinStyle.BRIGHT_BACKGROUND
self.set_console(on_stderr=on_stderr)
def style(self, style=None, on_stderr=False):
if style is None:
style = self._default_style
self._style = style
self.set_console(on_stderr=on_stderr)
def set_console(self, attrs=None, on_stderr=False):
if attrs is None:
attrs = self.get_attrs()
handle = win32.STDOUT
if on_stderr:
handle = win32.STDERR
win32.SetConsoleTextAttribute(handle, attrs)
def METHOD_NAME(self, handle):
position = win32.GetConsoleScreenBufferInfo(handle).dwCursorPosition
# Because Windows coordinates are 0-based,
# and win32.SetConsoleCursorPosition expects 1-based.
position.X += 1
position.Y += 1
return position
def set_cursor_position(self, position=None, on_stderr=False):
if position is None:
# I'm not currently tracking the position, so there is no default.
# position = self.get_position()
return
handle = win32.STDOUT
if on_stderr:
handle = win32.STDERR
win32.SetConsoleCursorPosition(handle, position)
def cursor_adjust(self, x, y, on_stderr=False):
handle = win32.STDOUT
if on_stderr:
handle = win32.STDERR
position = self.METHOD_NAME(handle)
adjusted_position = (position.Y + y, position.X + x)
win32.SetConsoleCursorPosition(handle, adjusted_position, adjust=False)
def erase_screen(self, mode=0, on_stderr=False):
# 0 should clear from the cursor to the end of the screen.
# 1 should clear from the cursor to the beginning of the screen.
# 2 should clear the entire screen, and move cursor to (1,1)
handle = win32.STDOUT
if on_stderr:
handle = win32.STDERR
csbi = win32.GetConsoleScreenBufferInfo(handle)
# get the number of character cells in the current buffer
cells_in_screen = csbi.dwSize.X * csbi.dwSize.Y
# get number of character cells before current cursor position
cells_before_cursor = csbi.dwSize.X * csbi.dwCursorPosition.Y + csbi.dwCursorPosition.X
if mode == 0:
from_coord = csbi.dwCursorPosition
cells_to_erase = cells_in_screen - cells_before_cursor
elif mode == 1:
from_coord = win32.COORD(0, 0)
cells_to_erase = cells_before_cursor
elif mode == 2:
from_coord = win32.COORD(0, 0)
cells_to_erase = cells_in_screen
else:
# invalid mode
return
# fill the entire screen with blanks
win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord)
# now set the buffer's attributes accordingly
win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord)
if mode == 2:
# put the cursor where needed
win32.SetConsoleCursorPosition(handle, (1, 1))
def erase_line(self, mode=0, on_stderr=False):
# 0 should clear from the cursor to the end of the line.
# 1 should clear from the cursor to the beginning of the line.
# 2 should clear the entire line.
handle = win32.STDOUT
if on_stderr:
handle = win32.STDERR
csbi = win32.GetConsoleScreenBufferInfo(handle)
if mode == 0:
from_coord = csbi.dwCursorPosition
cells_to_erase = csbi.dwSize.X - csbi.dwCursorPosition.X
elif mode == 1:
from_coord = win32.COORD(0, csbi.dwCursorPosition.Y)
cells_to_erase = csbi.dwCursorPosition.X
elif mode == 2:
from_coord = win32.COORD(0, csbi.dwCursorPosition.Y)
cells_to_erase = csbi.dwSize.X
else:
# invalid mode
return
# fill the entire screen with blanks
win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord)
# now set the buffer's attributes accordingly
win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord)
def set_title(self, title):
win32.SetConsoleTitle(title)
def enable_vt_processing(fd):
if win32.windll is None or not win32.winapi_test():
return False
try:
handle = get_osfhandle(fd)
mode = win32.GetConsoleMode(handle)
win32.SetConsoleMode(
handle,
mode | win32.ENABLE_VIRTUAL_TERMINAL_PROCESSING,
)
mode = win32.GetConsoleMode(handle)
if mode & win32.ENABLE_VIRTUAL_TERMINAL_PROCESSING:
return True
# Can get TypeError in testsuite where 'fd' is a Mock()
except (OSError, TypeError):
return False
|
2,439 |
build
|
from conan import ConanFile
from conan.tools.apple import fix_apple_shared_install_name
from conan.tools.cmake import CMake, CMakeToolchain, cmake_layout
from conan.tools.files import apply_conandata_patches, copy, export_conandata_patches, get, rm, rmdir
from conan.tools.microsoft import is_msvc, is_msvc_static_runtime
import os
required_conan_version = ">=1.54.0"
class OpenclIcdLoaderConan(ConanFile):
name = "opencl-icd-loader"
description = "OpenCL ICD Loader."
license = "Apache-2.0"
topics = ("opencl", "khronos", "parallel", "icd-loader")
homepage = "https://github.com/KhronosGroup/OpenCL-ICD-Loader"
url = "https://github.com/conan-io/conan-center-index"
package_type = "library"
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
"disable_openclon12": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
"disable_openclon12": False,
}
def export_sources(self):
export_conandata_patches(self)
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
else:
del self.options.disable_openclon12
def configure(self):
if self.options.shared:
self.options.rm_safe("fPIC")
self.settings.rm_safe("compiler.cppstd")
self.settings.rm_safe("compiler.libcxx")
def layout(self):
cmake_layout(self, src_folder="src")
def requirements(self):
self.requires(f"opencl-headers/{self.version}", transitive_headers=True)
self.requires(f"opencl-clhpp-headers/{self.version}", transitive_headers=True)
def source(self):
get(self, **self.conan_data["sources"][self.version], strip_root=True)
def generate(self):
tc = CMakeToolchain(self)
opencl_headers_includedirs = self.dependencies["opencl-headers"].cpp_info.aggregated_components().includedirs
tc.cache_variables["OPENCL_ICD_LOADER_HEADERS_DIR"] = ";".join(opencl_headers_includedirs)
if is_msvc(self):
tc.variables["USE_DYNAMIC_VCXX_RUNTIME"] = not is_msvc_static_runtime(self)
tc.variables["OPENCL_ICD_LOADER_PIC"] = self.options.get_safe("fPIC", True)
tc.variables["OPENCL_ICD_LOADER_BUILD_TESTING"] = False
if self.settings.os == "Windows":
tc.variables["OPENCL_ICD_LOADER_DISABLE_OPENCLON12"] = self.options.disable_openclon12
tc.generate()
def METHOD_NAME(self):
apply_conandata_patches(self)
cmake = CMake(self)
cmake.configure()
cmake.METHOD_NAME()
def package(self):
copy(self, "LICENSE", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses"))
cmake = CMake(self)
cmake.install()
rm(self, "*.pdb", os.path.join(self.package_folder, "bin"))
rmdir(self, os.path.join(self.package_folder, "share"))
rmdir(self, os.path.join(self.package_folder, "lib", "pkgconfig"))
fix_apple_shared_install_name(self)
def package_info(self):
self.cpp_info.set_property("cmake_find_mode", "both")
self.cpp_info.set_property("cmake_module_file_name", "OpenCL")
self.cpp_info.set_property("cmake_file_name", "OpenCLICDLoader")
self.cpp_info.set_property("cmake_target_name", "OpenCL::OpenCL")
self.cpp_info.includedirs = []
self.cpp_info.libs = ["OpenCL"]
if not self.options.shared:
if self.settings.os in ["Linux", "FreeBSD"]:
self.cpp_info.system_libs = ["dl", "pthread"]
elif self.settings.os == "Windows":
self.cpp_info.system_libs = ["cfgmgr32", "runtimeobject"]
# TODO: to remove in conan v2
self.cpp_info.filenames["cmake_find_package"] = "OpenCL"
self.cpp_info.filenames["cmake_find_package_multi"] = "OpenCLICDLoader"
self.cpp_info.names["cmake_find_package"] = "OpenCL"
self.cpp_info.names["cmake_find_package_multi"] = "OpenCL"
|
2,440 |
day of week
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from typing import Any, Callable, Dict, List
import numpy as np
import pandas as pd
from pandas.tseries import offsets
from pandas.tseries.frequencies import to_offset
from pydantic import BaseModel
TimeFeature = Callable[[pd.PeriodIndex], np.ndarray]
def _normalize(xs, num: float):
"""Scale values of ``xs`` to [-0.5, 0.5]."""
return np.asarray(xs) / (num - 1) - 0.5
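# Worked example: with num=24 (hours of the day), _normalize(0, num=24) == -0.5 and
# _normalize(23, num=24) == 0.5, so each encoded feature spans exactly [-0.5, 0.5].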
def second_of_minute(index: pd.PeriodIndex) -> np.ndarray:
"""
Second of minute encoded as value between [-0.5, 0.5]
"""
return _normalize(index.second, num=60)
def second_of_minute_index(index: pd.PeriodIndex) -> np.ndarray:
"""
Second of minute encoded as zero-based index, between 0 and 59.
"""
return np.asarray(index.second)
def minute_of_hour(index: pd.PeriodIndex) -> np.ndarray:
"""
Minute of hour encoded as value between [-0.5, 0.5]
"""
return _normalize(index.minute, num=60)
def minute_of_hour_index(index: pd.PeriodIndex) -> np.ndarray:
"""
Minute of hour encoded as zero-based index, between 0 and 59.
"""
return np.asarray(index.minute)
def hour_of_day(index: pd.PeriodIndex) -> np.ndarray:
"""
Hour of day encoded as value between [-0.5, 0.5]
"""
return _normalize(index.hour, num=24)
def hour_of_day_index(index: pd.PeriodIndex) -> np.ndarray:
"""
Hour of day encoded as zero-based index, between 0 and 23.
"""
return np.asarray(index.hour)
def METHOD_NAME(index: pd.PeriodIndex) -> np.ndarray:
"""
Day of week encoded as value between [-0.5, 0.5]
"""
return _normalize(index.dayofweek, num=7)
def day_of_week_index(index: pd.PeriodIndex) -> np.ndarray:
"""
Day of week encoded as zero-based index, between 0 and 6.
"""
return np.asarray(index.dayofweek)
def day_of_month(index: pd.PeriodIndex) -> np.ndarray:
"""
Day of month encoded as value between [-0.5, 0.5]
"""
# first day of month is `1`, thus we deduct one
return _normalize(index.day - 1, num=31)
def day_of_month_index(index: pd.PeriodIndex) -> np.ndarray:
"""
Day of month encoded as zero-based index, between 0 and 30.
"""
return np.asarray(index.day) - 1
def day_of_year(index: pd.PeriodIndex) -> np.ndarray:
"""
Day of year encoded as value between [-0.5, 0.5]
"""
return _normalize(index.dayofyear - 1, num=366)
def day_of_year_index(index: pd.PeriodIndex) -> np.ndarray:
"""
Day of year encoded as zero-based index, between 0 and 365.
"""
return np.asarray(index.dayofyear) - 1
def month_of_year(index: pd.PeriodIndex) -> np.ndarray:
"""
Month of year encoded as value between [-0.5, 0.5]
"""
return _normalize(index.month - 1, num=12)
def month_of_year_index(index: pd.PeriodIndex) -> np.ndarray:
"""
Month of year encoded as zero-based index, between 0 and 11.
"""
return np.asarray(index.month) - 1
def week_of_year(index: pd.PeriodIndex) -> np.ndarray:
"""
Week of year encoded as value between [-0.5, 0.5]
"""
# TODO:
# * pandas >= 1.1 does not support `.week`
# * pandas == 1.0 does not support `.isocalendar()`
# as soon as we drop support for `pandas == 1.0`, we should remove this
try:
week = index.isocalendar().week
except AttributeError:
week = index.week
return _normalize(week - 1, num=53)
def week_of_year_index(index: pd.PeriodIndex) -> np.ndarray:
"""
Week of year encoded as zero-based index, between 0 and 52.
"""
# TODO:
# * pandas >= 1.1 does not support `.week`
# * pandas == 1.0 does not support `.isocalendar()`
# as soon as we drop support for `pandas == 1.0`, we should remove this
try:
week = index.isocalendar().week
except AttributeError:
week = index.week
return np.asarray(week) - 1
class Constant(BaseModel):
"""
Constant time feature using a predefined value.
"""
value: float = 0.0
def __call__(self, index: pd.PeriodIndex) -> np.ndarray:
return np.full(index.shape, self.value)
def norm_freq_str(freq_str: str) -> str:
base_freq = freq_str.split("-")[0]
# Pandas has start and end frequencies, e.g `AS` and `A` for yearly start
# and yearly end frequencies. We don't make that distinction and instead
# rely only on the end frequencies which don't have the `S` prefix.
# Note: Secondly ("S") frequency exists, where we don't want to remove the
# "S"!
if len(base_freq) >= 2 and base_freq.endswith("S"):
return base_freq[:-1]
return base_freq
def time_features_from_frequency_str(freq_str: str) -> List[TimeFeature]:
"""
Returns a list of time features that will be appropriate for the given
frequency string.
Parameters
----------
freq_str
Frequency string of the form [multiple][granularity] such as "12H",
"5min", "1D" etc.
"""
features_by_offsets: Dict[Any, List[TimeFeature]] = {
offsets.YearBegin: [],
offsets.YearEnd: [],
offsets.QuarterBegin: [month_of_year],
offsets.QuarterEnd: [month_of_year],
offsets.MonthBegin: [month_of_year],
offsets.MonthEnd: [month_of_year],
offsets.Week: [day_of_month, week_of_year],
offsets.Day: [METHOD_NAME, day_of_month, day_of_year],
offsets.BusinessDay: [METHOD_NAME, day_of_month, day_of_year],
offsets.Hour: [hour_of_day, METHOD_NAME, day_of_month, day_of_year],
offsets.Minute: [
minute_of_hour,
hour_of_day,
METHOD_NAME,
day_of_month,
day_of_year,
],
offsets.Second: [
second_of_minute,
minute_of_hour,
hour_of_day,
METHOD_NAME,
day_of_month,
day_of_year,
],
}
offset = to_offset(freq_str)
for offset_type, features in features_by_offsets.items():
if isinstance(offset, offset_type):
return features
supported_freq_msg = f"""
Unsupported frequency {freq_str}
The following frequencies are supported:
Y - yearly
alias: A
Q - quarterly
M - monthly
W - weekly
D - daily
B - business days
H - hourly
T - minutely
alias: min
S - secondly
"""
raise RuntimeError(supported_freq_msg)
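# A minimal usage sketch, assuming an hourly PeriodIndex:
#
#   index = pd.period_range("2021-01-01", periods=24, freq="H")
#   features = time_features_from_frequency_str("H")
#   matrix = np.stack([feat(index) for feat in features])
#   # matrix has shape (4, 24): hour_of_day, METHOD_NAME (day of week), day_of_month
#   # and day_of_year, each scaled to [-0.5, 0.5].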
|
2,441 |
set up
|
# coding: utf-8
# Copyright (C) 1994-2021 Altair Engineering, Inc.
# For more information, contact Altair at www.altair.com.
#
# This file is part of both the OpenPBS software ("OpenPBS")
# and the PBS Professional ("PBS Pro") software.
#
# Open Source License Information:
#
# OpenPBS is free software. You can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# OpenPBS is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public
# License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Commercial License Information:
#
# PBS Pro is commercially licensed software that shares a common core with
# the OpenPBS software. For a copy of the commercial license terms and
# conditions, go to: (http://www.pbspro.com/agreement.html) or contact the
# Altair Legal Department.
#
# Altair's dual-license business model allows companies, individuals, and
# organizations to create proprietary derivative works of OpenPBS and
# distribute them - whether embedded or bundled with other software -
# under a commercial license agreement.
#
# Use of Altair's trademarks, including but not limited to "PBS™",
# "OpenPBS®", "PBS Professional®", and "PBS Pro™" and Altair's logos is
# subject to Altair's trademark licensing policies.
from tests.performance import *
class StandingResvQuasihang(TestPerformance):
"""
This test suite aims at testing the quasihang caused by a MoM HUP
when there is a standing reservation with more than a 1000 instances.
Without the fix, the server takes a lot of time to respond to a client.
With the fix, the amount of time is significantly reduced.
"""
def METHOD_NAME(self):
TestPerformance.METHOD_NAME(self)
# Set PBS_TZID, needed for standing reservation.
if 'PBS_TZID' in self.conf:
self.tzone = self.conf['PBS_TZID']
elif 'PBS_TZID' in os.environ:
self.tzone = os.environ['PBS_TZID']
else:
self.logger.info('Timezone not set, using Asia/Kolkata')
self.tzone = 'Asia/Kolkata'
a = {'resources_available.ncpus': 2}
self.mom.create_vnodes(a, num=2000, usenatvnode=True)
@timeout(6000)
def test_time_for_stat_after_mom_hup(self):
"""
This test case submits a standing reservation with 2000 instances,
HUPS the MoM, stats the reservation and finds the amount of time
the server took to respond.
The test case is not designed to pass/fail on builds with/without
the fix.
"""
start = int(time.time()) + 3600
attrs = {'Resource_List.select': "64:ncpus=2",
'reserve_start': start,
'reserve_duration': 2000,
'reserve_timezone': self.tzone,
'reserve_rrule': "FREQ=HOURLY;BYHOUR=1,2,3,4,5;COUNT=2000"}
rid = self.server.submit(Reservation(TEST_USER, attrs))
attrs = {'reserve_state': (MATCH_RE, 'RESV_CONFIRMED|2')}
# it takes a while for all the instances of the reservation to get
# confirmed, hence the interval of 5 seconds.
self.server.expect(RESV, attrs, id=rid, interval=5)
self.mom.signal('-HUP')
# sleep for 5 seconds so that the HUP takes its effect.
time.sleep(5)
now1 = int(time.time())
attrs = {'reserve_state': (MATCH_RE, 'RESV_CONFIRMED|2')}
self.server.expect(RESV, attrs, id=rid)
now2 = int(time.time())
self.logger.info("pbs_rstat took %d seconds to return\n",
(now2 - now1))
self.perf_test_result((now2 - now1), "pbs_rstat_return_time", "sec")
|
2,442 |
test token is none then set
|
# Copyright Contributors to the Packit project.
# SPDX-License-Identifier: MIT
import os
from datetime import datetime
import pytest
from requre.utils import get_datafile_filename
from requre.online_replacing import record_requests_for_all_methods
from tests.integration.pagure.base import PagureTests
from ogr import PagureService
from ogr.abstract import IssueStatus, CommitStatus
from ogr.exceptions import PagureAPIException
@record_requests_for_all_methods()
class PagureProjectTokenCommands(PagureTests):
def setUp(self):
super().setUp()
self.token = os.environ.get("PAGURE_OGR_TEST_TOKEN", "")
if not get_datafile_filename(obj=self) and (not self.token):
raise OSError("please set PAGURE_OGR_TEST_TOKEN env variables")
self._service = None
self._user = None
self._ogr_project = None
self._ogr_fork = None
@property
def service(self):
if not self._service:
self._service = PagureService(
token=self.token, instance_url="https://pagure.io"
)
return self._service
def test_issue_permissions(self):
owners = self.ogr_project.who_can_close_issue()
assert "lachmanfrantisek" in owners
issue = self.ogr_project.get_issue(2)
assert issue.can_close("lachmanfrantisek")
def test_issue_comments(self):
issue_comments = self.ogr_project.get_issue(3)._get_all_comments()
assert issue_comments
assert len(issue_comments) == 4
assert issue_comments[0].body.startswith("test")
assert issue_comments[1].body.startswith("tests")
def test_issue_info(self):
issue_info = self.ogr_project.get_issue(issue_id=2)
assert issue_info
assert issue_info.title.startswith("Test 1")
assert issue_info.status == IssueStatus.closed
def test_issue_comments_reversed(self):
issue_comments = self.ogr_project.get_issue(3).get_comments(reverse=True)
assert len(issue_comments) == 4
assert issue_comments[0].body.startswith("regex")
def test_issue_comments_regex(self):
issue_comments = self.ogr_project.get_issue(3).get_comments(
filter_regex="regex"
)
assert len(issue_comments) == 2
assert issue_comments[0].body.startswith("let's")
def test_issue_comments_regex_reversed(self):
issue_comments = self.ogr_project.get_issue(3).get_comments(
filter_regex="regex", reverse=True
)
assert len(issue_comments) == 2
assert issue_comments[0].body.startswith("regex")
def test_issue_update_title(self):
issue = self.ogr_project.get_issue(3)
old_title, old_description = issue.title, issue.description
issue.title = "testing title"
assert (issue.title, issue.description) == ("testing title", old_description)
issue.title = old_title
assert (issue.title, issue.description) == (old_title, old_description)
def test_issue_update_description(self):
issue = self.ogr_project.get_issue(3)
old_title, old_description = issue.title, issue.description
issue.description = "testing description"
assert (issue.title, issue.description) == (old_title, "testing description")
issue.description = old_description
assert (issue.title, issue.description) == (old_title, old_description)
def test_update_pr_info(self):
pr_info = self.ogr_project.get_pr(pr_id=4)
orig_title = pr_info.title
orig_description = pr_info.description
self.ogr_project.get_pr(4).update_info(
title="changed", description="changed description"
)
pr_info = self.ogr_project.get_pr(pr_id=4)
assert pr_info.title == "changed"
assert pr_info.description == "changed description"
self.ogr_project.get_pr(4).update_info(
title=orig_title, description=orig_description
)
pr_info = self.ogr_project.get_pr(pr_id=4)
assert pr_info.title == orig_title
assert pr_info.description == orig_description
def test_pr_setters(self):
pr = self.ogr_project.get_pr(pr_id=6)
old_title = pr.title
pr.title = "test title"
assert pr.title != old_title
assert pr.title == "test title"
pr.title = old_title
assert pr.title == old_title
old_description = pr.description
pr.description = "test description"
assert pr.description != old_description
assert pr.description == "test description"
pr.description = old_description
assert pr.description == old_description
def test_pr_comments_author_regex(self):
comments = self.ogr_project.get_pr(4).get_comments(
filter_regex="^regex", author="mfocko"
)
assert len(comments) == 1
assert comments[0].body.endswith("test")
def test_pr_comments_author(self):
comments = self.ogr_project.get_pr(4).get_comments(author="lachmanfrantisek")
assert len(comments) == 0
def test_issue_comments_author_regex(self):
comments = self.ogr_project.get_issue(3).get_comments(
filter_regex="^test[s]?$", author="mfocko"
)
assert len(comments) == 2
assert comments[0].body == "test"
assert comments[1].body == "tests"
def test_issue_comments_author(self):
comments = self.ogr_project.get_issue(3).get_comments(author="lachmanfrantisek")
assert len(comments) == 0
def test_pr_status(self):
pr = self.ogr_project.get_pr(pr_id=4)
self.ogr_project.set_commit_status(
commit=pr.head_commit,
state=CommitStatus.success,
target_url="https://pagure.io/ogr-tests/pull-request/4",
description="not failed test",
context="test",
)
statuses = pr.get_statuses()
assert statuses
assert len(statuses) >= 0
assert statuses[-1].state == CommitStatus.success
# What timezone?
assert statuses[-1].created >= datetime(
year=2020,
month=8,
day=31,
hour=1,
minute=0,
second=0,
)
assert statuses[-1].edited >= datetime(
year=2020, month=8, day=31, hour=1, minute=0, second=0
)
def test_is_private(self):
self.service.instance_url = "https://src.fedoraproject.org"
assert not self.ogr_project.is_private()
def METHOD_NAME(self):
token = self.service._token
self.service.change_token("")
try:
with pytest.raises(PagureAPIException) as exc:
self.service.user.get_username()
assert "Invalid or expired token" in str(exc)
finally:
self.service.change_token(token)
self.service.user.get_username()
self.service.user.get_username() # 2nd identical call
def test_create_release(self):
self.ogr_project.create_release(
tag="v0",
name="v0",
message="# v0\n\n• added README",
ref="2988640e03ddee8385a2acb827a36c8e50b1be1a",
)
assert "v0" in map(
lambda release: release.tag_name, self.ogr_project.get_releases()
)
|
2,443 |
strip function space
|
# -*- coding: utf-8 -*-
"""Algorithm for replacing form arguments with 'stripped' versions where any
data-carrying objects have been extracted to a mapping."""
from ufl.classes import Form, Integral
from ufl.classes import Argument, Coefficient, Constant
from ufl.classes import FunctionSpace, TensorProductFunctionSpace, MixedFunctionSpace
from ufl.classes import Mesh, MeshView, TensorProductMesh
from ufl.algorithms.replace import replace
from ufl.corealg.map_dag import map_expr_dag
from ufl.corealg.multifunction import MultiFunction
class TerminalStripper(MultiFunction):
def __init__(self):
super().__init__()
self.mapping = {}
def argument(self, o):
o_new = Argument(METHOD_NAME(o.ufl_function_space()),
o.number(), o.part())
return self.mapping.setdefault(o, o_new)
def coefficient(self, o):
o_new = Coefficient(METHOD_NAME(o.ufl_function_space()),
o.count())
return self.mapping.setdefault(o, o_new)
def constant(self, o):
o_new = Constant(strip_domain(o.ufl_domain()), o.ufl_shape,
o.count())
return self.mapping.setdefault(o, o_new)
expr = MultiFunction.reuse_if_untouched
def strip_terminal_data(o):
"""Return a new form where all terminals have been replaced by UFL-only
equivalents.
:arg o: The object to be stripped. This must either be a :class:`~.Form`
or :class:`~.Integral`.
:returns: A 2-tuple containing an equivalent UFL-only object and a mapping
allowing the original form to be reconstructed using
:func:`replace_terminal_data`.
This function is useful for forms containing augmented UFL objects that
hold references to large data structures. These objects are extracted
into the mapping, allowing the form to be cached without leaking memory.
"""
# We need to keep track of two maps because integrals store references to the
# domain and ``replace`` expects only a mapping containing ``Expr`` objects.
if isinstance(o, Form):
integrals = []
expr_map = {}
domain_map = {}
for integral in o.integrals():
itg, (emap, dmap) = strip_terminal_data(integral)
integrals.append(itg)
expr_map.update(emap)
domain_map.update(dmap)
return Form(integrals), (expr_map, domain_map)
elif isinstance(o, Integral):
handler = TerminalStripper()
integrand = map_expr_dag(handler, o.integrand())
domain = strip_domain(o.ufl_domain())
# invert the mapping so it can be passed straight into replace_terminal_data
expr_map = {v: k for k, v in handler.mapping.items()}
domain_map = {domain: o.ufl_domain()}
return o.reconstruct(integrand, domain=domain), (expr_map, domain_map)
else:
raise ValueError("Only Form or Integral inputs expected")
def replace_terminal_data(o, mapping):
"""Return a new form where the terminals have been replaced using the
provided mapping.
:arg o: The object to have its terminals replaced. This must either be a
:class:`~.Form` or :class:`~.Integral`.
:arg mapping: A mapping suitable for reconstructing the form such as the one
returned by :func:`strip_terminal_data`.
:returns: The new form.
"""
if isinstance(o, Form):
return Form([replace_terminal_data(itg, mapping) for itg in o.integrals()])
elif isinstance(o, Integral):
expr_map, domain_map = mapping
integrand = replace(o.integrand(), expr_map)
return o.reconstruct(integrand, domain=domain_map[o.ufl_domain()])
else:
raise ValueError("Only Form or Integral inputs expected")
def METHOD_NAME(function_space):
"Return a new function space with all non-UFL information removed."
if isinstance(function_space, FunctionSpace):
return FunctionSpace(strip_domain(function_space.ufl_domain()),
function_space.ufl_element())
elif isinstance(function_space, TensorProductFunctionSpace):
subspaces = [METHOD_NAME(sub) for sub in function_space.ufl_sub_spaces()]
return TensorProductFunctionSpace(*subspaces)
elif isinstance(function_space, MixedFunctionSpace):
subspaces = [METHOD_NAME(sub) for sub in function_space.ufl_sub_spaces()]
return MixedFunctionSpace(*subspaces)
else:
raise NotImplementedError(f"{type(function_space)} cannot be stripped")
def strip_domain(domain):
"Return a new domain with all non-UFL information removed."
if isinstance(domain, Mesh):
return Mesh(domain.ufl_coordinate_element(), domain.ufl_id())
elif isinstance(domain, MeshView):
return MeshView(strip_domain(domain.ufl_mesh()),
domain.topological_dimension(), domain.ufl_id())
elif isinstance(domain, TensorProductMesh):
meshes = [strip_domain(mesh) for mesh in domain.ufl_meshes()]
return TensorProductMesh(meshes, domain.ufl_id())
else:
raise NotImplementedError(f"{type(domain)} cannot be stripped")
|
2,444 |
rtptime
|
"""Base classes used by streaming protocols."""
from abc import ABC, abstractmethod
import asyncio
import logging
from random import randrange
from typing import Optional, Tuple
from pyatv.auth.hap_pairing import NO_CREDENTIALS, HapCredentials
from pyatv.protocols.raop import timing
from pyatv.protocols.raop.packets import TimingPacket
from pyatv.support.rtsp import FRAMES_PER_PACKET
_LOGGER = logging.getLogger(__name__)
class StreamContext:
"""Data used for one RAOP session."""
def __init__(self) -> None:
"""Initialize a new StreamContext."""
self.credentials: HapCredentials = NO_CREDENTIALS
self.password: Optional[str] = None
self.sample_rate: int = 44100
self.channels: int = 2
self.bytes_per_channel: int = 2
self.latency = 22050 + self.sample_rate
self.rtpseq: int = 0
self.start_ts = 0
self.head_ts = 0
self.padding_sent: int = 0
self.server_port: int = 0
self.event_port: int = 0
self.control_port: int = 0
self.timing_port: int = 0
self.rtsp_session: int = 0
self.volume: Optional[float] = None
def reset(self) -> None:
"""Reset seasion.
Must be done when sample rate changes.
"""
self.rtpseq = randrange(2**16)
self.start_ts = timing.ntp2ts(timing.ntp_now(), self.sample_rate)
self.head_ts = self.start_ts
self.latency = 22050 + self.sample_rate
self.padding_sent = 0
@property
def METHOD_NAME(self) -> int:
"""Current RTP time with latency."""
return self.head_ts - (self.start_ts - self.latency)
@property
def position(self) -> float:
"""Current position in stream (seconds with fraction)."""
# Do not consider latency here (so do not use rtptime)
return timing.ts2ms(self.head_ts - self.start_ts, self.sample_rate) / 1000.0
@property
def frame_size(self) -> int:
"""Size of a single audio frame."""
return self.channels * self.bytes_per_channel
@property
def packet_size(self) -> int:
"""Size of a full audio packet."""
return FRAMES_PER_PACKET * self.frame_size
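# Worked example: with the defaults above (2 channels, 2 bytes per channel) frame_size
# is 4 bytes and packet_size is FRAMES_PER_PACKET * 4 bytes per audio packet.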
class StreamProtocol(ABC):
"""Base interface for a streaming protocol."""
@abstractmethod
async def setup(self, timing_server_port: int, control_client_port: int) -> None:
"""To setup connection prior to starting to stream."""
@abstractmethod
def teardown(self) -> None:
"""Teardown resources allocated by setup efter streaming finished."""
@abstractmethod
async def start_feedback(self) -> None:
"""Start to send feedback (if supported and required)."""
@abstractmethod
async def send_audio_packet(
self, transport: asyncio.DatagramTransport, rtp_header: bytes, audio: bytes
) -> Tuple[int, bytes]:
"""Send audio packet to receiver."""
@abstractmethod
async def play_url(self, timing_server_port: int, url: str, position: float = 0.0):
"""Play media from a URL."""
class TimingServer(asyncio.Protocol):
"""Basic timing server responding to timing requests."""
def __init__(self):
"""Initialize a new TimingServer."""
self.transport = None
def close(self):
"""Close timing server."""
if self.transport:
self.transport.close()
self.transport = None
@property
def port(self):
"""Port this server listens to."""
return self.transport.get_extra_info("socket").getsockname()[1]
def connection_made(self, transport):
"""Handle that connection succeeded."""
self.transport = transport
def datagram_received(self, data, addr):
"""Handle incoming timing requests."""
req = TimingPacket.decode(data)
recvtime_sec, recvtime_frac = timing.ntp2parts(timing.ntp_now())
resp = TimingPacket.encode(
req.proto,
0x53 | 0x80,
7,
0,
req.sendtime_sec,
req.sendtime_frac,
recvtime_sec,
recvtime_frac,
recvtime_sec,
recvtime_frac,
)
self.transport.sendto(resp, addr)
@staticmethod
def error_received(exc) -> None:
"""Handle a connection error."""
_LOGGER.error("Error received: %s", exc)
|
2,445 |
wrapper
|
import asyncio
import functools
import logging
import ssl
import time
from enum import Enum
from typing import Dict
from typing import Optional
import msgpack
from pydantic import BaseModel, validator
import pika
from pika.exceptions import AMQPConnectionError
logger = logging.getLogger(__name__)
def sync(f):
@functools.wraps(f)
def METHOD_NAME(*args, **kwargs):
return asyncio.get_event_loop().run_until_complete(f(*args, **kwargs))
return METHOD_NAME
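# A small usage sketch, assuming it is called from synchronous code with no event loop
# already running:
#
#   @sync
#   async def greet(name):
#       await asyncio.sleep(0)
#       return f"hello {name}"
#
#   greet("world")  # blocks until the coroutine completes and returns "hello world"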
class Priority(Enum):
LOW = 1
NORMAL = 5
HIGH = 10
class Headers(BaseModel):
job_id: str
priority: Priority
task_type: Optional[str] = None
@validator("priority", pre=True)
def _convert_priority(self, value):
return Priority[value]
class RabbitMQConfig(BaseModel):
host: str
port: int
username: str
password: str
protocol: str
class BasicPikaClient:
def __init__(self):
self.username = "username"
self.password = "password"
self.host = "localhost"
self.port = 5672
self.protocol = "amqp"
self._init_connection_parameters()
self._connect()
def _connect(self):
tries = 0
while True:
try:
self.connection = pika.BlockingConnection(self.parameters)
self.channel = self.connection.channel()
if self.connection.is_open:
break
except (AMQPConnectionError, Exception) as e:
time.sleep(5)
tries += 1
if tries == 20:
raise AMQPConnectionError(e)
def _init_connection_parameters(self):
self.credentials = pika.PlainCredentials(self.username, self.password)
self.parameters = pika.ConnectionParameters(
self.host,
int(self.port),
"/",
self.credentials,
)
if self.protocol == "amqps":
# SSL Context for TLS configuration of Amazon MQ for RabbitMQ
ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
ssl_context.set_ciphers("ECDHE+AESGCM:!ECDSA")
self.parameters.ssl_options = pika.SSLOptions(context=ssl_context)
def check_connection(self):
if not self.connection or self.connection.is_closed:
self._connect()
def close(self):
self.channel.close()
self.connection.close()
def declare_queue(
self, queue_name, exclusive: bool = False, max_priority: int = 10
):
self.check_connection()
logger.debug(f"Trying to declare queue({queue_name})...")
self.channel.queue_declare(
queue=queue_name,
exclusive=exclusive,
durable=True,
arguments={"x-max-priority": max_priority},
)
def declare_exchange(self, exchange_name: str, exchange_type: str = "direct"):
self.check_connection()
self.channel.exchange_declare(
exchange=exchange_name, exchange_type=exchange_type
)
def bind_queue(self, exchange_name: str, queue_name: str, routing_key: str):
self.check_connection()
self.channel.queue_bind(
exchange=exchange_name, queue=queue_name, routing_key=routing_key
)
def unbind_queue(self, exchange_name: str, queue_name: str, routing_key: str):
self.channel.queue_unbind(
queue=queue_name, exchange=exchange_name, routing_key=routing_key
)
class BasicMessageSender(BasicPikaClient):
def encode_message(self, body: Dict, encoding_type: str = "bytes"):
if encoding_type == "bytes":
return msgpack.packb(body)
else:
raise NotImplementedError
def send_message(
self,
exchange_name: str,
routing_key: str,
body: Dict,
headers: Optional[Headers],
):
body = self.encode_message(body=body)
self.channel.basic_publish(
exchange=exchange_name,
routing_key=routing_key,
body=body,
properties=pika.BasicProperties(
delivery_mode=pika.spec.PERSISTENT_DELIVERY_MODE,
priority=headers.priority.value,
headers=headers.dict(),
),
)
logger.debug(
f"Sent message. Exchange: {exchange_name}, Routing Key: {routing_key}, Body: {body[:128]}"
)
class BasicMessageReceiver(BasicPikaClient):
def __init__(self):
super().__init__()
self.channel_tag = None
def decode_message(self, body):
        if isinstance(body, bytes):
return msgpack.unpackb(body)
else:
raise NotImplementedError
def get_message(self, queue_name: str, auto_ack: bool = False):
method_frame, header_frame, body = self.channel.basic_get(
queue=queue_name, auto_ack=auto_ack
)
if method_frame:
logger.debug(f"{method_frame}, {header_frame}, {body}")
return method_frame, header_frame, body
else:
logger.debug("No message returned")
return None
def consume_messages(self, queue, callback):
self.check_connection()
self.channel_tag = self.channel.basic_consume(
queue=queue, on_message_callback=callback, auto_ack=True
)
logger.debug(" [*] Waiting for messages. To exit press CTRL+C")
self.channel.start_consuming()
def cancel_consumer(self):
if self.channel_tag is not None:
self.channel.basic_cancel(self.channel_tag)
self.channel_tag = None
else:
logger.error("Do not cancel a non-existing job")
class MyConsumer(BasicMessageReceiver):
@sync
async def consume(self, channel, method, properties, body):
body = self.decode_message(body=body)
file_content = await self._download_image(img_url=body["url"])
# consume message logic ...
async def _download_image(self, img_url):
# do some async stuff here
pass
def create_consumer():
worker = MyConsumer()
worker.declare_queue(queue_name="myqueue")
worker.declare_exchange(exchange_name="myexchange")
worker.bind_queue(
exchange_name="myexchange", queue_name="myqueue", routing_key="randomkey"
)
worker.consume_messages(queue="myqueue", callback=worker.consume)
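# Illustrative sketch (not part of the original module), assuming a RabbitMQ broker is
# reachable with the hard-coded connection settings in BasicPikaClient: polling a single
# message from a queue. The queue name is a placeholder chosen for the example.
def receive_one_message_example():
    receiver = BasicMessageReceiver()
    receiver.declare_queue(queue_name="myqueue")
    result = receiver.get_message(queue_name="myqueue", auto_ack=True)
    decoded = receiver.decode_message(result[2]) if result is not None else None
    receiver.close()
    return decoded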
if __name__ == "__main__":
create_consumer()
|
2,446 |
create connects
|
from PyQt5.QtCore import Qt, pyqtSlot
from PyQt5.QtGui import QBrush, QColor, QIcon, QPen
from PyQt5.QtWidgets import QMessageBox
from urh import settings
from urh.controller.dialogs.SendRecvDialog import SendRecvDialog
from urh.dev.VirtualDevice import VirtualDevice, Mode
from urh.signalprocessing.IQArray import IQArray
from urh.signalprocessing.Signal import Signal
from urh.ui.painting.SignalSceneManager import SignalSceneManager
from urh.util import FileOperator
from urh.util.Logger import logger
class SendDialog(SendRecvDialog):
def __init__(self, project_manager, modulated_data, modulation_msg_indices=None, continuous_send_mode=False,
parent=None, testing_mode=False):
super().__init__(project_manager, is_tx=True, continuous_send_mode=continuous_send_mode,
parent=parent, testing_mode=testing_mode)
self.graphics_view = self.ui.graphicsViewSend
self.ui.stackedWidget.setCurrentWidget(self.ui.page_send)
self.hide_receive_ui_items()
self.ui.btnStart.setIcon(QIcon.fromTheme("media-playback-start"))
self.setWindowTitle("Send Signal")
self.setWindowIcon(QIcon.fromTheme("media-playback-start"))
self.ui.btnStart.setToolTip("Send data")
self.ui.btnStop.setToolTip("Stop sending")
self.device_is_sending = False
self.modulation_msg_indices = modulation_msg_indices
if self.modulation_msg_indices is not None:
self.ui.progressBarMessage.setMaximum(len(self.modulation_msg_indices))
else:
self.ui.progressBarMessage.hide()
self.ui.labelCurrentMessage.hide()
if modulated_data is not None:
assert isinstance(modulated_data, IQArray)
# modulated_data is none in continuous send mode
self.ui.progressBarSample.setMaximum(len(modulated_data))
samp_rate = self.device_settings_widget.ui.spinBoxSampleRate.value()
signal = Signal("", "Modulated Preview", sample_rate=samp_rate)
signal.iq_array = modulated_data
self.scene_manager = SignalSceneManager(signal, parent=self)
self.send_indicator = self.scene_manager.scene.addRect(0, -2, 0, 4,
QPen(QColor(Qt.transparent), 0),
QBrush(settings.SEND_INDICATOR_COLOR))
self.send_indicator.stackBefore(self.scene_manager.scene.selection_area)
self.scene_manager.init_scene()
self.graphics_view.set_signal(signal)
self.graphics_view.sample_rate = samp_rate
self.METHOD_NAME()
self.device_settings_widget.update_for_new_device(overwrite_settings=False)
def METHOD_NAME(self):
super().METHOD_NAME()
self.graphics_view.save_as_clicked.connect(self.on_graphics_view_save_as_clicked)
self.scene_manager.signal.data_edited.connect(self.on_signal_data_edited)
def _update_send_indicator(self, width: int):
y, h = self.ui.graphicsViewSend.view_rect().y(), self.ui.graphicsViewSend.view_rect().height()
self.send_indicator.setRect(0, y - h, width, 2 * h + abs(y))
def set_current_message_progress_bar_value(self, current_sample: int):
if self.modulation_msg_indices is not None:
msg_index = next((i for i, sample in enumerate(self.modulation_msg_indices) if sample >= current_sample),
len(self.modulation_msg_indices))
self.ui.progressBarMessage.setValue(msg_index + 1)
def update_view(self):
if super().update_view():
self._update_send_indicator(self.device.current_index)
self.ui.progressBarSample.setValue(self.device.current_index)
self.set_current_message_progress_bar_value(self.device.current_index)
if not self.device.sending_finished:
self.ui.lblCurrentRepeatValue.setText(str(self.device.current_iteration + 1))
else:
self.ui.btnStop.click()
self.ui.lblCurrentRepeatValue.setText("Sending finished")
def init_device(self):
device_name = self.selected_device_name
num_repeats = self.device_settings_widget.ui.spinBoxNRepeat.value()
sts = self.scene_manager.signal.iq_array
self.device = VirtualDevice(self.backend_handler, device_name, Mode.send, samples_to_send=sts,
device_ip="192.168.10.2", sending_repeats=num_repeats, parent=self)
self._create_device_connects()
@pyqtSlot()
def on_graphics_view_save_as_clicked(self):
filename = FileOperator.ask_save_file_name("signal.complex")
if filename:
try:
try:
self.scene_manager.signal.sample_rate = self.device.sample_rate
except Exception as e:
logger.exception(e)
self.scene_manager.signal.save_as(filename)
except Exception as e:
QMessageBox.critical(self, self.tr("Error saving signal"), e.args[0])
@pyqtSlot()
def on_signal_data_edited(self):
signal = self.scene_manager.signal
self.ui.progressBarSample.setMaximum(signal.num_samples)
self.device.samples_to_send = signal.iq_array.data
self.scene_manager.init_scene()
self.ui.graphicsViewSend.redraw_view()
@pyqtSlot()
def on_start_clicked(self):
super().on_start_clicked()
if self.ui.progressBarSample.value() >= self.ui.progressBarSample.maximum() - 1:
self.on_clear_clicked()
if self.device_is_sending:
self.device.stop("Sending paused by user")
else:
self.device.start()
@pyqtSlot()
def on_stop_clicked(self):
super().on_stop_clicked()
self.on_clear_clicked()
@pyqtSlot()
def on_device_stopped(self):
super().on_device_stopped()
self.ui.btnStart.setIcon(QIcon.fromTheme("media-playback-start"))
self.ui.btnStart.setText("Start")
self.ui.btnStart.setToolTip("Start sending")
self.device_is_sending = False
@pyqtSlot()
def on_device_started(self):
super().on_device_started()
self.device_is_sending = True
self.ui.btnStart.setEnabled(True)
self.ui.btnStart.setIcon(QIcon.fromTheme("media-playback-pause"))
self.ui.btnStart.setText("Pause")
self.set_device_ui_items_enabled(False)
@pyqtSlot()
def on_clear_clicked(self):
self._update_send_indicator(0)
self.reset()
|
2,447 |
str
|
from bsb import config
from bsb.config import types
from bsb.simulation.cell import CellModel
from bsb.exceptions import AdapterError
from bsb.reporting import warn
import itertools as _it
import collections
try:
import arbor
_has_arbor = True
except ImportError:
_has_arbor = False
import types as _t
# Mock missing requirements, as arbor is, like
# all simulators, an optional dep. of the BSB.
arbor = _t.ModuleType("arbor")
arbor.recipe = type("mock_recipe", (), dict())
def get(*arg):
raise ImportError("Arbor not installed.")
arbor.__getattr__ = get
def _consume(iterator, n=None):
"Advance the iterator n-steps ahead. If n is None, consume entirely."
# Use functions that consume iterators at C speed.
if n is None:
# feed the entire iterator into a zero-length deque
collections.deque(iterator, maxlen=0)
else:
# advance to the empty slice starting at position n
next(_it.islice(iterator, n, n), None)
_it.consume = _consume
@config.node
class ArborCell(CellModel):
node_name = "simulations.?.cell_models"
model = config.attr(
type=types.class_, required=lambda s: "relay" not in s or not s["relay"]
)
default_endpoint = "comp_-1"
def get_description(self, gid):
if not self.relay:
morphology, labels, decor = self.model.cable_cell_template()
labels = self._add_labels(gid, labels, morphology)
decor = self._add_decor(gid, decor)
cc = arbor.cable_cell(morphology, labels, decor)
return cc
else:
schedule = self.get_schedule(gid)
return arbor.spike_source_cell(self.default_endpoint, schedule)
def get_schedule(self, gid):
schedule = arbor.explicit_schedule([])
for device in self.adapter._devices_on[gid]:
pattern = device.get_pattern(gid)
if not pattern:
continue
merged = pattern + schedule.events(0, float("inf"))
schedule = arbor.explicit_schedule(merged)
return schedule
def _add_decor(self, gid, decor):
self._soma_detector(decor)
self._create_transmitters(gid, decor)
self._create_gaps(gid, decor)
self._create_receivers(gid, decor)
return decor
def _add_labels(self, gid, labels, morphology):
pwlin = arbor.place_pwlin(morphology)
def comp_label(comp):
if comp.id == -1:
warn(f"Encountered nil compartment on {gid}")
return
loc, d = pwlin.closest(*comp.start)
if d > 0.0001:
raise AdapterError(f"Couldn't find {comp.start}, on {self.METHOD_NAME(gid)}")
labels[f"comp_{comp.id}"] = str(loc)
comps_from = self.adapter._connections_from[gid]
comps_on = (rcv.comp_on for rcv in self.adapter._connections_on[gid])
gaps = (c.to_compartment for c in self.adapter._gap_junctions_on.get(gid, []))
_it.consume(comp_label(i) for i in _it.chain(comps_from, comps_on, gaps))
labels[self.default_endpoint] = "(root)"
return labels
def METHOD_NAME(self, gid):
return f"{self.adapter._name_of(gid)} {gid}"
def _soma_detector(self, decor):
decor.place("(root)", arbor.spike_detector(-10), self.default_endpoint)
def _create_transmitters(self, gid, decor):
done = set()
for comp in self.adapter._connections_from[gid]:
if comp.id in done:
continue
else:
done.add(comp.id)
decor.place(f'"comp_{comp.id}"', arbor.spike_detector(-10), f"comp_{comp.id}")
def _create_gaps(self, gid, decor):
done = set()
for conn in self.adapter._gap_junctions_on.get(gid, []):
comp = conn.to_compartment
if comp.id in done:
continue
else:
done.add(comp.id)
decor.place(f'"comp_{comp.id}"', arbor.junction("gj"), f"gap_{comp.id}")
def _create_receivers(self, gid, decor):
for rcv in self.adapter._connections_on[gid]:
decor.place(
f'"comp_{rcv.comp_on.id}"',
rcv.synapse,
f"comp_{rcv.comp_on.id}_{rcv.index}",
)
|
2,448 |
bgp connected
|
import logging
import pytest
import re
from collections import defaultdict
from tests.common.helpers.assertions import pytest_assert
from tests.common.utilities import wait_until
from .vnet_constants import CLEANUP_KEY
from .vnet_utils import cleanup_vnet_routes, cleanup_dut_vnets, cleanup_vxlan_tunnels, \
apply_dut_config_files, generate_dut_config_files
from tests.common.config_reload import config_reload
logger = logging.getLogger(__name__)
pytestmark = [
pytest.mark.topology("t0"),
pytest.mark.asic("mellanox")
]
BGP_WAIT_TIMEOUT = 240
BGP_POLL_RATE = 10
TESTING_STATUS = "Testing"
CLEANUP_STATUS = "Cleanup"
SHOW_VNET_ROUTES_CMD = "show vnet routes all"
SHOW_BGP_SUMMARY_CMD = "show ip bgp summary"
SHOW_BGP_ADV_ROUTES_CMD_TEMPLATE = "show ip bgp neighbor {} advertised-routes"
RESTART_BGP_CMD = "sudo systemctl restart bgp"
CONFIG_SAVE_CMD = "sudo config save -y"
BACKUP_CONFIG_DB_CMD = "sudo cp /etc/sonic/config_db.json /etc/sonic/config_db.json.route_leak_orig"
RESTORE_CONFIG_DB_CMD = "sudo cp /etc/sonic/config_db.json.route_leak_orig /etc/sonic/config_db.json"
DELETE_BACKUP_CONFIG_DB_CMD = "sudo rm /etc/sonic/config_db.json.route_leak_orig"
BGP_ERROR_TEMPLATE = "BGP sessions not established after {} seconds"
LEAKED_ROUTES_TEMPLATE = "Leaked routes: {}"
@pytest.fixture(scope="module")
def configure_dut(request, minigraph_facts, duthosts, rand_one_dut_hostname, vnet_config, vnet_test_params):
"""
Setup/teardown fixture for VNET route leak test
During the setup portion, generates VNET VxLAN configurations and applies them to the DUT
During the teardown portion, removes all previously pushed VNET VxLAN information from the DUT
Args:
minigraph_facts: Minigraph information
duthost: DUT host object
vnet_config: Dictionary containing VNET configuration information
vnet_test_params: Dictionary containing VNET test parameters
"""
duthost = duthosts[rand_one_dut_hostname]
logger.info("Backing up config_db.json")
duthost.shell(BACKUP_CONFIG_DB_CMD)
num_routes = request.config.option.num_routes
duthost.shell("sonic-clear fdb all")
generate_dut_config_files(duthost, minigraph_facts,
vnet_test_params, vnet_config)
apply_dut_config_files(duthost, vnet_test_params, num_routes)
# In this case yield is used only to separate this fixture into setup and teardown portions
yield
if vnet_test_params[CLEANUP_KEY]:
logger.info("Restoring config_db.json")
duthost.shell(RESTORE_CONFIG_DB_CMD)
duthost.shell(DELETE_BACKUP_CONFIG_DB_CMD)
cleanup_vnet_routes(duthost, vnet_test_params, num_routes)
cleanup_dut_vnets(duthost, vnet_config)
cleanup_vxlan_tunnels(duthost, vnet_test_params)
logger.info("Restarting BGP and waiting for BGP sessions")
duthost.shell(RESTART_BGP_CMD)
if not wait_until(BGP_WAIT_TIMEOUT, BGP_POLL_RATE, 0, METHOD_NAME, duthost):
logger.warning("BGP sessions not up {} seconds after BGP restart, restoring with `config_reload`".format(
BGP_WAIT_TIMEOUT))
config_reload(duthost)
else:
logger.info("Skipping cleanup")
def get_bgp_neighbors(duthost):
"""
Retrieve IPs of BGP neighbors
Args:
duthost: DUT host object
Returns:
A Python list containing the IP addresses of all BGP neighbors as strings
"""
# Match only IP addresses at the beginning of the line
# Only IP addresses of neighbors should be matched by this
bgp_neighbor_addr_regex = re.compile(r"^([0-9]{1,3}\.){3}[0-9]{1,3}")
bgp_summary = duthost.shell(SHOW_BGP_SUMMARY_CMD)["stdout"].split("\n")
logger.debug("BGP Summary: {}".format(bgp_summary))
bgp_neighbors = []
for line in bgp_summary:
matched = bgp_neighbor_addr_regex.match(line)
if matched:
bgp_neighbors.append(str(matched.group(0)))
return bgp_neighbors
def METHOD_NAME(duthost):
"""
Checks if BGP connections are up
BGP connections are "up" once they have received all prefixes (6400) from all neighbors
Args:
duthost: DUT host object
Returns:
True if BGP sessions are up, False otherwise
"""
bgp_neighbors = get_bgp_neighbors(duthost)
if not bgp_neighbors:
return False
return duthost.check_bgp_session_state(bgp_neighbors)
def get_leaked_routes(duthost):
"""
Gets all VNET routes and checks that they are not advertised to any BGP neighbors
Args:
duthost: DUT host object
Returns:
A defaultdict where each key is a BGP neighbor that has had routes leaked
(formatted as "Neighbor <IP address>") to it and each value is a Python list
of VNET routes (as strings) that were leaked to that neighbor.
Neighbors that did not have routes leaked to them are not included.
"""
vnet_routes = duthost.shell(SHOW_VNET_ROUTES_CMD)["stdout"].split("\n")
logger.debug("VNET prefixes: {}".format(vnet_routes))
vnet_prefixes = []
for line in vnet_routes:
# Ignore header lines and separators
# All other lines will contain numbers in the form of an IP address/prefix,
# which is the information we want to extract
if any(char.isdigit() for char in line):
vnet_prefixes.append(line.split()[1])
bgp_neighbors = get_bgp_neighbors(duthost)
leaked_routes = defaultdict(list)
for neighbor in bgp_neighbors:
adv_routes = duthost.shell(
SHOW_BGP_ADV_ROUTES_CMD_TEMPLATE.format(neighbor))["stdout"]
for prefix in vnet_prefixes:
if prefix in adv_routes:
leaked_routes["Neighbor {}".format(
neighbor)].append(str(prefix))
return leaked_routes
def test_vnet_route_leak(configure_dut, duthosts, rand_one_dut_hostname):
"""
Test case for VNET route leak check
Gets a list of all VNET routes programmed to the DUT, and a list of all BGP neighbors
Verifies that no VNET routes are being advertised to BGP neighbors
Restarts the BGP service and checks for leaked routes again
Performs `config reload` and checks for leaked routes again
Args:
configure_dut: Pytest fixture to prepare DUT for testing
duthost: DUT host object
"""
duthost = duthosts[rand_one_dut_hostname]
leaked_routes = get_leaked_routes(duthost)
pytest_assert(not leaked_routes,
LEAKED_ROUTES_TEMPLATE.format(leaked_routes))
logger.info("Restarting BGP")
duthost.shell(RESTART_BGP_CMD)
pytest_assert(wait_until(BGP_WAIT_TIMEOUT, BGP_POLL_RATE, 0,
METHOD_NAME, duthost), BGP_ERROR_TEMPLATE.format(BGP_WAIT_TIMEOUT))
leaked_routes = get_leaked_routes(duthost)
pytest_assert(not leaked_routes,
LEAKED_ROUTES_TEMPLATE.format(leaked_routes))
logger.info("Saving and reloading CONFIG_DB")
duthost.shell(CONFIG_SAVE_CMD)
config_reload(duthost)
pytest_assert(wait_until(BGP_WAIT_TIMEOUT, BGP_POLL_RATE, 0,
METHOD_NAME, duthost), BGP_ERROR_TEMPLATE.format(BGP_WAIT_TIMEOUT))
leaked_routes = get_leaked_routes(duthost)
pytest_assert(not leaked_routes,
LEAKED_ROUTES_TEMPLATE.format(leaked_routes))
|
2,449 |
set geometry
|
#!/usr/bin/env python3
import os
import sys
import numpy as np
import rospkg
import rospy
import yaml
from gazebo_msgs.srv import GetModelState
from geometry_msgs.msg import Point, Pose, PoseStamped, Vector3
from mil_misc_tools import text_effects
from mil_msgs.srv import SetGeometry
from std_msgs.msg import Header
from std_srvs.srv import SetBool, SetBoolResponse
from subjugator_msgs.srv import VisionRequest, VisionRequest2D, VisionRequestResponse
rospack = rospkg.RosPack()
config_file = os.path.join(
rospack.get_path("subjugator_missions"), "sub8", "vision_proxies.yaml"
)
with open(config_file) as config_fh:
    f = yaml.safe_load(config_fh)
model_state = rospy.ServiceProxy("/gazebo/get_model_state", GetModelState)
fprint = text_effects.FprintFactory(title="SIMULATOR").fprint
def handle_fake_perception(extra, target_object):
"""
Calls the GetModelState service from gazebo to get the realtime position of the model targeted.
Provides this information to the mission.
@param extra The target_name passed through some missions.
Other missions do not pass a target_name thus its label of extra.
@param target_object Is the model name of the object targeted by the mission.
Missions that do not pass a target_name must use this.
"""
now = rospy.get_rostime()
k = np.uint32(0)
if extra != "":
target_object = extra
if target_object == "":
fprint("NO TARGET")
sys.exit(0)
model = get_position(target_object)
# Library of offsets. Models must be manually offset as gazebo coordinates != center of model.
centlib = {
"start_gate": Point(1.5, 0, 0),
"nav_gate": Point(1.15, 0, 0),
"orange_rectangle": Point(0, 0, 2),
}
if target_object in centlib:
offset = centlib[target_object]
else:
offset = Point(0, 0, 0)
pose_stamp = PoseStamped(
header=Header(seq=k, stamp=now, frame_id="/map"),
# Offset our pose by the starting position of the sub relative to the world in Gazebo.
pose=Pose(
position=Point(
model.pose.position.x - 13 + offset.x,
model.pose.position.y - 24 + offset.y,
model.pose.position.z + offset.z,
),
orientation=model.pose.orientation,
),
)
covariance_diagonal = Vector3(0, 0, 0)
found = True
resp2 = VisionRequestResponse(pose_stamp, covariance_diagonal, found)
return resp2
def get_position(model_name):
try:
resp1 = model_state(model_name, "world")
return resp1
except rospy.ServiceException:
return None
def METHOD_NAME(req):
return {"success": True}
def vision_cb_2D(req):
return False
def start(resp):
return SetBoolResponse(True, "")
def init_service(name, target):
# Generates services required for missions and target acquisition
rospy.Service(
"/vision/" + name + "/pose",
VisionRequest,
lambda h: handle_fake_perception(h.target_name, target),
)
# The following three services do nothing other than return true values.
# They are not needed in sim but a return value is required for missions.
rospy.Service("/vision/" + name + "/set_geometry", SetGeometry, METHOD_NAME)
rospy.Service("/vision/" + name + "/2D", VisionRequest2D, vision_cb_2D)
rospy.Service("/vision/" + name + "/enable", SetBool, start)
def fake_perception_server():
rospy.init_node("fake_perception")
"""
In the dictionary below please place the name of the service you wish to mimic and the target of said service.
The target should match the model name found in the duck.launch file. It will be what immediately follows the
-model tag within the node tied to the model. Example is orange_rectangle mapped to channel_marker_1.
If the service provides a target_name you may leave the target empty as done with buoys.
"""
missions = {
"orange_rectangle": "channel_marker_1",
"buoys": "",
"start_gate": "start_gate",
}
for key in missions:
init_service(key, missions[key])
fprint("Faking perception.")
rospy.spin()
if __name__ == "__main__":
fake_perception_server()
rospy.wait_for_service("/gazebo/get_model_state")
|
2,450 |
test aqt device str
|
# Copyright 2022 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import timedelta
from typing import List
import pytest
import cirq
from cirq_aqt import aqt_device, aqt_device_metadata
@pytest.fixture
def qubits() -> List[cirq.LineQubit]:
return cirq.LineQubit.range(3)
@pytest.fixture
def device(qubits) -> aqt_device.AQTDevice:
ms = cirq.Duration(millis=1)
return aqt_device.AQTDevice(
measurement_duration=100 * ms,
twoq_gates_duration=200 * ms,
oneq_gates_duration=10 * ms,
qubits=qubits,
)
class NotImplementedOperation(cirq.Operation):
def with_qubits(self, *new_qubits) -> 'NotImplementedOperation':
raise NotImplementedError()
@property
def qubits(self):
raise NotImplementedError()
def test_init_qubits(device, qubits):
ms = cirq.Duration(millis=1)
assert device.qubits == frozenset(qubits)
with pytest.raises(TypeError, match="NamedQubit"):
aqt_device.AQTDevice(
measurement_duration=100 * ms,
twoq_gates_duration=200 * ms,
oneq_gates_duration=10 * ms,
qubits=[cirq.LineQubit(0), cirq.NamedQubit("a")],
)
@pytest.mark.parametrize('ms', [cirq.Duration(millis=1), timedelta(milliseconds=1)])
def test_init_durations(ms, qubits):
dev = aqt_device.AQTDevice(
qubits=qubits,
measurement_duration=100 * ms,
twoq_gates_duration=200 * ms,
oneq_gates_duration=10 * ms,
)
assert dev.metadata.twoq_gates_duration == cirq.Duration(millis=200)
assert dev.metadata.oneq_gates_duration == cirq.Duration(millis=10)
assert dev.metadata.measurement_duration == cirq.Duration(millis=100)
def test_metadata(device, qubits):
assert isinstance(device.metadata, aqt_device_metadata.AQTDeviceMetadata)
assert device.metadata.qubit_set == frozenset(qubits)
def test_repr(device):
assert repr(device) == (
"cirq_aqt.aqt_device.AQTDevice("
"measurement_duration=cirq.Duration(millis=100), "
"twoq_gates_duration=cirq.Duration(millis=200), "
"oneq_gates_duration=cirq.Duration(millis=10), "
"qubits=[cirq.LineQubit(0), cirq.LineQubit(1), "
"cirq.LineQubit(2)])"
)
cirq.testing.assert_equivalent_repr(device, setup_code='import cirq\nimport cirq_aqt\n')
def test_validate_measurement_non_adjacent_qubits_ok(device):
device.validate_operation(
cirq.GateOperation(cirq.MeasurementGate(2, 'key'), (cirq.LineQubit(0), cirq.LineQubit(1)))
)
def test_validate_operation_existing_qubits(device):
device.validate_operation(cirq.GateOperation(cirq.XX, (cirq.LineQubit(0), cirq.LineQubit(1))))
device.validate_operation(cirq.Z(cirq.LineQubit(0)))
device.validate_operation(
cirq.PhasedXPowGate(phase_exponent=0.75, exponent=0.25, global_shift=0.1).on(
cirq.LineQubit(1)
)
)
with pytest.raises(ValueError):
device.validate_operation(cirq.CZ(cirq.LineQubit(0), cirq.LineQubit(-1)))
with pytest.raises(ValueError):
device.validate_operation(cirq.Z(cirq.LineQubit(-1)))
with pytest.raises(ValueError):
device.validate_operation(cirq.CZ(cirq.LineQubit(1), cirq.LineQubit(1)))
with pytest.raises(ValueError):
device.validate_operation(cirq.X(cirq.NamedQubit("q1")))
def test_validate_operation_supported_gate(device):
class MyGate(cirq.Gate):
def num_qubits(self):
return 1
device.validate_operation(cirq.GateOperation(cirq.Z, [cirq.LineQubit(0)]))
assert MyGate().num_qubits() == 1
with pytest.raises(ValueError):
device.validate_operation(cirq.GateOperation(MyGate(), [cirq.LineQubit(0)]))
with pytest.raises(ValueError):
device.validate_operation(NotImplementedOperation())
def test_aqt_device_eq(device):
eq = cirq.testing.EqualsTester()
eq.make_equality_group(lambda: device)
def test_validate_circuit_repeat_measurement_keys(device):
circuit = cirq.Circuit()
circuit.append(
[cirq.measure(cirq.LineQubit(0), key='a'), cirq.measure(cirq.LineQubit(1), key='a')]
)
with pytest.raises(ValueError, match='Measurement key a repeated'):
device.validate_circuit(circuit)
def METHOD_NAME(device):
assert str(device) == "q(0)───q(1)───q(2)"
def test_aqt_device_pretty_repr(device):
cirq.testing.assert_repr_pretty(device, "q(0)───q(1)───q(2)")
cirq.testing.assert_repr_pretty(device, "AQTDevice(...)", cycle=True)
def test_at(device):
assert device.at(-1) is None
assert device.at(0) == cirq.LineQubit(0)
assert device.at(2) == cirq.LineQubit(2)
|
2,451 |
goal
|
# This file is part of project Sverchok. It's copyrighted by the contributors
# recorded in the version control history of the file, available from
# its original location https://github.com/nortikin/sverchok/commit/master
#
# SPDX-License-Identifier: GPL3
# License-Filename: LICENSE
import numpy as np
from math import sin, cos, pi
from sverchok.utils.geom import linear_approximation, Spline
from sverchok.utils.curve.core import SvCurve
from sverchok.dependencies import scipy
if scipy is not None:
from scipy.optimize import curve_fit
class SvFourierCurve(SvCurve):
def __init__(self, omega, start, coeffs):
self.omega = omega
self.start = start
self.coeffs = coeffs
self.u_bounds = (0.0, 1.0)
def get_u_bounds(self):
return self.u_bounds
def evaluate(self, t):
result = self.start
o = self.omega
for i, coeff in enumerate(self.coeffs):
j = i // 2
if i % 2 == 0:
result += coeff * cos((j+1)*o*t)
else:
result += coeff * sin((j+1)*o*t)
return result
def evaluate_array(self, ts):
n = len(ts)
result = np.broadcast_to(self.start, (n,3))
o = self.omega
for i, coeff in enumerate(self.coeffs):
j = i // 2
if i % 2 == 0:
cost = np.cos((j+1)*o*ts)[np.newaxis].T
result = result + coeff*cost
else:
sint = np.sin((j+1)*o*ts)[np.newaxis].T
result = result + coeff*sint
return result
def tangent(self, t, tangent_delta=None):
        result = np.zeros(3)  # float accumulator; an integer array would break the in-place updates below
o = self.omega
for i, coeff in enumerate(self.coeffs):
j = i // 2
if i % 2 == 0:
result += - (j+1)*o * coeff * sin((j+1)*o*t)
else:
result += (j+1)*o * coeff * cos((j+1)*o*t)
return result
def tangent_array(self, ts, tangent_delta=None):
n = len(ts)
result = np.zeros((n, 3))
o = self.omega
for i, coeff in enumerate(self.coeffs):
j = i // 2
if i % 2 == 0:
cost = - np.sin((j+1)*o*ts)[np.newaxis].T
result = result + (j+1)*o* coeff*cost
else:
sint = np.cos((j+1)*o*ts)[np.newaxis].T
result = result + (j+1)*o* coeff*sint
return result
def second_derivative(self, t, tangent_delta=None):
return self.second_derivative_array(np.array([t]))[0]
def second_derivative_array(self, ts, tangent_delta=None):
n = len(ts)
result = np.zeros((n, 3))
o = self.omega
for i, coeff in enumerate(self.coeffs):
j = i // 2
if i % 2 == 0:
cost = - np.cos((j+1)*o*ts)[np.newaxis].T
result = result + ((j+1)*o)**2 * coeff*cost
else:
sint = - np.sin((j+1)*o*ts)[np.newaxis].T
result = result + ((j+1)*o)**2 * coeff*sint
return result
@classmethod
def approximate(cls, verts, degree, metric='DISTANCE'):
def init_guess(verts, n):
return np.array([pi] + list(verts[0]) + [0,0,0]*2*n)
def METHOD_NAME(ts, *xs):
n3 = len(xs)-1
n = n3 // 3
omega = xs[0]
points = np.array(xs[1:]).reshape((n,3))
curve = SvFourierCurve(omega, points[0], points[1:])
pts = curve.evaluate_array(ts)
return np.ravel(pts)
xdata = Spline.create_knots(verts, metric=metric)
ydata = np.ravel(verts)
p0 = init_guess(verts, degree)
popt, pcov = curve_fit(METHOD_NAME, xdata, ydata, p0)
n3 = len(popt)-1
ncoeffs = n3 // 3
omega = popt[0]
points = popt[1:].reshape((ncoeffs,3))
curve = SvFourierCurve(omega, points[0], points[1:])
return curve
@classmethod
def interpolate(cls, verts, omega, metric='DISTANCE', is_cyclic=False):
ndim = 3
n_verts = len(verts)
verts = np.asarray(verts)
if is_cyclic:
verts = np.append(verts, verts[0][np.newaxis], axis=0)
n_verts += 1
n_equations = n_verts + 1
else:
n_equations = n_verts
tknots = Spline.create_knots(verts, metric=metric)
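        # Build one ndim x ndim diagonal block per (knot, harmonic) pair: each block carries
        # the cos/sin basis value of that harmonic at that knot, so solving A x = B below
        # yields one coefficient vector per harmonic term.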
A = np.zeros((ndim*n_equations, ndim*n_equations))
for equation_idx, t in enumerate(tknots):
for unknown_idx in range(n_equations):
i = (unknown_idx // 2) + 1
if unknown_idx % 2 == 0:
coeff = cos(omega*i*t)
else:
coeff = sin(omega*i*t)
row = ndim*equation_idx
col = ndim*unknown_idx
for d in range(ndim):
A[row+d, col+d] = coeff
if is_cyclic:
equation_idx = len(tknots)
for unknown_idx in range(n_equations):
i = (unknown_idx // 2) + 1
if unknown_idx % 2 == 0:
coeff = -omega*i*sin(omega*i) # - 0
else:
coeff = omega*i*cos(omega*i) - omega*i
row = ndim*equation_idx
col = ndim*unknown_idx
for d in range(ndim):
A[row+d, col+d] = coeff
#print(A)
B = np.empty((ndim*n_equations,1))
for point_idx, point in enumerate(verts):
row = ndim*point_idx
B[row:row+ndim] = point[:,np.newaxis]
if is_cyclic:
point_idx = len(verts)
row = ndim*point_idx
B[row:row+ndim] = np.array([[0,0,0]]).T
#print(B)
x = np.linalg.solve(A, B)
coeffs = []
for i in range(n_equations):
row = i*ndim
coeff = x[row:row+ndim,0].T
coeffs.append(coeff)
coeffs = np.array(coeffs)
#print(coeffs)
return SvFourierCurve(omega, np.array([0.0,0.0,0.0]), coeffs)
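# Illustrative sketch (not part of the original module): a unit circle in the XY plane
# written directly as a Fourier curve with one cosine and one sine coefficient vector,
# then evaluated over the parameter range [0, 1].
def _fourier_curve_example():
    coeffs = np.array([
        [1.0, 0.0, 0.0],  # coefficient of cos(omega * t)
        [0.0, 1.0, 0.0],  # coefficient of sin(omega * t)
    ])
    curve = SvFourierCurve(2 * pi, np.array([0.0, 0.0, 0.0]), coeffs)
    ts = np.linspace(0.0, 1.0, 64)
    return curve.evaluate_array(ts), curve.tangent_array(ts)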
|
2,452 |
test delimeter in text
|
"""Test v.in.ascii CSV capabilities
:author: Vaclav Petras
"""
import os
from grass.gunittest.case import TestCase
from grass.gunittest.main import test
from grass.script.core import read_command
INPUT_NOQUOTES = """Id,POINT_X,POINT_Y,Category,ED field estimate
100,437343.6704,4061363.41525,High Erosion,Low Deposition
101,453643.127906,4050070.29852,High Erosion,Low Erosion
102,454903.605427,4049480.80568,High Erosion,High Erosion
105,437734.838807,4060493.98315,High Erosion,Low Erosion
107,450833.019732,4048207.02664,High Erosion,Low Erosion
"""
INPUT_DOUBLEQUOTES = """Id,POINT_X,POINT_Y,Category,"ED field estimate"
100,437343.6704,4061363.41525,"High Erosion","Low Deposition"
101,453643.127906,4050070.29852,"High Erosion","Low Erosion"
102,454903.605427,4049480.80568,"High Erosion","High Erosion"
105,437734.838807,4060493.98315,"High Erosion","Low Erosion"
107,450833.019732,4048207.02664,"High Erosion","Low Erosion"
"""
INPUT_TSV = """Id\tPOINT_X\tPOINT_Y\tCategory\t"ED field estimate"
100\t437343.6704\t4061363.41525\t"High Erosion"\t"Low Deposition"
101\t453643.127906\t4050070.29852\t"High Erosion"\t"Low Erosion"
102\t454903.605427\t4049480.80568\t"High Erosion"\t"High Erosion"
105\t437734.838807\t4060493.98315\t"High Erosion"\t"Low Erosion"
107\t450833.019732\t4048207.02664\t"High Erosion"\t"Low Erosion"
"""
INPUT_UNCOMMON = """Id@POINT_X@POINT_Y@Category@^ED field estimate^
[email protected]@4061363.41525@^High Erosion^@^Low Deposition^
[email protected]@4050070.29852@^High Erosion^@^Low Erosion^
[email protected]@4049480.80568@^High Erosion^@^High Erosion^
[email protected]@4060493.98315@^High Erosion^@^Low Erosion^
[email protected]@4048207.02664@^High Erosion^@^Low Erosion^
"""
TABLE_1 = """cat|x|y|ed_cat|field_estimate
100|437343.6704|4061363.41525|High Erosion|Low Deposition
101|453643.127906|4050070.29852|High Erosion|Low Erosion
102|454903.605427|4049480.80568|High Erosion|High Erosion
105|437734.838807|4060493.98315|High Erosion|Low Erosion
107|450833.019732|4048207.02664|High Erosion|Low Erosion
"""
class SimpleCsvTestCase(TestCase):
xyvector = "yxvetor_test"
def tearDown(self):
"""Remove the vector map after each test method"""
self.runModule("g.remove", flags="f", type="vector", name=self.xyvector)
def test_no_text_delimeter(self):
"""Test type of resulting map"""
self.assertModule(
"v.in.ascii",
input="-",
output=self.xyvector,
separator="comma",
skip=1,
x=2,
y=3,
cat=1,
columns="cat int, x double, y double,"
" ed_cat varchar(20), field_estimate varchar(20)",
stdin_=INPUT_NOQUOTES,
)
category = read_command("v.db.select", map=self.xyvector, separator="pipe")
self.assertEqual(
first=TABLE_1.replace("\n", os.linesep),
second=category,
msg="Attribute table has wrong entries",
)
def test_text_delimeter(self):
"""Test loading CSV with text delimiter
Text delimiter added in r63581
"""
self.assertModule(
"v.in.ascii",
input="-",
output=self.xyvector,
separator="comma",
text="doublequote",
skip=1,
x=2,
y=3,
cat=1,
columns="cat int, x double, y double,"
" ed_cat varchar(20), field_estimate varchar(20)",
stdin_=INPUT_DOUBLEQUOTES,
)
category = read_command("v.db.select", map=self.xyvector, separator="pipe")
self.assertEqual(
first=TABLE_1.replace("\n", os.linesep),
second=category,
msg="Attribute table has wrong entries",
)
# TODO: a general method to compare attribute tables? (might need to solve because of floats)
# TODO: standardize string strip? perhaps discourage, it messes up the diff
# TODO: use replace solution for newlines in lib (compare to current one)
def test_tsv(self):
"""Test loading TSV (CSV with tab as delim)
Using double quote character for quote.
"""
self.assertModule(
"v.in.ascii",
input="-",
output=self.xyvector,
separator="tab",
text='"',
skip=1,
x=2,
y=3,
cat=1,
columns="cat int, x double, y double,"
" ed_cat varchar(20), field_estimate varchar(20)",
stdin_=INPUT_TSV,
)
category = read_command("v.db.select", map=self.xyvector, separator="pipe")
self.assertEqual(
first=TABLE_1.replace("\n", os.linesep),
second=category,
msg="Attribute table has wrong entries",
)
def test_uncommon_delims(self):
"""Test loading CSV with uncommon delimiters"""
self.assertModule(
"v.in.ascii",
input="-",
output=self.xyvector,
separator="@",
text="^",
skip=1,
x=2,
y=3,
cat=1,
columns="cat int, x double, y double,"
" ed_cat varchar(20), field_estimate varchar(20)",
stdin_=INPUT_UNCOMMON,
)
category = read_command("v.db.select", map=self.xyvector, separator="pipe")
self.assertEqual(
first=TABLE_1.replace("\n", os.linesep),
second=category,
msg="Attribute table has wrong entries",
)
INPUT_DELIM_IN_TEXT = """Id,POINT_X,POINT_Y,Category,"ED field estimate"
100,437343.6704,4061363.41525,"High Erosion, Low Canopy","Low Deposition, Low Canopy"
101,453643.127906,4050070.29852,"High Erosion, High Canopy","Low Erosion, Low Canopy"
102,454903.605427,4049480.80568,"High Erosion, High Canopy","High Erosion, Low Canopy"
105,437734.838807,4060493.98315,"High Erosion, Low Canopy","Low Erosion, High Canopy"
107,450833.019732,4048207.02664,"High Erosion, Low Canopy","Low Erosion, High Canopy"
"""
TABLE_2 = """cat|x|y|ed_cat|field_estimate
100|437343.6704|4061363.41525|High Erosion, Low Canopy|Low Deposition, Low Canopy
101|453643.127906|4050070.29852|High Erosion, High Canopy|Low Erosion, Low Canopy
102|454903.605427|4049480.80568|High Erosion, High Canopy|High Erosion, Low Canopy
105|437734.838807|4060493.98315|High Erosion, Low Canopy|Low Erosion, High Canopy
107|450833.019732|4048207.02664|High Erosion, Low Canopy|Low Erosion, High Canopy
"""
class AdvancedCsvTestCase(TestCase):
xyvector = "yxvetor_test"
def tearDown(self):
"""Remove the vector map after each test method"""
self.runModule("g.remove", flags="f", type="vector", name=self.xyvector)
def METHOD_NAME(self):
"""Test loading CSV with delimiter in text
Text delimiter added in r63581
"""
self.assertModule(
"v.in.ascii",
input="-",
output=self.xyvector,
separator="comma",
text="doublequote",
skip=1,
x=2,
y=3,
cat=1,
columns="cat int, x double, y double,"
" ed_cat varchar(40), field_estimate varchar(40)",
stdin_=INPUT_DELIM_IN_TEXT,
)
category = read_command("v.db.select", map=self.xyvector, separator="pipe")
self.assertEqual(
first=TABLE_2.replace("\n", os.linesep),
second=category,
msg="Attribute table has wrong entries",
)
if __name__ == "__main__":
test()
|
2,453 |
find text in file
|
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utility that checks the list of models in the tips in the task-specific pages of the doc is up to date and potentially
fixes it.
Use from the root of the repo with:
```bash
python utils/check_task_guides.py
```
for a check that will error in case of inconsistencies (used by `make repo-consistency`).
To auto-fix issues run:
```bash
python utils/check_task_guides.py --fix_and_overwrite
```
which is used by `make fix-copies`.
"""
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"
def METHOD_NAME(filename: str, start_prompt: str, end_prompt: str) -> str:
"""
Find the text in filename between two prompts.
Args:
filename (`str`): The file to search into.
start_prompt (`str`): A string to look for at the start of the content searched.
end_prompt (`str`): A string that will mark the end of the content to look for.
Returns:
        `Tuple[str, int, int, List[str]]`: The content between the prompts, the start and end
        indices of that content in the list of lines, and the full list of lines read from the file.
"""
with open(filename, "r", encoding="utf-8", newline="\n") as f:
lines = f.readlines()
# Find the start prompt.
start_index = 0
while not lines[start_index].startswith(start_prompt):
start_index += 1
start_index += 1
# Now go until the end prompt.
end_index = start_index
while not lines[end_index].startswith(end_prompt):
end_index += 1
end_index -= 1
while len(lines[start_index]) <= 1:
start_index += 1
while len(lines[end_index]) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
# Map between a task guide and the corresponding auto class.
TASK_GUIDE_TO_MODELS = {
"asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
"summarization.md": ("nllb",),
"translation.md": ("nllb",),
}
def get_model_list_for_task(task_guide: str) -> str:
"""
Return the list of models supporting a given task.
Args:
task_guide (`str`): The name of the task guide to check.
Returns:
`str`: The list of models supporting this task, as links to their respective doc pages separated by commas.
"""
model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
model_names = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide: str, overwrite: bool = False):
"""
For a given task guide, checks the model list in the generated tip for consistency with the state of the lib and
updates it if needed.
Args:
task_guide (`str`):
The name of the task guide to check.
overwrite (`bool`, *optional*, defaults to `False`):
Whether or not to overwrite the table when it's not up to date.
"""
current_list, start_index, end_index, lines = METHOD_NAME(
filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
end_prompt="<!--End of the generated tip-->",
)
new_list = get_model_list_for_task(task_guide)
if current_list != new_list:
if overwrite:
with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
else:
raise ValueError(
f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
" to fix this."
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
|
2,454 |
release locks
|
import datetime
import uuid
import psutil
import os
import time
import logging
import weakref
from typing import Union
from enum import Enum
from pathlib import Path
from dataclasses import dataclass, field
from dateutil.relativedelta import relativedelta
from filelock import UnixFileLock, SoftFileLock, Timeout
from aim._core.storage.locking import ContainerLock
logger = logging.getLogger(__name__)
class LockingVersion(Enum):
LEGACY = 0
NEW = 1
class LockType(Enum):
SOFT_LOCK = 0
UNIX_LOCK = 1
class RunLockingError(RuntimeError):
pass
@dataclass(frozen=True)
class LockInfo:
run_hash: str = field()
locked: bool = field(default=False)
version: LockingVersion = field(default=LockingVersion.NEW)
type: LockType = field(default=LockType.SOFT_LOCK)
created_at: datetime.datetime = field(default=None)
def age(self, to=None) -> str:
if self.created_at is None:
return 'N/A'
to = to or datetime.datetime.now()
delta = relativedelta(to, self.created_at)
date_attrs = ('years', 'months', 'days', 'hours', 'minutes', 'seconds')
for attr in date_attrs:
if getattr(delta, attr) > 1:
return f'~{getattr(delta, attr)} {attr}'
elif getattr(delta, attr) == 1:
return f'~{getattr(delta, attr)} {attr[:-1]}'
class SFRunLock(ContainerLock):
def __init__(self, lock_manager: 'LockManager', run_hash: str, path: Path, timeout: int):
self.run_hash = run_hash
self._lock_manager = weakref.ref(lock_manager)
self._sf_lock = SoftFileLock(path, timeout=timeout)
def lock(self, force: bool = False):
self._lock_manager().lock(self.run_hash, self._sf_lock, force=force)
def release(self, force: bool = False) -> None:
self._sf_lock.release(force=force)
class LockManager(object):
machine_id = uuid.getnode()
pid = os.getpid()
def __init__(self, repo_path: Union[str, Path]):
self.repo_path = Path(repo_path)
self.locks_path = self.repo_path / 'locks'
self.locks_path.mkdir(parents=True, exist_ok=True)
@staticmethod
def softlock_fname(name: str) -> str:
return f'{name}.softlock'
def get_container_lock_info(self, run_hash: str) -> LockInfo:
# check locks created prior to 3.15 version
locked = False
created_at = None
lock_version = None
lock_type = None
run_lock_path = self.locks_path / self.softlock_fname(run_hash)
if run_lock_path.exists():
locked = True
created_at = datetime.datetime.fromtimestamp(run_lock_path.stat().st_mtime)
lock_version = LockingVersion.NEW
lock_type = LockType.SOFT_LOCK
else:
            # consider Run as locked if one of its containers is locked
for container_dir in ('meta', 'seqs'):
lock_dir = self.repo_path / container_dir / 'locks'
lock_path = lock_dir / run_hash
soft_lock_path = lock_dir / self.softlock_fname(run_hash)
if lock_path.exists():
try:
lock = UnixFileLock(lock_path, timeout=0)
with lock.acquire():
pass
except Timeout:
locked = True
lock_version = LockingVersion.LEGACY
lock_type = LockType.UNIX_LOCK
elif soft_lock_path.exists():
locked = True
created_at = datetime.datetime.fromtimestamp(soft_lock_path.stat().st_mtime)
lock_version = LockingVersion.LEGACY
lock_type = LockType.SOFT_LOCK
return LockInfo(run_hash=run_hash, locked=locked, created_at=created_at, version=lock_version, type=lock_type)
def get_container_lock(self, run_hash: str, timeout: int = 10) -> ContainerLock:
lock_path = self.locks_path / self.softlock_fname(run_hash)
return SFRunLock(self, run_hash, lock_path, timeout=timeout)
def lock(self, run_hash: str, run_lock: SoftFileLock, force: bool = False):
lock_path = Path(run_lock.lock_file)
if force:
logger.warning(f'Force-releasing locks for Run \'{run_hash}\'. Data corruption may occur if there is '
f'active process writing to Run \'{run_hash}\'.')
self.METHOD_NAME(run_hash, force=True)
elif not self.METHOD_NAME(run_hash, force=False):
raise RunLockingError(f'Cannot acquire lock for Run \'{run_hash}\'. '
f'Make sure no process uses Run \'{run_hash}\' and close it via Aim CLI:\n'
f'`aim runs close --force {run_hash}`')
run_lock.acquire()
with open(lock_path, 'w') as lock_metadata_fh:
lock_metadata_fh.write(f'{self.machine_id}-{self.pid}-{time.time()}')
def METHOD_NAME(self, run_hash: str, force: bool) -> bool:
success = True
lock_path = self.locks_path / self.softlock_fname(run_hash)
if force:
# Force-release run lock
if lock_path.exists():
lock_path.unlink()
else:
lock_info = self.get_container_lock_info(run_hash)
if lock_info.locked:
if self.is_stalled_lock(lock_path):
assert lock_info.version == LockingVersion.NEW
logger.info(f'Detected stalled lock for Run \'{run_hash}\'. Removing lock.')
lock_path.unlink()
else:
success = False
return success
def is_stalled_lock(self, lock_file_path: Path) -> bool:
with open(lock_file_path, mode='r') as lock_metadata_fh:
machine_id, pid, *_ = lock_metadata_fh.read().split('-')
if int(machine_id) == self.machine_id and not psutil.pid_exists(int(pid)):
return True
return False
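# Illustrative sketch (not part of the original module): a typical acquire/release cycle
# for a single run. The repository path and run hash are placeholder values.
def _run_lock_example():
    manager = LockManager("/tmp/aim-repo")
    run_lock = manager.get_container_lock("run-123", timeout=10)
    run_lock.lock()
    try:
        pass  # write to the run's containers while the soft lock is held
    finally:
        run_lock.release()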
|
2,455 |
test cpu routine
|
import unittest
from unittest import mock
import numpy
import cupy
from cupy import testing
from cupyx import profiler
from cupyx.profiler import _time
class TestBenchmark(unittest.TestCase):
def METHOD_NAME(self):
with mock.patch('time.perf_counter',
mock.Mock(side_effect=[2.4, 3.8, 3.8] * 10)):
with mock.patch('cupy.cuda.get_elapsed_time',
mock.Mock(return_value=2500)):
mock_func = mock.Mock()
mock_func.__name__ = 'test_name_xxx'
x = cupy.testing.shaped_random((2, 3), cupy, 'int32')
y = cupy.testing.shaped_random((2, 3), cupy, 'int32')
assert mock_func.call_count == 0
perf = profiler.benchmark(
mock_func, (x, y), n_repeat=10, n_warmup=3)
assert perf.name == 'test_name_xxx'
assert mock_func.call_count == 13
assert perf.cpu_times.shape == (10,)
assert perf.gpu_times.shape == (1, 10,)
assert (perf.cpu_times == 1.4).all()
assert (perf.gpu_times == 2.5).all()
@testing.multi_gpu(2)
def test_multigpu_routine(self):
with mock.patch('time.perf_counter',
mock.Mock(side_effect=[2.4, 3.8, 3.8] * 10)):
with mock.patch('cupy.cuda.get_elapsed_time',
mock.Mock(return_value=2500)):
mock_func = mock.Mock()
mock_func.__name__ = 'test_name_xxx'
x = cupy.testing.shaped_random((2, 3), cupy, 'int32')
y = cupy.testing.shaped_random((2, 3), cupy, 'int32')
assert mock_func.call_count == 0
perf = profiler.benchmark(
mock_func, (x, y), n_repeat=10, n_warmup=3, devices=(0, 1))
assert perf.name == 'test_name_xxx'
assert mock_func.call_count == 13
assert perf.cpu_times.shape == (10,)
assert perf.gpu_times.shape == (2, 10,)
assert (perf.cpu_times == 1.4).all()
assert (perf.gpu_times == 2.5).all()
def test_benchmark_max_duration(self):
with mock.patch('time.perf_counter',
mock.Mock(side_effect=[1., 2., 2.] * 6)):
with mock.patch('cupy.cuda.get_elapsed_time',
mock.Mock(return_value=2500)):
mock_func = mock.Mock()
mock_func.__name__ = 'test_name_xxx'
x = cupy.testing.shaped_random((2, 3), cupy, 'int32')
y = cupy.testing.shaped_random((2, 3), cupy, 'int32')
assert mock_func.call_count == 0
perf = profiler.benchmark(
mock_func, (x, y), n_warmup=3, max_duration=2.5)
assert perf.name == 'test_name_xxx'
assert mock_func.call_count == 6
assert perf.cpu_times.shape == (3,)
assert perf.gpu_times.shape == (1, 3)
assert (perf.cpu_times == 1.).all()
assert (perf.gpu_times == 2.5).all()
def test_benchmark_kwargs(self):
x = cupy.random.rand(5)
profiler.benchmark(
cupy.nonzero, kwargs={'a': x}, n_repeat=1, n_warmup=1)
class TestPerfCaseResult(unittest.TestCase):
def test_show_gpu(self):
times = numpy.array([
[5.4, 7.1, 6.0, 5.4, 4.2],
[6.4, 4.3, 8.9, 9.6, 3.8],
]) * 1e-6
perf = _time._PerfCaseResult('test_name_xxx', times, (0,))
expected = (
'test_name_xxx :'
' CPU: 5.620 us +/- 0.943 '
'(min: 4.200 / max: 7.100) us '
' GPU-0: 6.600 us +/- 2.344 '
'(min: 3.800 / max: 9.600) us'
)
assert str(perf) == expected
def test_no_show_gpu(self):
times = numpy.array([
[5.4, 7.1, 6.0, 5.4, 4.2],
[6.4, 4.3, 8.9, 9.6, 3.8],
]) * 1e-6
perf = _time._PerfCaseResult('test_name_xxx', times, (0,))
expected = (
'test_name_xxx :'
' CPU: 5.620 us +/- 0.943 '
'(min: 4.200 / max: 7.100) us'
)
assert perf.to_str() == expected
# Checks if the result does not change.
assert perf.to_str() == expected
def test_single_show_gpu(self):
times = numpy.array([[5.4], [6.4]]) * 1e-6
perf = _time._PerfCaseResult('test_name_xxx', times, (0,))
assert str(perf) == ('test_name_xxx : CPU: 5.400 us '
' GPU-0: 6.400 us')
def test_single_no_show_gpu(self):
times = numpy.array([[5.4], [6.4]]) * 1e-6
perf = _time._PerfCaseResult('test_name_xxx', times, (0,))
assert perf.to_str() == 'test_name_xxx : CPU: 5.400 us'
def test_show_multigpu(self):
times = numpy.array([[5.4], [6.4], [7.0], [8.1]]) * 1e-6
perf = _time._PerfCaseResult('test_name_xxx', times, (0, 1, 2))
assert str(perf) == ('test_name_xxx : CPU: 5.400 us '
' GPU-0: 6.400 us '
' GPU-1: 7.000 us '
' GPU-2: 8.100 us')
|
2,456 |
to class
|
import pickle
import cv2
import numpy as np
from cv_bridge import CvBridge
from mil_ros_tools import BagCrawler, CvDebug
from .HOG_descriptor import HOGDescriptor
from .SVM_classifier import SVMClassifier
__author__ = "Tess Bianchi"
class Config:
def __init__(self):
self.classes = ["totem", "scan_the_code", "nothing", "shooter"]
self.classifier = SVMClassifier()
self.descriptor = HOGDescriptor()
self.bridge = CvBridge()
self.MAX_SIZE = 74
self.IMAGE_SIZE = 100
def METHOD_NAME(self, val):
return self.classes[val]
def to_val(self, clss):
for i, c in enumerate(self.classes):
if c in clss:
return i
def get_imgs(self, val):
roi = pickle.load(open(val, "rb"))
imgs = []
rois_vals = []
rois = []
print(roi)
for b in roi.bag_to_rois:
frames = roi.bag_to_rois[b]
bc = BagCrawler(b)
topic = bc.image_topics[0]
bc_crawl = bc.crawl(topic)
print(b)
for frame in frames:
img = next(bc_crawl)
img = self.bridge.imgmsg_to_cv2(img, "bgr8")
imgs.append(img)
a = []
for clss in frame:
r = frame[clss]
myroi = img[r[1] : r[1] + r[3], r[0] : r[0] + r[2]]
myroi = self._resize_image(myroi)
clss = self.to_val(clss)
rois.append((myroi, clss))
a.append((r, myroi, clss))
rois_vals.append(a)
return imgs, rois_vals, rois
def _resize_image(self, img):
h, w, r = img.shape
        if h > w:
            nh = self.MAX_SIZE
            nw = nh * w // h
        else:
            nw = self.MAX_SIZE
            nh = nw * h // w
img = cv2.resize(img, (nw, nh))
# return img
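        # Pad the resized image up to IMAGE_SIZE x IMAGE_SIZE by repeating border rows and
        # columns: the repeat vectors below are 1 everywhere except at the first and last
        # positions, which absorb the remaining empty slots on each side.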
rep = np.ones(nw, dtype=np.int64)
reph = np.ones(nh, dtype=np.int64)
emtpy_slots = self.IMAGE_SIZE - nw
empty_slots_h = self.IMAGE_SIZE - nh
        half_empty_slots = emtpy_slots // 2 + 1
        half_empty_slots_h = empty_slots_h // 2 + 1
reph[0] = half_empty_slots_h
reph[-1] = half_empty_slots_h
rep[0] = half_empty_slots
rep[-1] = half_empty_slots
if emtpy_slots % 2 == 1:
rep[-1] += 1
if empty_slots_h % 2 == 1:
reph[-1] += 1
img = np.repeat(img, reph, axis=0)
return np.repeat(img, rep, axis=1)
class Training:
def __init__(self, roi_file, output):
self.config = Config()
self.output = output
self.roi_file = roi_file
def train(self):
descs = []
classify = []
imgs, roi_val, rois = self.config.get_imgs(self.roi_file)
for r in rois:
roi, clss = r
desc = self.config.descriptor.get_descriptor(roi)
desc = desc.flatten()
descs.append(desc)
classify.append(clss)
print(clss)
descs = np.array(descs)
classify = np.array(classify)
counts = {x: list(classify).count(x) for x in set(classify)}
counts = {self.config.METHOD_NAME(k): v for k, v in counts.items()}
print(counts)
self.config.classifier.train(descs, classify)
self.config.classifier.pickle("train.p")
# class Classifier(object):
class ClassiferTest:
def __init__(self, roi_file, class_file):
self.config = Config()
self.roi_file = roi_file
self.classifier = pickle.load(open(class_file, "rb"))
self.debug = CvDebug()
def classify(self):
print(self.roi_file)
imgs, roi_val, rois = self.config.get_imgs(self.roi_file)
for i, frames in enumerate(roi_val):
img = imgs[i]
draw = img.copy()
for roi in frames:
myroi, roi_img, tru_clss = roi
desc = self.config.descriptor.get_descriptor(roi_img)
desc = desc.flatten()
clss, prob = self.classifier.classify(desc)
clss = self.config.METHOD_NAME(clss)
cv2.rectangle(
draw,
(myroi[0], myroi[1]),
(myroi[0] + myroi[2], myroi[1] + myroi[3]),
(0, 0, 255),
)
cv2.putText(
draw,
clss + ": " + str(prob),
(myroi[0], myroi[1]),
1,
1.0,
(0, 255, 0),
)
# self.debug.add_image(draw, topic="roi")
cv2.imshow("roi", draw)
cv2.waitKey(33)
if __name__ == "__main__":
t = Training("roi_competition.p", "train_competition.p")
t.train()
print("done")
# c = ClassiferTest("val_roi.p", "train.p")
# c.classify()
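# Illustrative sketch (not part of the original file): Config._resize_image pads the
# resized ROI by repeating its first and last rows/columns until it reaches
# IMAGE_SIZE x IMAGE_SIZE. Under that reading, an arguably clearer equivalent is
# edge-replication padding with cv2.copyMakeBorder, shown here for comparison only.
def _resize_image_with_border(img, max_size=74, image_size=100):
    h, w = img.shape[:2]
    if h > w:
        nh, nw = max_size, max(1, int(max_size * w / h))
    else:
        nw, nh = max_size, max(1, int(max_size * h / w))
    img = cv2.resize(img, (nw, nh))
    top = (image_size - nh) // 2
    bottom = image_size - nh - top
    left = (image_size - nw) // 2
    right = image_size - nw - left
    # Replicate the border pixels instead of repeating rows/columns manually.
    return cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_REPLICATE)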
|
2,457 |
decrement quota
|
from viur.core import current, db, errors, utils
from viur.core.tasks import PeriodicTask, DeleteEntitiesIter
from typing import Literal, Union
from datetime import timedelta
class RateLimit(object):
"""
This class is used to restrict access to certain functions to *maxRate* calls per minute.
Usage: Create an instance of this object in your module's __init__ function. Then call
isQuotaAvailable before executing the action to check if there is quota available and,
after executing the action, call decrementQuota.
"""
rateLimitKind = "viur-ratelimit"
def __init__(self, resource: str, maxRate: int, minutes: int, method: Literal["ip", "user"]):
"""
Initializes a new RateLimit gate.
:param resource: Name of the resource to protect
:param maxRate: Amount of tries allowed in the given time-span
:param minutes: Length of the time-span in minutes
:param method: Lock by IP or by the current user
"""
super(RateLimit, self).__init__()
self.resource = resource
self.maxRate = maxRate
self.minutes = minutes
self.steps = min(minutes, 5)
self.secondsPerStep = 60 * (float(minutes) / float(self.steps))
assert method in ["ip", "user"], "method must be 'ip' or 'user'"
self.useUser = method == "user"
def _getEndpointKey(self) -> Union[db.Key, str]:
"""
:warning:
It's invalid to call _getEndpointKey if method is set to user and there's no user logged in!
:return: the key associated with the current endpoint (its IP or the key of the current user)
"""
if self.useUser:
user = current.user.get()
assert user, "Cannot decrement usage from guest!"
return user["key"]
else:
remoteAddr = current.request.get().request.remote_addr
if "::" in remoteAddr: # IPv6 in shorted form
remoteAddr = remoteAddr.split(":")
blankIndex = remoteAddr.index("")
missigParts = ["0000"] * (8 - len(remoteAddr))
remoteAddr = remoteAddr[:blankIndex] + missigParts + remoteAddr[blankIndex + 1:]
return ":".join(remoteAddr[:4])
elif ":" in remoteAddr: # It's IPv6, so we remove the last 64 bits (interface id)
# as it is easily controlled by the user
return ":".join(remoteAddr.split(":")[:4])
else: # It's IPv4, simply return that address
return remoteAddr
def _getCurrentTimeKey(self) -> str:
"""
:return: the current lock period, used in the second position of the memcache key
"""
dateTime = utils.utcNow()
key = dateTime.strftime("%Y-%m-%d-%%s")
secsinceMidnight = (dateTime - dateTime.replace(hour=0, minute=0, second=0, microsecond=0)).total_seconds()
currentStep = int(secsinceMidnight / self.secondsPerStep)
return key % currentStep
def METHOD_NAME(self) -> None:
"""
Removes one attempt from the pool of available Quota for that user/ip
"""
def updateTxn(cacheKey: str) -> None:
key = db.Key(self.rateLimitKind, cacheKey)
obj = db.Get(key)
if obj is None:
obj = db.Entity(key)
obj["value"] = 0
obj["value"] += 1
obj["expires"] = utils.utcNow() + timedelta(minutes=2 * self.minutes)
db.Put(obj)
lockKey = "%s-%s-%s" % (self.resource, self._getEndpointKey(), self._getCurrentTimeKey())
db.RunInTransaction(updateTxn, lockKey)
def isQuotaAvailable(self) -> bool:
"""
Checks if there's currently quota available for the current user/ip
:return: True if there's quota available, False otherwise
"""
endPoint = self._getEndpointKey()
currentDateTime = utils.utcNow()
secSinceMidnight = (currentDateTime - currentDateTime.replace(hour=0, minute=0, second=0,
microsecond=0)).total_seconds()
currentStep = int(secSinceMidnight / self.secondsPerStep)
keyBase = currentDateTime.strftime("%Y-%m-%d-%%s")
cacheKeys = []
for x in range(0, self.steps):
cacheKeys.append(
db.Key(self.rateLimitKind, "%s-%s-%s" % (self.resource, endPoint, keyBase % (currentStep - x))))
tmpRes = db.Get(cacheKeys)
return sum([x["value"] for x in tmpRes if x and currentDateTime < x["expires"]]) <= self.maxRate
def assertQuotaIsAvailable(self, setRetryAfterHeader: bool = True) -> bool:
"""Assert quota is available.
If no quota is available, a :class:`viur.core.errors.TooManyRequests`
exception will be raised.
:param setRetryAfterHeader: Set the Retry-After header on the
current request response, if the quota is exceeded.
:return: True if quota is available.
:raises: :exc:`viur.core.errors.TooManyRequests`, if no quota is available.
"""
if self.isQuotaAvailable():
return True
if setRetryAfterHeader:
current.request.get().response.headers["Retry-After"] = str(self.maxRate * 60)
raise errors.TooManyRequests(
f"{self.steps} requests allowed per {self.maxRate} minute(s). Try again later."
)
@PeriodicTask(60)
def cleanOldRateLocks(*args, **kwargs) -> None:
DeleteEntitiesIter.startIterOnQuery(db.Query(RateLimit.rateLimitKind).filter("expires <", utils.utcNow()))
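# Minimal usage sketch (not part of the original module; the module class, resource
# name and credential-check helper are hypothetical). It follows the pattern from the
# RateLimit docstring: check the quota before the action and decrement it afterwards
# (the decrement method is the one masked as METHOD_NAME above).
class LoginModule:
    def __init__(self):
        self.loginRateLimit = RateLimit("login", maxRate=5, minutes=10, method="ip")

    def login(self, username, password):
        self.loginRateLimit.assertQuotaIsAvailable()
        result = self._check_credentials(username, password)  # hypothetical helper
        self.loginRateLimit.decrementQuota()
        return result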
|
2,458 |
program listing
|
"""Learner dashboard views"""
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_GET
from edx_rest_framework_extensions.auth.jwt.authentication import JwtAuthentication
from rest_framework import permissions, status
from rest_framework.authentication import SessionAuthentication
from rest_framework.response import Response
from rest_framework.views import APIView
from lms.djangoapps.learner_dashboard.utils import masters_program_tab_view_is_enabled, is_enrolled_or_staff
from common.djangoapps.edxmako.shortcuts import render_to_response
from lms.djangoapps.learner_dashboard.programs import (
ProgramDetailsFragmentView,
ProgramDiscussionLTI,
ProgramsFragmentView, ProgramLiveLTI
)
from lms.djangoapps.program_enrollments.rest_api.v1.utils import ProgramSpecificViewMixin
from openedx.core.djangoapps.programs.models import ProgramsApiConfig
from openedx.core.lib.api.authentication import BearerAuthentication
@login_required
@require_GET
def METHOD_NAME(request):
"""View a list of programs in which the user is engaged."""
programs_config = ProgramsApiConfig.current()
programs_fragment = ProgramsFragmentView().render_to_fragment(request, programs_config=programs_config)
context = {
'disable_courseware_js': True,
'programs_fragment': programs_fragment,
'nav_hidden': True,
'show_dashboard_tabs': True,
'show_program_listing': programs_config.enabled,
'uses_bootstrap': True,
}
return render_to_response('learner_dashboard/programs.html', context)
@login_required
@require_GET
def program_details(request, program_uuid):
"""View details about a specific program."""
programs_config = ProgramsApiConfig.current()
program_fragment = ProgramDetailsFragmentView().render_to_fragment(
request, program_uuid, programs_config=programs_config
)
context = {
'program_fragment': program_fragment,
'show_program_listing': programs_config.enabled,
'show_dashboard_tabs': True,
'nav_hidden': True,
'disable_courseware_js': True,
'uses_bootstrap': True,
}
return render_to_response('learner_dashboard/program_details.html', context)
class ProgramDiscussionIframeView(APIView, ProgramSpecificViewMixin):
"""
A view for retrieving the Program Discussion IFrame.
Path: ``/dashboard/programs/{program_uuid}/discussion/``
Accepts: [GET]
------------------------------------------------------------------------------------
GET
------------------------------------------------------------------------------------
**Returns**
* 200: OK - Contains a program discussion iframe.
* 401: The requesting user is not authenticated.
* 403: The requesting user lacks access to the program.
* 404: The requested program does not exist.
**Response**
In the case of a 200 response code, the response contains the iframe HTML and whether discussion is configured
for the program.
**Example**
{
'tab_view_enabled': True,
'discussion': {
"iframe": "
<iframe
id='lti-tab-embed'
style='width: 100%; min-height: 800px; border: none'
srcdoc='{srcdoc}'
>
</iframe>
",
"configured": false
}
}
"""
authentication_classes = (JwtAuthentication, BearerAuthentication, SessionAuthentication)
permission_classes = (permissions.IsAuthenticated,)
def get(self, request, program_uuid):
""" GET handler """
if not is_enrolled_or_staff(request, program_uuid):
default_response = {
'tab_view_enabled': False,
'discussion': {
'configured': False,
'iframe': ''
}
}
return Response(default_response, status=status.HTTP_200_OK)
program_discussion_lti = ProgramDiscussionLTI(program_uuid, request)
response_data = {
'tab_view_enabled': masters_program_tab_view_is_enabled(),
'discussion': {
'iframe': program_discussion_lti.render_iframe(),
'configured': program_discussion_lti.is_configured,
}
}
return Response(response_data, status=status.HTTP_200_OK)
class ProgramLiveIframeView(APIView, ProgramSpecificViewMixin):
"""
A view for retrieving the Program Live IFrame.
Path: ``/dashboard/programs/{program_uuid}/live/``
Accepts: [GET]
------------------------------------------------------------------------------------
GET
------------------------------------------------------------------------------------
**Returns**
* 200: OK - Contains a program live zoom iframe.
* 401: The requesting user is not authenticated.
* 403: The requesting user lacks access to the program.
* 404: The requested program does not exist.
**Response**
In the case of a 200 response code, the response contains the iframe HTML and whether live is configured
for the program.
**Example**
{
'tab_view_enabled': True,
'live': {
"iframe": "
<iframe
id='lti-tab-embed'
style='width: 100%; min-height: 800px; border: none'
srcdoc='{srcdoc}'
>
</iframe>
",
"configured": false
}
}
"""
authentication_classes = (JwtAuthentication, BearerAuthentication, SessionAuthentication)
permission_classes = (permissions.IsAuthenticated,)
def get(self, request, program_uuid):
""" GET handler """
if not is_enrolled_or_staff(request, program_uuid):
default_response = {
'tab_view_enabled': False,
'live': {
'configured': False,
'iframe': ''
}
}
return Response(default_response, status=status.HTTP_200_OK)
program_live_lti = ProgramLiveLTI(program_uuid, request)
response_data = {
'tab_view_enabled': masters_program_tab_view_is_enabled(),
'live': {
'iframe': program_live_lti.render_iframe(),
'configured': program_live_lti.is_configured,
}
}
return Response(response_data, status=status.HTTP_200_OK)
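# Hedged usage sketch (not part of the original module): exercising the discussion
# iframe endpoint with Django's test client. The URL follows the "Path" documented on
# ProgramDiscussionIframeView; the program UUID is a placeholder value.
from django.contrib.auth import get_user_model
from django.test import Client

def _smoke_check_discussion_iframe(program_uuid="11111111-2222-3333-4444-555555555555"):
    user = get_user_model().objects.first()  # any existing user, for illustration only
    client = Client()
    client.force_login(user)
    response = client.get(f"/dashboard/programs/{program_uuid}/discussion/")
    assert response.status_code == 200
    assert "tab_view_enabled" in response.json()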
|
2,459 |
test quaternion to rotation matrix y
|
import pytest
import numpy as np
from paz.backend.groups import homogenous_quaternion_to_rotation_matrix
from paz.backend.groups import quaternion_to_rotation_matrix
from paz.backend.groups import rotation_vector_to_rotation_matrix
from paz.backend.groups import to_affine_matrix
from paz.backend.groups import build_rotation_matrix_x
from paz.backend.groups import build_rotation_matrix_y
from paz.backend.groups import build_rotation_matrix_z
from paz.backend.groups import compute_norm_SO3
from paz.backend.groups import calculate_canonical_rotation
from paz.backend.groups import rotation_matrix_to_axis_angle
from paz.backend.groups import rotation_matrix_to_compact_axis_angle
@pytest.fixture
def rotation_matrix_X_HALF_PI():
rotation_matrix = np.array([[1.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 1.0, 0.0]])
return rotation_matrix
@pytest.fixture
def rotation_matrix_Y_HALF_PI():
rotation_matrix = np.array([[0.0, 0.0, 1.0],
[0.0, 1.0, 0.0],
[-1.0, 0.0, 0.0]])
return rotation_matrix
@pytest.fixture
def rotation_matrix_Z_HALF_PI():
rotation_matrix = np.array([[0.0, -1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, 1.0]])
return rotation_matrix
def test_homogenous_quaternion_to_rotation_matrix_identity():
quaternion = np.array([0.0, 0.0, 0.0, 1.0])
matrix = homogenous_quaternion_to_rotation_matrix(quaternion)
assert np.allclose(np.eye(3), matrix)
def test_homogenous_quaternion_to_rotation_matrix_Z(rotation_matrix_Z_HALF_PI):
quaternion = np.array([0, 0, 0.7071068, 0.7071068])
matrix = homogenous_quaternion_to_rotation_matrix(quaternion)
assert np.allclose(rotation_matrix_Z_HALF_PI, matrix)
def test_homogenous_quaternion_to_rotation_matrix_Y(rotation_matrix_Y_HALF_PI):
quaternion = np.array([0, 0.7071068, 0.0, 0.7071068])
matrix = homogenous_quaternion_to_rotation_matrix(quaternion)
assert np.allclose(rotation_matrix_Y_HALF_PI, matrix)
def test_homogenous_quaternion_to_rotation_matrix_X(rotation_matrix_X_HALF_PI):
quaternion = np.array([0.7071068, 0.0, 0.0, 0.7071068])
matrix = homogenous_quaternion_to_rotation_matrix(quaternion)
assert np.allclose(rotation_matrix_X_HALF_PI, matrix)
def test_quaternion_to_rotation_matrix_identity():
quaternion = np.array([0.0, 0.0, 0.0, 1.0])
matrix = quaternion_to_rotation_matrix(quaternion)
assert np.allclose(np.eye(3), matrix)
def test_quaternion_to_rotation_matrix_Z(rotation_matrix_Z_HALF_PI):
quaternion = np.array([0, 0, 0.7071068, 0.7071068])
matrix = quaternion_to_rotation_matrix(quaternion)
assert np.allclose(rotation_matrix_Z_HALF_PI, matrix)
def METHOD_NAME(rotation_matrix_Y_HALF_PI):
quaternion = np.array([0, 0.7071068, 0.0, 0.7071068])
matrix = quaternion_to_rotation_matrix(quaternion)
assert np.allclose(rotation_matrix_Y_HALF_PI, matrix)
def test_quaternion_to_rotation_matrix_X(rotation_matrix_X_HALF_PI):
quaternion = np.array([0.7071068, 0.0, 0.0, 0.7071068])
matrix = quaternion_to_rotation_matrix(quaternion)
assert np.allclose(rotation_matrix_X_HALF_PI, matrix)
def test_rotation_vector_to_rotation_matrix_identity():
rotation_vector = np.array([0.0, 0.0, 0.0])
matrix = rotation_vector_to_rotation_matrix(rotation_vector)
assert np.allclose(np.eye(3), matrix)
def test_rotation_vector_to_rotation_matrix_Z(rotation_matrix_Z_HALF_PI):
rotation_vector = np.array([0.0, 0.0, np.pi / 2.0])
matrix = rotation_vector_to_rotation_matrix(rotation_vector)
assert np.allclose(rotation_matrix_Z_HALF_PI, matrix)
def test_rotation_vector_to_rotation_matrix_Y(rotation_matrix_Y_HALF_PI):
rotation_vector = np.array([0.0, np.pi / 2.0, 0.0])
matrix = rotation_vector_to_rotation_matrix(rotation_vector)
assert np.allclose(rotation_matrix_Y_HALF_PI, matrix)
def test_rotation_vector_to_rotation_matrix_X(rotation_matrix_X_HALF_PI):
rotation_vector = np.array([np.pi / 2.0, 0.0, 0.0])
matrix = rotation_vector_to_rotation_matrix(rotation_vector)
assert np.allclose(rotation_matrix_X_HALF_PI, matrix)
def test_to_affine_matrix_identity():
rotation_matrix = np.eye(3)
translation = np.zeros(3)
matrix = to_affine_matrix(rotation_matrix, translation)
assert np.allclose(matrix, np.eye(4))
def test_to_affine_matrix():
rotation_matrix = np.array([[1.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 1.0, 0.0]])
translation = np.array([3.0, 1.2, 3.0])
matrix = to_affine_matrix(rotation_matrix, translation)
affine_matrix = np.array([[1.0, 0.0, 0.0, 3.0],
[0.0, 0.0, -1.0, 1.2],
[0.0, 1.0, 0.0, 3.0],
[0.0, 0.0, 0.0, 1.0]])
assert np.allclose(affine_matrix, matrix)
def test_build_rotation_matrix_x(rotation_matrix_X_HALF_PI):
angle = np.pi / 2.0
matrix = build_rotation_matrix_x(angle)
assert np.allclose(matrix, rotation_matrix_X_HALF_PI)
def test_build_rotation_matrix_y(rotation_matrix_Y_HALF_PI):
angle = np.pi / 2.0
matrix = build_rotation_matrix_y(angle)
assert np.allclose(matrix, rotation_matrix_Y_HALF_PI)
def test_build_rotation_matrix_z(rotation_matrix_Z_HALF_PI):
angle = np.pi / 2.0
matrix = build_rotation_matrix_z(angle)
assert np.allclose(matrix, rotation_matrix_Z_HALF_PI)
def test_compute_norm_SO3_X(rotation_matrix_X_HALF_PI):
norm = compute_norm_SO3(np.eye(3), rotation_matrix_X_HALF_PI)
assert np.allclose(norm, 2.0)
def test_compute_norm_SO3_Y(rotation_matrix_Y_HALF_PI):
norm = compute_norm_SO3(np.eye(3), rotation_matrix_Y_HALF_PI)
assert np.allclose(norm, 2.0)
def test_compute_norm_SO3_Z(rotation_matrix_Z_HALF_PI):
norm = compute_norm_SO3(np.eye(3), rotation_matrix_Z_HALF_PI)
assert np.allclose(norm, 2.0)
def test_compute_norm_SO3_identity():
norm = compute_norm_SO3(np.eye(3), np.eye(3))
assert np.allclose(norm, 0.0)
def test_compute_norm_SO3_X_to_Z(rotation_matrix_X_HALF_PI,
rotation_matrix_Z_HALF_PI):
norm = compute_norm_SO3(rotation_matrix_X_HALF_PI,
rotation_matrix_Z_HALF_PI)
assert np.allclose(norm, 2.449489742783178)
def test_calculate_canonical_rotation(rotation_matrix_X_HALF_PI):
X_PI = np.matmul(rotation_matrix_X_HALF_PI, rotation_matrix_X_HALF_PI)
rotations = [X_PI, rotation_matrix_X_HALF_PI]
canonical_rotation = calculate_canonical_rotation(np.eye(3), rotations)
assert np.allclose(
canonical_rotation, np.linalg.inv(rotation_matrix_X_HALF_PI))
@pytest.fixture
def rotation_matrix():
rotation_matrix = np.array([[0.99394977, -0.02341585, -0.10731083],
[0.02910355, 0.9982362, 0.05174612],
[0.10590983, -0.05455617, 0.99287811]])
return rotation_matrix
@pytest.mark.parametrize(
"axis_angle", [[-0.43571813, -0.87396149, 0.21526963, 0.12228879]])
def test_rotation_matrix_to_axis_angle(rotation_matrix, axis_angle):
estimated_axis_angle = rotation_matrix_to_axis_angle(rotation_matrix)
assert np.allclose(axis_angle, estimated_axis_angle)
@pytest.mark.parametrize(
"compact_axis_angle", [[-0.05328344, -0.10687569, 0.02632506]])
def test_rotation_matrix_to_compact_axis_angle(
rotation_matrix, compact_axis_angle):
estimated_compact_axis_angle = rotation_matrix_to_compact_axis_angle(
rotation_matrix)
assert np.allclose(compact_axis_angle, estimated_compact_axis_angle)
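# Reference sketch (not from paz itself): the fixtures above follow the standard
# unit-quaternion to rotation-matrix formula with component ordering [x, y, z, w].
# Reproducing it with plain numpy is a handy sanity check for the expected values.
def _reference_quaternion_to_matrix(quaternion):
    x, y, z, w = quaternion
    return np.array([
        [1 - 2 * (y * y + z * z), 2 * (x * y - z * w), 2 * (x * z + y * w)],
        [2 * (x * y + z * w), 1 - 2 * (x * x + z * z), 2 * (y * z - x * w)],
        [2 * (x * z - y * w), 2 * (y * z + x * w), 1 - 2 * (x * x + y * y)],
    ])
# e.g. _reference_quaternion_to_matrix([0.7071068, 0.0, 0.0, 0.7071068]) is close
# to the rotation_matrix_X_HALF_PI fixture defined above.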
|
2,460 |
test log output strided
|
# Data Parallel Control (dpctl)
#
# Copyright 2020-2023 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import numpy as np
import pytest
from numpy.testing import assert_equal
import dpctl.tensor as dpt
from dpctl.tests.helper import get_queue_or_skip, skip_if_dtype_not_supported
from .utils import _all_dtypes, _map_to_device_dtype, _usm_types
@pytest.mark.parametrize("dtype", _all_dtypes)
def test_log_out_type(dtype):
q = get_queue_or_skip()
skip_if_dtype_not_supported(dtype, q)
X = dpt.asarray(1, dtype=dtype, sycl_queue=q)
expected_dtype = np.log10(np.array(1, dtype=dtype)).dtype
expected_dtype = _map_to_device_dtype(expected_dtype, q.sycl_device)
assert dpt.log10(X).dtype == expected_dtype
@pytest.mark.parametrize("dtype", ["f2", "f4", "f8", "c8", "c16"])
def test_log_output_contig(dtype):
q = get_queue_or_skip()
skip_if_dtype_not_supported(dtype, q)
n_seq = 1027
X = dpt.linspace(1, 13, num=n_seq, dtype=dtype, sycl_queue=q)
Xnp = dpt.asnumpy(X)
Y = dpt.log10(X)
tol = 8 * dpt.finfo(Y.dtype).resolution
np.testing.assert_allclose(
dpt.asnumpy(Y), np.log10(Xnp), atol=tol, rtol=tol
)
@pytest.mark.parametrize("dtype", ["f2", "f4", "f8", "c8", "c16"])
def METHOD_NAME(dtype):
q = get_queue_or_skip()
skip_if_dtype_not_supported(dtype, q)
n_seq = 2 * 1027
X = dpt.linspace(1, 13, num=n_seq, dtype=dtype, sycl_queue=q)[::-2]
Xnp = dpt.asnumpy(X)
Y = dpt.log10(X)
tol = 8 * dpt.finfo(Y.dtype).resolution
np.testing.assert_allclose(
dpt.asnumpy(Y), np.log10(Xnp), atol=tol, rtol=tol
)
@pytest.mark.parametrize("usm_type", _usm_types)
def test_log_usm_type(usm_type):
q = get_queue_or_skip()
arg_dt = np.dtype("f4")
input_shape = (10, 10, 10, 10)
X = dpt.empty(input_shape, dtype=arg_dt, usm_type=usm_type, sycl_queue=q)
X[..., 0::2] = 4 * dpt.e
X[..., 1::2] = 10 * dpt.e
Y = dpt.log10(X)
assert Y.usm_type == X.usm_type
assert Y.sycl_queue == X.sycl_queue
assert Y.flags.c_contiguous
expected_Y = np.empty(input_shape, dtype=arg_dt)
expected_Y[..., 0::2] = np.log10(np.float32(4 * dpt.e))
expected_Y[..., 1::2] = np.log10(np.float32(10 * dpt.e))
tol = 8 * dpt.finfo(Y.dtype).resolution
np.testing.assert_allclose(dpt.asnumpy(Y), expected_Y, atol=tol, rtol=tol)
@pytest.mark.parametrize("dtype", _all_dtypes)
def test_log_order(dtype):
q = get_queue_or_skip()
skip_if_dtype_not_supported(dtype, q)
arg_dt = np.dtype(dtype)
input_shape = (10, 10, 10, 10)
X = dpt.empty(input_shape, dtype=arg_dt, sycl_queue=q)
X[..., 0::2] = 4 * dpt.e
X[..., 1::2] = 10 * dpt.e
for ord in ["C", "F", "A", "K"]:
for perms in itertools.permutations(range(4)):
U = dpt.permute_dims(X[:, ::-1, ::-1, :], perms)
Y = dpt.log10(U, order=ord)
expected_Y = np.log10(dpt.asnumpy(U))
tol = 8 * max(
dpt.finfo(Y.dtype).resolution,
np.finfo(expected_Y.dtype).resolution,
)
np.testing.assert_allclose(
dpt.asnumpy(Y), expected_Y, atol=tol, rtol=tol
)
def test_log_special_cases():
q = get_queue_or_skip()
X = dpt.asarray(
[dpt.nan, -1.0, 0.0, -0.0, dpt.inf, -dpt.inf], dtype="f4", sycl_queue=q
)
Xnp = dpt.asnumpy(X)
with np.errstate(invalid="ignore", divide="ignore"):
assert_equal(dpt.asnumpy(dpt.log10(X)), np.log10(Xnp))
|
2,461 |
get all background task specs
|
""" collections of wrapper function for helping you to create BackgroundTask
~~BackgroundTasks:Feature~~
"""
import inspect
import pkgutil
from typing import Callable, Type, Iterable, Tuple
from huey import RedisHuey
import portality.tasks
from portality import models, constants
from portality.background import BackgroundApi, BackgroundTask
from portality.core import app
from portality.decorators import write_required
from portality.tasks.redis_huey import long_running, main_queue, configure, schedule
TaskFactory = Callable[[models.BackgroundJob], BackgroundTask]
_queue_for_action = None
def get_queue_id_by_task_queue(task_queue: RedisHuey):
if task_queue is None:
return constants.BGJOB_QUEUE_ID_UNKNOWN
elif task_queue.name == long_running.name:
return constants.BGJOB_QUEUE_ID_LONG
elif task_queue.name == main_queue.name:
return constants.BGJOB_QUEUE_ID_MAIN
else:
app.logger.warning(f'unknown task_queue[{task_queue}]')
return constants.BGJOB_QUEUE_ID_UNKNOWN
def create_job(username, action,
queue_id=constants.BGJOB_QUEUE_ID_UNKNOWN,
task_queue: RedisHuey = None,
params=None):
""" Common way to create BackgroundJob
"""
job = models.BackgroundJob()
job.user = username
job.action = action
if params is not None:
job.params = params
if task_queue is not None:
queue_id = get_queue_id_by_task_queue(task_queue)
job.queue_id = queue_id
return job
def submit_by_bg_task_type(background_task: Type[BackgroundTask], **prepare_kwargs):
""" Common way to submit task by BackgroundTask Class
"""
user = app.config.get("SYSTEM_USERNAME")
job = background_task.prepare(user, **prepare_kwargs)
background_task.submit(job)
def execute_by_job_id(job_id, task_factory: TaskFactory):
""" Common way to execute BackgroundTask by job_id
"""
job = models.BackgroundJob.pull(job_id)
task = task_factory(job)
BackgroundApi.execute(task)
def execute_by_bg_task_type(bg_task_type: Type[BackgroundTask], **prepare_kwargs):
""" wrapper for execute by BackgroundTask
"""
user = app.config.get("SYSTEM_USERNAME")
job = bg_task_type.prepare(user, **prepare_kwargs)
task = bg_task_type(job)
BackgroundApi.execute(task)
return task
class RedisHueyTaskHelper:
def __init__(self, task_queue: RedisHuey, task_name: str):
self.task_queue = task_queue
self.task_name = task_name
@property
def queue_id(self):
return get_queue_id_by_task_queue(self.task_queue)
def register_schedule(self, fn):
fn = write_required(script=True)(fn)
fn = self.task_queue.periodic_task(schedule(self.task_name))(fn)
return fn
def register_execute(self, is_load_config=False):
def wrapper(fn):
if is_load_config:
conf = configure(self.task_name)
else:
conf = {}
fn = write_required(script=True)(fn)
fn = self.task_queue.task(**conf)(fn)
return fn
return wrapper
def _get_background_task_spec(module):
queue_id = None
task_name = None
bg_class = None
for n, member in inspect.getmembers(module):
if isinstance(member, RedisHuey):
queue_id = get_queue_id_by_task_queue(member)
elif (
inspect.isclass(member)
and issubclass(member, BackgroundTask)
and member != BackgroundTask
):
task_name = getattr(member, '__action__', None)
bg_class = member
if queue_id and task_name and bg_class:
return queue_id, task_name, bg_class
return None
def lookup_queue_for_action(action):
""" Find which queue an action is registered to, by action name """
""" Inspect the background tasks to find some useful details. Store in a singleton to reduce work. """
global _queue_for_action
if _queue_for_action is None:
_queue_for_action = {_action: _queue for _queue, _action, _class in METHOD_NAME()}
return _queue_for_action.get(action, constants.BGJOB_QUEUE_ID_UNKNOWN)
def METHOD_NAME() -> Iterable[Tuple[str, str, Type]]:
def _load_bgtask_safe(_mi):
try:
return _mi.module_finder.find_spec(_mi.name).loader.load_module(_mi.name)
except RuntimeError as e:
if 'No configuration for scheduled action' in str(e):
app.logger.warning(f'config for {_mi.name} not found')
return None
raise e
module_infos = (m for m in pkgutil.walk_packages(portality.tasks.__path__) if not m.ispkg)
modules = (_load_bgtask_safe(mi) for mi in module_infos)
modules = filter(None, modules)
bgspec_list = map(_get_background_task_spec, modules)
bgspec_list = filter(None, bgspec_list)
return bgspec_list
def get_value_safe(key, default_v, kwargs, default_cond_fn=None):
""" get value from kwargs and return default_v if condition match
"""
v = kwargs.get(key, default_v)
default_cond_fn = default_cond_fn or (lambda _v: _v is None)
if default_cond_fn(v):
v = default_v
return v
def submit_by_background_job(background_job, execute_fn):
""" Common way of `BackgroundTask.submit`
"""
background_job.save()
execute_fn.schedule(args=(background_job.id,), delay=10)
def create_execute_fn(redis_huey, task_factory: TaskFactory):
@redis_huey.task()
@write_required(script=True)
def _execute_fn(job_id):
execute_by_job_id(job_id, task_factory)
return _execute_fn
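# Minimal usage sketch (not part of the original module). "SomeBackgroundTask" is a
# hypothetical BackgroundTask subclass; the helpers above already provide the two
# typical entry points shown here.
def run_some_task_now(**params):
    # synchronous execution (e.g. from a script or a test)
    return execute_by_bg_task_type(SomeBackgroundTask, **params)

def queue_some_task(**params):
    # asynchronous submission via the task's own prepare()/submit()
    submit_by_bg_task_type(SomeBackgroundTask, **params)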
|
2,462 |
max pool node
|
import numpy as np
import onnx
##############
## Settings ##
##############
producer_name = "onnx-layer-zoo"
input_name = "x"
output_name = "y"
####################
## Helper methods ##
####################
def make_network(name, node, input_shape, output_shape, aux_nodes):
input = [onnx.helper.make_tensor_value_info(input_name, onnx.TensorProto.FLOAT, input_shape)]
output = [onnx.helper.make_tensor_value_info(output_name, onnx.TensorProto.FLOAT, output_shape)]
graph = onnx.helper.make_graph([node] + aux_nodes, name, input, output)
model = onnx.helper.make_model(graph, producer_name=producer_name)
print(f"Generated {name}.onnx")
onnx.save(model, f"{name}.onnx")
def make_constant_float_node(name, values):
value_array = np.array(values).astype(float)
return onnx.helper.make_node(
"Constant",
inputs=[],
outputs=[name],
value=onnx.helper.make_tensor(
name=name,
data_type=onnx.TensorProto.FLOAT,
dims=value_array.shape,
vals=value_array.flatten(),
),
)
def make_constant_int_node(name, values):
value_array = np.array(values).astype(int)
return onnx.helper.make_node(
"Constant",
inputs=[],
outputs=[name],
value=onnx.helper.make_tensor(
name="const_tensor",
data_type=onnx.TensorProto.INT64,
dims=value_array.shape,
vals=value_array.flatten(),
),
)
############
## Layers ##
############
def constant_node():
values = np.array([[0, 0.5],[1, 1.5]], dtype=np.float32)
node = onnx.helper.make_node(
"Constant",
inputs=[],
outputs=[output_name],
value=onnx.helper.make_tensor(
name="const_tensor",
data_type=onnx.TensorProto.FLOAT,
dims=values.shape,
vals=values.flatten().astype(float),
),
)
return ("constant", node, [1], [2, 2], [])
def identity_node():
node = onnx.helper.make_node(
"Identity",
inputs=[input_name],
outputs=[output_name],
)
return ("identity", node, [2,2], [2, 2], [])
def reshape_node():
shape_node = make_constant_int_node("shape", [1,4])
node = onnx.helper.make_node(
"Reshape",
inputs=[input_name, "shape"],
outputs=[output_name],
)
return ("reshape", node, [2,2], [1,4], [shape_node])
def reshape_node_with_dimension_inference():
shape_node = make_constant_int_node("shape", [-1,4])
node = onnx.helper.make_node(
"Reshape",
inputs=[input_name, "shape"],
outputs=[output_name],
)
return ("reshape_with_dimension_inference", node, [2,2], [1,4], [shape_node])
def flatten_node():
node = onnx.helper.make_node(
"Flatten",
inputs=[input_name],
outputs=[output_name],
axis=2
)
return ("flatten", node, [2,2,2,1], [4, 2], [])
def transpose_node():
node = onnx.helper.make_node(
"Transpose",
inputs=[input_name],
outputs=[output_name],
perm=[1,0]
)
return ("transpose", node, [2,3], [3,2], [])
def batch_normalization_node():
scale = make_constant_float_node("scale", [0.5, 1, 2])
bias = make_constant_float_node("bias", [0, 1, 0])
mean = make_constant_float_node("mean", [5, 6, 7])
var = make_constant_float_node("var", [0.5, 0.5, 0.5])
node = onnx.helper.make_node(
"BatchNormalization",
inputs=[input_name, "scale", "bias", "mean", "var"],
outputs=[output_name],
)
return ("batchnorm", node, [1, 3, 2, 1], [1, 3, 2, 1], [scale, bias, mean, var])
def METHOD_NAME():
node = onnx.helper.make_node(
"MaxPool",
inputs=[input_name],
outputs=[output_name],
kernel_shape=[3, 3],
strides=[2, 2],
ceil_mode=True,
)
input_shape = [1, 1, 4, 4]
output_shape = [1, 1, 2, 2]
return ("maxpool", node, input_shape, output_shape, [])
def conv_node():
weights = make_constant_float_node("weights_const", [
[
[
[1.0, 1.0, 1.0], # (1, 1, 3, 3) tensor for convolution weights
[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
]
]
])
node = onnx.helper.make_node(
"Conv",
inputs=[input_name, "weights_const"],
outputs=[output_name],
kernel_shape=[3, 3],
# Default values for other attributes: strides=[1, 1], dilations=[1, 1], groups=1
pads=[1, 1, 1, 1],
)
return ("conv", node, [1, 1, 5, 5], [1, 1, 5, 5], [weights])
def gemm_node():
mul_const_node = make_constant_float_node("mul_const", [[0.5, 1.0], [1.5, 2.0]])
add_const_node = make_constant_float_node("add_const", [[3, 4.5]])
node = onnx.helper.make_node(
"Gemm",
inputs=[input_name, "mul_const", "add_const"],
outputs=[output_name],
alpha=0.25,
beta=0.5,
transA=0,
transB=1,
)
return ("gemm", node, [2,2], [2,2], [mul_const_node, add_const_node])
def relu_node():
node = onnx.helper.make_node(
"Relu",
inputs=[input_name],
outputs=[output_name],
)
return ("relu", node, [2,2], [2,2], [])
def add_node():
const_node = make_constant_float_node("const", [[0.5, 1.0], [1.5, 2.0]])
node = onnx.helper.make_node(
"Add",
inputs=[input_name, "const"],
outputs=[output_name],
)
return ("add", node, [2,2], [2,2], [const_node])
def sub_node():
const_node = make_constant_float_node("const", np.array([[0.5, 1.0], [1.5, 2.0]]))
node = onnx.helper.make_node(
"Sub",
inputs=[input_name, "const"],
outputs=[output_name],
)
return ("sub", node, [2,2], [2,2], [const_node])
def matmul_node():
const_node = make_constant_float_node("const", np.array([[0.0, 0.5], [1.5, 2.0], [-1, -2]]))
node = onnx.helper.make_node(
"MatMul",
inputs=[input_name, "const"],
outputs=[output_name],
)
return ("matmul", node, [2,3], [2,2], [const_node])
def sigmoid_node():
node = onnx.helper.make_node(
"Sigmoid",
inputs=[input_name],
outputs=[output_name],
)
return ("sigmoid", node, [2,2], [2,2], [])
def tanh_node():
node = onnx.helper.make_node(
"Tanh",
inputs=[input_name],
outputs=[output_name],
)
return ("tanh", node, [2,2], [2,2], [])
##########
## Main ##
##########
if __name__ == "__main__":
make_network(*constant_node())
make_network(*identity_node())
make_network(*reshape_node())
make_network(*reshape_node_with_dimension_inference())
make_network(*flatten_node())
make_network(*transpose_node())
make_network(*batch_normalization_node())
make_network(*METHOD_NAME())
make_network(*conv_node())
make_network(*gemm_node())
make_network(*relu_node())
make_network(*add_node())
make_network(*sub_node())
make_network(*matmul_node())
make_network(*sigmoid_node())
make_network(*tanh_node())
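# Optional sanity check (a sketch, not part of the original script): any of the
# generated files can be validated with onnx's model checker, for example:
onnx.checker.check_model(onnx.load("maxpool.onnx"))
print("maxpool.onnx passed onnx.checker")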
|
2,463 |
html
|
import json
from typing import Dict, Optional, Union
from .explorer import Explorer
from .template import read_template, render_template
PLAYGROUND_HTML = read_template("playground.html")
SettingsDict = Dict[str, Union[str, int, bool, Dict[str, str]]]
class ExplorerPlayground(Explorer):
def __init__(
self,
title: str = "Ariadne GraphQL",
editor_cursor_shape: Optional[str] = None,
editor_font_family: Optional[str] = None,
editor_font_size: Optional[int] = None,
editor_reuse_headers: Optional[bool] = None,
editor_theme: Optional[str] = None,
general_beta_updates: Optional[bool] = None,
prettier_print_width: Optional[int] = None,
prettier_tab_width: Optional[int] = None,
prettier_use_tabs: Optional[bool] = None,
request_credentials: Optional[str] = None,
request_global_headers: Optional[Dict[str, str]] = None,
schema_polling_enable: Optional[bool] = None,
schema_polling_endpoint_filter: Optional[str] = None,
schema_polling_interval: Optional[int] = None,
schema_disable_comments: Optional[bool] = None,
tracing_hide_tracing_response: Optional[bool] = None,
tracing_tracing_supported: Optional[bool] = None,
query_plan_hide_query_plan_response: Optional[bool] = None,
) -> None:
settings = self.build_settings(
editor_cursor_shape=editor_cursor_shape,
editor_font_family=editor_font_family,
editor_font_size=editor_font_size,
editor_reuse_headers=editor_reuse_headers,
editor_theme=editor_theme,
general_beta_updates=general_beta_updates,
prettier_print_width=prettier_print_width,
prettier_tab_width=prettier_tab_width,
prettier_use_tabs=prettier_use_tabs,
request_credentials=request_credentials,
request_global_headers=request_global_headers,
schema_polling_enable=schema_polling_enable,
schema_polling_endpoint_filter=schema_polling_endpoint_filter,
schema_polling_interval=schema_polling_interval,
schema_disable_comments=schema_disable_comments,
tracing_hide_tracing_response=tracing_hide_tracing_response,
tracing_tracing_supported=tracing_tracing_supported,
query_plan_hide_query_plan_response=query_plan_hide_query_plan_response,
)
self.parsed_html = render_template(
PLAYGROUND_HTML,
{
"title": title,
"settings": json.dumps(settings) if settings else None,
},
)
def build_settings(
self,
editor_cursor_shape: Optional[str] = None,
editor_font_family: Optional[str] = None,
editor_font_size: Optional[int] = None,
editor_reuse_headers: Optional[bool] = None,
editor_theme: Optional[str] = None,
general_beta_updates: Optional[bool] = None,
prettier_print_width: Optional[int] = None,
prettier_tab_width: Optional[int] = None,
prettier_use_tabs: Optional[bool] = None,
request_credentials: Optional[str] = None,
request_global_headers: Optional[Dict[str, str]] = None,
schema_polling_enable: Optional[bool] = None,
schema_polling_endpoint_filter: Optional[str] = None,
schema_polling_interval: Optional[int] = None,
schema_disable_comments: Optional[bool] = None,
tracing_hide_tracing_response: Optional[bool] = None,
tracing_tracing_supported: Optional[bool] = None,
query_plan_hide_query_plan_response: Optional[bool] = None,
) -> SettingsDict:
settings: SettingsDict = {}
if editor_cursor_shape:
settings["editor.cursorShape"] = editor_cursor_shape
if editor_font_family:
settings["editor.fontFamily"] = editor_font_family
if editor_font_size:
settings["editor.fontSize"] = editor_font_size
if editor_reuse_headers is not None:
settings["editor.reuseHeaders"] = editor_reuse_headers
if editor_theme:
settings["editor.theme"] = editor_theme
if general_beta_updates is not None:
settings["general.betaUpdates"] = general_beta_updates
if prettier_print_width:
settings["prettier.printWidth"] = prettier_print_width
if prettier_tab_width:
settings["prettier.tabWidth"] = prettier_tab_width
if prettier_use_tabs is not None:
settings["prettier.useTabs"] = prettier_use_tabs
if request_credentials:
settings["request.credentials"] = request_credentials
if request_global_headers:
settings["request.globalHeaders"] = request_global_headers
if schema_polling_enable is not None:
settings["schema.polling.enable"] = schema_polling_enable
if schema_polling_endpoint_filter:
settings["schema.polling.endpointFilter"] = schema_polling_endpoint_filter
if schema_polling_interval:
settings["schema.polling.interval"] = schema_polling_interval
if schema_disable_comments is not None:
settings["schema.disableComments"] = schema_disable_comments
if tracing_hide_tracing_response is not None:
settings["tracing.hideTracingResponse"] = tracing_hide_tracing_response
if tracing_tracing_supported is not None:
settings["tracing.tracingSupported"] = tracing_tracing_supported
if query_plan_hide_query_plan_response is not None:
settings[
"queryPlan.hideQueryPlanResponse"
] = query_plan_hide_query_plan_response
return settings
def METHOD_NAME(self, _):
return self.parsed_html
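# Minimal usage sketch (not part of the original module): only explicitly passed
# options end up in the serialized settings; everything else falls back to the
# Playground defaults. The page is returned by the method masked as METHOD_NAME above.
if __name__ == "__main__":
    explorer = ExplorerPlayground(
        title="My API",
        editor_theme="light",
        request_credentials="include",
    )
    print(explorer.html(None))  # the request argument is unused here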
|
2,464 |
id
|
from enum import IntFlag
from typing import Dict, List
from pyroute2 import MPTCP
from socket import AF_INET, AF_INET6
from lnst.Common.IpAddress import ipaddress, BaseIpAddress
class MPTCPFlags(IntFlag):
# via https://github.com/torvalds/linux/blob/9d31d2338950293ec19d9b095fbaa9030899dcb4/include/uapi/linux/mptcp.h#L73
MPTCP_PM_ADDR_FLAG_SIGNAL = (1 << 0)
MPTCP_PM_ADDR_FLAG_SUBFLOW = (1 << 1)
MPTCP_PM_ADDR_FLAG_BACKUP = (1 << 2)
class MPTCPEndpoint:
@classmethod
def from_netlink(cls, nl_mptcp_ep_msg):
"""
.. code-block:: python
>>> r = mptcp.endpoint('show')[0]
>>> type(r)
<class 'pyroute2.netlink.generic.mptcp.mptcp_msg'>
>>> r
{'cmd': 3, 'version': 1, 'reserved': 0, 'attrs': [('MPTCP_PM_ATTR_ADDR', {'attrs': [('MPTCP_PM_ADDR_ATTR_FAMILY', 2), ('MPTCP_PM_ADDR_ATTR_ID', 5), ('MPTCP_PM_ADDR_ATTR_FLAGS', 1), ('MPTCP_PM_ADDR_ATTR_ADDR4', '192.168.202.1')]}, 32768)], 'header': {'length': 56, 'type': 27, 'flags': 2, 'sequence_number': 257, 'pid': 26782, 'error': None, 'target': 'localhost', 'stats': Stats(qsize=0, delta=0, delay=0)}}
>>> a = r.get_attr("MPTCP_PM_ATTR_ADDR")
>>> type(a)
<class 'pyroute2.netlink.generic.mptcp.mptcp_msg.pm_addr'>
>>> a
{'attrs': [('MPTCP_PM_ADDR_ATTR_FAMILY', 2), ('MPTCP_PM_ADDR_ATTR_ID', 5), ('MPTCP_PM_ADDR_ATTR_FLAGS', 1), ('MPTCP_PM_ADDR_ATTR_ADDR4', '192.168.202.1')]}
:param nl_mptcp_ep_msg: the netlink message from mptcp.endpoint('show')
:return:
"""
addr = nl_mptcp_ep_msg.get_attr("MPTCP_PM_ATTR_ADDR")
addr_attr = dict(addr['attrs'])
return cls(addr_attr)
def __init__(self, attr: Dict):
self._attr = attr
self._ip = None
self._flags = None
@property
def METHOD_NAME(self):
return self._attr['MPTCP_PM_ADDR_ATTR_ID']
@property
def ip_address(self):
if self._ip is None:
if self.ip_family == AF_INET:
self._ip = ipaddress(self._attr['MPTCP_PM_ADDR_ATTR_ADDR4'])
else:
self._ip = ipaddress(self._attr['MPTCP_PM_ADDR_ATTR_ADDR6'])
return self._ip
@property
def ip_family(self):
return self._attr['MPTCP_PM_ADDR_ATTR_FAMILY']
@property
def flags(self):
if self._flags is None:
self._flags = MPTCPFlags(self._attr['MPTCP_PM_ADDR_ATTR_FLAGS'])
return self._flags
@property
def is_signal(self):
return MPTCPFlags.MPTCP_PM_ADDR_FLAG_SIGNAL in self.flags
@property
def is_subflow(self):
return MPTCPFlags.MPTCP_PM_ADDR_FLAG_SUBFLOW in self.flags
@property
def is_backup(self):
return MPTCPFlags.MPTCP_PM_ADDR_FLAG_BACKUP in self.flags
class MPTCPManager:
def __init__(self):
self._mptcp = MPTCP()
self._endpoints = {}
@property
def endpoints(self):
self._endpoints = {}
nl_eps = self._mptcp.endpoint('show')
for nl_ep in nl_eps:
ep = MPTCPEndpoint.from_netlink(nl_ep)
self._endpoints[ep.METHOD_NAME] = ep
return self._endpoints
@property
def subflows(self):
nl_msg = self._mptcp.limits("show")[0]
return nl_msg.get_attr("MPTCP_PM_ATTR_SUBFLOWS")
@subflows.setter
def subflows(self, n):
self._mptcp.limits("set", subflows=n)
@property
def add_addr_accepted(self):
nl_msg = self._mptcp.limits("show")[0]
return nl_msg.get_attr("MPTCP_PM_ATTR_RCV_ADD_ADDRS")
@add_addr_accepted.setter
def add_addr_accepted(self, n):
self._mptcp.limits("set", add_addr_accepted=n)
def add_endpoints(self, endpoint_ips: List[BaseIpAddress], flags: MPTCPFlags):
for ip in endpoint_ips:
if ip.family == AF_INET:
self._mptcp.endpoint("add", addr4=str(ip), flags=flags)
elif ip.family == AF_INET6:
self._mptcp.endpoint("add", addr6=str(ip), flags=flags)
def delete_all(self):
r = self._mptcp.endpoint("flush")
return r
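# Minimal usage sketch (not part of the original module; requires an MPTCP-capable
# kernel and sufficient privileges). The endpoint address is a placeholder.
if __name__ == "__main__":
    mptcp = MPTCPManager()
    mptcp.subflows = 2
    mptcp.add_addr_accepted = 2
    mptcp.add_endpoints(
        [ipaddress("192.0.2.10")],
        flags=MPTCPFlags.MPTCP_PM_ADDR_FLAG_SIGNAL | MPTCPFlags.MPTCP_PM_ADDR_FLAG_SUBFLOW,
    )
    for ep_id, ep in mptcp.endpoints.items():
        print(ep_id, ep.ip_address, ep.flags)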
|
2,465 |
test implicit group by
|
from tests.testmodels import Author, Book
from tortoise.contrib import test
from tortoise.functions import Avg, Count, Sum, Upper
class TestGroupBy(test.TestCase):
async def asyncSetUp(self) -> None:
await super(TestGroupBy, self).asyncSetUp()
self.a1 = await Author.create(name="author1")
self.a2 = await Author.create(name="author2")
for i in range(10):
await Book.create(name=f"book{i}", author=self.a1, rating=i)
for i in range(5):
await Book.create(name=f"book{i}", author=self.a2, rating=i)
async def test_count_group_by(self):
ret = (
await Book.annotate(count=Count("id"))
.group_by("author_id")
.values("author_id", "count")
)
for item in ret:
author_id = item.get("author_id")
count = item.get("count")
if author_id == self.a1.pk:
self.assertEqual(count, 10)
elif author_id == self.a2.pk:
self.assertEqual(count, 5)
async def test_count_group_by_with_join(self):
ret = (
await Book.annotate(count=Count("id"))
.group_by("author__name")
.values("author__name", "count")
)
self.assertListSortEqual(
ret,
[{"author__name": "author1", "count": 10}, {"author__name": "author2", "count": 5}],
sorted_key="author__name",
)
async def test_count_filter_group_by(self):
ret = (
await Book.annotate(count=Count("id"))
.filter(count__gt=6)
.group_by("author_id")
.values("author_id", "count")
)
self.assertEqual(len(ret), 1)
self.assertEqual(ret[0].get("count"), 10)
async def test_sum_group_by(self):
ret = (
await Book.annotate(sum=Sum("rating")).group_by("author_id").values("author_id", "sum")
)
for item in ret:
author_id = item.get("author_id")
sum_ = item.get("sum")
if author_id == self.a1.pk:
self.assertEqual(sum_, 45.0)
elif author_id == self.a2.pk:
self.assertEqual(sum_, 10.0)
async def test_sum_group_by_with_join(self):
ret = (
await Book.annotate(sum=Sum("rating"))
.group_by("author__name")
.values("author__name", "sum")
)
self.assertListSortEqual(
ret,
[{"author__name": "author1", "sum": 45.0}, {"author__name": "author2", "sum": 10.0}],
sorted_key="author__name",
)
async def test_sum_filter_group_by(self):
ret = (
await Book.annotate(sum=Sum("rating"))
.filter(sum__gt=11)
.group_by("author_id")
.values("author_id", "sum")
)
self.assertEqual(len(ret), 1)
self.assertEqual(ret[0].get("sum"), 45.0)
async def test_avg_group_by(self):
ret = (
await Book.annotate(avg=Avg("rating")).group_by("author_id").values("author_id", "avg")
)
for item in ret:
author_id = item.get("author_id")
avg = item.get("avg")
if author_id == self.a1.pk:
self.assertEqual(avg, 4.5)
elif author_id == self.a2.pk:
self.assertEqual(avg, 2.0)
async def test_avg_group_by_with_join(self):
ret = (
await Book.annotate(avg=Avg("rating"))
.group_by("author__name")
.values("author__name", "avg")
)
self.assertListSortEqual(
ret,
[{"author__name": "author1", "avg": 4.5}, {"author__name": "author2", "avg": 2}],
sorted_key="author__name",
)
async def test_avg_filter_group_by(self):
ret = (
await Book.annotate(avg=Avg("rating"))
.filter(avg__gt=3)
.group_by("author_id")
.values_list("author_id", "avg")
)
self.assertEqual(len(ret), 1)
self.assertEqual(ret[0][1], 4.5)
async def test_count_values_list_group_by(self):
ret = (
await Book.annotate(count=Count("id"))
.group_by("author_id")
.values_list("author_id", "count")
)
for item in ret:
author_id = item[0]
count = item[1]
if author_id == self.a1.pk:
self.assertEqual(count, 10)
elif author_id == self.a2.pk:
self.assertEqual(count, 5)
async def test_count_values_list_group_by_with_join(self):
ret = (
await Book.annotate(count=Count("id"))
.group_by("author__name")
.values_list("author__name", "count")
)
self.assertListSortEqual(ret, [("author1", 10), ("author2", 5)])
async def test_count_values_list_filter_group_by(self):
ret = (
await Book.annotate(count=Count("id"))
.filter(count__gt=6)
.group_by("author_id")
.values_list("author_id", "count")
)
self.assertEqual(len(ret), 1)
self.assertEqual(ret[0][1], 10)
async def test_sum_values_list_group_by(self):
ret = (
await Book.annotate(sum=Sum("rating"))
.group_by("author_id")
.values_list("author_id", "sum")
)
for item in ret:
author_id = item[0]
sum_ = item[1]
if author_id == self.a1.pk:
self.assertEqual(sum_, 45.0)
elif author_id == self.a2.pk:
self.assertEqual(sum_, 10.0)
async def test_sum_values_list_group_by_with_join(self):
ret = (
await Book.annotate(sum=Sum("rating"))
.group_by("author__name")
.values_list("author__name", "sum")
)
self.assertListSortEqual(ret, [("author1", 45.0), ("author2", 10.0)])
async def test_sum_values_list_filter_group_by(self):
ret = (
await Book.annotate(sum=Sum("rating"))
.filter(sum__gt=11)
.group_by("author_id")
.values_list("author_id", "sum")
)
self.assertEqual(len(ret), 1)
self.assertEqual(ret[0][1], 45.0)
async def test_avg_values_list_group_by(self):
ret = (
await Book.annotate(avg=Avg("rating"))
.group_by("author_id")
.values_list("author_id", "avg")
)
for item in ret:
author_id = item[0]
avg = item[1]
if author_id == self.a1.pk:
self.assertEqual(avg, 4.5)
elif author_id == self.a2.pk:
self.assertEqual(avg, 2.0)
async def test_avg_values_list_group_by_with_join(self):
ret = (
await Book.annotate(avg=Avg("rating"))
.group_by("author__name")
.values_list("author__name", "avg")
)
self.assertListSortEqual(ret, [("author1", 4.5), ("author2", 2.0)])
async def test_avg_values_list_filter_group_by(self):
ret = (
await Book.annotate(avg=Avg("rating"))
.filter(avg__gt=3)
.group_by("author_id")
.values_list("author_id", "avg")
)
self.assertEqual(len(ret), 1)
self.assertEqual(ret[0][1], 4.5)
async def METHOD_NAME(self):
ret = await Author.annotate(count=Count("books")).filter(count__gt=6)
self.assertEqual(ret[0].count, 10)
async def test_group_by_annotate_result(self):
ret = (
await Book.annotate(upper_name=Upper("author__name"), count=Count("id"))
.group_by("upper_name")
.values("upper_name", "count")
)
self.assertListSortEqual(
ret,
[{"upper_name": "AUTHOR1", "count": 10}, {"upper_name": "AUTHOR2", "count": 5}],
sorted_key="upper_name",
)
|
2,466 |
test transform lattice
|
from __future__ import annotations
import unittest
from numpy.testing import assert_allclose
from pymatgen.symmetry.settings import JonesFaithfulTransformation, Lattice, SymmOp
__author__ = "Matthew Horton"
__copyright__ = "Copyright 2017, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Matthew Horton"
__email__ = "[email protected]"
__status__ = "Development"
__date__ = "Apr 2017"
class TestJonesFaithfulTransformation(unittest.TestCase):
def setUp(self):
self.test_strings = [
"a,b,c;0,0,0", # identity
"a-b,a+b,2c;0,0,1/2",
"a/4+b/4-c/2,a/4-b/4,-a/2-b/2;0,0,0",
"a,b,c;1/4,1/2,3/4",
] # pure translation
self.test_Pps = [
([[1, 0, 0], [0, 1, 0], [0, 0, 1]], [0, 0, 0]),
([[1, 1, 0], [-1, 1, 0], [0, 0, 2]], [0, 0, 0.5]),
([[0.25, 0.25, -0.5], [0.25, -0.25, -0.5], [-0.5, 0, 0]], [0, 0, 0]),
([[1, 0, 0], [0, 1, 0], [0, 0, 1]], [0.25, 0.5, 0.75]),
]
def test_init(self):
for test_string, test_Pp in zip(self.test_strings, self.test_Pps):
jft = JonesFaithfulTransformation.from_transformation_string(test_string)
jft2 = JonesFaithfulTransformation(test_Pp[0], test_Pp[1])
assert_allclose(jft.P, jft2.P)
assert_allclose(jft.p, jft2.p)
assert test_string == jft.transformation_string
assert test_string == jft2.transformation_string
def test_inverse(self):
for test_string in self.test_strings:
jft = JonesFaithfulTransformation.from_transformation_string(test_string)
assert jft == jft.inverse.inverse
assert jft.transformation_string == jft.inverse.inverse.transformation_string
def METHOD_NAME(self):
lattice = Lattice.cubic(5)
all_ref_lattices = [
[[5.0, 0.0, 0.0], [0.0, 5.0, 0.0], [0.0, 0.0, 5.0]],
[[5.0, 5.0, 0.0], [-5.0, 5.0, 0.0], [0.0, 0.0, 10.0]],
[[1.25, 1.25, -2.5], [1.25, -1.25, -2.5], [-2.5, 0.0, 0.0]],
[[5.0, 0.0, 0.0], [0.0, 5.0, 0.0], [0.0, 0.0, 5.0]],
]
for ref_lattice, (P, p) in zip(all_ref_lattices, self.test_Pps):
jft = JonesFaithfulTransformation(P, p)
assert_allclose(jft.transform_lattice(lattice).matrix, ref_lattice)
def test_transform_coords(self):
coords = [[0, 0, 0], [0.5, 0.5, 0.5]]
all_ref_coords = [
[[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]],
[[0.0, 0.0, -0.25], [0.0, 0.5, 0.0]],
[[0.0, 0.0, 0.0], [-1.0, 0.0, -1.5]],
[[-0.25, -0.5, -0.75], [0.25, 0.0, -0.25]],
]
for ref_coords, (P, p) in zip(all_ref_coords, self.test_Pps):
jft = JonesFaithfulTransformation(P, p)
transformed_coords = jft.transform_coords(coords)
for coord, ref_coord in zip(transformed_coords, ref_coords):
assert_allclose(coord, ref_coord)
def test_transform_symmops(self):
# reference data for this test taken from GENPOS
# http://cryst.ehu.es/cryst/get_gen.html
# Fm-3m
input_symmops = """x,y,z
-x,-y,z
-x,y,-z
x,-y,-z
z,x,y
z,-x,-y
-z,-x,y
-z,x,-y
y,z,x
-y,z,-x
y,-z,-x
-y,-z,x
y,x,-z
-y,-x,-z
y,-x,z
-y,x,z
x,z,-y
-x,z,y
-x,-z,-y
x,-z,y
z,y,-x
z,-y,x
-z,y,x
-z,-y,-x
-x,-y,-z
x,y,-z
x,-y,z
-x,y,z
-z,-x,-y
-z,x,y
z,x,-y
z,-x,y
-y,-z,-x
y,-z,x
-y,z,x
y,z,-x
-y,-x,z
y,x,z
-y,x,-z
y,-x,-z
-x,-z,y
x,-z,-y
x,z,y
-x,z,-y
-z,-y,x
-z,y,-x
z,-y,-x
z,y,x"""
# Fm-3m transformed by (a-b,a+b,2c;0,0,1/2)
ref_transformed_symmops = """x,y,z
-x,-y,z
-y,-x,-z+1/2
y,x,-z+1/2
-1/2x-1/2y+z+1/4,1/2x+1/2y+z+1/4,-1/2x+1/2y+3/4
1/2x+1/2y+z+1/4,-1/2x-1/2y+z+1/4,1/2x-1/2y+3/4
1/2x+1/2y-z+3/4,-1/2x-1/2y-z+3/4,-1/2x+1/2y+3/4
-1/2x-1/2y-z+3/4,1/2x+1/2y-z+3/4,1/2x-1/2y+3/4
-1/2x+1/2y-z+3/4,-1/2x+1/2y+z+1/4,1/2x+1/2y+3/4
1/2x-1/2y-z+3/4,1/2x-1/2y+z+1/4,-1/2x-1/2y+3/4
-1/2x+1/2y+z+1/4,-1/2x+1/2y-z+3/4,-1/2x-1/2y+3/4
1/2x-1/2y+z+1/4,1/2x-1/2y-z+3/4,1/2x+1/2y+3/4
-x,y,-z+1/2
x,-y,-z+1/2
y,-x,z
-y,x,z
1/2x+1/2y-z+3/4,1/2x+1/2y+z+1/4,1/2x-1/2y+3/4
-1/2x-1/2y-z+3/4,-1/2x-1/2y+z+1/4,-1/2x+1/2y+3/4
-1/2x-1/2y+z+1/4,-1/2x-1/2y-z+3/4,1/2x-1/2y+3/4
1/2x+1/2y+z+1/4,1/2x+1/2y-z+3/4,-1/2x+1/2y+3/4
1/2x-1/2y+z+1/4,-1/2x+1/2y+z+1/4,-1/2x-1/2y+3/4
-1/2x+1/2y+z+1/4,1/2x-1/2y+z+1/4,1/2x+1/2y+3/4
1/2x-1/2y-z+3/4,-1/2x+1/2y-z+3/4,1/2x+1/2y+3/4
-1/2x+1/2y-z+3/4,1/2x-1/2y-z+3/4,-1/2x-1/2y+3/4
-x,-y,-z+1/2
x,y,-z+1/2
y,x,z
-y,-x,z
1/2x+1/2y-z+3/4,-1/2x-1/2y-z+3/4,1/2x-1/2y+3/4
-1/2x-1/2y-z+3/4,1/2x+1/2y-z+3/4,-1/2x+1/2y+3/4
-1/2x-1/2y+z+1/4,1/2x+1/2y+z+1/4,1/2x-1/2y+3/4
1/2x+1/2y+z+1/4,-1/2x-1/2y+z+1/4,-1/2x+1/2y+3/4
1/2x-1/2y+z+1/4,1/2x-1/2y-z+3/4,-1/2x-1/2y+3/4
-1/2x+1/2y+z+1/4,-1/2x+1/2y-z+3/4,1/2x+1/2y+3/4
1/2x-1/2y-z+3/4,1/2x-1/2y+z+1/4,1/2x+1/2y+3/4
-1/2x+1/2y-z+3/4,-1/2x+1/2y+z+1/4,-1/2x-1/2y+3/4
x,-y,z
-x,y,z
-y,x,-z+1/2
y,-x,-z+1/2
-1/2x-1/2y+z+1/4,-1/2x-1/2y-z+3/4,-1/2x+1/2y+3/4
1/2x+1/2y+z+1/4,1/2x+1/2y-z+3/4,1/2x-1/2y+3/4
1/2x+1/2y-z+3/4,1/2x+1/2y+z+1/4,-1/2x+1/2y+3/4
-1/2x-1/2y-z+3/4,-1/2x-1/2y+z+1/4,1/2x-1/2y+3/4
-1/2x+1/2y-z+3/4,1/2x-1/2y-z+3/4,1/2x+1/2y+3/4
1/2x-1/2y-z+3/4,-1/2x+1/2y-z+3/4,-1/2x-1/2y+3/4
-1/2x+1/2y+z+1/4,1/2x-1/2y+z+1/4,-1/2x-1/2y+3/4
1/2x-1/2y+z+1/4,-1/2x+1/2y+z+1/4,1/2x+1/2y+3/4"""
jft = JonesFaithfulTransformation.from_transformation_string(self.test_strings[1])
input_symmops = [SymmOp.from_xyz_string(s) for s in input_symmops.split()]
ref_transformed_symmops = [SymmOp.from_xyz_string(s) for s in ref_transformed_symmops.split()]
transformed_symmops = [jft.transform_symmop(op) for op in input_symmops]
for transformed_op, ref_transformed_op in zip(transformed_symmops, ref_transformed_symmops):
assert transformed_op == ref_transformed_op
|
2,467 |
serialize provider
|
from __future__ import annotations
import logging
from typing import Any, Dict, Mapping, MutableMapping, Optional, Sequence
from sentry.api.serializers import Serializer, register, serialize
from sentry.integrations import IntegrationProvider
from sentry.models import Integration, OrganizationIntegration, User
from sentry.services.hybrid_cloud.integration import (
RpcIntegration,
RpcOrganizationIntegration,
integration_service,
)
from sentry.shared_integrations.exceptions import ApiError
from sentry.utils.json import JSONData
logger = logging.getLogger(__name__)
# converts the provider to JSON
def METHOD_NAME(provider: IntegrationProvider) -> Mapping[str, Any]:
return {
"key": provider.key,
"slug": provider.key,
"name": provider.name,
"canAdd": provider.can_add,
"canDisable": provider.can_disable,
"features": sorted(f.value for f in provider.features),
"aspects": provider.metadata.aspects,
}
@register(Integration)
class IntegrationSerializer(Serializer):
def serialize(
self, obj: RpcIntegration, attrs: Mapping[str, Any], user: User, **kwargs: Any
) -> MutableMapping[str, JSONData]:
provider = obj.get_provider()
return {
"id": str(obj.id),
"name": obj.name,
"icon": obj.metadata.get("icon"),
"domainName": obj.metadata.get("domain_name"),
"accountType": obj.metadata.get("account_type"),
"scopes": obj.metadata.get("scopes"),
"status": obj.get_status_display(),
"provider": METHOD_NAME(provider),
}
class IntegrationConfigSerializer(IntegrationSerializer):
def __init__(
self, organization_id: Optional[int] = None, params: Optional[Mapping[str, Any]] = None
) -> None:
self.organization_id = organization_id
self.params = params or {}
def serialize(
self,
obj: RpcIntegration,
attrs: Mapping[str, Any],
user: User,
include_config: bool = True,
**kwargs: Any,
) -> MutableMapping[str, JSONData]:
data = super().serialize(obj, attrs, user)
if not include_config:
return data
data.update({"configOrganization": []})
if not self.organization_id:
return data
try:
install = obj.get_installation(organization_id=self.organization_id)
except NotImplementedError:
# The integration may not implement an Installed Integration object
# representation.
pass
else:
data.update({"configOrganization": install.get_organization_config()})
# Query param "action" only attached in TicketRuleForm modal.
if self.params.get("action") == "create":
# This method comes from IssueBasicMixin within the integration's installation class
data["createIssueConfig"] = install.get_create_issue_config( # type: ignore
None, user, params=self.params
)
return data
@register(OrganizationIntegration)
class OrganizationIntegrationSerializer(Serializer):
def __init__(self, params: Optional[Mapping[str, Any]] = None) -> None:
self.params = params
def get_attrs(
self,
item_list: Sequence[RpcOrganizationIntegration],
user: User,
**kwargs: Any,
) -> MutableMapping[RpcOrganizationIntegration, MutableMapping[str, Any]]:
integrations = integration_service.get_integrations(
integration_ids=[item.integration_id for item in item_list]
)
integrations_by_id: Dict[int, RpcIntegration] = {i.id: i for i in integrations}
return {
item: {"integration": integrations_by_id[item.integration_id]} for item in item_list
}
def serialize(
self,
obj: RpcOrganizationIntegration,
attrs: Mapping[str, Any],
user: User,
include_config: bool = True,
) -> MutableMapping[str, JSONData]:
# XXX(epurkhiser): This is O(n) for integrations, especially since
# we're using the IntegrationConfigSerializer which pulls in the
# integration installation config object which very well may be making
# API request for config options.
integration: RpcIntegration = attrs.get("integration") # type: ignore
serialized_integration: MutableMapping[str, Any] = serialize(
objects=integration,
user=user,
serializer=IntegrationConfigSerializer(obj.organization_id, params=self.params),
include_config=include_config,
)
dynamic_display_information = None
config_data = None
try:
installation = integration.get_installation(organization_id=obj.organization_id)
except NotImplementedError:
# slack doesn't have an installation implementation
config_data = obj.config if include_config else None
else:
try:
# just doing this to avoid querying for an object we already have
installation._org_integration = obj
config_data = installation.get_config_data() if include_config else None # type: ignore
dynamic_display_information = installation.get_dynamic_display_information()
except ApiError as e:
# If there is an ApiError from our 3rd party integration
# providers, assume there is a problem with the configuration
# and set it to disabled.
serialized_integration.update({"status": "disabled"})
name = "sentry.serializers.model.organizationintegration"
log_info = {
"error": str(e),
"integration_id": integration.id,
"integration_provider": integration.provider,
}
logger.info(name, extra=log_info)
serialized_integration.update(
{
"configData": config_data,
"externalId": integration.external_id,
"organizationId": obj.organization_id,
"organizationIntegrationStatus": obj.get_status_display(),
"gracePeriodEnd": obj.grace_period_end,
}
)
if dynamic_display_information:
serialized_integration.update(
{"dynamicDisplayInformation": dynamic_display_information}
)
return serialized_integration
class IntegrationProviderSerializer(Serializer):
def serialize(
self, obj: IntegrationProvider, attrs: Mapping[str, Any], user: User, **kwargs: Any
) -> MutableMapping[str, JSONData]:
org_slug = kwargs.pop("organization").slug
metadata = obj.metadata
metadata = metadata and metadata._asdict() or None
return {
"key": obj.key,
"slug": obj.key,
"name": obj.name,
"metadata": metadata,
"canAdd": obj.can_add,
"canDisable": obj.can_disable,
"features": [f.value for f in obj.features],
"setupDialog": dict(
url=f"/organizations/{org_slug}/integrations/{obj.key}/setup/",
**obj.setup_dialog_config,
),
}
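# Hedged usage sketch (not part of the original module): serializing an
# OrganizationIntegration via the same helper the code above uses for nested
# integrations. The caller supplies the model instance and the requesting user.
def serialize_org_integration(org_integration, user):
    return serialize(
        objects=org_integration,
        user=user,
        serializer=OrganizationIntegrationSerializer(params={"action": "create"}),
        include_config=True,
    )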
|
2,468 |
create product
|
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from odoo.tests.common import TransactionCase
class TestGetWeight(TransactionCase):
"""Test get_weight functions."""
# some helpers
def _create_order(self, customer):
return self.env["sale.order"].create({"partner_id": customer.id})
def _create_order_line(self, order, products):
for product in products:
self.env["sale.order.line"].create(
{"product_id": product.id, "order_id": order.id}
)
def _create_ul(self):
vals = [
{"name": "Cardboard box", "type": "box", "weight": 0.200},
{"name": "Wood box", "type": "box", "weight": 1.30},
]
return [self.env["product.ul"].create(val) for val in vals]
def _create_operation(self, picking, values):
vals = {
"picking_id": picking.id,
"location_id": picking.location_id.id,
"location_dest_id": picking.location_dest_id.id,
}
vals.update(values)
return self.env["stock.move.line"].create(vals)
def METHOD_NAME(self, vals):
return self.env["product.product"].create(vals)
def _get_products(self, weights):
"""A recordset of products without any specific uom.
It means : no uom or kg or unit
Params:
weights: recordset will be size of weights and each
product will get a size according of weights[i]
"""
kg_id = self.env.ref("uom.product_uom_kgm").id
unit_id = self.env.ref("uom.product_uom_unit").id
products = self.env["product.product"].search(
[["uom_id", "in", (False, kg_id, unit_id)]], limit=len(weights)
)
for idx, product in enumerate(products):
# by default there is no weight on products
product.weight = weights[idx]
return products
def _generate_picking(self, products):
"""Create a picking from products."""
customer = self.env["res.partner"].search([], limit=1)
order = self._create_order(customer)
self._create_order_line(order, products)
order.action_confirm()
picking = order.picking_ids
picking.button_validate()
return picking
def test_get_weight(self):
"""Test quant.package.weight computed field and
pack.operation.get_weight."""
# prepare some data
weights = [2, 30, 1, 24, 39]
products = self._get_products(weights)
picking = self._generate_picking(products)
package = self.env["stock.quant.package"].create({})
operations = self.env["stock.move.line"]
for product in products:
operations |= self._create_operation(
picking,
{
"product_uom_qty": 1,
"product_id": product.id,
"product_uom_id": product.uom_id.id,
"result_package_id": package.id,
},
)
# end of prepare data
# test operation.get_weight()
for operation in operations:
self.assertEqual(
operation.get_weight(),
operation.product_id.weight * operation.product_uom_qty,
)
# test package.weight
self.assertEqual(package.weight, sum([product.weight for product in products]))
def test_total_weight(self):
"""Test quant.package.weight computed field when a total
weight is defined"""
# prepare some data
weights = [2, 30, 1, 24, 39]
products = self._get_products(weights)
picking = self._generate_picking(products)
package = self.env["stock.quant.package"].create({})
operations = self.env["stock.move.line"]
for product in products:
operations |= self._create_operation(
picking,
{
"product_uom_qty": 1,
"product_id": product.id,
"product_uom_id": product.uom_id.id,
"result_package_id": package.id,
},
)
package.shipping_weight = 1542.0
# end of prepare data
# test operation.get_weight()
for operation in operations:
self.assertEqual(
operation.get_weight(),
operation.product_id.weight * operation.product_uom_qty,
)
# test package.weight
self.assertEqual(package.weight, package.shipping_weight)
def test_get_weight_with_qty(self):
"""Ensure qty are taken in account."""
# prepare some data
weights = [2, 30, 1, 24, 39]
products = self._get_products(weights)
picking = self._generate_picking(products)
package = self.env["stock.quant.package"].create({})
operations = self.env["stock.move.line"]
for idx, product in enumerate(products):
operations |= self._create_operation(
picking,
{
"product_uom_qty": idx, # nice one
"product_id": product.id,
"product_uom_id": product.uom_id.id,
"result_package_id": package.id,
},
)
# end of prepare data
# test operation.get_weight()
for operation in operations:
self.assertEqual(
operation.get_weight(),
operation.product_id.weight * operation.product_uom_qty,
)
# test package._weight
self.assertEqual(
package.weight, sum([operation.get_weight() for operation in operations])
)
def test_get_weight_with_uom(self):
"""Check with differents uom."""
# prepare some data
weights = [0.3, 14.01, 0.59]
package = self.env["stock.quant.package"].create({})
tonne_id = self.env.ref("uom.product_uom_ton")
kg_id = self.env.ref("uom.product_uom_kgm")
gr_id = self.env.ref("uom.product_uom_gram")
products = []
products.append(
self.METHOD_NAME(
{
"name": "Expected Odoo dev documentation",
"uom_id": tonne_id.id,
"uom_po_id": tonne_id.id,
"weight": weights[0],
}
)
)
products.append(
self.METHOD_NAME(
{
"name": "OCA documentation",
"uom_id": kg_id.id,
"uom_po_id": kg_id.id,
"weight": weights[1],
}
)
)
products.append(
self.METHOD_NAME(
{
"name": "Actual Odoo dev documentation",
"uom_id": gr_id.id,
"uom_po_id": gr_id.id,
"weight": weights[2],
}
)
)
        products_weight = (
            weights[0] * 1000  # tonne
            + weights[1] * 1  # kg
            + weights[2] * 0.01  # g
        )
picking = self._generate_picking(products)
operations = self.env["stock.move.line"]
for product in products:
operations |= self._create_operation(
picking,
{
"product_uom_qty": 1,
"product_id": product.id,
"product_uom_id": product.uom_id.id,
"result_package_id": package.id,
},
)
# end of prepare data
# because uom conversion is not implemented
self.assertEqual(package.weight, False)
# if one day, uom conversion is implemented:
# self.assertEqual(package.get_weight(), products_weight)
        self.assertEqual(products_weight, products_weight)  # avoid flake8 unused-variable warning
|
2,469 |
setup history
|
from __future__ import with_statement
import os.path
import sys
from warnings import warn
import java.lang.reflect.Array
__all__ = ['add_history', 'clear_history', 'get_begidx', 'get_completer',
'get_completer_delims', 'get_current_history_length',
'get_endidx', 'get_history_item', 'get_history_length',
'get_line_buffer', 'insert_text', 'parse_and_bind',
'read_history_file', 'read_init_file', 'redisplay',
'remove_history_item', 'set_completer', 'set_completer_delims',
'set_history_length', 'set_pre_input_hook', 'set_startup_hook',
'write_history_file']
try:
_console = sys._jy_console
_reader = _console.reader
except AttributeError:
raise ImportError("Cannot access JLineConsole reader")
_history_list = None
# The need for the following warnings should go away once we update
# JLine. Choosing ImportWarning as the closest warning to what is
# going on here, namely this is functionality not yet available on
# Jython.
class NotImplementedWarning(ImportWarning):
"""Not yet implemented by Jython"""
class SecurityWarning(ImportWarning):
"""Security manager prevents access to private field"""
def METHOD_NAME():
# This is obviously not desirable, but avoids O(n) workarounds to
# modify the history (ipython uses the function
# remove_history_item to mutate the history relatively frequently)
global _history_list
history = _reader.history
try:
        history_list_field = history.getClass().getDeclaredField("history")
history_list_field.setAccessible(True)
_history_list = history_list_field.get(history)
except:
pass
METHOD_NAME()
def parse_and_bind(string):
if string == "tab: complete":
try:
            keybindings_field = _reader.getClass().getDeclaredField("keybindings")
keybindings_field.setAccessible(True)
keybindings = keybindings_field.get(_reader)
COMPLETE = _reader.KEYMAP_NAMES.get('COMPLETE')
if java.lang.reflect.Array.getShort(keybindings, 9) != COMPLETE:
java.lang.reflect.Array.setShort(keybindings, 9, COMPLETE)
except:
warn("Cannot bind tab key to complete. You need to do this in a .jlinebindings.properties file instead", SecurityWarning, stacklevel=2)
else:
warn("Cannot bind key %s. You need to do this in a .jlinebindings.properties file instead" % (string,), NotImplementedWarning, stacklevel=2)
def get_line_buffer():
return str(_reader.cursorBuffer.buffer)
def insert_text(string):
_reader.putString(string)
def read_init_file(filename=None):
warn("read_init_file: %s" % (filename,), NotImplementedWarning, "module", 2)
def read_history_file(filename="~/.history"):
print "Reading history:", filename
expanded = os.path.expanduser(filename)
new_history = _reader.getHistory().getClass()()
# new_history.clear()
with open(expanded) as f:
for line in f:
new_history.addToHistory(line.rstrip())
_reader.history = new_history
METHOD_NAME()
def write_history_file(filename="~/.history"):
expanded = os.path.expanduser(filename)
with open(expanded, 'w') as f:
for line in _reader.history.historyList:
f.write(line)
f.write("\n")
def clear_history():
_reader.history.clear()
def add_history(line):
_reader.history.addToHistory(line)
def get_history_length():
return _reader.history.maxSize
def set_history_length(length):
_reader.history.maxSize = length
def get_current_history_length():
return len(_reader.history.historyList)
def get_history_item(index):
# JLine indexes from 0 while readline indexes from 1 (at least in test_readline)
if index>0:
return _reader.history.historyList[index-1]
else:
return None
def remove_history_item(pos):
if _history_list:
_history_list.remove(pos)
else:
warn("Cannot remove history item at position: %s" % (pos,), SecurityWarning, stacklevel=2)
def replace_history_item(pos, line):
if _history_list:
_history_list.set(pos, line)
else:
warn("Cannot replace history item at position: %s" % (pos,), SecurityWarning, stacklevel=2)
def redisplay():
_reader.redrawLine()
def set_startup_hook(function=None):
_console.startupHook = function
def set_pre_input_hook(function=None):
warn("set_pre_input_hook %s" % (function,), NotImplementedWarning, stacklevel=2)
_completer_function = None
def set_completer(function=None):
"""set_completer([function]) -> None
Set or remove the completer function.
The function is called as function(text, state),
for state in 0, 1, 2, ..., until it returns a non-string.
It should return the next possible completion starting with 'text'."""
global _completer_function
_completer_function = function
def complete_handler(buffer, cursor, candidates):
start = _get_delimited(buffer, cursor)[0]
delimited = buffer[start:cursor]
for state in xrange(100): # TODO arbitrary, what's the number used by gnu readline?
completion = None
try:
completion = function(delimited, state)
except:
pass
if completion:
candidates.add(completion)
else:
break
return start
_reader.addCompletor(complete_handler)
def get_completer():
return _completer_function
def _get_delimited(buffer, cursor):
start = cursor
for i in xrange(cursor-1, -1, -1):
if buffer[i] in _completer_delims:
break
start = i
return start, cursor
def get_begidx():
return _get_delimited(str(_reader.cursorBuffer.buffer), _reader.cursorBuffer.cursor)[0]
def get_endidx():
return _get_delimited(str(_reader.cursorBuffer.buffer), _reader.cursorBuffer.cursor)[1]
def set_completer_delims(string):
global _completer_delims, _completer_delims_set
_completer_delims = string
_completer_delims_set = set(string)
def get_completer_delims():
return _completer_delims
set_completer_delims(' \t\n`~!@#$%^&*()-=+[{]}\\|;:\'",<>/?')
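# --- Illustrative sketch, not part of the original module ---
# A minimal example of the completer protocol documented in set_completer():
# the function is called as function(text, state) for state = 0, 1, 2, ... and
# returns candidate strings until it returns None. The word list below is a
# made-up assumption for illustration.
def _example_keyword_completer(text, state):
    words = ['print', 'pass', 'python']
    matches = [w for w in words if w.startswith(text)]
    if state < len(matches):
        return matches[state]
    return None
# Typical wiring (left commented out so importing this module has no side effects):
# set_completer(_example_keyword_completer)
# parse_and_bind("tab: complete")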
|
2,470 |
set volume
|
# ruff: noqa: ARG002
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, ClassVar, Literal, Optional
import pykka
from pykka.typing import ActorMemberMixin, proxy_field, proxy_method
from mopidy import listener
if TYPE_CHECKING:
from typing_extensions import TypeAlias
from mopidy.types import Percentage
logger = logging.getLogger(__name__)
class Mixer:
"""Audio mixer API.
If the mixer has problems during initialization it should raise
:exc:`mopidy.exceptions.MixerError` with a descriptive error message. This
will make Mopidy print the error message and exit so that the user can fix
the issue.
:param config: the entire Mopidy configuration
"""
name: ClassVar[str] = ""
"""
Name of the mixer.
Used when configuring what mixer to use. Should match the
:attr:`~mopidy.ext.Extension.ext_name` of the extension providing the
mixer.
"""
def __init__(self, config: dict) -> None:
pass
def get_volume(self) -> Optional[Percentage]:
"""Get volume level of the mixer on a linear scale from 0 to 100.
Example values:
0:
Minimum volume, usually silent.
100:
Maximum volume.
:class:`None`:
Volume is unknown.
*MAY be implemented by subclass.*
"""
return None
def METHOD_NAME(self, volume: Percentage) -> bool:
"""Set volume level of the mixer.
*MAY be implemented by subclass.*
Returns :class:`True` if successful, :class:`False` otherwise.
:param volume: Volume in the range [0..100]
"""
return False
def trigger_volume_changed(self, volume: Percentage) -> None:
"""Send ``volume_changed`` event to all mixer listeners.
This method should be called by subclasses when the volume is changed,
either because of a call to :meth:`set_volume` or because of any
external entity changing the volume.
"""
logger.debug("Mixer event: volume_changed(volume=%d)", volume)
MixerListener.send("volume_changed", volume=volume)
def get_mute(self) -> Optional[bool]:
"""Get mute state of the mixer.
*MAY be implemented by subclass.*
Returns :class:`True` if muted, :class:`False` if unmuted, and
:class:`None` if unknown.
"""
return None
def set_mute(self, mute: bool) -> bool:
"""Mute or unmute the mixer.
*MAY be implemented by subclass.*
Returns :class:`True` if successful, :class:`False` otherwise.
:param mute: :class:`True` to mute, :class:`False` to unmute
"""
return False
def trigger_mute_changed(self, mute: bool) -> None:
"""Send ``mute_changed`` event to all mixer listeners.
This method should be called by subclasses when the mute state is
changed, either because of a call to :meth:`set_mute` or because of
any external entity changing the mute state.
"""
logger.debug("Mixer event: mute_changed(mute=%s)", mute)
MixerListener.send("mute_changed", mute=mute)
def ping(self) -> bool:
"""Called to check if the actor is still alive."""
return True
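# --- Illustrative sketch, not part of the original module ---
# A minimal software-only mixer showing how a subclass is expected to override
# the volume hooks and emit the matching event. The class name and the idea of
# keeping the level in memory are assumptions made for illustration only.
class _ExampleInMemoryMixer(Mixer):
    name = "example"
    def __init__(self, config: dict) -> None:
        super().__init__(config)
        self._volume: Optional[Percentage] = None
    def get_volume(self) -> Optional[Percentage]:
        return self._volume
    def METHOD_NAME(self, volume: Percentage) -> bool:
        # Store the new level and notify listeners, as the docstrings above describe.
        self._volume = volume
        self.trigger_volume_changed(volume)
        return True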
MixerEvent: TypeAlias = Literal["mute_changed", "volume_changed"]
class MixerListener(listener.Listener):
"""Marker interface for recipients of events sent by the mixer actor.
Any Pykka actor that mixes in this class will receive calls to the methods
defined here when the corresponding events happen in the mixer actor. This
interface is used both for looking up what actors to notify of the events,
and for providing default implementations for those listeners that are not
interested in all events.
"""
@staticmethod
def send(event: MixerEvent, **kwargs: Any) -> None:
"""Helper to allow calling of mixer listener events."""
listener.send(MixerListener, event, **kwargs)
def volume_changed(self, volume: Percentage) -> None:
"""Called after the volume has changed.
*MAY* be implemented by actor.
:param volume: the new volume
"""
def mute_changed(self, mute: bool) -> None:
"""Called after the mute state has changed.
*MAY* be implemented by actor.
:param mute: :class:`True` if muted, :class:`False` if not muted
:type mute: bool
"""
class MixerActor(pykka.ThreadingActor, Mixer):
pass
class MixerProxy(ActorMemberMixin, pykka.ActorProxy[MixerActor]):
"""Mixer wrapped in a Pykka actor proxy."""
name = proxy_field(MixerActor.name)
get_volume = proxy_method(MixerActor.get_volume)
METHOD_NAME = proxy_method(MixerActor.METHOD_NAME)
trigger_volume_changed = proxy_method(MixerActor.trigger_volume_changed)
get_mute = proxy_method(MixerActor.get_mute)
set_mute = proxy_method(MixerActor.set_mute)
trigger_mute_changed = proxy_method(MixerActor.trigger_mute_changed)
ping = proxy_method(MixerActor.ping)
|
2,471 |
evaluate with adaptive batch size
|
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import warnings
import keras_tuner
import tensorflow as tf
from packaging.version import parse
from tensorflow import nest
def validate_num_inputs(inputs, num):
inputs = nest.flatten(inputs)
if not len(inputs) == num:
raise ValueError(
"Expected {num} elements in the inputs list "
"but received {len} inputs.".format(num=num, len=len(inputs))
)
def to_snake_case(name):
intermediate = re.sub("(.)([A-Z][a-z0-9]+)", r"\1_\2", name)
insecure = re.sub("([a-z])([A-Z])", r"\1_\2", intermediate).lower()
return insecure
def check_tf_version() -> None:
if parse(tf.__version__) < parse("2.7.0"):
warnings.warn(
"The Tensorflow package version needs to be at least 2.7.0 \n"
"for AutoKeras to run. Currently, your TensorFlow version is \n"
f"{tf.__version__}. Please upgrade with \n"
"`$ pip install --upgrade tensorflow`. \n"
"You can use `pip freeze` to check afterwards "
"that everything is ok.",
ImportWarning,
)
def check_kt_version() -> None:
if parse(keras_tuner.__version__) < parse("1.1.0"):
warnings.warn(
"The Keras Tuner package version needs to be at least 1.1.0 \n"
"for AutoKeras to run. Currently, your Keras Tuner version is \n"
f"{keras_tuner.__version__}. Please upgrade with \n"
"`$ pip install --upgrade keras-tuner`. \n"
"You can use `pip freeze` to check afterwards "
"that everything is ok.",
ImportWarning,
)
def contain_instance(instance_list, instance_type):
return any(
[isinstance(instance, instance_type) for instance in instance_list]
)
def METHOD_NAME(
model, batch_size, verbose=1, **fit_kwargs
):
return run_with_adaptive_batch_size(
batch_size,
lambda x, validation_data, **kwargs: model.evaluate(
x, verbose=verbose, **kwargs
),
**fit_kwargs,
)
def predict_with_adaptive_batch_size(
model, batch_size, verbose=1, **fit_kwargs
):
return run_with_adaptive_batch_size(
batch_size,
lambda x, validation_data, **kwargs: model.predict(
x, verbose=verbose, **kwargs
),
**fit_kwargs,
)
def fit_with_adaptive_batch_size(model, batch_size, **fit_kwargs):
history = run_with_adaptive_batch_size(
batch_size, lambda **kwargs: model.fit(**kwargs), **fit_kwargs
)
return model, history
def run_with_adaptive_batch_size(batch_size, func, **fit_kwargs):
x = fit_kwargs.pop("x")
validation_data = None
if "validation_data" in fit_kwargs:
validation_data = fit_kwargs.pop("validation_data")
while batch_size > 0:
try:
history = func(x=x, validation_data=validation_data, **fit_kwargs)
break
except tf.errors.ResourceExhaustedError as e:
if batch_size == 1:
raise e
batch_size //= 2
print(
"Not enough memory, reduce batch size to {batch_size}.".format(
batch_size=batch_size
)
)
x = x.unbatch().batch(batch_size)
if validation_data is not None:
validation_data = validation_data.unbatch().batch(batch_size)
return history
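# --- Illustrative sketch, not part of the original module ---
# Shows how the adaptive-batch-size helpers above are meant to be called: the
# caller passes an already batched tf.data.Dataset as `x`; on a
# ResourceExhaustedError the batch size is halved and the data rebatched until
# the call fits in memory (or batch size 1 still fails). The model/dataset
# arguments and the starting batch size are assumptions for illustration.
def _example_fit_with_fallback(model, dataset, batch_size=32):
    batched = dataset.batch(batch_size)
    # Returns the (possibly unchanged) model and the Keras History object.
    return fit_with_adaptive_batch_size(
        model, batch_size, x=batched, epochs=1, verbose=0
    )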
def get_hyperparameter(value, hp, dtype):
if value is None:
return hp
return value
def add_to_hp(hp, hps, name=None):
"""Add the HyperParameter (self) to the HyperParameters.
# Arguments
hp: keras_tuner.HyperParameters.
name: String. If left unspecified, the hp name is used.
"""
if not isinstance(hp, keras_tuner.engine.hyperparameters.HyperParameter):
return hp
kwargs = hp.get_config()
if name is None:
name = hp.name
kwargs.pop("conditions")
kwargs.pop("name")
class_name = hp.__class__.__name__
func = getattr(hps, class_name)
return func(name=name, **kwargs)
def serialize_keras_object(obj):
if hasattr(tf.keras.utils, "legacy"):
return tf.keras.utils.legacy.serialize_keras_object(
obj
) # pragma: no cover
else:
return tf.keras.utils.serialize_keras_object(obj) # pragma: no cover
def deserialize_keras_object(
config, module_objects=None, custom_objects=None, printable_module_name=None
):
if hasattr(tf.keras.utils, "legacy"):
return (
tf.keras.utils.legacy.deserialize_keras_object( # pragma: no cover
config, custom_objects, module_objects, printable_module_name
)
)
else:
return tf.keras.utils.deserialize_keras_object( # pragma: no cover
config, custom_objects, module_objects, printable_module_name
)
|
2,472 |
temp dir
|
"""Helpers for writing unit tests."""
from collections.abc import Iterable
from io import BytesIO
import os
import re
import shutil
import sys
import tempfile
from unittest import TestCase as _TestCase
from fontTools.config import Config
from fontTools.misc.textTools import tobytes
from fontTools.misc.xmlWriter import XMLWriter
def parseXML(xmlSnippet):
"""Parses a snippet of XML.
Input can be either a single string (unicode or UTF-8 bytes), or a
a sequence of strings.
The result is in the same format that would be returned by
XMLReader, but the parser imposes no constraints on the root
element so it can be called on small snippets of TTX files.
"""
# To support snippets with multiple elements, we add a fake root.
reader = TestXMLReader_()
xml = b"<root>"
if isinstance(xmlSnippet, bytes):
xml += xmlSnippet
elif isinstance(xmlSnippet, str):
xml += tobytes(xmlSnippet, "utf-8")
elif isinstance(xmlSnippet, Iterable):
xml += b"".join(tobytes(s, "utf-8") for s in xmlSnippet)
else:
raise TypeError(
"expected string or sequence of strings; found %r"
% type(xmlSnippet).__name__
)
xml += b"</root>"
reader.parser.Parse(xml, 0)
return reader.root[2]
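# --- Illustrative sketch, not part of the original module ---
# parseXML accepts several top-level elements because of the fake <root>
# wrapper; the returned value is the list of parsed children in XMLReader
# format, i.e. (name, attrs, content) tuples and text strings. The snippet
# below is made up for illustration.
def _example_parse_snippet():
    return parseXML('<glyph name="A"/><glyph name="B"/>')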
def parseXmlInto(font, parseInto, xmlSnippet):
parsed_xml = [e for e in parseXML(xmlSnippet.strip()) if not isinstance(e, str)]
for name, attrs, content in parsed_xml:
parseInto.fromXML(name, attrs, content, font)
parseInto.populateDefaults()
return parseInto
class FakeFont:
def __init__(self, glyphs):
self.glyphOrder_ = glyphs
self.reverseGlyphOrderDict_ = {g: i for i, g in enumerate(glyphs)}
self.lazy = False
self.tables = {}
self.cfg = Config()
def __getitem__(self, tag):
return self.tables[tag]
def __setitem__(self, tag, table):
self.tables[tag] = table
def get(self, tag, default=None):
return self.tables.get(tag, default)
def getGlyphID(self, name):
return self.reverseGlyphOrderDict_[name]
def getGlyphIDMany(self, lst):
return [self.getGlyphID(gid) for gid in lst]
def getGlyphName(self, glyphID):
if glyphID < len(self.glyphOrder_):
return self.glyphOrder_[glyphID]
else:
return "glyph%.5d" % glyphID
def getGlyphNameMany(self, lst):
return [self.getGlyphName(gid) for gid in lst]
def getGlyphOrder(self):
return self.glyphOrder_
def getReverseGlyphMap(self):
return self.reverseGlyphOrderDict_
def getGlyphNames(self):
return sorted(self.getGlyphOrder())
class TestXMLReader_(object):
def __init__(self):
from xml.parsers.expat import ParserCreate
self.parser = ParserCreate()
self.parser.StartElementHandler = self.startElement_
self.parser.EndElementHandler = self.endElement_
self.parser.CharacterDataHandler = self.addCharacterData_
self.root = None
self.stack = []
def startElement_(self, name, attrs):
element = (name, attrs, [])
if self.stack:
self.stack[-1][2].append(element)
else:
self.root = element
self.stack.append(element)
def endElement_(self, name):
self.stack.pop()
def addCharacterData_(self, data):
self.stack[-1][2].append(data)
def makeXMLWriter(newlinestr="\n"):
# don't write OS-specific new lines
writer = XMLWriter(BytesIO(), newlinestr=newlinestr)
# erase XML declaration
writer.file.seek(0)
writer.file.truncate()
return writer
def getXML(func, ttFont=None):
"""Call the passed toXML function and return the written content as a
list of lines (unicode strings).
Result is stripped of XML declaration and OS-specific newline characters.
"""
writer = makeXMLWriter()
func(writer, ttFont)
xml = writer.file.getvalue().decode("utf-8")
# toXML methods must always end with a writer.newline()
assert xml.endswith("\n")
return xml.splitlines()
def stripVariableItemsFromTTX(
string: str,
ttLibVersion: bool = True,
checkSumAdjustment: bool = True,
modified: bool = True,
created: bool = True,
sfntVersion: bool = False, # opt-in only
) -> str:
"""Strip stuff like ttLibVersion, checksums, timestamps, etc. from TTX dumps."""
# ttlib changes with the fontTools version
if ttLibVersion:
string = re.sub(' ttLibVersion="[^"]+"', "", string)
# sometimes (e.g. some subsetter tests) we don't care whether it's OTF or TTF
if sfntVersion:
string = re.sub(' sfntVersion="[^"]+"', "", string)
# head table checksum and creation and mod date changes with each save.
if checkSumAdjustment:
string = re.sub('<checkSumAdjustment value="[^"]+"/>', "", string)
if modified:
string = re.sub('<modified value="[^"]+"/>', "", string)
if created:
string = re.sub('<created value="[^"]+"/>', "", string)
return string
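# --- Illustrative sketch, not part of the original module ---
# Typical use of stripVariableItemsFromTTX when comparing a freshly dumped TTX
# against an expected fixture: both sides are normalized before comparison.
# The argument names are assumptions for illustration.
def _example_compare_ttx(actual_ttx: str, expected_ttx: str) -> bool:
    return stripVariableItemsFromTTX(actual_ttx) == stripVariableItemsFromTTX(expected_ttx)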
class MockFont(object):
"""A font-like object that automatically adds any looked up glyphname
to its glyphOrder."""
def __init__(self):
self._glyphOrder = [".notdef"]
class AllocatingDict(dict):
def __missing__(reverseDict, key):
self._glyphOrder.append(key)
gid = len(reverseDict)
reverseDict[key] = gid
return gid
self._reverseGlyphOrder = AllocatingDict({".notdef": 0})
self.lazy = False
def getGlyphID(self, glyph):
gid = self._reverseGlyphOrder[glyph]
return gid
def getReverseGlyphMap(self):
return self._reverseGlyphOrder
def getGlyphName(self, gid):
return self._glyphOrder[gid]
def getGlyphOrder(self):
return self._glyphOrder
class TestCase(_TestCase):
def __init__(self, methodName):
_TestCase.__init__(self, methodName)
# Python 3 renamed assertRaisesRegexp to assertRaisesRegex,
# and fires deprecation warnings if a program uses the old name.
if not hasattr(self, "assertRaisesRegex"):
self.assertRaisesRegex = self.assertRaisesRegexp
class DataFilesHandler(TestCase):
def setUp(self):
self.tempdir = None
self.num_tempfiles = 0
def tearDown(self):
if self.tempdir:
shutil.rmtree(self.tempdir)
def getpath(self, testfile):
folder = os.path.dirname(sys.modules[self.__module__].__file__)
return os.path.join(folder, "data", testfile)
def METHOD_NAME(self):
if not self.tempdir:
self.tempdir = tempfile.mkdtemp()
def temp_font(self, font_path, file_name):
self.METHOD_NAME()
temppath = os.path.join(self.tempdir, file_name)
shutil.copy2(font_path, temppath)
return temppath
|
2,473 |
bulk index
|
# -*- coding: utf-8 -*-
#
# RERO ILS
# Copyright (C) 2019-2022 RERO
# Copyright (C) 2019-2022 UCLouvain
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""API for manipulating budgets."""
from functools import partial
from elasticsearch import NotFoundError
from rero_ils.modules.acquisition.acq_accounts.api import AcqAccount, \
AcqAccountsSearch
from rero_ils.modules.acquisition.api import AcquisitionIlsRecord
from rero_ils.modules.api import IlsRecordsIndexer, IlsRecordsSearch
from rero_ils.modules.fetchers import id_fetcher
from rero_ils.modules.minters import id_minter
from rero_ils.modules.organisations.api import Organisation
from rero_ils.modules.providers import Provider
from rero_ils.modules.utils import sorted_pids
from .models import BudgetIdentifier, BudgetMetadata
# provider
BudgetProvider = type(
'BudgetProvider',
(Provider,),
dict(identifier=BudgetIdentifier, pid_type='budg')
)
# minter
budget_id_minter = partial(id_minter, provider=BudgetProvider)
# fetcher
budget_id_fetcher = partial(id_fetcher, provider=BudgetProvider)
class BudgetsSearch(IlsRecordsSearch):
"""BudgetsSearch."""
class Meta:
"""Search only on budget index."""
index = 'budgets'
doc_types = None
fields = ('*', )
facets = {}
default_filter = None
class Budget(AcquisitionIlsRecord):
"""Budget class."""
minter = budget_id_minter
fetcher = budget_id_fetcher
provider = BudgetProvider
model_cls = BudgetMetadata
pids_exist_check = {
'required': {
'org': 'organisation'
}
}
@property
def name(self):
"""Shortcut for budget name."""
return self.get('name')
@property
def is_active(self):
"""Check if the budget should be considered as active."""
return self.get('is_active', False)
def get_related_accounts(self):
"""Get account related to this budget.
:rtype: an `AcqAccount` generator
"""
query = AcqAccountsSearch() \
.filter('term', budget__pid=self.pid) \
.source(False)
for hit in query.scan():
yield AcqAccount.get_record(hit.meta.id)
def get_links_to_me(self, get_pids=False):
"""Record links.
:param get_pids: if True list of linked pids
if False count of linked records
"""
links = {}
query = AcqAccountsSearch().filter('term', budget__pid=self.pid)
acq_accounts = sorted_pids(query) if get_pids else query.count()
if acq_accounts:
links['acq_accounts'] = acq_accounts
return links
def reasons_not_to_delete(self):
"""Get reasons not to delete record."""
cannot_delete = {}
# Note: not possible to delete records attached to rolled_over budget.
if not self.is_active:
cannot_delete['links'] = {'rolled_over': True}
return cannot_delete
if others := self.reasons_to_keep():
cannot_delete['others'] = others
if links := self.get_links_to_me():
cannot_delete['links'] = links
return cannot_delete
def reasons_to_keep(self):
"""Reasons aside from record_links to keep a budget."""
others = {}
organisation = Organisation.get_record_by_pid(self.organisation_pid)
if organisation.get('current_budget_pid') == self.pid:
others['is_default'] = True
return others
class BudgetsIndexer(IlsRecordsIndexer):
"""Indexing documents in Elasticsearch."""
record_cls = Budget
def index(self, record):
"""Indexing an budget record."""
BudgetsIndexer._check_is_active_changed(record)
return super().index(record)
def METHOD_NAME(self, record_id_iterator):
"""Bulk index records.
:param record_id_iterator: Iterator yielding record UUIDs.
"""
super().METHOD_NAME(record_id_iterator, doc_type='budg')
@classmethod
def _check_is_active_changed(cls, record):
"""Detect is `is_active` field changed.
In this case, we need to reindex related accounts to set them as
inactive into the AcqAccount ES index.
:param record: the record to index.
"""
try:
original_record = BudgetsSearch().get_record_by_pid(record.pid)
if record.is_active != original_record['is_active']:
for account in record.get_related_accounts():
account.reindex()
except NotFoundError:
pass
|
2,474 |
biosample characterization 1
|
import pytest
@pytest.fixture
def biosample_characterization_no_review(testapp, award, lab, biosample, attachment):
item = {
'characterizes': biosample['@id'],
'award': award['@id'],
'lab': lab['@id'],
'attachment': attachment,
}
return testapp.post_json('/biosample_characterization', item).json['@graph'][0]
@pytest.fixture
def biosample_characterization(testapp, award, lab, biosample, attachment):
item = {
'characterizes': biosample['@id'],
'award': award['@id'],
'lab': lab['@id'],
'attachment': attachment,
}
return testapp.post_json('/biosample_characterization', item).json['@graph'][0]
@pytest.fixture
def biosample_characterization_2nd_opinion(testapp, award, lab, submitter, biosample, attachment):
item = {
'characterizes': biosample['@id'],
'award': award['@id'],
'lab': lab['@id'],
'attachment': attachment,
'review': {
'status': 'requires secondary opinion',
'lab': lab['@id'],
'reviewed_by': submitter['@id'],
},
}
return testapp.post_json('/biosample_characterization', item).json['@graph'][0]
@pytest.fixture
def biosample_characterization_exempt(testapp, award, lab, submitter, biosample, attachment):
item = {
'characterizes': biosample['@id'],
'award': award['@id'],
'lab': lab['@id'],
'attachment': attachment,
'review': {
'status': 'exempt from standards',
'lab': lab['@id'],
'reviewed_by': submitter['@id'],
},
}
return testapp.post_json('/biosample_characterization', item).json['@graph'][0]
@pytest.fixture
def biosample_characterization_not_compliant(testapp, award, lab, submitter, biosample, attachment):
item = {
'characterizes': biosample['@id'],
'award': award['@id'],
'lab': lab['@id'],
'attachment': attachment,
'review': {
'status': 'not compliant',
'lab': lab['@id'],
'reviewed_by': submitter['@id'],
},
}
return testapp.post_json('/biosample_characterization', item).json['@graph'][0]
@pytest.fixture
def biosample_characterization_compliant(testapp, award, lab, submitter, biosample, attachment):
item = {
'characterizes': biosample['@id'],
'award': award['@id'],
'lab': lab['@id'],
'attachment': attachment,
'review': {
'status': 'compliant',
'lab': lab['@id'],
'reviewed_by': submitter['@id'],
},
}
return testapp.post_json('/biosample_characterization', item).json['@graph'][0]
@pytest.fixture
def METHOD_NAME(biosample_characterization_base):
item = biosample_characterization_base.copy()
item.update({
'schema_version': '2',
'status': 'APPROVED',
'characterization_method': 'immunofluorescence',
})
return item
@pytest.fixture
def biosample_characterization_2(biosample_characterization_base):
item = biosample_characterization_base.copy()
item.update({
'schema_version': '3',
'status': 'IN PROGRESS',
'award': '1a4d6443-8e29-4b4a-99dd-f93e72d42418'
})
return item
@pytest.fixture
def antibody_characterization_3(antibody_characterization):
item = antibody_characterization.copy()
item.update({
'schema_version': '4',
'characterization_method': 'immunoblot',
})
return item
@pytest.fixture
def biosample_characterization_4(root, biosample_characterization, publication):
item = root.get_by_uuid(biosample_characterization['uuid'])
properties = item.properties.copy()
properties.update({
'schema_version': '4',
'references': [publication['identifiers'][0]],
})
return properties
@pytest.fixture
def antibody_characterization_10(antibody_characterization_1):
item = antibody_characterization_1.copy()
item.update({
'status': 'pending dcc review',
'characterization_method': 'immunoprecipitation followed by mass spectrometry',
'comment': 'We tried really hard to characterize this antibody.',
'notes': 'Your plea has been noted.'
})
return item
@pytest.fixture
def antibody_characterization_11(antibody_characterization):
item = antibody_characterization.copy()
item.update({
'characterization_reviews': [{
'biosample_term_name': 'K562',
'biosample_term_id': 'EFO:0002067',
'lane_status': 'exempt from standards',
'biosample_type': 'immortalized cell line',
'lane': 2,
'organism': '/organisms/human/'
}]
})
return item
@pytest.fixture
def antibody_characterization_13(antibody_characterization):
item = antibody_characterization.copy()
item.update({
'characterization_reviews': [{
'biosample_term_name': 'HUES62',
'biosample_term_id': 'EFO:0007087',
'lane_status': 'exempt from standards',
'biosample_type': 'induced pluripotent stem cell line',
'lane': 2,
'organism': '/organisms/human/'
}]
})
return item
@pytest.fixture
def antibody_characterization_14(antibody_characterization):
item = antibody_characterization.copy()
item.update({
'characterization_reviews': [{
'biosample_term_name': 'A549',
'biosample_term_id': 'EFO:0001086',
'lane_status': 'exempt from standards',
'biosample_type': 'cell line',
'lane': 2,
'organism': '/organisms/human/'
}]
})
return item
|
2,475 |
decrypt
|
'''
Brent Waters (Pairing-based)
| From: "Functional Encryption for Regular Languages".
| Published in: 2012
| Available from: http://eprint.iacr.org/2012/384
| Notes:
| Security Assumption:
|
| type: functional encryption ("public index")
| setting: Pairing
:Authors: J Ayo Akinyele
:Date: 12/2012
'''
from charm.toolbox.pairinggroup import PairingGroup,ZR,G1,G2,GT,pair
from charm.toolbox.DFA import DFA
debug = False
class FE_DFA:
def __init__(self, _groupObj, _dfaObj):
global group, dfaObj
group = _groupObj
dfaObj = _dfaObj
def setup(self, alphabet):
g, z, h_start, h_end = group.random(G1, 4)
h = {'start':h_start, 'end':h_end }
for sigma in alphabet:
h[str(sigma)] = group.random(G1)
alpha = group.random(ZR)
msk = g ** -alpha
mpk = {'egg':pair(g, g) ** alpha, 'g':g, 'z':z, 'h':h }
return (mpk, msk)
def keygen(self, mpk, msk, dfaM):
Q, S, T, q0, F = dfaM
q = len(Q)
# associate D_i with each state q_i in Q
D = group.random(G1, q+1) # [0, q] including q-th index
r_start = group.random(ZR)
K = {}
K['start1'] = D[0] * (mpk['h']['start'] ** r_start)
K['start2'] = mpk['g'] ** r_start
for t in T: # for each tuple, t in transition list
r = group.random(ZR)
(x, y, sigma) = t
K[str(t)] = {}
K[str(t)][1] = (D[x] ** -1) * (mpk['z'] ** r)
K[str(t)][2] = mpk['g'] ** r
K[str(t)][3] = D[y] * ((mpk['h'][str(sigma)]) ** r)
# for each accept state in the set of all accept states
K['end'] = {}
for x in F:
rx = group.random(ZR)
K['end'][str(x)] = {}
K['end'][str(x)][1] = msk * D[x] * (mpk['h']['end'] ** rx)
K['end'][str(x)][2] = mpk['g'] ** rx
sk = {'K':K, 'dfaM':dfaM }
return sk
def encrypt(self, mpk, w, M):
l = len(w) # symbols of string
s = group.random(ZR, l+1) # l+1 b/c it includes 'l'-th index
C = {}
C['m'] = M * (mpk['egg'] ** s[l])
C[0] = {}
C[0][1] = mpk['g'] ** s[0]
C[0][2] = mpk['h']['start'] ** s[0]
for i in range(1, l+1):
C[i] = {}
C[i][1] = mpk['g'] ** s[i]
C[i][2] = (mpk['h'][ str(w[i]) ] ** s[i]) * (mpk['z'] ** s[i-1])
C['end1'] = mpk['g'] ** s[l]
C['end2'] = mpk['h']['end'] ** s[l]
ct = {'C':C, 'w':w}
return ct
def METHOD_NAME(self, sk, ct):
K, dfaM = sk['K'], sk['dfaM']
C, w = ct['C'], ct['w']
l = len(w)
B = {}
# if DFA does not accept string, return immediately
if not dfaObj.accept(dfaM, w):
print("DFA rejects: ", w)
return False
Ti = dfaObj.getTransitions(dfaM, w) # returns a tuple of transitions
B[0] = pair(C[0][1], K['start1']) * (pair(C[0][2], K['start2']) ** -1)
for i in range(1, l+1):
ti = Ti[i]
if debug: print("transition: ", ti)
B[i] = B[i-1] * pair(C[i-1][1], K[str(ti)][1]) * (pair(C[i][2], K[str(ti)][2]) ** -1) * pair(C[i][1], K[str(ti)][3])
x = dfaObj.getAcceptState(Ti) # retrieve accept state
Bend = B[l] * (pair(C['end1'], K['end'][str(x)][1]) ** -1) * pair(C['end2'], K['end'][str(x)][2])
M = C['m'] / Bend
return M
def main():
global group
group = PairingGroup("SS512")
alphabet = {'a', 'b'}
dfa = DFA("ab*a", alphabet)
dfaM = dfa.constructDFA()
fe = FE_DFA(group, dfa)
(mpk, msk) = fe.setup(alphabet)
if debug: print("mpk :=>", mpk, "\n\n")
sk = fe.keygen(mpk, msk, dfaM)
if debug: print("sk :=>", sk)
w = dfa.getSymbols("abba")
M = group.random(GT)
ct = fe.encrypt(mpk, w, M)
origM = fe.METHOD_NAME(sk, ct)
assert M == origM, "failed decryption!"
if debug: print("Successful Decryption!!!!!")
if __name__ == "__main__":
debug = True
main()
|
2,476 |
remove all children
|
# =============================================================================
# Copyright (C) 2010 Diego Duclos
#
# This file is part of pyfa.
#
# pyfa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyfa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyfa. If not, see <http://www.gnu.org/licenses/>.
# =============================================================================
# noinspection PyPackageRequirements
import wx
class PFListPane(wx.ScrolledWindow):
def __init__(self, parent):
wx.ScrolledWindow.__init__(self, parent, pos=wx.DefaultPosition, style=wx.TAB_TRAVERSAL)
self._wList = []
self._wCount = 0
self.itemsHeight = 1
self.SetBackgroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_WINDOW))
self.SetVirtualSize((1, 1))
self.SetScrollRate(0, 1)
self.Bind(wx.EVT_SCROLLWIN_LINEUP, self.MScrollUp)
self.Bind(wx.EVT_SCROLLWIN_LINEDOWN, self.MScrollDown)
# self.Bind(wx.EVT_CHILD_FOCUS, self.OnChildFocus)
# self.Bind(wx.EVT_LEFT_DOWN, self.ForceFocus)
self.SetFocus()
# self.Bind(wx.EVT_MOUSE_CAPTURE_CHANGED, self.ForceFocus)
self.Bind(wx.EVT_SCROLLWIN_THUMBRELEASE, self.ForceFocus)
def ForceFocus(self, event):
if self.FindFocus() and self.FindFocus().Parent != self:
self.SetFocus()
event.Skip()
def OnChildFocus(self, event):
event.Skip()
child = event.GetWindow()
self.ScrollChildIntoView(child)
def MScrollUp(self, event):
posy = self.GetScrollPos(wx.VERTICAL)
posy -= self.itemsHeight
self.Scroll(0, posy)
event.Skip()
def MScrollDown(self, event):
posy = self.GetScrollPos(wx.VERTICAL)
posy += self.itemsHeight
self.Scroll(0, posy)
event.Skip()
def ScrollChildIntoView(self, child):
"""
Scrolls the panel such that the specified child window is in view.
"""
sppu_x, sppu_y = self.GetScrollPixelsPerUnit()
vs_x, vs_y = self.GetViewStart()
cr = child.GetRect()
clntsz = self.GetSize()
new_vs_x, new_vs_y = -1, -1
# is it before the left edge?
if cr.x < 0 < sppu_x:
new_vs_x = vs_x + (cr.x / sppu_x)
# is it above the top?
if cr.y < 0 < sppu_y:
new_vs_y = vs_y + (cr.y / sppu_y)
# For the right and bottom edges, scroll enough to show the
# whole control if possible, but if not just scroll such that
# the top/left edges are still visible
# is it past the right edge ?
if cr.right > clntsz.width and sppu_x > 0:
diff = (cr.right - clntsz.width + 1) / sppu_x
if cr.x - diff * sppu_x > 0:
new_vs_x = vs_x + diff
else:
new_vs_x = vs_x + (cr.x / sppu_x)
# is it below the bottom ?
if cr.bottom > clntsz.height and sppu_y > 0:
diff = (cr.bottom - clntsz.height + 1) / sppu_y
if cr.y - diff * sppu_y > 0:
new_vs_y = vs_y + diff
else:
new_vs_y = vs_y + (cr.y / sppu_y)
# if we need to adjust
if new_vs_x != -1 or new_vs_y != -1:
self.Scroll(new_vs_x, new_vs_y)
def AddWidget(self, widget):
widget.Reparent(self)
self._wList.append(widget)
self._wCount += 1
def GetWidgetList(self):
return self._wList
    # Override this method if needed (returns False by default if we do not want to scroll to the selected widget)
def IsWidgetSelectedByContext(self, widget):
return False
def RefreshList(self, doRefresh=False, doFocus=False):
maxy = 0
selected = None
for i in range(len(self._wList)):
iwidth, iheight = self._wList[i].GetSize()
xa, ya = self.CalcScrolledPosition((0, maxy))
self._wList[i].SetPosition((xa, ya))
if self.IsWidgetSelectedByContext(i):
selected = self._wList[i]
maxy += iheight
self.SetVirtualSize((1, maxy))
cwidth, cheight = self.GetVirtualSize()
if selected:
self.ScrollChildIntoView(selected)
# selected.SetFocus()
elif doFocus:
self.SetFocus()
for i in range(len(self._wList)):
iwidth, iheight = self._wList[i].GetSize()
self._wList[i].SetSize((cwidth, iheight))
if doRefresh is True:
self._wList[i].Refresh()
self.itemsHeight = iheight
# This is needed as under GTK wx does not emit scroll up/scroll down
# events, see issue #1909 for more info
if 'wxGTK' in wx.PlatformInfo:
self.SetScrollRate(0, self.itemsHeight)
def RemoveWidget(self, child):
child.Destroy()
self._wList.remove(child)
def METHOD_NAME(self):
for widget in self._wList:
widget.Destroy()
self.Scroll(0, 0)
self._wList = []
|
2,477 |
get numpy dtype info
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Hexagon pytest utility functions """
from typing import List, Optional, Union
import collections
import numpy as np
def get_test_id(*test_params, test_param_descs: List[Optional[str]] = None) -> str:
"""
An opinionated alternative to pytest's default algorithm for generating a
test's ID string. Intended to make it easier for human readers to
interpret the test IDs.
'test_params': The sequence of pytest parameter values supplied to some unit
test.
'test_param_descs': An (optional) means to provide additional text for some/all of the
        arguments in 'test_params'.
If provided, then len(test_params) must equal len(test_param_descs).
Each element test_param_descs that is a non-empty string will be used
in some sensible way in this function's returned string.
"""
assert len(test_params) > 0
if test_param_descs is None:
test_param_descs = [None] * len(test_params)
else:
assert len(test_param_descs) == len(test_params)
def get_single_param_chunk(param_val, param_desc: Optional[str]):
if isinstance(param_val, list):
# Like str(list), but avoid the whitespace padding.
val_str = "[" + ",".join(str(x) for x in param_val) + "]"
need_prefix_separator = False
elif isinstance(param_val, bool):
if param_val:
val_str = "T"
else:
val_str = "F"
need_prefix_separator = True
elif isinstance(param_val, TensorContentConstant):
val_str = f"const[{param_val.elem_value}]"
need_prefix_separator = True
elif isinstance(param_val, TensorContentDtypeMin):
val_str = "min"
need_prefix_separator = True
elif isinstance(param_val, TensorContentDtypeMax):
val_str = "max"
need_prefix_separator = True
elif isinstance(param_val, TensorContentRandom):
val_str = "random"
need_prefix_separator = True
elif isinstance(param_val, TensorContentSequentialCOrder):
val_str = f"seqC[start:{param_val.start_value},inc:{param_val.increment}]"
need_prefix_separator = True
else:
val_str = str(param_val)
need_prefix_separator = True
if param_desc and need_prefix_separator:
return f"{param_desc}:{val_str}"
elif param_desc and not need_prefix_separator:
return f"{param_desc}{val_str}"
else:
return val_str
chunks = [
get_single_param_chunk(param_val, param_desc)
for param_val, param_desc in zip(test_params, test_param_descs)
]
return "-".join(chunks)
def get_multitest_ids(
multitest_params_list: List[List], param_descs: Optional[List[Optional[str]]]
) -> List[str]:
"""
A convenience function for classes that use both 'tvm.testing.parameters' and 'get_test_id'.
This function provides a workaround for a specific quirk in Python, where list-comprehension
can't necessarily access the value of another class-variable, discused here:
https://stackoverflow.com/q/13905741
"""
return [
get_test_id(*single_test_param_list, test_param_descs=param_descs)
for single_test_param_list in multitest_params_list
]
def METHOD_NAME(dtype) -> Union[np.finfo, np.iinfo]:
"""
Return an appropriate 'np.iinfo' or 'np.finfo' object corresponding to
the specified Numpy dtype.
'dtype' must be a value that 'numpy.dtype(...)' can handle.
"""
np_dtype = np.dtype(dtype)
kind = np_dtype.kind
if kind == "f":
return np.finfo(np_dtype)
elif kind == "i":
return np.iinfo(np_dtype)
else:
raise TypeError(f"dtype ({dtype}) must indicate some floating-point or integral data type")
TensorContentConstant = collections.namedtuple("TensorContentConstant", ["elem_value"])
TensorContentSequentialCOrder = collections.namedtuple(
"TensorContentSequentialCOrder", ["start_value", "increment"]
)
TensorContentRandom = collections.namedtuple("TensorContentRandom", [])
TensorContentDtypeMin = collections.namedtuple("TensorContentDtypeMin", [])
TensorContentDtypeMax = collections.namedtuple("TensorContentDtypeMax", [])
def create_populated_numpy_ndarray(
input_shape: Union[list, tuple], dtype: str, input_tensor_populator
) -> np.ndarray:
"""
Create a numpy tensor with the specified shape, dtype, and content.
"""
itp = input_tensor_populator # just for brevity
if isinstance(itp, TensorContentConstant):
return np.full(tuple(input_shape), itp.elem_value, dtype=dtype)
elif isinstance(itp, TensorContentDtypeMin):
info = METHOD_NAME(dtype)
return np.full(tuple(input_shape), info.min, dtype=dtype)
elif isinstance(itp, TensorContentDtypeMax):
info = METHOD_NAME(dtype)
return np.full(tuple(input_shape), info.max, dtype=dtype)
elif isinstance(itp, TensorContentRandom):
return np.random.random(input_shape).astype(dtype)
elif isinstance(itp, TensorContentSequentialCOrder):
a = np.empty(tuple(input_shape), dtype)
with np.nditer(a, op_flags=["writeonly"], order="C") as iterator:
next_elem_val = itp.start_value
for elem in iterator:
elem[...] = next_elem_val
next_elem_val += itp.increment
return a
else:
raise ValueError(f"Unexpected input_tensor_populator type: {type(itp)}")
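# --- Illustrative sketch, not part of the original module ---
# Examples of the tensor-content descriptors defined above; the shapes and
# dtypes are arbitrary choices for illustration. The sequential descriptor
# fills the array in C order with start_value, start_value + increment, ...
def _example_populated_tensors():
    ramp = create_populated_numpy_ndarray((2, 3), "int8", TensorContentSequentialCOrder(0, 1))
    noise = create_populated_numpy_ndarray((2, 3), "float32", TensorContentRandom())
    return ramp, noise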
|
2,478 |
generate setup
|
"""
Generates setups for tracer advection-diffusion MMS test
"""
import sympy
from sympy import init_printing
init_printing()
# coordinates
x, y, z = sympy.symbols('x y z')
# domain lenght, x in [0, Lx], y in [0, Ly]
lx, ly = sympy.symbols('lx ly')
def is_constant(u):
"""
True if u does not depend on x,y,z
"""
out = 0
for i in (x, y, z):
out += sympy.diff(u, i)
return out == 0
def get_ufl_expr(u):
"""Generates string that can be evaluated as a UFL expression"""
fmt = 'Constant({:})' if is_constant(u) else '{:}'
return fmt.format(str(u))
def get_scalar_entry(name, u, *args):
"""Generates an entry for a scalar expression"""
t = """
def {name}(self, {args}):
return {u}\n"""
args_str = ', '.join(args)
return t.format(name=name, u=get_ufl_expr(u), args=args_str)
def get_vector_entry(name, u, v, w, *args):
"""Generates an entry for a 2d vector expression"""
t = """
def {name}(self, {args}):
return as_vector(
[
{:},
{:},
{:},
])\n"""
args_str = ', '.join(args)
uvw = map(get_ufl_expr, (u, v, w))
return t.format(*uvw, name=name, args=args_str)
def get_header(name, description):
t = '''class {name}:
"""
{txt}
"""'''
return t.format(name=name, txt=description)
def compute_residual(h, eta, u, v, w, kappa, tracer):
"""Compute residual of advection-diffusion equation"""
adv = sympy.diff(tracer*u, x) + sympy.diff(tracer*v, y) + sympy.diff(tracer*w, z)
stress = kappa*(sympy.diff(tracer, x) + sympy.diff(tracer, y))
diff = sympy.diff(stress, x) + sympy.diff(stress, y)
res = adv + diff
return res
def compute_w(eta, u, v, h):
"""Solves w from continuity equation"""
div_uv = sympy.diff(u, x) + sympy.diff(v, y)
c = u*sympy.diff(h, x) + v*sympy.diff(h, y)
w = -sympy.integrate(div_uv, (z, -h, z)) - c
return w
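# Note (added for clarity, not part of the original script): compute_w applies
# the continuity equation. With div(u, v, w) = 0 and the bottom kinematic
# condition w(-h) = -(u*h_x + v*h_y), integrating from z' = -h to z gives
#   w(z) = -\int_{-h}^{z} (u_x + v_y) dz' - (u h_x + v h_y)
# which is exactly what the sympy.integrate call above evaluates.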
def METHOD_NAME(name, description, h, eta, u, v, kappa, tracer):
"""
Generates setup function that can be copied to mms test.
"""
w = compute_w(eta, u, v, h)
residual = compute_residual(h, eta, u, v, w, kappa, tracer)
txt = ''
txt += get_header(name, description)
args_2d = 'x', 'y', 'lx', 'ly'
args_3d = 'x', 'y', 'z', 'lx', 'ly'
txt += get_scalar_entry('bath', h, *args_2d)
txt += get_scalar_entry('elev', eta, *args_2d)
txt += get_vector_entry('uv', u, v, 0, *args_3d)
txt += get_vector_entry('w', 0, 0, w, *args_3d)
txt += get_scalar_entry('kappa', kappa, *args_3d)
txt += get_scalar_entry('tracer', tracer, *args_3d)
txt += get_scalar_entry('residual', residual, *args_3d)
print('')
print(txt)
name = 'Setup1'
description = """Constant bathymetry and u velocty, zero diffusivity, non-trivial tracer"""
h = 40.0
eta = 0.0
u = 1.0
v = 0.0
kappa = 0.0
tracer = sympy.sin(0.2*sympy.pi*(3.0*x + 1.0*y)/lx)
METHOD_NAME(name, description, h, eta, u, v, kappa, tracer)
name = 'Setup2'
description = """Constant bathymetry, zero velocity, constant kappa, x-varying T"""
h = 40.0
eta = 0.0
u = 1.0
v = 0.0
kappa = 50.0
tracer = sympy.sin(3*sympy.pi*x/lx)
METHOD_NAME(name, description, h, eta, u, v, kappa, tracer)
name = 'Setup3'
description = """Constant bathymetry, zero kappa, non-trivial velocity and T"""
h = 40.0
eta = 0.0
u = sympy.sin(sympy.pi*(y/ly + 2*x/lx))*sympy.sin(sympy.pi*z/40)
v = sympy.sin(sympy.pi*(0.3*y/ly + 0.3*x/lx))*sympy.sin(sympy.pi*z/40)
kappa = 0.0
tracer = (0.8*sympy.cos(0.5*sympy.pi*z/40) + 0.2)*sympy.cos(sympy.pi*(0.75*y/ly + 1.5*x/lx))
METHOD_NAME(name, description, h, eta, u, v, kappa, tracer)
name = 'Setup4'
description = """Constant bathymetry, constant kappa, non-trivial velocity and T"""
h = 40.0
eta = 0.0
u = sympy.sin(sympy.pi*(y/ly + 2*x/lx))*sympy.sin(sympy.pi*z/40)
v = sympy.sin(sympy.pi*(0.3*y/ly + 0.3*x/lx))*sympy.sin(sympy.pi*z/40)
kappa = 50.0
tracer = (0.8*sympy.cos(0.5*sympy.pi*z/40) + 0.2)*sympy.cos(sympy.pi*(0.75*y/ly + 1.5*x/lx))
METHOD_NAME(name, description, h, eta, u, v, kappa, tracer)
|
2,479 |
get voltage
|
import os
from Components.config import config, ConfigSubList, ConfigSubsection, ConfigSlider
from Components.SystemInfo import BoxInfo
from Tools.BoundFunction import boundFunction
import NavigationInstance
from enigma import iRecordableService, pNavigation
class FanControl:
# ATM there's only support for one fan
def __init__(self):
if os.path.exists("/proc/stb/fp/fan_vlt") or os.path.exists("/proc/stb/fp/fan_pwm") or os.path.exists("/proc/stb/fp/fan_speed"):
self.fancount = 1
else:
self.fancount = 0
self.createConfig()
config.misc.standbyCounter.addNotifier(self.standbyCounterChanged, initial_call=False)
def setVoltage_PWM(self):
for fanid in list(range(self.getFanCount())):
cfg = self.getConfig(fanid)
self.setVoltage(fanid, cfg.vlt.value)
self.setPWM(fanid, cfg.pwm.value)
print("[FanControl]: setting fan values: fanid = %d, voltage = %d, pwm = %d" % (fanid, cfg.vlt.value, cfg.pwm.value))
def setVoltage_PWM_Standby(self):
for fanid in list(range(self.getFanCount())):
cfg = self.getConfig(fanid)
self.setVoltage(fanid, cfg.vlt_standby.value)
self.setPWM(fanid, cfg.pwm_standby.value)
print("[FanControl]: setting fan values (standby mode): fanid = %d, voltage = %d, pwm = %d" % (fanid, cfg.vlt_standby.value, cfg.pwm_standby.value))
def getRecordEvent(self, recservice, event):
recordings = len(NavigationInstance.instance.getRecordings(False, pNavigation.isRealRecording))
if event == iRecordableService.evEnd:
if recordings == 0:
self.setVoltage_PWM_Standby()
elif event == iRecordableService.evStart:
if recordings == 1:
self.setVoltage_PWM()
def leaveStandby(self):
NavigationInstance.instance.record_event.remove(self.getRecordEvent)
recordings = NavigationInstance.instance.getRecordings(False, pNavigation.isRealRecording)
if not recordings:
self.setVoltage_PWM()
def standbyCounterChanged(self, configElement):
from Screens.Standby import inStandby
inStandby.onClose.append(self.leaveStandby)
recordings = NavigationInstance.instance.getRecordings(False, pNavigation.isRealRecording)
NavigationInstance.instance.record_event.append(self.getRecordEvent)
if not recordings:
self.setVoltage_PWM_Standby()
def createConfig(self):
def setVlt(fancontrol, fanid, configElement):
fancontrol.setVoltage(fanid, configElement.value)
def setPWM(fancontrol, fanid, configElement):
fancontrol.setPWM(fanid, configElement.value)
config.fans = ConfigSubList()
for fanid in list(range(self.getFanCount())):
fan = ConfigSubsection()
fan.vlt = ConfigSlider(default=15, increment=5, limits=(0, 255))
if BoxInfo.getItem("machinebuild") == 'tm2t':
fan.pwm = ConfigSlider(default=150, increment=5, limits=(0, 255))
            elif BoxInfo.getItem("machinebuild") == 'tmsingle':
fan.pwm = ConfigSlider(default=100, increment=5, limits=(0, 255))
else:
fan.pwm = ConfigSlider(default=50, increment=5, limits=(0, 255))
fan.vlt_standby = ConfigSlider(default=5, increment=5, limits=(0, 255))
fan.pwm_standby = ConfigSlider(default=0, increment=5, limits=(0, 255))
fan.vlt.addNotifier(boundFunction(setVlt, self, fanid))
fan.pwm.addNotifier(boundFunction(setPWM, self, fanid))
config.fans.append(fan)
def getConfig(self, fanid):
return config.fans[fanid]
def getFanCount(self):
return self.fancount
def hasRPMSensor(self, fanid):
return os.path.exists("/proc/stb/fp/fan_speed")
def hasFanControl(self, fanid):
return os.path.exists("/proc/stb/fp/fan_vlt") or os.path.exists("/proc/stb/fp/fan_pwm")
def getFanSpeed(self, fanid):
return int(open("/proc/stb/fp/fan_speed").readline().strip()[:-4])
def METHOD_NAME(self, fanid):
return int(open("/proc/stb/fp/fan_vlt").readline().strip(), 16)
def setVoltage(self, fanid, value):
if value > 255:
return
open("/proc/stb/fp/fan_vlt", "w").write("%x" % value)
def getPWM(self, fanid):
return int(open("/proc/stb/fp/fan_pwm").readline().strip(), 16)
def setPWM(self, fanid, value):
if value > 255:
return
open("/proc/stb/fp/fan_pwm", "w").write("%x" % value)
fancontrol = FanControl()
|
2,480 |
qhull pkgconfig name
|
from conan import ConanFile
from conan.tools.cmake import CMake, CMakeToolchain, cmake_layout
from conan.tools.files import apply_conandata_patches, copy, export_conandata_patches, get, rmdir
from conan.tools.microsoft import is_msvc
import os
required_conan_version = ">=1.53.0"
class QhullConan(ConanFile):
name = "qhull"
description = "Qhull computes the convex hull, Delaunay triangulation, " \
"Voronoi diagram, halfspace intersection about a point, " \
"furthest-site Delaunay triangulation, and furthest-site " \
"Voronoi diagram."
license = "Qhull"
topics = ("geometry", "convex", "triangulation", "intersection")
homepage = "http://www.qhull.org"
url = "https://github.com/conan-io/conan-center-index"
package_type = "library"
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
"reentrant": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
"reentrant": True,
}
def export_sources(self):
export_conandata_patches(self)
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
if self.options.shared:
self.options.rm_safe("fPIC")
self.settings.rm_safe("compiler.cppstd")
self.settings.rm_safe("compiler.libcxx")
def layout(self):
cmake_layout(self, src_folder="src")
def package_id(self):
del self.info.options.reentrant
def source(self):
get(self, **self.conan_data["sources"][self.version], strip_root=True)
def generate(self):
tc = CMakeToolchain(self)
tc.generate()
def build(self):
apply_conandata_patches(self)
cmake = CMake(self)
cmake.configure()
cmake.build()
def package(self):
copy(self, "COPYING.txt", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses"))
cmake = CMake(self)
cmake.install()
rmdir(self, os.path.join(self.package_folder, "doc"))
rmdir(self, os.path.join(self.package_folder, "man"))
rmdir(self, os.path.join(self.package_folder, "lib", "cmake"))
rmdir(self, os.path.join(self.package_folder, "lib", "pkgconfig"))
rmdir(self, os.path.join(self.package_folder, "share"))
def package_info(self):
self.cpp_info.set_property("cmake_file_name", "Qhull")
self.cpp_info.set_property("cmake_target_name", f"Qhull::{self._qhull_cmake_name}")
self.cpp_info.set_property("pkg_config_name", self.METHOD_NAME)
# TODO: back to global scope once cmake_find_package* generators removed
self.cpp_info.components["libqhull"].libs = [self._qhull_lib_name]
if self.settings.os in ["Linux", "FreeBSD"]:
self.cpp_info.components["libqhull"].system_libs.append("m")
if is_msvc(self) and self.options.shared:
self.cpp_info.components["libqhull"].defines.extend(["qh_dllimport"])
# TODO: to remove in conan v2 once cmake_find_package* & pkg_config generators removed
self.cpp_info.names["cmake_find_package"] = "Qhull"
self.cpp_info.names["cmake_find_package_multi"] = "Qhull"
self.cpp_info.names["pkg_config"] = self.METHOD_NAME
self.cpp_info.components["libqhull"].names["cmake_find_package"] = self._qhull_cmake_name
self.cpp_info.components["libqhull"].names["cmake_find_package_multi"] = self._qhull_cmake_name
self.cpp_info.components["libqhull"].names["pkg_config"] = self.METHOD_NAME
self.cpp_info.components["libqhull"].set_property("cmake_target_name", f"Qhull::{self._qhull_cmake_name}")
self.cpp_info.components["libqhull"].set_property("pkg_config_name", self.METHOD_NAME)
self.env_info.PATH.append(os.path.join(self.package_folder, "bin"))
@property
def _qhull_cmake_name(self):
name = ""
if self.options.reentrant:
name = "qhull_r" if self.options.shared else "qhullstatic_r"
else:
name = "libqhull" if self.options.shared else "qhullstatic"
return name
@property
def METHOD_NAME(self):
name = "qhull"
if not self.options.shared:
name += "static"
if self.options.reentrant:
name += "_r"
return name
@property
def _qhull_lib_name(self):
name = "qhull"
if not self.options.shared:
name += "static"
if self.settings.build_type == "Debug" or self.options.reentrant:
name += "_"
if self.options.reentrant:
name += "r"
if self.settings.build_type == "Debug":
name += "d"
return name
|
2,481 |
enricher rules
|
import logging
from typing import TYPE_CHECKING, Any, Callable
from rotkehlchen.accounting.structures.types import HistoryEventSubType, HistoryEventType
from rotkehlchen.assets.asset import EvmToken
from rotkehlchen.chain.ethereum.modules.balancer.constants import BALANCER_LABEL, CPT_BALANCER_V2
from rotkehlchen.chain.ethereum.utils import asset_normalized_value
from rotkehlchen.chain.evm.decoding.interfaces import DecoderInterface
from rotkehlchen.chain.evm.decoding.structures import (
DEFAULT_DECODING_OUTPUT,
FAILED_ENRICHMENT_OUTPUT,
ActionItem,
DecoderContext,
DecodingOutput,
EnricherContext,
TransferEnrichmentOutput,
)
from rotkehlchen.chain.evm.decoding.types import CounterpartyDetails, EventCategory
from rotkehlchen.chain.evm.types import string_to_evm_address
from rotkehlchen.constants.assets import A_ETH, A_WETH
from rotkehlchen.constants.resolver import ethaddress_to_identifier
from rotkehlchen.logging import RotkehlchenLogsAdapter
from rotkehlchen.types import ChecksumEvmAddress, DecoderEventMappingType
from rotkehlchen.utils.misc import hex_or_bytes_to_address, hex_or_bytes_to_int
if TYPE_CHECKING:
from rotkehlchen.chain.evm.decoding.base import BaseDecoderTools
from rotkehlchen.chain.evm.node_inquirer import EvmNodeInquirer
from rotkehlchen.user_messages import MessagesAggregator
V2_SWAP = b'!p\xc7A\xc4\x151\xae\xc2\x0e|\x10|$\xee\xcf\xdd\x15\xe6\x9c\x9b\xb0\xa8\xdd7\xb1\x84\x0b\x9e\x0b {' # noqa: E501
VAULT_ADDRESS = string_to_evm_address('0xBA12222222228d8Ba445958a75a0704d566BF2C8')
logger = logging.getLogger(__name__)
log = RotkehlchenLogsAdapter(logger)
class Balancerv2Decoder(DecoderInterface):
def __init__(
self,
evm_inquirer: 'EvmNodeInquirer',
base_tools: 'BaseDecoderTools',
msg_aggregator: 'MessagesAggregator',
) -> None:
super().__init__(
evm_inquirer=evm_inquirer,
base_tools=base_tools,
msg_aggregator=msg_aggregator,
)
self.eth = A_ETH.resolve_to_crypto_asset()
self.weth = A_WETH.resolve_to_evm_token()
def decode_swap_creation(self, context: DecoderContext) -> DecodingOutput:
"""
Decode swap in Balancer v2. At the beginning of the transaction a SWAP event is created
with the information of the tokens and amounts and later some transfers are executed.
We need to detect this swap event and then match the transferred amounts with the ones
in the swap event. A special case is the swap of ETH that is wrapped before being sent.
In this case the token is WETH but we have a transfer of ETH from the user.
"""
if context.tx_log.topics[0] != V2_SWAP:
return DEFAULT_DECODING_OUTPUT
# The transfer event appears after the swap event, so we need to propagate information
from_token_address = hex_or_bytes_to_address(context.tx_log.topics[2])
to_token_address = hex_or_bytes_to_address(context.tx_log.topics[3])
amount_in = hex_or_bytes_to_int(context.tx_log.data[0:32])
amount_out = hex_or_bytes_to_int(context.tx_log.data[32:64])
# Create action item to propagate the information about the swap to the transfer enrichers
to_token = EvmToken(ethaddress_to_identifier(to_token_address))
to_amount = asset_normalized_value(
amount=amount_out,
asset=to_token,
)
action_item = ActionItem(
action='skip & keep',
sequence_index=context.tx_log.log_index,
from_event_type=HistoryEventType.RECEIVE,
from_event_subtype=HistoryEventSubType.NONE,
asset=to_token,
amount=to_amount,
to_event_type=None,
to_event_subtype=None,
to_counterparty=CPT_BALANCER_V2,
to_notes=None,
extra_data={
'from_token': from_token_address,
'amount_in': amount_in,
},
)
# When ETH is swapped it is wrapped to WETH and the ETH transfer happens before the SWAP
# event. We need to detect it if we haven't done it yet.
if len(context.action_items) == 0 and from_token_address == self.weth.evm_address:
# when swapping eth the transfer event appears before the V2_SWAP event so we need
# to check if the asset swapped was ETH or not.
amount_of_eth = asset_normalized_value(
amount=amount_in,
asset=self.eth,
)
for event in context.decoded_events:
if (
event.asset == A_ETH and event.balance.amount == amount_of_eth and
event.event_type == HistoryEventType.SPEND and
event.event_subtype == HistoryEventSubType.NONE
):
event.event_type = HistoryEventType.TRADE
event.event_subtype = HistoryEventSubType.SPEND
event.notes = f'Swap {event.balance.amount} {self.eth.symbol} in Balancer v2' # noqa: E501
event.counterparty = CPT_BALANCER_V2
return DecodingOutput(action_items=[action_item])
def _maybe_enrich_balancer_v2_transfers(
self,
context: EnricherContext,
) -> TransferEnrichmentOutput:
"""
Enrich transfer transactions to account for swaps in the Balancer v2 protocol.
May raise:
- UnknownAsset
- WrongAssetType
"""
if context.action_items is None or len(context.action_items) == 0 or context.transaction.to_address != VAULT_ADDRESS: # noqa: E501
return FAILED_ENRICHMENT_OUTPUT
if context.action_items[-1].extra_data is None:
return FAILED_ENRICHMENT_OUTPUT
asset = context.event.asset.resolve_to_evm_token()
if (
isinstance(context.action_items[-1].asset, EvmToken) is False or
context.action_items[-1].asset.evm_address != context.tx_log.address or  # type: ignore[attr-defined] # noqa: E501 mypy fails to understand that due to the previous statement in the or this check won't be evaluated if the asset isn't a token
context.action_items[-1].amount != context.event.balance.amount
):
return FAILED_ENRICHMENT_OUTPUT
context.event.counterparty = CPT_BALANCER_V2
context.event.event_type = HistoryEventType.TRADE
if asset == context.event.asset:
context.event.event_subtype = HistoryEventSubType.RECEIVE
context.event.notes = f'Receive {context.event.balance.amount} {asset.symbol} from Balancer v2' # noqa: E501
else:
context.event.event_subtype = HistoryEventSubType.SPEND
return TransferEnrichmentOutput(matched_counterparty=CPT_BALANCER_V2)
# -- DecoderInterface methods
def possible_events(self) -> DecoderEventMappingType:
return {
CPT_BALANCER_V2: {
HistoryEventType.TRADE: {
HistoryEventSubType.SPEND: EventCategory.SWAP_OUT,
HistoryEventSubType.RECEIVE: EventCategory.SWAP_IN,
},
},
}
def addresses_to_decoders(self) -> dict[ChecksumEvmAddress, tuple[Any, ...]]:
return {
VAULT_ADDRESS: (self.decode_swap_creation,),
}
def METHOD_NAME(self) -> list[Callable]:
return [
self._maybe_enrich_balancer_v2_transfers,
]
def counterparties(self) -> list[CounterpartyDetails]:
return [CounterpartyDetails(
identifier=CPT_BALANCER_V2,
label=BALANCER_LABEL,
image='balancer.svg',
)]
|
2,482 |
request
|
# Copyright (c) 2011 Jeff Garzik
#
# Previous copyright, from python-jsonrpc/jsonrpc/proxy.py:
#
# Copyright (c) 2007 Jan-Klaas Kollhof
#
# This file is part of jsonrpc.
#
# jsonrpc is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this software; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""HTTP proxy for opening RPC connection to bitcoind.
AuthServiceProxy has the following improvements over python-jsonrpc's
ServiceProxy class:
- HTTP connections persist for the life of the AuthServiceProxy object
(if server supports HTTP/1.1)
- sends protocol 'version', per JSON-RPC 1.1
- sends proper, incrementing 'id'
- sends Basic HTTP authentication headers
- parses all JSON numbers that look like floats as Decimal
- uses standard Python json lib
"""
import base64
import decimal
import http.client
import json
import logging
import socket
import time
import urllib.parse
HTTP_TIMEOUT = 30
USER_AGENT = "AuthServiceProxy/0.1"
log = logging.getLogger("BitcoinRPC")
class JSONRPCException(Exception):
def __init__(self, rpc_error):
try:
errmsg = '%(message)s (%(code)i)' % rpc_error
except (KeyError, TypeError):
errmsg = ''
super().__init__(errmsg)
self.error = rpc_error
def EncodeDecimal(o):
if isinstance(o, decimal.Decimal):
return str(o)
raise TypeError(repr(o) + " is not JSON serializable")
class AuthServiceProxy():
__id_count = 0
# ensure_ascii: escape unicode as \uXXXX, passed to json.dumps
def __init__(self, service_url, service_name=None, timeout=HTTP_TIMEOUT, connection=None, ensure_ascii=True):
self.__service_url = service_url
self._service_name = service_name
self.ensure_ascii = ensure_ascii # can be toggled on the fly by tests
self.__url = urllib.parse.urlparse(service_url)
port = 80 if self.__url.port is None else self.__url.port
user = None if self.__url.username is None else self.__url.username.encode('utf8')
passwd = None if self.__url.password is None else self.__url.password.encode('utf8')
authpair = user + b':' + passwd
self.__auth_header = b'Basic ' + base64.b64encode(authpair)
if connection:
# Callables re-use the connection of the original proxy
self.__conn = connection
elif self.__url.scheme == 'https':
self.__conn = http.client.HTTPSConnection(self.__url.hostname, port, timeout=timeout)
else:
self.__conn = http.client.HTTPConnection(self.__url.hostname, port, timeout=timeout)
def __getattr__(self, name):
if name.startswith('__') and name.endswith('__'):
# Python internal stuff
raise AttributeError
if self._service_name is not None:
name = "%s.%s" % (self._service_name, name)
return AuthServiceProxy(self.__service_url, name, connection=self.__conn)
def METHOD_NAME(self, method, path, postdata):
'''
Do an HTTP request, with retry if we get disconnected (e.g. due to a timeout).
This is a workaround for https://bugs.python.org/issue3566 which is fixed in Python 3.5.
'''
headers = {'Host': self.__url.hostname,
'User-Agent': USER_AGENT,
'Authorization': self.__auth_header,
'Content-type': 'application/json'}
try:
self.__conn.request(method, path, postdata, headers)
return self._get_response()
except http.client.BadStatusLine as e:
if e.line == "''": # if connection was closed, try again
self.__conn.close()
self.__conn.request(method, path, postdata, headers)
return self._get_response()
else:
raise
except (BrokenPipeError, ConnectionResetError):
# Python 3.5+ raises BrokenPipeError instead of BadStatusLine when the connection was reset
# ConnectionResetError happens on FreeBSD with Python 3.4
self.__conn.close()
self.__conn.request(method, path, postdata, headers)
return self._get_response()
def get_request(self, *args, **argsn):
AuthServiceProxy.__id_count += 1
log.debug("-%s-> %s %s" % (AuthServiceProxy.__id_count, self._service_name,
json.dumps(args, default=EncodeDecimal, ensure_ascii=self.ensure_ascii)))
if args and argsn:
raise ValueError('Cannot handle both named and positional arguments')
return {'version': '1.1',
'method': self._service_name,
'params': args or argsn,
'id': AuthServiceProxy.__id_count}
def __call__(self, *args, **argsn):
postdata = json.dumps(self.get_request(*args, **argsn), default=EncodeDecimal, ensure_ascii=self.ensure_ascii)
response = self.METHOD_NAME('POST', self.__url.path, postdata.encode('utf-8'))
if response['error'] is not None:
raise JSONRPCException(response['error'])
elif 'result' not in response:
raise JSONRPCException({
'code': -343, 'message': 'missing JSON-RPC result'})
else:
return response['result']
def batch(self, rpc_call_list):
postdata = json.dumps(list(rpc_call_list), default=EncodeDecimal, ensure_ascii=self.ensure_ascii)
log.debug("--> " + postdata)
return self.METHOD_NAME('POST', self.__url.path, postdata.encode('utf-8'))
def _get_response(self):
req_start_time = time.time()
try:
http_response = self.__conn.getresponse()
except socket.timeout:
raise JSONRPCException({
'code': -344,
'message': '%r RPC took longer than %f seconds. Consider '
'using larger timeout for calls that take '
'longer to return.' % (self._service_name,
self.__conn.timeout)})
if http_response is None:
raise JSONRPCException({
'code': -342, 'message': 'missing HTTP response from server'})
content_type = http_response.getheader('Content-Type')
if content_type != 'application/json':
raise JSONRPCException({
'code': -342, 'message': 'non-JSON HTTP response with \'%i %s\' from server' % (http_response.status, http_response.reason)})
responsedata = http_response.read().decode('utf8')
response = json.loads(responsedata, parse_float=decimal.Decimal)
elapsed = time.time() - req_start_time
if "error" in response and response["error"] is None:
log.debug("<-%s- [%.6f] %s" % (response["id"], elapsed, json.dumps(response["result"], default=EncodeDecimal, ensure_ascii=self.ensure_ascii)))
else:
log.debug("<-- [%.6f] %s" % (elapsed, responsedata))
return response
def __truediv__(self, relative_uri):
return AuthServiceProxy("{}/{}".format(self.__service_url, relative_uri), self._service_name, connection=self.__conn)
|
2,483 |
pyinit
|
#-------------------------------------------------------------------------------
# PorousStrengthModel
#-------------------------------------------------------------------------------
from PYB11Generator import *
from StrengthModel import *
from StrengthModelAbstractMethods import *
@PYB11template("Dimension")
@PYB11module("SpheralSolidMaterial")
class PorousStrengthModel(StrengthModel):
"""PorousStrengthModel
An implementation of strain-alpha porosity model described in
Wunnemann, Collins, & Melosh, Icarus, 180, 514-527 (2006)
"A strain-based porosity model for use in hydrocode simulations of impacts
and implications for transient crater growth in porous targets"
This model assumes you will provide a solid EOS which will be modified.
The underlying actual solid EOS should provide the reference density, which
will be treated here as the compacted true solid reference density.
Note this model introduces a new state variable, the distention (alpha), which
the pressure now depends on. This implies our usual definition of P(rho, eps)
now becomes P(rho, eps, alpha). Our EOS interface does not recognize this
parameter, so we store alpha locally and only allow Field updates of the
pressure (forbidding the single value P lookup the EOS usually allows)."""
PYB11typedefs = """
using Scalar = typename %(Dimension)s::Scalar;
using SymTensor = typename %(Dimension)s::SymTensor;
using ScalarField = Field<%(Dimension)s, Scalar>;
"""
#...........................................................................
# Constructors
def METHOD_NAME(self,
solidStrength = "const StrengthModel<%(Dimension)s>&"):
"Construct with the strength model we're modifying"
#...........................................................................
# Virtual methods
@PYB11virtual
@PYB11const
def providesSoundSpeed(self):
return "bool"
@PYB11virtual
@PYB11const
def providesBulkModulus(self):
return "bool"
@PYB11virtual
@PYB11const
def soundSpeed(self,
soundSpeed = "Field<%(Dimension)s, Scalar>&",
density = "const Field<%(Dimension)s, Scalar>&",
specificThermalEnergy = "const Field<%(Dimension)s, Scalar>&",
pressure = "const Field<%(Dimension)s, Scalar>&",
fluidSoundSpeed = "const Field<%(Dimension)s, Scalar>&",
damage = "const Field<%(Dimension)s, SymTensor>&"):
return "void"
@PYB11virtual
@PYB11const
def bulkModulus(self,
bulkModulus = "Field<%(Dimension)s, Scalar>&",
massDensity = "const Field<%(Dimension)s, Scalar>&",
specificThermalEnergy = "const Field<%(Dimension)s, Scalar>&"):
return "void"
@PYB11virtual
@PYB11const
def meltSpecificEnergy(self,
meltSpecificEnergy = "Field<%(Dimension)s, Scalar>&",
density = "const Field<%(Dimension)s, Scalar>&",
specificThermalEnergy = "const Field<%(Dimension)s, Scalar>&"):
return "void"
@PYB11virtual
@PYB11const
def coldSpecificEnergy(self,
coldSpecificEnergy = "Field<%(Dimension)s, Scalar>&",
density = "const Field<%(Dimension)s, Scalar>&",
specificThermalEnergy = "const Field<%(Dimension)s, Scalar>&"):
return "void"
#...........................................................................
# Properties
solidStrength = PYB11property("const StrengthModel<%(Dimension)s>&", returnpolicy="reference_internal")
alpha = PYB11property("const Field<%(Dimension)s, Scalar>&", "alpha", "alpha", returnpolicy="reference_internal")
#-------------------------------------------------------------------------------
# Inject abstract interface
#-------------------------------------------------------------------------------
PYB11inject(StrengthModelAbstractMethods, PorousStrengthModel, virtual=True, pure_virtual=False)
|
2,484 |
test or
|
from itertools import islice
import pytest
from pkgcore.ebuild.eapi import get_eapi
from pkgcore.ebuild.ebuild_src import base as ebuild
from pkgcore.restrictions.required_use import find_constraint_satisfaction as solver
def parse(required_use):
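# Build a throwaway ebuild object with EAPI 8 so its REQUIRED_USE string is parsed into restriction objects.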
o = ebuild(None, "dev-util/diffball-0.1-r1")
object.__setattr__(o, "eapi", get_eapi("8", suppress_unsupported=True))
object.__setattr__(o, "data", {"REQUIRED_USE": required_use})
return o.required_use
def test_simple():
required_use = parse(required_use="bar foo")
assert tuple(solver(required_use, {"bar", "foo"})) == ({"bar": True, "foo": True},)
def test_negative_simple():
required_use = parse(required_use="!bar foo")
assert tuple(solver(required_use, {"bar", "foo"})) == ({"bar": False, "foo": True},)
def test_missing_iuse():
required_use = parse(required_use="!bar foo? ( bar )")
assert tuple(solver(required_use, {"bar"})) == ({"bar": False, "foo": False},)
@pytest.mark.parametrize(
("required_use", "exclude"),
(
("bar? ( foo )", {"bar": True, "foo": False}),
("bar? ( !foo )", {"bar": True, "foo": True}),
("!bar? ( foo )", {"bar": False, "foo": False}),
("!bar? ( !foo )", {"bar": False, "foo": True}),
),
)
def test_condition(required_use, exclude):
required_use = parse(required_use=required_use)
solutions = tuple(solver(required_use, {"bar", "foo"}))
assert len(solutions) == 3
assert exclude not in solutions
@pytest.mark.parametrize(
("required_use", "exclude"),
(
("?? ( bar foo )", {"bar": True, "foo": True}),
("?? ( !bar foo )", {"bar": False, "foo": True}),
("?? ( bar !foo )", {"bar": True, "foo": False}),
("?? ( !bar !foo )", {"bar": False, "foo": False}),
),
)
def test_at_most(required_use, exclude):
required_use = parse(required_use=required_use)
solutions = tuple(solver(required_use, {"bar", "foo"}))
assert len(solutions) == 3
assert exclude not in solutions
@pytest.mark.parametrize(
("required_use", "exclude"),
(
("|| ( bar foo )", {"bar": False, "foo": False}),
("|| ( !bar foo )", {"bar": True, "foo": False}),
("|| ( bar !foo )", {"bar": False, "foo": True}),
("|| ( !bar !foo )", {"bar": True, "foo": True}),
),
)
def METHOD_NAME(required_use, exclude):
required_use = parse(required_use=required_use)
solutions = tuple(solver(required_use, {"bar", "foo"}))
assert len(solutions) == 3
assert exclude not in solutions
@pytest.mark.parametrize(
("required_use", "include"),
(
("bar foo", {"bar": True, "foo": True}),
("!bar foo", {"bar": False, "foo": True}),
("bar !foo", {"bar": True, "foo": False}),
("!bar !foo", {"bar": False, "foo": False}),
),
)
def test_and(required_use, include):
required_use = parse(required_use=required_use)
solutions = tuple(solver(required_use, {"bar", "foo"}))
assert solutions == (include,)
@pytest.mark.parametrize(
("required_use", "iuse", "force_true"),
(
pytest.param(
"test? ( jpeg jpeg2k tiff truetype )",
{
"examples",
"imagequant",
"jpeg",
"jpeg2k",
"lcms",
"test",
"tiff",
"tk",
"truetype",
"webp",
"xcb",
"zlib",
},
{"test"},
id="pillow",
),
pytest.param(
"test? ( cuda gpl? ( openssl? ( bindist ) fdk? ( bindist ) ) ) cuda? ( nvenc ) ^^ ( openssl fdk )",
{"cuda", "gpl", "openssl", "bindist", "fdk", "test", "nvenc"},
{"test", "fdk"},
id="ffmpeg",
),
pytest.param(
"|| ( openssl ( gnutls ssl ) ) ssl? ( ( gnutls openssl ) )",
{"openssl", "gnutls", "ssl"},
{"ssl"},
id="weird",
),
pytest.param(
"|| ( ssl ( gnutls? ( openssl ) ) )",
{"openssl", "gnutls", "ssl"},
{"gnutls"},
id="weird2",
),
),
)
def test_complex_force_true(required_use, iuse, force_true):
required_use = parse(required_use=required_use)
solution = None
for solution in islice(solver(required_use, iuse, force_true=force_true), 20):
assert all(solution[flag] for flag in force_true)
use_flags = tuple(k for k, v in solution.items() if v)
misses = [
restrict
for restrict in required_use.evaluate_depset(use_flags)
if not restrict.match(use_flags)
]
assert not misses
assert solution is not None
@pytest.mark.parametrize(
("required_use", "iuse", "force_false"),
(
pytest.param(
"|| ( openssl ( gnutls ssl ) )",
{"openssl", "gnutls", "ssl"},
{"openssl"},
id="custom",
),
),
)
def test_complex_force_false(required_use, iuse, force_false):
required_use = parse(required_use=required_use)
solution = None
for solution in islice(solver(required_use, iuse, force_false=force_false), 20):
assert all(not solution[flag] for flag in force_false)
use_flags = tuple(k for k, v in solution.items() if v)
misses = [
restrict
for restrict in required_use.evaluate_depset(use_flags)
if not restrict.match(use_flags)
]
assert not misses
assert solution is not None
|
2,485 |
init ui
|
# (C) Copyright 2004-2023 Enthought, Inc., Austin, TX
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only under
# the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
""" Defines an ArrayViewEditor for displaying 1-d or 2-d arrays of values.
"""
# -- Imports --------------------------------------------------------------
from traits.api import Instance, Property, List, Str, Bool
from ..api import View, Item, TabularEditor, BasicEditorFactory
from ..tabular_adapter import TabularAdapter
from ..toolkit import toolkit_object
from ..toolkit_traits import Font
from ..ui_editor import UIEditor
# -- Tabular Adapter Definition -------------------------------------------
class ArrayViewAdapter(TabularAdapter):
#: Is the array 1D or 2D?
is_2d = Bool(True)
#: Should array rows and columns be transposed:
transpose = Bool(False)
alignment = "right"
index_text = Property()
def _get_index_text(self):
return str(self.row)
def _get_content(self):
if self.is_2d:
return self.item[self.column_id]
return self.item
def get_item(self, object, trait, row):
"""Returns the value of the *object.trait[row]* item."""
if self.is_2d:
if self.transpose:
return getattr(object, trait)[:, row]
return super().get_item(object, trait, row)
return getattr(object, trait)[row]
def len(self, object, trait):
"""Returns the number of items in the specified *object.trait* list."""
if self.transpose:
return getattr(object, trait).shape[1]
return super().len(object, trait)
# Define the actual abstract Traits UI array view editor (each backend should
# implement its own editor that inherits from this class).
class _ArrayViewEditor(UIEditor):
#: Indicate that the editor is scrollable/resizable:
scrollable = True
#: Should column titles be displayed:
show_titles = Bool(False)
#: The tabular adapter being used for the editor view:
adapter = Instance(ArrayViewAdapter)
# -- Private Methods ------------------------------------------------------
def _array_view(self):
"""Return the view used by the editor."""
return View(
Item(
"object.object." + self.name,
id="tabular_editor",
show_label=False,
editor=TabularEditor(
show_titles=self.show_titles,
editable=False,
adapter=self.adapter,
),
),
id="array_view_editor",
resizable=True,
)
def METHOD_NAME(self, parent):
"""Creates the Traits UI for displaying the array."""
# Make sure that the value is an array of the correct shape:
shape = self.value.shape
len_shape = len(shape)
if (len_shape == 0) or (len_shape > 2):
raise ValueError(
"ArrayViewEditor can only display 1D or 2D " "arrays"
)
factory = self.factory
cols = 1
titles = factory.titles
n = len(titles)
self.show_titles = n > 0
is_2d = len_shape == 2
if is_2d:
index = 1
if factory.transpose:
index = 0
cols = shape[index]
if self.show_titles:
if n > cols:
titles = titles[:cols]
elif n < cols:
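# Fewer titles than columns: if the column count is an exact multiple, repeat the given titles with numeric suffixes; otherwise pad with blank titles.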
if (cols % n) == 0:
titles, old_titles, i = [], titles, 0
while len(titles) < cols:
titles.extend(
"%s%d" % (title, i) for title in old_titles
)
i += 1
else:
titles.extend([""] * (cols - n))
else:
titles = ["Data %d" % i for i in range(cols)]
columns = [(title, i) for i, title in enumerate(titles)]
if factory.show_index:
columns.insert(0, ("Index", "index"))
self.adapter = ArrayViewAdapter(
is_2d=is_2d,
columns=columns,
transpose=factory.transpose,
format=factory.format,
font=factory.font,
)
return self.edit_traits(
view="_array_view", parent=parent, kind="subpanel"
)
# Define the ArrayViewEditor class used by client code:
class ArrayViewEditor(BasicEditorFactory):
#: The editor implementation class:
klass = Property()
#: Should an index column be displayed:
show_index = Bool(True)
#: List of (optional) column titles:
titles = List(Str)
#: Should the array be logically transposed:
transpose = Bool(False)
#: The format used to display each array element:
format = Str("%s")
#: The font to use for displaying each array element:
font = Font("Courier 10")
def _get_klass(self):
"""The class used to construct editor objects."""
return toolkit_object("array_view_editor:_ArrayViewEditor")
|
2,486 |
set up
|
"""Integration tests for covidcast's metadata caching."""
# standard library
import json
import unittest
# third party
import mysql.connector
import requests
# first party
from delphi_utils import Nans
from delphi.epidata.client.delphi_epidata import Epidata
import delphi.operations.secrets as secrets
import delphi.epidata.acquisition.covidcast.database as live
from delphi.epidata.maintenance.covidcast_meta_cache_updater import main
# py3tester coverage target (equivalent to `import *`)
__test_target__ = (
'delphi.epidata.acquisition.covidcast.'
'covidcast_meta_cache_updater'
)
# use the local instance of the Epidata API
BASE_URL = 'http://delphi_web_epidata/epidata/api.php'
class CovidcastMetaCacheTests(unittest.TestCase):
"""Tests covidcast metadata caching."""
def METHOD_NAME(self):
"""Perform per-test setup."""
# connect to the `epidata` database
cnx = mysql.connector.connect(
user='user',
password='pass',
host='delphi_database_epidata',
database='covid')
cur = cnx.cursor()
# clear all tables
cur.execute("truncate table epimetric_load")
cur.execute("truncate table epimetric_full")
cur.execute("truncate table epimetric_latest")
cur.execute("truncate table geo_dim")
cur.execute("truncate table signal_dim")
# reset the `covidcast_meta_cache` table (it should always have one row)
cur.execute('update covidcast_meta_cache set timestamp = 0, epidata = "[]"')
cnx.commit()
cur.close()
# make connection and cursor available to test cases
self.cnx = cnx
self.cur = cnx.cursor()
# use the local instance of the epidata database
secrets.db.host = 'delphi_database_epidata'
secrets.db.epi = ('user', 'pass')
# use the local instance of the Epidata API
Epidata.BASE_URL = BASE_URL
Epidata.auth = ('epidata', 'key')
def tearDown(self):
"""Perform per-test teardown."""
self.cur.close()
self.cnx.close()
@staticmethod
def _make_request():
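# Query the covidcast_meta endpoint directly with cached=true so the raw cached payload is returned.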
params = {'endpoint': 'covidcast_meta', 'cached': 'true'}
response = requests.get(Epidata.BASE_URL, params=params, auth=Epidata.auth)
response.raise_for_status()
return response.json()
def test_caching(self):
"""Populate, query, cache, query, and verify the cache."""
# insert dummy data
self.cur.execute(f'''
INSERT INTO `signal_dim` (`signal_key_id`, `source`, `signal`)
VALUES
(42, 'src', 'sig');
''')
self.cur.execute(f'''
INSERT INTO `geo_dim` (`geo_key_id`, `geo_type`, `geo_value`)
VALUES
(96, 'state', 'pa'),
(97, 'state', 'wa');
''')
self.cur.execute(f'''
INSERT INTO
`epimetric_latest` (`epimetric_id`, `signal_key_id`, `geo_key_id`, `time_type`,
`time_value`, `value_updated_timestamp`,
`value`, `stderr`, `sample_size`,
`issue`, `lag`, `missing_value`,
`missing_stderr`,`missing_sample_size`)
VALUES
(15, 42, 96, 'day', 20200422,
123, 1, 2, 3, 20200422, 0, {Nans.NOT_MISSING}, {Nans.NOT_MISSING}, {Nans.NOT_MISSING}),
(16, 42, 97, 'day', 20200422,
789, 1, 2, 3, 20200423, 1, {Nans.NOT_MISSING}, {Nans.NOT_MISSING}, {Nans.NOT_MISSING})
''')
self.cnx.commit()
# make sure the live utility is serving something sensible
cvc_database = live.Database()
cvc_database.connect()
epidata1 = cvc_database.compute_covidcast_meta()
cvc_database.disconnect(False)
self.assertEqual(len(epidata1),1)
self.assertEqual(epidata1, [
{
'data_source': 'src',
'signal': 'sig',
'time_type': 'day',
'geo_type': 'state',
'min_time': 20200422,
'max_time': 20200422,
'num_locations': 2,
'last_update': 789,
'min_value': 1,
'max_value': 1,
'mean_value': 1,
'stdev_value': 0,
'max_issue': 20200423,
'min_lag': 0,
'max_lag': 1,
}
])
epidata1={'result':1, 'message':'success', 'epidata':epidata1}
# make sure the API covidcast_meta is still blank, since it only serves
# the cached version and we haven't cached anything yet
epidata2 = Epidata.covidcast_meta()
self.assertEqual(epidata2['result'], -2, json.dumps(epidata2))
# update the cache
args = None
main(args)
# fetch the cached version
epidata3 = Epidata.covidcast_meta()
# cached version should now equal live version
self.assertEqual(epidata1, epidata3)
# insert dummy data timestamped as of now
self.cur.execute('''
update covidcast_meta_cache set
timestamp = UNIX_TIMESTAMP(NOW()),
epidata = '[{"hello": "world"}]'
''')
self.cnx.commit()
# fetch the cached version (manually)
epidata4 = self._make_request()
# make sure the cache was actually served
self.assertEqual(epidata4, {
'result': 1,
'epidata': [{
'hello': 'world',
}],
'message': 'success',
})
# insert dummy data timestamped as 2 hours old
self.cur.execute('''
update covidcast_meta_cache set
timestamp = UNIX_TIMESTAMP(NOW()) - 3600 * 2,
epidata = '[{"hello": "world"}]'
''')
self.cnx.commit()
# fetch the cached version (manually)
epidata5 = self._make_request()
# make sure the cache was returned anyhow
self.assertEqual(epidata4, epidata5)
|
2,487 |
default resolver
|
import sys
import warnings
from . import constants
from .exceptions import AsdfDeprecationWarning
class Resolver:
"""
A class that can be used to map strings with a particular prefix
to another.
"""
def __init__(self, mappings, prefix):
"""
Parameters
----------
mappings : list of tuple or callable
A list of mappings to try, in order.
For each entry:
- If a callable, must take a string and return a remapped
string. Should return `None` if the mapping does not
apply to the input.
- If a tuple, the first item is a string prefix to match.
The second item specifies how to create the new result
in Python string formatting syntax. The following
formatting tokens are available, where ``X`` relates to
the ``prefix`` argument:
- ``{X}``: The entire string passed in.
- ``{X_prefix}``: The prefix of the string that was
matched.
- ``{X_suffix}``: The part of the string following the
prefix.
prefix : str
The prefix to use for the Python formatting token names.
"""
self._mappings = self._validate_mappings(mappings)
self._prefix = prefix
def add_mapping(self, mappings, prefix=""):
# Deprecating this because Resolver is used as part of a dictionary key
# and so shouldn't be mutable.
warnings.warn("The 'add_mapping' method is deprecated.", AsdfDeprecationWarning)
if prefix != self._prefix:
msg = f"Prefix '{prefix}' does not match the Resolver prefix '{self._prefix}'"
raise ValueError(msg)
self._mappings = self._mappings + self._validate_mappings(mappings)
def _perform_mapping(self, mapping, input_):
if callable(mapping):
output = mapping(input_)
if output is not None:
return (sys.maxsize, output)
return None
if input_.startswith(mapping[0]):
format_tokens = {
self._prefix: input_,
self._prefix + "_prefix": mapping[0],
self._prefix + "_suffix": input_[len(mapping[0]) :],
}
return len(mapping[0]), mapping[1].format(**format_tokens)
return None
def _validate_mappings(self, mappings):
normalized = []
for mapping in mappings:
if callable(mapping):
normalized.append(mapping)
elif (
isinstance(mapping, (list, tuple))
and len(mapping) == 2
and isinstance(mapping[0], str)
and isinstance(mapping[1], str)
):
normalized.append(tuple(mapping))
else:
msg = f"Invalid mapping '{mapping}'"
raise ValueError(msg)
return tuple(normalized)
def __call__(self, input_):
candidates = [(0, input_)]
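# Each candidate is a (specificity, result) pair: prefix mappings score by prefix length and callables by sys.maxsize, so the most specific match wins after sorting; the (0, input_) entry keeps unmatched strings unchanged.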
for mapping in self._mappings:
output = self._perform_mapping(mapping, input_)
if output is not None:
candidates.append(output)
candidates.sort()
return candidates[-1][1]
def __hash__(self):
return hash(self._mappings)
def __eq__(self, other):
if not isinstance(other, Resolver):
return NotImplemented
return self._mappings == other._mappings
class ResolverChain:
"""
A chain of Resolvers, each of which is called with the previous Resolver's
output to produce the final transformed string.
"""
def __init__(self, *resolvers):
"""
Parameters
----------
*resolvers : list of Resolver
Resolvers to include in the chain.
"""
self._resolvers = tuple(resolvers)
def __call__(self, input_):
for resolver in self._resolvers:
input_ = resolver(input_)
return input_
def __hash__(self):
return hash(self._resolvers)
def __eq__(self, other):
if not isinstance(other, ResolverChain):
return NotImplemented
return self._resolvers == other._resolvers
DEFAULT_URL_MAPPING = []
DEFAULT_TAG_TO_URL_MAPPING = [(constants.STSCI_SCHEMA_TAG_BASE, "http://stsci.edu/schemas/asdf{tag_suffix}")]
def default_url_mapping(uri):
warnings.warn("'default_url_mapping' is deprecated.", AsdfDeprecationWarning)
return default_url_mapping._resolver(uri)
default_url_mapping._resolver = Resolver(DEFAULT_URL_MAPPING, "url")
def default_tag_to_url_mapping(uri):
warnings.warn("'default_tag_to_url_mapping' is deprecated.", AsdfDeprecationWarning)
return default_tag_to_url_mapping._resolver(uri)
default_tag_to_url_mapping._resolver = Resolver(DEFAULT_TAG_TO_URL_MAPPING, "tag")
def METHOD_NAME(uri):
warnings.warn(
"The 'default_resolver(...)' function is deprecated. Use 'asdf.extension.get_default_resolver()(...)' instead.",
AsdfDeprecationWarning,
)
return METHOD_NAME._resolver(uri)
METHOD_NAME._resolver = ResolverChain(default_tag_to_url_mapping._resolver, default_url_mapping._resolver)
|
2,488 |
refresh state
|
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
from typing import ClassVar, Dict, Generic, Optional, Tuple, Type, TypeVar, TYPE_CHECKING, Any, Union
import re
from .item import Item
from .._types import ClientT
__all__ = ('DynamicItem',)
BaseT = TypeVar('BaseT', bound='Item[Any]', covariant=True)
if TYPE_CHECKING:
from typing_extensions import TypeVar, Self
from ..interactions import Interaction
from ..components import Component
from ..enums import ComponentType
from .view import View
V = TypeVar('V', bound='View', covariant=True, default=View)
else:
V = TypeVar('V', bound='View', covariant=True)
class DynamicItem(Generic[BaseT], Item['View']):
"""Represents an item with a dynamic ``custom_id`` that can be used to store state within
that ``custom_id``.
The ``custom_id`` parsing is done using the ``re`` module by passing a ``template``
parameter to the class parameter list.
This item is generated every time the component is dispatched. This means that
any variable that holds an instance of this class will eventually be out of date
and should not be used long term. Their only purpose is to act as a "template"
for the actual dispatched item.
When this item is generated, :attr:`view` is set to a regular :class:`View` instance
from the original message given from the interaction. This means that custom view
subclasses cannot be accessed from this item.
.. versionadded:: 2.4
Parameters
------------
item: :class:`Item`
The item to wrap with dynamic custom ID parsing.
template: Union[:class:`str`, ``re.Pattern``]
The template to use for parsing the ``custom_id``. This can be a string or a compiled
regular expression. This must be passed as a keyword argument to the class creation.
row: Optional[:class:`int`]
The relative row this button belongs to. A Discord component can only have 5
rows. By default, items are arranged automatically into those 5 rows. If you'd
like to control the relative positioning of the row then passing an index is advised.
For example, row=1 will show up before row=2. Defaults to ``None``, which is automatic
ordering. The row number must be between 0 and 4 (i.e. zero indexed).
Attributes
-----------
item: :class:`Item`
The item that is wrapped with dynamic custom ID parsing.
"""
__item_repr_attributes__: Tuple[str, ...] = (
'item',
'template',
)
__discord_ui_compiled_template__: ClassVar[re.Pattern[str]]
def __init_subclass__(cls, *, template: Union[str, re.Pattern[str]]) -> None:
super().__init_subclass__()
cls.__discord_ui_compiled_template__ = re.compile(template) if isinstance(template, str) else template
if not isinstance(cls.__discord_ui_compiled_template__, re.Pattern):
raise TypeError('template must be a str or a re.Pattern')
def __init__(
self,
item: BaseT,
*,
row: Optional[int] = None,
) -> None:
super().__init__()
self.item: BaseT = item
self.row = row
if not self.item.is_dispatchable():
raise TypeError('item must be dispatchable, e.g. not a URL button')
if not self.template.match(self.custom_id):
raise ValueError(f'item custom_id must match the template {self.template.pattern!r}')
@property
def template(self) -> re.Pattern[str]:
"""``re.Pattern``: The compiled regular expression that is used to parse the ``custom_id``."""
return self.__class__.__discord_ui_compiled_template__
def to_component_dict(self) -> Dict[str, Any]:
return self.item.to_component_dict()
def _refresh_component(self, component: Component) -> None:
self.item._refresh_component(component)
def METHOD_NAME(self, interaction: Interaction, data: Dict[str, Any]) -> None:
self.item.METHOD_NAME(interaction, data)
@classmethod
def from_component(cls: Type[Self], component: Component) -> Self:
raise TypeError('Dynamic items cannot be created from components')
@property
def type(self) -> ComponentType:
return self.item.type
def is_dispatchable(self) -> bool:
return self.item.is_dispatchable()
def is_persistent(self) -> bool:
return True
@property
def custom_id(self) -> str:
""":class:`str`: The ID of the dynamic item that gets received during an interaction."""
return self.item.custom_id # type: ignore # This attribute exists for dispatchable items
@custom_id.setter
def custom_id(self, value: str) -> None:
if not isinstance(value, str):
raise TypeError('custom_id must be a str')
if not self.template.match(value):
raise ValueError(f'custom_id must match the template {self.template.pattern!r}')
self.item.custom_id = value # type: ignore # This attribute exists for dispatchable items
self._provided_custom_id = True
@property
def row(self) -> Optional[int]:
return self.item._row
@row.setter
def row(self, value: Optional[int]) -> None:
self.item.row = value
@property
def width(self) -> int:
return self.item.width
@classmethod
async def from_custom_id(
cls: Type[Self], interaction: Interaction[ClientT], item: Item[Any], match: re.Match[str], /
) -> Self:
"""|coro|
A classmethod that is called when the ``custom_id`` of a component matches the
``template`` of the class. This is called when the component is dispatched.
It must return a new instance of the :class:`DynamicItem`.
Subclasses *must* implement this method.
Exceptions raised in this method are logged and ignored.
.. warning::
This method is called before the callback is dispatched, therefore
it means that it is subject to the same timing restrictions as the callback.
Ergo, you must reply to an interaction within 3 seconds of it being
dispatched.
Parameters
------------
interaction: :class:`~discord.Interaction`
The interaction that the component belongs to.
item: :class:`~discord.ui.Item`
The base item that is being dispatched.
match: ``re.Match``
The match object that was created from the ``template``
matching the ``custom_id``.
Returns
--------
:class:`DynamicItem`
The new instance of the :class:`DynamicItem` with information
from the ``match`` object.
"""
raise NotImplementedError
|
2,489 |
get function defaults
|
import builtins
import operator
import types
import unittest
from _typeshed import IdentityFunction, Unused, _KT_contra, _VT_co
from builtins import next as next
from collections.abc import Callable, ItemsView, Iterable, Iterator as _Iterator, KeysView, Mapping, ValuesView
from functools import wraps as wraps
from importlib.util import spec_from_loader as spec_from_loader
from io import BytesIO as BytesIO, StringIO as StringIO
from re import Pattern
from typing import Any, AnyStr, NoReturn, Protocol, TypeVar, overload
from typing_extensions import Literal
from six import moves as moves
# TODO: We should switch to the _typeshed version of SupportsGetItem
# once mypy updates its vendored copy of typeshed and makes a new release
class _SupportsGetItem(Protocol[_KT_contra, _VT_co]):
def __contains__(self, __x: Any) -> bool: ...
def __getitem__(self, __key: _KT_contra) -> _VT_co: ...
_T = TypeVar("_T")
_K = TypeVar("_K")
_V = TypeVar("_V")
__author__: str
__version__: str
PY2: Literal[False]
PY3: Literal[True]
PY34: Literal[True]
string_types: tuple[type[str]]
integer_types: tuple[type[int]]
class_types: tuple[type[type]]
text_type = str
binary_type = bytes
MAXSIZE: int
callable = builtins.callable
def get_unbound_function(unbound: types.FunctionType) -> types.FunctionType: ...
create_bound_method = types.MethodType
def create_unbound_method(func: types.FunctionType, cls: type) -> types.FunctionType: ...
Iterator = object
def get_method_function(meth: types.MethodType) -> types.FunctionType: ...
def get_method_self(meth: types.MethodType) -> object: ...
def get_function_closure(fun: types.FunctionType) -> tuple[types._Cell, ...] | None: ...
def get_function_code(fun: types.FunctionType) -> types.CodeType: ...
def METHOD_NAME(fun: types.FunctionType) -> tuple[Any, ...] | None: ...
def get_function_globals(fun: types.FunctionType) -> dict[str, Any]: ...
def iterkeys(d: Mapping[_K, Any]) -> _Iterator[_K]: ...
def itervalues(d: Mapping[Any, _V]) -> _Iterator[_V]: ...
def iteritems(d: Mapping[_K, _V]) -> _Iterator[tuple[_K, _V]]: ...
def viewkeys(d: Mapping[_K, Any]) -> KeysView[_K]: ...
def viewvalues(d: Mapping[Any, _V]) -> ValuesView[_V]: ...
def viewitems(d: Mapping[_K, _V]) -> ItemsView[_K, _V]: ...
def b(s: str) -> bytes: ...
def u(s: str) -> str: ...
unichr = chr
def int2byte(i: int) -> bytes: ...
# Should be `byte2int: operator.itemgetter[int]`. But a bug in mypy prevents using TypeVar in itemgetter.__call__
def byte2int(obj: _SupportsGetItem[int, _T]) -> _T: ...
indexbytes = operator.getitem
iterbytes = iter
def assertCountEqual(self: unittest.TestCase, first: Iterable[_T], second: Iterable[_T], msg: str | None = ...) -> None: ...
@overload
def assertRaisesRegex(self: unittest.TestCase, msg: str | None = ...) -> Any: ...
@overload
def assertRaisesRegex(self: unittest.TestCase, callable_obj: Callable[..., object], *args: Any, **kwargs: Any) -> Any: ...
def assertRegex(self: unittest.TestCase, text: AnyStr, expected_regex: AnyStr | Pattern[AnyStr], msg: Any = ...) -> None: ...
def assertNotRegex(self: unittest.TestCase, text: AnyStr, expected_regex: AnyStr | Pattern[AnyStr], msg: Any = ...) -> None: ...
exec_ = exec
def reraise(tp: type[BaseException] | None, value: BaseException | None, tb: types.TracebackType | None = None) -> NoReturn: ...
def raise_from(value: BaseException | type[BaseException], from_value: BaseException | None) -> NoReturn: ...
print_ = print
def with_metaclass(meta: type, *bases: type) -> type: ...
def add_metaclass(metaclass: type) -> IdentityFunction: ...
def ensure_binary(s: bytes | str, encoding: str = "utf-8", errors: str = "strict") -> bytes: ...
def ensure_str(s: bytes | str, encoding: str = "utf-8", errors: str = "strict") -> str: ...
def ensure_text(s: bytes | str, encoding: str = "utf-8", errors: str = "strict") -> str: ...
def python_2_unicode_compatible(klass: _T) -> _T: ...
class _LazyDescr:
name: str
def __init__(self, name: str) -> None: ...
def __get__(self, obj: object, tp: Unused) -> Any: ...
class MovedModule(_LazyDescr):
mod: str
def __init__(self, name: str, old: str, new: str | None = None) -> None: ...
def __getattr__(self, attr: str) -> Any: ...
class MovedAttribute(_LazyDescr):
mod: str
attr: str
def __init__(
self, name: str, old_mod: str, new_mod: str, old_attr: str | None = None, new_attr: str | None = None
) -> None: ...
def add_move(move: MovedModule | MovedAttribute) -> None: ...
def remove_move(name: str) -> None: ...
|
2,490 |
refresh columns
|
# A part of NonVisual Desktop Access (NVDA)
# Copyright (C) 2022-2023 NV Access Limited
# This file is covered by the GNU General Public License.
# See the file COPYING for more details.
from typing import (
Optional,
)
import wx
from gui import (
guiHelper,
nvdaControls,
)
from gui.dpiScalingHelper import DpiScalingHelperMixinWithoutInit
from logHandler import log
from .actions import _ActionsContextMenu
from ..viewModels.addonList import AddonListVM
class AddonVirtualList(
nvdaControls.AutoWidthColumnListCtrl,
DpiScalingHelperMixinWithoutInit,
):
def __init__(
self,
parent: wx.Window,
addonsListVM: AddonListVM,
actionsContextMenu: _ActionsContextMenu,
):
super().__init__(
parent,
style=(
wx.LC_REPORT # Single or multicolumn report view, with optional header.
| wx.LC_VIRTUAL # The application provides items text on demand. May only be used with LC_REPORT.
| wx.LC_SINGLE_SEL # Single selection (default is multiple).
| wx.LC_HRULES # Draws light horizontal rules between rows in report mode.
| wx.LC_VRULES # Draws light vertical rules between columns in report mode.
),
autoSizeColumn=1,
)
self._addonsListVM = addonsListVM
self._actionsContextMenu = actionsContextMenu
self.SetMinSize(self.scaleSize((500, 500)))
self.METHOD_NAME()
self.Bind(wx.EVT_LIST_ITEM_SELECTED, self.OnItemSelected)
self.Bind(wx.EVT_LIST_ITEM_ACTIVATED, self.OnItemActivated)
self.Bind(wx.EVT_LIST_ITEM_DESELECTED, self.OnItemDeselected)
self.Bind(wx.EVT_LIST_COL_CLICK, self.OnColClick)
self.Bind(event=wx.EVT_CONTEXT_MENU, handler=self._popupContextMenuFromList)
self.SetItemCount(addonsListVM.getCount())
selIndex = self._addonsListVM.getSelectedIndex()
if selIndex is not None:
self.Select(selIndex)
self.Focus(selIndex)
self._addonsListVM.itemUpdated.register(self._itemDataUpdated)
self._addonsListVM.updated.register(self._doRefresh)
def METHOD_NAME(self):
self.ClearAll()
for colIndex, col in enumerate(self._addonsListVM.presentedFields):
self.InsertColumn(colIndex, col.displayString, width=self.scaleSize(col.width))
self.Layout()
def _getListSelectionPosition(self) -> Optional[wx.Position]:
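# The bottom-left corner of the selected row is used to anchor the context menu.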
firstSelectedIndex: int = self.GetFirstSelected()
if firstSelectedIndex < 0:
return None
itemRect: wx.Rect = self.GetItemRect(firstSelectedIndex)
return itemRect.GetBottomLeft()
def _popupContextMenuFromList(self, evt: wx.ContextMenuEvent):
listSelectionPosition = self._getListSelectionPosition()
if listSelectionPosition is None:
return
eventPosition: wx.Position = evt.GetPosition()
if eventPosition == wx.DefaultPosition:
# A keyboard-triggered context menu (via the "applications" key)
# doesn't have a position set. It must be fetched from the selected item.
self._actionsContextMenu.popupContextMenuFromPosition(self, listSelectionPosition)
else:
# Mouse (right click) triggered context menu.
# In this case the menu is positioned better with GetPopupMenuSelectionFromUser.
self._actionsContextMenu.popupContextMenuFromPosition(self)
def _itemDataUpdated(self, index: int):
log.debug(f"index: {index}")
self.RefreshItem(index)
def OnItemSelected(self, evt: wx.ListEvent):
newIndex = evt.GetIndex()
log.debug(f"item selected: {newIndex}")
self._addonsListVM.setSelection(index=newIndex)
def OnItemActivated(self, evt: wx.ListEvent):
position = self._getListSelectionPosition()
self._actionsContextMenu.popupContextMenuFromPosition(self, position)
log.debug(f"item activated: {evt.GetIndex()}")
def OnItemDeselected(self, evt: wx.ListEvent):
log.debug(f"item deselected")
self._addonsListVM.setSelection(None)
def OnGetItemText(self, itemIndex: int, colIndex: int) -> str:
dataItem = self._addonsListVM.getAddonFieldText(
itemIndex,
self._addonsListVM.presentedFields[colIndex]
)
if dataItem is None:
# Failed to get dataItem, index may have been lost in refresh.
return ''
return str(dataItem)
def OnColClick(self, evt: wx.ListEvent):
colIndex = evt.GetColumn()
log.debug(f"col clicked: {colIndex}")
self._addonsListVM.setSortField(self._addonsListVM.presentedFields[colIndex])
def _doRefresh(self):
with guiHelper.autoThaw(self):
newCount = self._addonsListVM.getCount()
self.SetItemCount(newCount)
self._refreshSelection()
def _refreshSelection(self):
selected = self.GetFirstSelected()
newSelectedIndex = self._addonsListVM.getSelectedIndex()
log.debug(f"_refreshSelection {newSelectedIndex}")
if newSelectedIndex is not None:
self.Select(newSelectedIndex)
self.Focus(newSelectedIndex)
# wx.ListCtrl doesn't send a selection event if the index hasn't changed,
# however, the item at that index may have changed as a result of filtering.
# To ensure parent dialogs are notified, explicitly send an event.
if selected == newSelectedIndex:
evt = wx.ListEvent(wx.wxEVT_LIST_ITEM_SELECTED, self.GetId())
evt.SetIndex(newSelectedIndex)
evt.SetClientObject(self._addonsListVM.getSelection())
self.GetEventHandler().ProcessEvent(evt)
elif newSelectedIndex is None:
# wx.ListCtrl doesn't send a deselection event when the list is emptied.
# To ensure parent dialogs are notified, explicitly send an event.
self.Select(selected, on=0)
evt = wx.ListEvent(wx.wxEVT_LIST_ITEM_DESELECTED, self.GetId())
evt.SetIndex(-1)
evt.SetClientObject(None)
self.GetEventHandler().ProcessEvent(evt)
|
2,491 |
close
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, TYPE_CHECKING
from azure.core.rest import HttpRequest, HttpResponse
from azure.mgmt.core import ARMPipelineClient
from . import models as _models
from .._serialization import Deserializer, Serializer
from ._configuration import ServiceBusManagementClientConfiguration
from .operations import (
DisasterRecoveryConfigsOperations,
EventHubsOperations,
MigrationConfigsOperations,
NamespacesOperations,
Operations,
PremiumMessagingRegionsOperations,
QueuesOperations,
RegionsOperations,
RulesOperations,
SubscriptionsOperations,
TopicsOperations,
)
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials import TokenCredential
class ServiceBusManagementClient: # pylint: disable=client-accepts-api-version-keyword,too-many-instance-attributes
"""Azure Service Bus client.
:ivar namespaces: NamespacesOperations operations
:vartype namespaces: azure.mgmt.servicebus.v2017_04_01.operations.NamespacesOperations
:ivar queues: QueuesOperations operations
:vartype queues: azure.mgmt.servicebus.v2017_04_01.operations.QueuesOperations
:ivar topics: TopicsOperations operations
:vartype topics: azure.mgmt.servicebus.v2017_04_01.operations.TopicsOperations
:ivar disaster_recovery_configs: DisasterRecoveryConfigsOperations operations
:vartype disaster_recovery_configs:
azure.mgmt.servicebus.v2017_04_01.operations.DisasterRecoveryConfigsOperations
:ivar event_hubs: EventHubsOperations operations
:vartype event_hubs: azure.mgmt.servicebus.v2017_04_01.operations.EventHubsOperations
:ivar migration_configs: MigrationConfigsOperations operations
:vartype migration_configs:
azure.mgmt.servicebus.v2017_04_01.operations.MigrationConfigsOperations
:ivar operations: Operations operations
:vartype operations: azure.mgmt.servicebus.v2017_04_01.operations.Operations
:ivar premium_messaging_regions: PremiumMessagingRegionsOperations operations
:vartype premium_messaging_regions:
azure.mgmt.servicebus.v2017_04_01.operations.PremiumMessagingRegionsOperations
:ivar rules: RulesOperations operations
:vartype rules: azure.mgmt.servicebus.v2017_04_01.operations.RulesOperations
:ivar regions: RegionsOperations operations
:vartype regions: azure.mgmt.servicebus.v2017_04_01.operations.RegionsOperations
:ivar subscriptions: SubscriptionsOperations operations
:vartype subscriptions: azure.mgmt.servicebus.v2017_04_01.operations.SubscriptionsOperations
:param credential: Credential needed for the client to connect to Azure. Required.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: Subscription credentials that uniquely identify a Microsoft Azure
subscription. The subscription ID forms part of the URI for every service call. Required.
:type subscription_id: str
:param base_url: Service URL. Default value is "https://management.azure.com".
:type base_url: str
:keyword api_version: Api Version. Default value is "2017-04-01". Note that overriding this
default value may result in unsupported behavior.
:paramtype api_version: str
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
"""
def __init__(
self,
credential: "TokenCredential",
subscription_id: str,
base_url: str = "https://management.azure.com",
**kwargs: Any
) -> None:
self._config = ServiceBusManagementClientConfiguration(
credential=credential, subscription_id=subscription_id, **kwargs
)
self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.namespaces = NamespacesOperations(self._client, self._config, self._serialize, self._deserialize)
self.queues = QueuesOperations(self._client, self._config, self._serialize, self._deserialize)
self.topics = TopicsOperations(self._client, self._config, self._serialize, self._deserialize)
self.disaster_recovery_configs = DisasterRecoveryConfigsOperations(
self._client, self._config, self._serialize, self._deserialize
)
self.event_hubs = EventHubsOperations(self._client, self._config, self._serialize, self._deserialize)
self.migration_configs = MigrationConfigsOperations(
self._client, self._config, self._serialize, self._deserialize
)
self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)
self.premium_messaging_regions = PremiumMessagingRegionsOperations(
self._client, self._config, self._serialize, self._deserialize
)
self.rules = RulesOperations(self._client, self._config, self._serialize, self._deserialize)
self.regions = RegionsOperations(self._client, self._config, self._serialize, self._deserialize)
self.subscriptions = SubscriptionsOperations(self._client, self._config, self._serialize, self._deserialize)
def _send_request(self, request: HttpRequest, **kwargs: Any) -> HttpResponse:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = client._send_request(request)
<HttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.HttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
def METHOD_NAME(self) -> None:
self._client.METHOD_NAME()
def __enter__(self) -> "ServiceBusManagementClient":
self._client.__enter__()
return self
def __exit__(self, *exc_details: Any) -> None:
self._client.__exit__(*exc_details)
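# --- Hedged usage sketch (not part of the generated client) ---
# Assumes azure-identity is installed; DefaultAzureCredential and the
# subscription id below are illustrative placeholders only.
if __name__ == "__main__":
    from azure.identity import DefaultAzureCredential
    with ServiceBusManagementClient(
        DefaultAzureCredential(), subscription_id="<subscription-id>"
    ) as client:
        # Operation groups are attached in __init__ above, e.g. client.namespaces,
        # client.queues, client.topics; leaving the block closes the client via __exit__.
        pass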
|
2,492 |
lowest mu
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
lib_acquisition_function.py
"""
import sys
import numpy
from scipy.stats import norm
from scipy.optimize import minimize
from . import lib_data
def next_hyperparameter_expected_improvement(fun_prediction,
fun_prediction_args,
x_bounds, x_types,
samples_y_aggregation,
minimize_starting_points,
minimize_constraints_fun=None):
"""
"Expected Improvement" acquisition function
"""
best_x = None
best_acquisition_value = None
x_bounds_minmax = [[i[0], i[-1]] for i in x_bounds]
x_bounds_minmax = numpy.array(x_bounds_minmax)
for starting_point in numpy.array(minimize_starting_points):
res = minimize(fun=_expected_improvement,
x0=starting_point.reshape(1, -1),
bounds=x_bounds_minmax,
method="L-BFGS-B",
args=(fun_prediction,
fun_prediction_args,
x_bounds,
x_types,
samples_y_aggregation,
minimize_constraints_fun))
if (best_acquisition_value is None) or \
(res.fun < best_acquisition_value):
res.x = numpy.ndarray.tolist(res.x)
res.x = lib_data.match_val_type(res.x, x_bounds, x_types)
if (minimize_constraints_fun is None) or \
(minimize_constraints_fun(res.x) is True):
best_acquisition_value = res.fun
best_x = res.x
outputs = None
if best_x is not None:
mu, sigma = fun_prediction(best_x, *fun_prediction_args)
outputs = {'hyperparameter': best_x, 'expected_mu': mu,
'expected_sigma': sigma, 'acquisition_func': "ei"}
return outputs
def _expected_improvement(x, fun_prediction, fun_prediction_args,
x_bounds, x_types, samples_y_aggregation,
minimize_constraints_fun):
# This is only for step-wise optimization
x = lib_data.match_val_type(x, x_bounds, x_types)
expected_improvement = sys.maxsize
if (minimize_constraints_fun is None) or (
minimize_constraints_fun(x) is True):
mu, sigma = fun_prediction(x, *fun_prediction_args)
loss_optimum = min(samples_y_aggregation)
scaling_factor = -1
# In case sigma equals zero
with numpy.errstate(divide="ignore"):
Z = scaling_factor * (mu - loss_optimum) / sigma
expected_improvement = scaling_factor * (mu - loss_optimum) * \
norm.cdf(Z) + sigma * norm.pdf(Z)
expected_improvement = 0.0 if sigma == 0.0 else expected_improvement
# We want expected_improvement to be as large as possible
# (i.e., as small as possible for minimize(...))
expected_improvement = -1 * expected_improvement
return expected_improvement
def next_hyperparameter_lowest_confidence(fun_prediction,
fun_prediction_args,
x_bounds, x_types,
minimize_starting_points,
minimize_constraints_fun=None):
"""
"Lowest Confidence" acquisition function
"""
best_x = None
best_acquisition_value = None
x_bounds_minmax = [[i[0], i[-1]] for i in x_bounds]
x_bounds_minmax = numpy.array(x_bounds_minmax)
for starting_point in numpy.array(minimize_starting_points):
res = minimize(fun=_lowest_confidence,
x0=starting_point.reshape(1, -1),
bounds=x_bounds_minmax,
method="L-BFGS-B",
args=(fun_prediction,
fun_prediction_args,
x_bounds,
x_types,
minimize_constraints_fun))
        if (best_acquisition_value is None) or (
                res.fun < best_acquisition_value):
res.x = numpy.ndarray.tolist(res.x)
res.x = lib_data.match_val_type(res.x, x_bounds, x_types)
if (minimize_constraints_fun is None) or (
minimize_constraints_fun(res.x) is True):
best_acquisition_value = res.fun
best_x = res.x
outputs = None
if best_x is not None:
mu, sigma = fun_prediction(best_x, *fun_prediction_args)
outputs = {'hyperparameter': best_x, 'expected_mu': mu,
'expected_sigma': sigma, 'acquisition_func': "lc"}
return outputs
def _lowest_confidence(x, fun_prediction, fun_prediction_args,
x_bounds, x_types, minimize_constraints_fun):
# This is only for step-wise optimization
x = lib_data.match_val_type(x, x_bounds, x_types)
ci = sys.maxsize
if (minimize_constraints_fun is None) or (
minimize_constraints_fun(x) is True):
mu, sigma = fun_prediction(x, *fun_prediction_args)
ci = (sigma * 1.96 * 2) / mu
# We want ci to be as large as possible
        # (i.e., as small as possible for minimize(...)),
        # because this would mean the lowest confidence
ci = -1 * ci
return ci
def next_hyperparameter_lowest_mu(fun_prediction,
fun_prediction_args,
x_bounds, x_types,
minimize_starting_points,
minimize_constraints_fun=None):
"""
"Lowest Mu" acquisition function
"""
best_x = None
best_acquisition_value = None
x_bounds_minmax = [[i[0], i[-1]] for i in x_bounds]
x_bounds_minmax = numpy.array(x_bounds_minmax)
for starting_point in numpy.array(minimize_starting_points):
res = minimize(fun=METHOD_NAME,
x0=starting_point.reshape(1, -1),
bounds=x_bounds_minmax,
method="L-BFGS-B",
args=(fun_prediction, fun_prediction_args,
x_bounds, x_types, minimize_constraints_fun))
if (best_acquisition_value is None) or (
res.fun < best_acquisition_value):
res.x = numpy.ndarray.tolist(res.x)
res.x = lib_data.match_val_type(res.x, x_bounds, x_types)
if (minimize_constraints_fun is None) or (
minimize_constraints_fun(res.x) is True):
best_acquisition_value = res.fun
best_x = res.x
outputs = None
if best_x is not None:
mu, sigma = fun_prediction(best_x, *fun_prediction_args)
outputs = {'hyperparameter': best_x, 'expected_mu': mu,
'expected_sigma': sigma, 'acquisition_func': "lm"}
return outputs
def METHOD_NAME(x, fun_prediction, fun_prediction_args,
x_bounds, x_types, minimize_constraints_fun):
"""
Calculate the lowest mu
"""
# This is only for step-wise optimization
x = lib_data.match_val_type(x, x_bounds, x_types)
mu = sys.maxsize
if (minimize_constraints_fun is None) or (
minimize_constraints_fun(x) is True):
mu, _ = fun_prediction(x, *fun_prediction_args)
return mu
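# --- Hedged example (not part of the original module) ---
# Self-contained sketch of the Expected Improvement value computed by
# _expected_improvement above for a single candidate point (minimization);
# the mu, sigma and loss_optimum values in the docstring are made up.
def _ei_value_sketch(mu, sigma, loss_optimum):
    """E.g. _ei_value_sketch(0.4, 0.1, 0.5) is positive: the predicted mean
    improves on the best observed loss."""
    if sigma == 0.0:
        return 0.0
    z = (loss_optimum - mu) / sigma  # same as scaling_factor * (mu - loss_optimum) / sigma
    return (loss_optimum - mu) * norm.cdf(z) + sigma * norm.pdf(z)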
|
2,493 |
test from address w
|
import unittest
from ctypes import *
formats = "bBhHiIlLqQfd"
formats = c_byte, c_ubyte, c_short, c_ushort, c_int, c_uint, \
c_long, c_ulonglong, c_float, c_double, c_longdouble
class ArrayTestCase(unittest.TestCase):
def test_simple(self):
# create classes holding simple numeric types, and check
# various properties.
init = range(15, 25)
for fmt in formats:
alen = len(init)
int_array = ARRAY(fmt, alen)
ia = int_array(*init)
# length of instance ok?
self.assertEqual(len(ia), alen)
# slot values ok?
values = [ia[i] for i in range(len(init))]
self.assertEqual(values, init)
# change the items
from operator import setitem
new_values = range(42, 42+alen)
[setitem(ia, n, new_values[n]) for n in range(alen)]
values = [ia[i] for i in range(len(init))]
self.assertEqual(values, new_values)
# are the items initialized to 0?
ia = int_array()
values = [ia[i] for i in range(len(init))]
self.assertEqual(values, [0] * len(init))
# Too many initializers should be caught
self.assertRaises(IndexError, int_array, *range(alen*2))
CharArray = ARRAY(c_char, 3)
ca = CharArray("a", "b", "c")
# Should this work? It doesn't:
# CharArray("abc")
self.assertRaises(TypeError, CharArray, "abc")
self.assertEqual(ca[0], "a")
self.assertEqual(ca[1], "b")
self.assertEqual(ca[2], "c")
self.assertEqual(ca[-3], "a")
self.assertEqual(ca[-2], "b")
self.assertEqual(ca[-1], "c")
self.assertEqual(len(ca), 3)
# slicing is now supported, but not extended slicing (3-argument)!
from operator import getslice, delitem
self.assertRaises(TypeError, getslice, ca, 0, 1, -1)
# cannot delete items
self.assertRaises(TypeError, delitem, ca, 0)
def test_numeric_arrays(self):
alen = 5
numarray = ARRAY(c_int, alen)
na = numarray()
values = [na[i] for i in range(alen)]
self.assertEqual(values, [0] * alen)
na = numarray(*[c_int()] * alen)
values = [na[i] for i in range(alen)]
self.assertEqual(values, [0]*alen)
na = numarray(1, 2, 3, 4, 5)
values = [i for i in na]
self.assertEqual(values, [1, 2, 3, 4, 5])
na = numarray(*map(c_int, (1, 2, 3, 4, 5)))
values = [i for i in na]
self.assertEqual(values, [1, 2, 3, 4, 5])
def test_classcache(self):
self.assertTrue(not ARRAY(c_int, 3) is ARRAY(c_int, 4))
self.assertTrue(ARRAY(c_int, 3) is ARRAY(c_int, 3))
def test_from_address(self):
# Failed with 0.9.8, reported by JUrner
p = create_string_buffer("foo")
sz = (c_char * 3).from_address(addressof(p))
self.assertEqual(sz[:], "foo")
self.assertEqual(sz[::], "foo")
self.assertEqual(sz[::-1], "oof")
self.assertEqual(sz[::3], "f")
self.assertEqual(sz[1:4:2], "o")
self.assertEqual(sz.value, "foo")
try:
create_unicode_buffer
except NameError:
pass
else:
def METHOD_NAME(self):
p = create_unicode_buffer("foo")
sz = (c_wchar * 3).from_address(addressof(p))
self.assertEqual(sz[:], "foo")
self.assertEqual(sz[::], "foo")
self.assertEqual(sz[::-1], "oof")
self.assertEqual(sz[::3], "f")
self.assertEqual(sz[1:4:2], "o")
self.assertEqual(sz.value, "foo")
def test_cache(self):
# Array types are cached internally in the _ctypes extension,
# in a WeakValueDictionary. Make sure the array type is
# removed from the cache when the itemtype goes away. This
# test will not fail, but will show a leak in the testsuite.
# Create a new type:
class my_int(c_int):
pass
# Create a new array type based on it:
t1 = my_int * 1
t2 = my_int * 1
self.assertTrue(t1 is t2)
if __name__ == '__main__':
unittest.main()
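# --- Hedged note (not part of the original test file) ---
# The tests above are written against Python 2 ctypes (c_char items compare
# equal to 1-character str values). A rough Python 3 equivalent of the
# from_address check would use bytes instead, e.g.:
#
#     p = create_string_buffer(b"foo")
#     sz = (c_char * 3).from_address(addressof(p))
#     assert sz[:] == b"foo" and sz.value == b"foo"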
|
2,494 |
check sudo
|
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
from typing import Optional, Dict, Any
import logging
import subprocess
import psutil
logger = logging.getLogger(__name__)
_BOOL_DICT = {'no': 0, 'yes': 1}
class NASLCli:
"""Class for calling nasl-cli executable"""
@staticmethod
def load_vts_into_redis() -> bool:
"""Loads all VTs into the redis database"""
try:
subprocess.check_call(
['nasl-cli', 'feed', 'update'], stdout=subprocess.DEVNULL
)
return True
except (subprocess.SubprocessError, OSError) as err:
logger.error('nasl-cli failed to load VTs. %s', err)
return False
class Openvas:
"""Class for calling the openvas executable"""
@staticmethod
def _get_version_output() -> Optional[str]:
try:
result = subprocess.check_output(
['openvas', '-V'], stderr=subprocess.STDOUT
)
return result.decode('ascii')
except (subprocess.SubprocessError, OSError) as e:
logger.debug(
                'It was not possible to call openvas to get the version '
'information. Reason %s',
e,
)
return None
@staticmethod
def check() -> bool:
"""Checks that openvas command line tool is found and
is executable.
"""
try:
subprocess.check_call(['openvas', '-V'], stdout=subprocess.DEVNULL)
return True
except (subprocess.SubprocessError, OSError) as e:
logger.debug(
'It was not possible to call the openvas executable. Reason %s',
e,
)
return False
@staticmethod
def METHOD_NAME() -> bool:
"""Checks if openvas can be run with sudo"""
try:
subprocess.check_call(
['sudo', '-n', 'openvas', '-s'], stdout=subprocess.DEVNULL
)
return True
except (subprocess.SubprocessError, OSError) as e:
logger.debug(
'It was not possible to call openvas with sudo. '
'The scanner will run as non-root user. Reason %s',
e,
)
return False
@classmethod
def get_version(cls) -> Optional[str]:
"""Returns the version string of the openvas executable"""
result = cls._get_version_output()
if result is None:
return None
version = result.split('\n')
if version[0].find('OpenVAS') < 0:
return None
return version[0]
@staticmethod
def get_settings() -> Dict[str, Any]:
"""Parses the current settings of the openvas executable"""
param_list = dict()
try:
result = subprocess.check_output(['openvas', '-s'])
result = result.decode('ascii')
except (subprocess.SubprocessError, OSError, UnicodeDecodeError) as e:
logger.warning('Could not gather openvas settings. Reason %s', e)
return param_list
for conf in result.split('\n'):
if not conf:
continue
try:
key, value = conf.split('=', 1)
except ValueError:
logger.warning("Could not parse openvas setting '%s'", conf)
continue
key = key.strip()
value = value.strip()
if value:
value = _BOOL_DICT.get(value, value)
param_list[key] = value
return param_list
@staticmethod
def load_vts_into_redis() -> bool:
"""Loads all VTs into the redis database"""
logger.debug('Loading VTs into Redis DB...')
try:
subprocess.check_call(
['openvas', '--update-vt-info'], stdout=subprocess.DEVNULL
)
logger.debug('Finished loading VTs into Redis DB')
return True
except (subprocess.SubprocessError, OSError) as err:
logger.error('OpenVAS Scanner failed to load VTs. %s', err)
return False
@staticmethod
def start_scan(
scan_id: str,
sudo: bool = False,
        niceness: Optional[int] = None,
) -> Optional[psutil.Popen]:
"""Calls openvas to start a scan process"""
cmd = []
if niceness:
            cmd += ['nice', '-n', str(niceness)]
logger.debug("Starting scan with niceness %s", niceness)
if sudo:
cmd += ['sudo', '-n']
cmd += ['openvas', '--scan-start', scan_id]
try:
return psutil.Popen(cmd, shell=False)
except (psutil.Error, OSError, FileNotFoundError) as e:
# the command is not available
logger.warning("Could not start scan process. Reason %s", e)
return None
@staticmethod
def stop_scan(scan_id: str, sudo: bool = False) -> bool:
"""Calls openvas to stop a scan process"""
cmd = []
if sudo:
cmd += ['sudo', '-n']
cmd += ['openvas', '--scan-stop', scan_id]
try:
subprocess.check_call(cmd)
return True
except (subprocess.SubprocessError, OSError) as e:
# the command is not available
logger.warning(
'Not possible to stop scan: %s. Reason %s',
scan_id,
e,
)
return False
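# --- Hedged usage sketch (not part of the original module) ---
# Assumes the openvas executable is installed and that scan_id is a placeholder;
# only the static methods defined above are used.
def _start_scan_sketch(scan_id: str) -> Optional[psutil.Popen]:
    if not Openvas.check():
        logger.error('openvas executable not found')
        return None
    use_sudo = Openvas.METHOD_NAME()  # the sudo-capability check defined above
    return Openvas.start_scan(scan_id, sudo=use_sudo)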
|
2,495 |
test cannot reserve seats waiting list if
|
from datetime import timedelta
import pytest
from django.utils.timezone import localtime
from rest_framework import status
from events.tests.utils import versioned_reverse as reverse
from registrations.models import SeatReservationCode
from registrations.tests.test_seatsreservation_post import assert_reserve_seats
def update_seats_reservation(api_client, pk, reservation_data):
detail_url = reverse("seatreservationcode-detail", kwargs={"pk": pk})
response = api_client.put(detail_url, reservation_data, format="json")
return response
def assert_update_seats_reservation(api_client, pk, reservation_data):
response = update_seats_reservation(api_client, pk, reservation_data)
assert response.status_code == status.HTTP_200_OK
assert response.data["seats"] == reservation_data["seats"]
return response
@pytest.mark.django_db
def test_update_seats_reservation(api_client, event, registration):
registration.maximum_attendee_capacity = 2
registration.save()
reservation = SeatReservationCode.objects.create(seats=1, registration=registration)
reservation_data = {
"seats": 2,
"registration": registration.id,
"code": reservation.code,
}
assert_update_seats_reservation(api_client, reservation.id, reservation_data)
@pytest.mark.django_db
def test_seats_amount_has_not_limit_if_maximum_attendee_capacity_is_none(
api_client, event, registration
):
registration.maximum_attendee_capacity = None
registration.save()
reservation = SeatReservationCode.objects.create(seats=1, registration=registration)
reservation_data = {
"seats": 10000,
"registration": registration.id,
"code": reservation.code,
}
assert_update_seats_reservation(api_client, reservation.id, reservation_data)
@pytest.mark.django_db
def test_seats_value_is_required(api_client, event, registration):
reservation = SeatReservationCode.objects.create(seats=1, registration=registration)
reservation_data = {
"registration": registration.id,
"code": reservation.code,
}
response = update_seats_reservation(api_client, reservation.id, reservation_data)
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert response.data["seats"][0].code == "required"
@pytest.mark.django_db
def test_code_value_is_required(api_client, event, registration):
reservation = SeatReservationCode.objects.create(seats=1, registration=registration)
reservation_data = {
"seats": 1,
"registration": registration.id,
}
response = update_seats_reservation(api_client, reservation.id, reservation_data)
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert response.data["code"][0].code == "required"
@pytest.mark.django_db
def test_code_value_must_match(api_client, event, registration):
reservation = SeatReservationCode.objects.create(seats=1, registration=registration)
reservation_data = {
"seats": 1,
"registration": registration.id,
"code": "invalid_code",
}
response = update_seats_reservation(api_client, reservation.id, reservation_data)
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert response.data["code"][0] == "The value doesn't match."
@pytest.mark.django_db
def test_cannot_update_registration(api_client, event, registration, registration2):
reservation = SeatReservationCode.objects.create(seats=1, registration=registration)
reservation_data = {
"seats": 1,
"code": reservation.code,
"registration": registration2.id,
}
response = update_seats_reservation(api_client, reservation.id, reservation_data)
assert response.data["registration"] == registration.id
@pytest.mark.django_db
def test_cannot_update_expired_reservation(api_client, event, registration):
reservation = SeatReservationCode.objects.create(seats=1, registration=registration)
reservation.timestamp = localtime() - timedelta(days=1)
reservation.save()
reservation_data = {
"seats": 1,
"registration": registration.id,
"code": reservation.code,
}
response = assert_reserve_seats(api_client, reservation_data)
response = update_seats_reservation(api_client, reservation.id, reservation_data)
assert response.status_code == status.HTTP_409_CONFLICT
assert response.data["detail"] == "Cannot update expired seats reservation."
@pytest.mark.django_db
def test_cannot_update_timestamp(api_client, event, registration):
reservation = SeatReservationCode.objects.create(seats=1, registration=registration)
timestamp = reservation.timestamp
reservation_data = {
"seats": 2,
"registration": registration.id,
"code": reservation.code,
"timestamp": localtime() + timedelta(minutes=15),
}
assert_update_seats_reservation(api_client, reservation.id, reservation_data)
updated_reservation = SeatReservationCode.objects.get(id=reservation.id)
assert updated_reservation.seats == 2
assert updated_reservation.timestamp == timestamp
@pytest.mark.django_db
def test_cannot_reserve_seats_if_there_are_not_enough_seats_available(
api_client, event, registration
):
registration.maximum_attendee_capacity = 2
registration.save()
reservation = SeatReservationCode.objects.create(seats=1, registration=registration)
reservation_data = {
"seats": 3,
"registration": registration.id,
"code": reservation.code,
}
response = update_seats_reservation(api_client, reservation.id, reservation_data)
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert response.data["seats"][0] == "Not enough seats available. Capacity left: 2."
@pytest.mark.django_db
def test_update_seats_reservation_in_waiting_list(
api_client, event, registration, signup, signup2
):
registration.maximum_attendee_capacity = 2
registration.waiting_list_capacity = 2
registration.save()
reservation = SeatReservationCode.objects.create(seats=1, registration=registration)
reservation_data = {
"seats": 2,
"registration": registration.id,
"code": reservation.code,
}
response = assert_update_seats_reservation(
api_client, reservation.id, reservation_data
)
assert response.data["in_waitlist"] == True
@pytest.mark.django_db
def test_waiting_list_seats_amount_has_not_limit_if_waiting_list_capacity_is_none(
api_client, event, registration, signup, signup2
):
registration.maximum_attendee_capacity = 2
registration.waiting_list_capacity = None
registration.save()
reservation = SeatReservationCode.objects.create(seats=1, registration=registration)
reservation_data = {
"seats": 10000,
"registration": registration.id,
"code": reservation.code,
}
response = assert_update_seats_reservation(
api_client, reservation.id, reservation_data
)
assert response.data["in_waitlist"] == True
@pytest.mark.django_db
def METHOD_NAME(
api_client, event, registration, signup, signup2
):
registration.maximum_attendee_capacity = 2
registration.waiting_list_capacity = 2
registration.save()
reservation = SeatReservationCode.objects.create(seats=1, registration=registration)
reservation_data = {
"seats": 3,
"registration": registration.id,
"code": reservation.code,
}
response = update_seats_reservation(api_client, reservation.id, reservation_data)
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert (
response.data["seats"][0]
== "Not enough capacity in the waiting list. Capacity left: 2."
)
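# --- Hedged sketch (not part of the test module) ---
# Rough outline of the capacity rule these tests exercise, inferred from the
# expected error messages; the real serializer logic in the registrations app
# may differ.
def _capacity_left_sketch(capacity, already_taken):
    if capacity is None:
        return None  # unlimited, any seats amount is accepted
    return max(capacity - already_taken, 0)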
|
2,496 |
acceptance fn divide
|
# Data Parallel Control (dpctl)
#
# Copyright 2020-2023 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dpctl.tensor as dpt
import dpctl.tensor._tensor_impl as ti
def _all_data_types(_fp16, _fp64):
if _fp64:
if _fp16:
return [
dpt.bool,
dpt.int8,
dpt.uint8,
dpt.int16,
dpt.uint16,
dpt.int32,
dpt.uint32,
dpt.int64,
dpt.uint64,
dpt.float16,
dpt.float32,
dpt.float64,
dpt.complex64,
dpt.complex128,
]
else:
return [
dpt.bool,
dpt.int8,
dpt.uint8,
dpt.int16,
dpt.uint16,
dpt.int32,
dpt.uint32,
dpt.int64,
dpt.uint64,
dpt.float32,
dpt.float64,
dpt.complex64,
dpt.complex128,
]
else:
if _fp16:
return [
dpt.bool,
dpt.int8,
dpt.uint8,
dpt.int16,
dpt.uint16,
dpt.int32,
dpt.uint32,
dpt.int64,
dpt.uint64,
dpt.float16,
dpt.float32,
dpt.complex64,
]
else:
return [
dpt.bool,
dpt.int8,
dpt.uint8,
dpt.int16,
dpt.uint16,
dpt.int32,
dpt.uint32,
dpt.int64,
dpt.uint64,
dpt.float32,
dpt.complex64,
]
def _is_maximal_inexact_type(dt: dpt.dtype, _fp16: bool, _fp64: bool):
"""
Return True if data type `dt` is the
maximal size inexact data type
"""
if _fp64:
return dt in [dpt.float64, dpt.complex128]
return dt in [dpt.float32, dpt.complex64]
def _can_cast(from_: dpt.dtype, to_: dpt.dtype, _fp16: bool, _fp64: bool):
"""
Can `from_` be cast to `to_` safely on a device with
fp16 and fp64 aspects as given?
"""
can_cast_v = dpt.can_cast(from_, to_) # ask NumPy
if _fp16 and _fp64:
return can_cast_v
if not can_cast_v:
if (
from_.kind in "biu"
and to_.kind in "fc"
and _is_maximal_inexact_type(to_, _fp16, _fp64)
):
return True
return can_cast_v
def _to_device_supported_dtype(dt, dev):
has_fp16 = dev.has_aspect_fp16
has_fp64 = dev.has_aspect_fp64
if has_fp64:
if not has_fp16:
if dt is dpt.float16:
return dpt.float32
else:
if dt is dpt.float64:
return dpt.float32
elif dt is dpt.complex128:
return dpt.complex64
if not has_fp16 and dt is dpt.float16:
return dpt.float32
return dt
def _find_buf_dtype(arg_dtype, query_fn, sycl_dev):
res_dt = query_fn(arg_dtype)
if res_dt:
return None, res_dt
_fp16 = sycl_dev.has_aspect_fp16
_fp64 = sycl_dev.has_aspect_fp64
all_dts = _all_data_types(_fp16, _fp64)
for buf_dt in all_dts:
if _can_cast(arg_dtype, buf_dt, _fp16, _fp64):
res_dt = query_fn(buf_dt)
if res_dt:
return buf_dt, res_dt
return None, None
def _get_device_default_dtype(dt_kind, sycl_dev):
if dt_kind == "b":
return dpt.dtype(ti.default_device_bool_type(sycl_dev))
elif dt_kind == "i":
return dpt.dtype(ti.default_device_int_type(sycl_dev))
elif dt_kind == "u":
return dpt.dtype(ti.default_device_int_type(sycl_dev).upper())
elif dt_kind == "f":
return dpt.dtype(ti.default_device_fp_type(sycl_dev))
elif dt_kind == "c":
return dpt.dtype(ti.default_device_complex_type(sycl_dev))
raise RuntimeError
def _acceptance_fn_default(
arg1_dtype, arg2_dtype, ret_buf1_dt, ret_buf2_dt, res_dt, sycl_dev
):
return True
def METHOD_NAME(
arg1_dtype, arg2_dtype, ret_buf1_dt, ret_buf2_dt, res_dt, sycl_dev
):
    # Both input dtypes are being promoted. If the kind of the result differs
    # from the kind of the original input dtypes, use the device default dtype
    # for the resulting kind. This covers, e.g., int8 / uint8: NumPy's divide
    # gives float64 there, while the regular type promotion rules would stop
    # at float16.
if (ret_buf1_dt.kind != arg1_dtype.kind) and (
ret_buf2_dt.kind != arg2_dtype.kind
):
default_dt = _get_device_default_dtype(res_dt.kind, sycl_dev)
if res_dt == default_dt:
return True
else:
return False
else:
return True
def _find_buf_dtype2(arg1_dtype, arg2_dtype, query_fn, sycl_dev, acceptance_fn):
res_dt = query_fn(arg1_dtype, arg2_dtype)
if res_dt:
return None, None, res_dt
_fp16 = sycl_dev.has_aspect_fp16
_fp64 = sycl_dev.has_aspect_fp64
all_dts = _all_data_types(_fp16, _fp64)
for buf1_dt in all_dts:
for buf2_dt in all_dts:
if _can_cast(arg1_dtype, buf1_dt, _fp16, _fp64) and _can_cast(
arg2_dtype, buf2_dt, _fp16, _fp64
):
res_dt = query_fn(buf1_dt, buf2_dt)
if res_dt:
ret_buf1_dt = None if buf1_dt == arg1_dtype else buf1_dt
ret_buf2_dt = None if buf2_dt == arg2_dtype else buf2_dt
if ret_buf1_dt is None or ret_buf2_dt is None:
return ret_buf1_dt, ret_buf2_dt, res_dt
else:
acceptable = acceptance_fn(
arg1_dtype,
arg2_dtype,
ret_buf1_dt,
ret_buf2_dt,
res_dt,
sycl_dev,
)
if acceptable:
return ret_buf1_dt, ret_buf2_dt, res_dt
else:
continue
return None, None, None
__all__ = [
"_find_buf_dtype",
"_find_buf_dtype2",
"_to_device_supported_dtype",
"_acceptance_fn_default",
"_acceptance_fn_divide",
]
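# --- Hedged example (not part of the original module) ---
# Illustrates the relaxed casting rule in _can_cast above: int64 -> float32 is
# not a "safe" cast by NumPy rules, but it is accepted when float32 is the
# maximal inexact type the device supports (i.e. no fp64 aspect).
def _can_cast_demo():
    assert not _can_cast(dpt.int64, dpt.float32, _fp16=False, _fp64=True)
    assert _can_cast(dpt.int64, dpt.float32, _fp16=False, _fp64=False)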
|
2,497 |
test context manager replace
|
# ######################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import logging
import numpy as np
from .utils import SynHandlerMod, SynHandlerEcho
import uuid
import pytest
logger = logging.getLogger(__name__)
def test_context():
with SynHandlerMod('', (4, 2)) as hand:
for j in range(1, 5):
assert np.all(hand(j) < j)
def test_register_fail(fs):
fsa = fs
with fsa.handler_context({'syn-mod': SynHandlerMod}):
        # shouldn't raise, it is a no-op as it is registering
# the same class with the same name
fsa.register_handler('syn-mod', SynHandlerMod)
# should raise as it is trying to change the registered class
with pytest.raises(RuntimeError):
fsa.register_handler('syn-mod', SynHandlerEcho)
def METHOD_NAME(fs):
fsa = fs
# nuke anything already registered, just to be safe.
while len(fs.handler_reg):
for k in list(fs.handler_reg):
fs.deregister_handler(k)
# check syn-mod not in the registry
assert 'syn-mod' not in fsa.handler_reg
# put syn-mod in with context manager
with fsa.handler_context({'syn-mod': SynHandlerMod}):
# check that it is the version we expect
assert fsa.handler_reg['syn-mod'] is SynHandlerMod
# over-ride syn-mod with a second context manager
with fsa.handler_context({'syn-mod': SynHandlerEcho}):
# check that we get the second one
assert fsa.handler_reg['syn-mod'] is SynHandlerEcho
# and that it correctly rollsback to first value
assert fsa.handler_reg['syn-mod'] is SynHandlerMod
# and is empty again when we are done.
assert 'syn-mod' not in fsa.handler_reg
def test_deregister(fs):
test_reg = fs.handler_reg
test_spec_name = str(uuid.uuid4())
fs.register_handler(test_spec_name, SynHandlerMod)
assert test_reg[test_spec_name] is SynHandlerMod
fs.deregister_handler(test_spec_name)
assert test_spec_name not in test_reg
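# --- Hedged sketch (not part of the original test module) ---
# Rough outline of the kind of handler_context() these tests exercise: register
# temporary handlers, then roll the registry back on exit. The real filestore
# implementation may differ.
from contextlib import contextmanager

@contextmanager
def handler_context_sketch(registry, temporary_handlers):
    saved = dict(registry)                # remember current state
    registry.update(temporary_handlers)   # apply overrides
    try:
        yield registry
    finally:
        registry.clear()                  # roll back to the saved state
        registry.update(saved)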
|
2,498 |
test mine operation status reason code validate
|
import pytest
from app.api.constants import MINE_OPERATION_STATUS, MINE_OPERATION_STATUS_REASON, MINE_OPERATION_STATUS_SUB_REASON
from app.api.mines.status.models.mine_operation_status_code import MineOperationStatusCode
from app.api.mines.status.models.mine_operation_status_reason_code import MineOperationStatusReasonCode
from app.api.mines.status.models.mine_operation_status_sub_reason_code import MineOperationStatusSubReasonCode
# MineOperationStatusCode Model
def test_mine_operation_status_code_find_by_mine_operation_status_code(db_session):
mine_operation_status_code = MineOperationStatusCode.find_by_mine_operation_status_code(MINE_OPERATION_STATUS['closed']['value'])
assert mine_operation_status_code.mine_operation_status_code == MINE_OPERATION_STATUS['closed']['value']
def test_mine_operation_status_code_validate_mine_operation_status_code_not_provided():
with pytest.raises(AssertionError) as e:
MineOperationStatusCode(
mine_operation_status_code='',
description='test_description',
display_order=1
)
assert 'Mine operation status code is not provided.' in str(e.value)
def test_mine_operation_status_code_validate_mine_operation_status_code_max_char():
with pytest.raises(AssertionError) as e:
MineOperationStatusCode(
mine_operation_status_code='1234',
description='test_description',
display_order=1
)
assert 'Mine operation status code must not exceed 3 characters.' in str(e.value)
def test_mine_operation_status_code_validate_description_not_provided():
with pytest.raises(AssertionError) as e:
MineOperationStatusCode(
mine_operation_status_code='123',
description='',
display_order=1
)
assert 'Mine operation status code description is not provided.' in str(e.value)
def test_mine_operation_status_code_validate_description_max_char():
with pytest.raises(AssertionError) as e:
MineOperationStatusCode(
mine_operation_status_code='123',
description='a' * 101,
display_order=1
)
assert 'Mine operation status code description must not exceed 100 characters.' in str(e.value)
# MineOperationStatusReasonCode
def test_mine_operation_status_reason_code_find_by_mine_operation_status_reason_code(db_session):
mine_operation_status_reason_code = MineOperationStatusReasonCode.find_by_mine_operation_status_reason_code(MINE_OPERATION_STATUS_REASON['reclamation']['value'])
assert mine_operation_status_reason_code.mine_operation_status_reason_code == MINE_OPERATION_STATUS_REASON['reclamation']['value']
def test_mine_operation_status_code_validate_mine_operation_status_reason_code_not_provided():
with pytest.raises(AssertionError) as e:
MineOperationStatusReasonCode(
mine_operation_status_reason_code='',
description='test_description',
display_order=1
)
assert 'Mine operation status reason code is not provided.' in str(e.value)
def test_mine_operation_status_code_validate_mine_operation_status_reason_code_max_char():
with pytest.raises(AssertionError) as e:
MineOperationStatusReasonCode(
mine_operation_status_reason_code='1234',
description='test_description',
display_order=1
)
assert 'Mine operation status reason code must not exceed 3 characters.' in str(e.value)
def test_mine_operation_status_reason_code_validate_description_not_provided():
with pytest.raises(AssertionError) as e:
MineOperationStatusReasonCode(
mine_operation_status_reason_code='123',
description='',
display_order=1
)
assert 'Mine operation status reason code description is not provided.' in str(e.value)
def METHOD_NAME():
with pytest.raises(AssertionError) as e:
MineOperationStatusReasonCode(
mine_operation_status_reason_code='123',
description='a'*101,
display_order=1
)
assert 'Mine operation status reason code description must not exceed 100 characters.' in str(e.value)
# MineOperationStatusSubReasonCode
def test_mine_operation_status_reason_code_find_by_mine_operation_status_sub_reason_code(db_session):
mine_operation_status_sub_reason_code = MineOperationStatusSubReasonCode.find_by_mine_operation_status_sub_reason_code(MINE_OPERATION_STATUS_SUB_REASON['long_term_maintenance']['value'])
assert mine_operation_status_sub_reason_code.mine_operation_status_sub_reason_code == MINE_OPERATION_STATUS_SUB_REASON['long_term_maintenance']['value']
def test_mine_operation_status_code_validate_mine_operation_status_sub_reason_code_not_provided():
with pytest.raises(AssertionError) as e:
MineOperationStatusSubReasonCode(
mine_operation_status_sub_reason_code='',
description='test_description',
display_order=1
)
assert 'Mine operation status sub reason code is not provided.' in str(e.value)
def test_mine_operation_status_code_validate_mine_operation_status_sub_reason_code_max_char():
with pytest.raises(AssertionError) as e:
MineOperationStatusSubReasonCode(
mine_operation_status_sub_reason_code='1234',
description='test_description',
display_order=1
)
assert 'Mine operation status sub reason code must not exceed 3 characters.' in str(e.value)
def test_mine_operation_status_sub_reason_code_validate_description_not_provided():
with pytest.raises(AssertionError) as e:
MineOperationStatusSubReasonCode(
mine_operation_status_sub_reason_code='123',
description='',
display_order=1
)
assert 'Mine operation status sub reason code description is not provided.' in str(e.value)
def test_mine_operation_status_sub_reason_code_validate_description_max_char():
with pytest.raises(AssertionError) as e:
MineOperationStatusSubReasonCode(
mine_operation_status_sub_reason_code='123',
description='a' * 101,
display_order=1
)
assert 'Mine operation status sub reason code description must not exceed 100 characters.' in str(e.value)
|
2,499 |
ensure no l3 drops
|
import logging
import re
import json
import pytest
from tests.common.utilities import wait_until
logger = logging.getLogger(__name__)
# CLI commands to obtain drop counters.
NAMESPACE_PREFIX = "sudo ip netns exec {} "
NAMESPACE_SUFFIX = "-n {} "
GET_L2_COUNTERS = "portstat -j "
GET_L3_COUNTERS = "intfstat -j "
ACL_COUNTERS_UPDATE_INTERVAL = 10
LOG_EXPECT_ACL_RULE_CREATE_RE = ".*Successfully created ACL rule.*"
LOG_EXPECT_ACL_RULE_REMOVE_RE = ".*Successfully deleted ACL rule.*"
LOG_EXPECT_PORT_ADMIN_DOWN_RE = ".*Configure {} admin status to down.*"
LOG_EXPECT_PORT_ADMIN_UP_RE = ".*Port {} oper state set from down to up.*"
RX_DRP = "RX_DRP"
RX_ERR = "RX_ERR"
COMBINED_L2L3_DROP_COUNTER = False
COMBINED_ACL_DROP_COUNTER = False
def get_pkt_drops(duthost, cli_cmd, asic_index=None):
"""
    @summary: Parse output of "portstat" or "intfstat" commands and convert it to a dictionary.
    @param duthost: DUT host object used to run the CLI command
    @param cli_cmd: one of supported CLI commands - "portstat -j" or "intfstat -j"
    @param asic_index: optional ASIC index; if given, only that ASIC's namespace is queried
    @return: Return dictionary of parsed counters
"""
# Get namespace from asic_index.
result = {}
for asic_id in duthost.get_asic_ids():
if asic_index is not None and asic_index != asic_id:
continue
namespace = duthost.get_namespace_from_asic_id(asic_id)
# Frame the correct cli command
# the L2 commands need _SUFFIX and L3 commands need _PREFIX
if cli_cmd == GET_L3_COUNTERS:
CMD_PREFIX = NAMESPACE_PREFIX if (namespace is not None and duthost.is_multi_asic) else ''
cli_cmd = CMD_PREFIX + cli_cmd
elif cli_cmd == GET_L2_COUNTERS:
CMD_SUFFIX = NAMESPACE_SUFFIX if (namespace is not None and duthost.is_multi_asic) else ''
cli_cmd = cli_cmd + CMD_SUFFIX
stdout = duthost.command(cli_cmd.format(namespace))
stdout = stdout["stdout"]
match = re.search("Last cached time was.*\n", stdout)
if match:
stdout = re.sub("Last cached time was.*\n", "", stdout)
try:
namespace_result = json.loads(stdout)
result.update(namespace_result)
except Exception as err:
raise Exception("Failed to parse output of '{}', err={}".format(cli_cmd, str(err)))
return result
def METHOD_NAME(duthost, packets_count):
""" Verify L3 drop counters were not incremented """
intf_l3_counters = get_pkt_drops(duthost, GET_L3_COUNTERS)
unexpected_drops = {}
for iface, value in list(intf_l3_counters.items()):
try:
rx_err_value = int(value[RX_ERR])
except ValueError as err:
logger.info("Unable to verify L3 drops on iface {}, L3 counters may not be supported on this platform\n{}"
.format(iface, err))
continue
if rx_err_value >= packets_count:
unexpected_drops[iface] = rx_err_value
if unexpected_drops:
pytest.fail("L3 'RX_ERR' was incremented for the following interfaces:\n{}".format(unexpected_drops))
def ensure_no_l2_drops(duthost, packets_count):
""" Verify L2 drop counters were not incremented """
intf_l2_counters = get_pkt_drops(duthost, GET_L2_COUNTERS)
unexpected_drops = {}
for iface, value in list(intf_l2_counters.items()):
try:
rx_drp_value = int(value[RX_DRP])
except ValueError as err:
logger.warning("Unable to verify L2 drops on iface {}\n{}".format(iface, err))
continue
if rx_drp_value >= packets_count:
unexpected_drops[iface] = rx_drp_value
if unexpected_drops:
pytest.fail("L2 'RX_DRP' was incremented for the following interfaces:\n{}".format(unexpected_drops))
def verify_drop_counters(duthosts, asic_index, dut_iface, get_cnt_cli_cmd, column_key, packets_count):
""" Verify drop counter incremented on specific interface """
def _get_drops_across_all_duthosts():
drop_list = []
for duthost in duthosts.frontend_nodes:
pkt_drops = get_pkt_drops(duthost, get_cnt_cli_cmd)
            # we cannot assume the iface name will be the same on all the devices of a SONiC chassis;
            # if dut_iface is not found, ignore this device
if dut_iface not in pkt_drops:
continue
drop_list.append(int(pkt_drops[dut_iface][column_key].replace(",", "")))
return drop_list
def _check_drops_on_dut():
return packets_count in _get_drops_across_all_duthosts()
if not wait_until(25, 1, 0, _check_drops_on_dut):
        # The actual drop count may be 1 or 2 packets higher than expected, because
        # other drops can occur on the interface under test while the check runs.
        # Looking only for an exact match would then produce a false positive failure,
        # so do one more check that allows up to 2 extra dropped packets beyond the expected count.
actual_drop = _get_drops_across_all_duthosts()
if ((packets_count+2) in actual_drop) or ((packets_count+1) in actual_drop):
logger.warning("Actual drops {} exceeded expected drops {} on iface {}\n"
.format(actual_drop, packets_count, dut_iface))
else:
fail_msg = "'{}' drop counter was not incremented on iface {}. DUT {} == {}; Sent == {}".format(
column_key, dut_iface, column_key, actual_drop, packets_count)
pytest.fail(fail_msg)
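# --- Hedged usage sketch (not part of the original module) ---
# Typical call pattern for the helpers above inside a sonic-mgmt test case; the
# duthosts fixture, interface name and packet count below are placeholders.
#
#     verify_drop_counters(duthosts, asic_index=0, dut_iface="Ethernet0",
#                          get_cnt_cli_cmd=GET_L2_COUNTERS, column_key=RX_DRP,
#                          packets_count=100)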
|