id (int64, 0–300k) | label (string, 1–74 chars, may be null) | text (string, 4k–8k chars) |
---|---|---|
3,400 | add argument before |
# SPDX-License-Identifier: LGPL-3.0-or-other
# Copyright (C) 2021 Contributors to the SLS Detector Package
"""
Utility functions that are useful for testing and troubleshooting
but not directly used in controlling the detector
"""
from collections import namedtuple
import _slsdet #C++ lib
import functools
import datetime as dt
import pathlib
import os
from pathlib import Path
Geometry = namedtuple('Geometry', ['x', 'y'])
def is_iterable(item):
try:
iter(item)
except TypeError:
return False
return True
def get_set_bits(mask):
"""
Return a list of the set bits in a python integer
"""
return [i for i in range(mask.bit_length()) if (mask >> i) & 1]
def list_to_bitmask(values):
"""
Convert a list of integers to a bitmask with set bits
where the list indicates
"""
mask = int(0)
values = list(set(values)) #Remove duplicates
for v in values:
mask += 1 << v
return mask
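# Illustrative check (not part of the original module): get_set_bits and
# list_to_bitmask are inverses of each other for duplicate-free input.
assert get_set_bits(0b100101) == [0, 2, 5]
assert list_to_bitmask([0, 2, 5]) == 0b100101
assert list_to_bitmask([5, 2, 0, 2]) == 0b100101  # duplicates are removed first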
def make_bitmask(args):
if isinstance(args, (list,tuple)):
return list_to_bitmask(args)
elif isinstance(args, dict):
return {key: list_to_bitmask(value) for key, value in args.items()}
else:
raise ValueError("Cannot convert arg to bitmask")
def to_geo(value):
if isinstance(value, _slsdet.xy):
return Geometry(x=value.x, y=value.y)
else:
raise ValueError("Can only convert slsdet.xy")
def all_equal(mylist):
"""If all elements are equal return true otherwise false"""
return all(x == mylist[0] for x in mylist)
def element_if_equal(mylist):
"""If all elements are equal return only one element"""
if not is_iterable(mylist):
return mylist
if all_equal(mylist):
if len(mylist) == 0:
return None
else:
return mylist[0]
else:
return mylist
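# Illustrative behaviour (assumed values): equal elements collapse to a single
# value, differing elements are returned unchanged.
assert element_if_equal([5, 5, 5]) == 5
assert element_if_equal([1, 2, 3]) == [1, 2, 3]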
def reduce_time(mylist):
res = element_if_equal(element_if_equal(mylist))
if isinstance(res, (dt.timedelta, _slsdet.DurationWrapper)):
return res.total_seconds()
elif isinstance(res[0], list):
return [[item.total_seconds() for item in subl] for subl in res]
else:
return [r.total_seconds() for r in res]
def element(func):
"""
Wrapper to return either list or element
"""
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
return element_if_equal(func(self, *args, **kwargs))
return wrapper
def eiger_register_to_time(register):
"""
Decode register value and return time in s. Values are stored in
a 32bit register with bits 2->0 containing the exponent and bits
31->3 containing the significand (int value)
"""
clocks = register >> 3
exponent = register & 0b111
return clocks * 10**exponent / 100e6
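# Worked example (illustrative): register (50 << 3) | 2 packs clocks=50 in bits
# 31->3 and exponent=2 in bits 2->0, so the decoded time is
# 50 * 10**2 / 100e6 s = 50 microseconds.
assert eiger_register_to_time((50 << 3) | 2) == 5e-05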
def make_timedelta(t):
if isinstance(t, dt.timedelta):
return t
else:
return dt.timedelta(seconds=t)
def _make_string_path(path):
"""
Accepts either a pathlib.Path or a string, expands ~ to user and convert
Path to str
"""
if isinstance(path, pathlib.Path):
return path.expanduser().as_posix()
elif isinstance(path, str):
return os.path.expanduser(path)
else:
raise ValueError("Cannot convert argument to posix path")
def make_string_path(path):
return _make(path, _make_string_path)
def make_ip(arg):
return _make(arg, _slsdet.IpAddr)
def make_mac(arg):
return _make(arg, _slsdet.MacAddr)
def make_path(arg):
return _make(arg, Path)
def _make(arg, transform):
"""Helper function for make_mac and make_ip special cases for
dict, list and tuple. Otherwise just calls transform"""
if isinstance(arg, dict):
return {key: transform(value) for key, value in arg.items()}
elif isinstance(arg, list):
return [transform(a) for a in arg]
elif isinstance(arg, tuple):
return tuple(transform(a) for a in arg)
else:
return transform(arg)
def set_using_dict(func, *args):
if len(args) == 1 and isinstance(args[0], dict) and all(
isinstance(k, int) for k in args[0].keys()):
for key, value in args[0].items():
if not isinstance(value, tuple):
value = (value,)
try:
func(*value, [key])
except TypeError:
func(*value, key)
else:
func(*args)
def set_time_using_dict(func, args):
if isinstance(args, dict) and all(isinstance(k, int) for k in args.keys()):
for key, value in args.items():
if isinstance(value, int):
value = float(value)
func(value, [key])
else:
if isinstance(args, int):
args = float(args)
func(args)
def lhex(iterable):
return [hex(item) for item in iterable]
def lpath(iterable):
return [Path(item) for item in iterable]
def METHOD_NAME(a, args):
"""Add a before the other arguments. Also works with
dict that holds args to several modules. Always puts the
args in a dict to be compatible with set_using_dict"""
if isinstance(args, tuple):
return (a, *args)
elif isinstance(args, dict):
ret = {}
for key, value in args.items():
if isinstance(value, tuple):
ret[key] = (a, *value)
else:
ret[key] = (a, value)
return (ret,)
return a, args
def add_argument_after(args, a):
"""Add a before the other arguments. Also works with
dict that holds args to several modules. Always puts the
args in a dict to be compatible with set_using_dict"""
if isinstance(args, tuple):
return (*args, a)
elif isinstance(args, dict):
ret = {}
for key, value in args.items():
if isinstance(value, tuple):
ret[key] = (*value, a)
else:
ret[key] = (value, a)
return (ret,)
return args, a
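# Illustrative usage of the two helpers above (the masked name is presumably an
# "add argument before" counterpart of add_argument_after, per the row label):
assert METHOD_NAME(5, (1, 2)) == (5, 1, 2)
assert METHOD_NAME(5, {0: (1, 2), 1: 3}) == ({0: (5, 1, 2), 1: (5, 3)},)
assert add_argument_after((1, 2), 5) == (1, 2, 5)
assert add_argument_after({0: (1, 2), 1: 3}, 5) == ({0: (1, 2, 5), 1: (3, 5)},)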
def pop_dict(args):
for i,a in enumerate(args):
if isinstance(a, dict):
return args.pop(i), i
def tuplify(args):
if not isinstance(args, tuple):
return (args, )
else:
return args
def merge_args(*args):
n_dict = sum(isinstance(a, dict) for a in args)
if n_dict == 0: #no dict just make a tuple of arguments
ret = []
for a in args:
if isinstance(a, tuple):
ret.extend(a)
else:
ret.append(a)
return tuple(ret)
elif n_dict == 1:
args = [a for a in args] #these are the args to be added
values,pos = pop_dict(args)
ret = {}
for k, v in values.items():
v = tuplify(v)
items = [a for a in args]
items[pos:pos] = v
ret[k] = tuple(items)
return (ret,)
else:
raise ValueError("Multiple dictionaries passes cannot merge args")
def hostname_list(args):
"""
Generates a list from a hostname string
* Lists are passed through
* as are tuples (conversion in pybind11 to vector)
* if + is found it splits the string
"""
if isinstance(args, (list, tuple)):
return args
elif(isinstance(args, str)):
hosts = args.split('+')
hosts = [it for it in hosts if len(it)]
return hosts
else:
raise ValueError("hostname needs to be string or list of strings")
|
3,401 | method1 |
# pylint: disable=invalid-name,unnecessary-pass,no-else-return,useless-else-on-loop
# pylint: disable=undefined-variable,consider-using-sys-exit,unused-variable,too-many-return-statements
# pylint: disable=redefined-outer-name,using-constant-test,unused-argument
# pylint: disable=broad-except, not-context-manager, no-method-argument, unspecified-encoding, broad-exception-raised
"""Checks use of "too-complex" check"""
def f1(): # [too-complex]
"""McCabe rating: 1"""
pass
def f2(n): # [too-complex]
"""McCabe rating: 1"""
k = n + 4
s = k + n
return s
def f3(n): # [too-complex]
"""McCabe rating: 3"""
if n > 3:
return "bigger than three"
elif n > 4:
return "is never executed"
else:
return "smaller than or equal to three"
def f4(): # [too-complex]
"""McCabe rating: 2"""
for i in range(10):
print(i)
def f5(mylist): # [too-complex]
"""McCabe rating: 2"""
for i in mylist:
print(i)
else:
print(None)
def f6(n): # [too-complex]
"""McCabe rating: 2"""
if n > 4:
return f(n - 1)
else:
return n
def f7(): # [too-complex]
"""McCabe rating: 3"""
def b():
"""McCabe rating: 2"""
def c():
"""McCabe rating: 1"""
pass
c()
b()
def f8(): # [too-complex]
"""McCabe rating: 4"""
try:
print(1)
except TypeA:
print(2)
except TypeB:
print(3)
else:
print(4)
def f9(): # [too-complex]
"""McCabe rating: 9"""
myint = 2
if myint > 5:
pass
else:
if myint <= 5:
pass
else:
myint = 3
if myint > 2:
if myint > 3:
pass
elif myint == 3:
pass
elif myint < 3:
pass
else:
if myint:
pass
else:
if myint:
pass
myint = 4
def f10(): # [too-complex]
"""McCabe rating: 11"""
myint = 2
if myint == 5:
return myint
elif myint == 6:
return myint
elif myint == 7:
return myint
elif myint == 8:
return myint
elif myint == 9:
return myint
elif myint == 10:
if myint == 8:
while True:
return True
elif myint == 8:
with myint:
return 8
else:
if myint == 2:
return myint
return myint
return myint
class MyClass1:
"""Class of example to test mccabe"""
_name = "MyClass" # To force a tail.node=None
def METHOD_NAME(): # [too-complex]
"""McCabe rating: 1"""
pass
def method2(self, param1): # [too-complex, too-many-branches]
"""McCabe rating: 15"""
if not param1:
pass
pass
if param1:
pass
else:
pass
pass
if param1:
pass
if param1:
pass
if param1:
pass
if param1:
pass
if param1:
pass
if param1:
pass
if param1:
for value in range(5):
pass
pass
for count in range(6):
with open("myfile") as fp:
count += 1
pass
pass
try:
pass
if not param1:
pass
else:
pass
if param1:
raise BaseException("Error")
with open("myfile2") as fp2:
pass
pass
finally:
if param1 is not None:
pass
for count2 in range(8):
try:
pass
except BaseException("Error2"):
pass
return param1
for count in range(10): # [too-complex]
if count == 1:
exit(0)
elif count == 2:
exit(1)
else:
exit(2)
def method3(self): # [too-complex]
"""McCabe rating: 3"""
try:
if True:
pass
else:
pass
finally:
pass
return True
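# Illustrative note (not part of the fixture): the too-complex messages above come
# from pylint's optional McCabe extension, which would typically be run as e.g.
#   pylint --load-plugins=pylint.extensions.mccabe --max-complexity=1 this_file.py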
|
3,402 | created by |
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'LogAnalyticsQueryPackQueryPropertiesResponseRelated',
'SystemDataResponse',
]
@pulumi.output_type
class LogAnalyticsQueryPackQueryPropertiesResponseRelated(dict):
"""
The related metadata items for the function.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "resourceTypes":
suggest = "resource_types"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in LogAnalyticsQueryPackQueryPropertiesResponseRelated. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
LogAnalyticsQueryPackQueryPropertiesResponseRelated.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
LogAnalyticsQueryPackQueryPropertiesResponseRelated.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
categories: Optional[Sequence[str]] = None,
resource_types: Optional[Sequence[str]] = None,
solutions: Optional[Sequence[str]] = None):
"""
The related metadata items for the function.
:param Sequence[str] categories: The related categories for the function.
:param Sequence[str] resource_types: The related resource types for the function.
:param Sequence[str] solutions: The related Log Analytics solutions for the function.
"""
if categories is not None:
pulumi.set(__self__, "categories", categories)
if resource_types is not None:
pulumi.set(__self__, "resource_types", resource_types)
if solutions is not None:
pulumi.set(__self__, "solutions", solutions)
@property
@pulumi.getter
def categories(self) -> Optional[Sequence[str]]:
"""
The related categories for the function.
"""
return pulumi.get(self, "categories")
@property
@pulumi.getter(name="resourceTypes")
def resource_types(self) -> Optional[Sequence[str]]:
"""
The related resource types for the function.
"""
return pulumi.get(self, "resource_types")
@property
@pulumi.getter
def solutions(self) -> Optional[Sequence[str]]:
"""
The related Log Analytics solutions for the function.
"""
return pulumi.get(self, "solutions")
@pulumi.output_type
class SystemDataResponse(dict):
"""
Read only system data
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "createdAt":
suggest = "created_at"
elif key == "createdBy":
suggest = "created_by"
elif key == "createdByType":
suggest = "created_by_type"
elif key == "lastModifiedAt":
suggest = "last_modified_at"
elif key == "lastModifiedBy":
suggest = "last_modified_by"
elif key == "lastModifiedByType":
suggest = "last_modified_by_type"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in SystemDataResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
SystemDataResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
SystemDataResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
created_at: Optional[str] = None,
METHOD_NAME: Optional[str] = None,
created_by_type: Optional[str] = None,
last_modified_at: Optional[str] = None,
last_modified_by: Optional[str] = None,
last_modified_by_type: Optional[str] = None):
"""
Read only system data
:param str created_at: The timestamp of resource creation (UTC)
:param str created_by: An identifier for the identity that created the resource
:param str created_by_type: The type of identity that created the resource
:param str last_modified_at: The timestamp of resource last modification (UTC)
:param str last_modified_by: An identifier for the identity that last modified the resource
:param str last_modified_by_type: The type of identity that last modified the resource
"""
if created_at is not None:
pulumi.set(__self__, "created_at", created_at)
if METHOD_NAME is not None:
pulumi.set(__self__, "created_by", METHOD_NAME)
if created_by_type is not None:
pulumi.set(__self__, "created_by_type", created_by_type)
if last_modified_at is not None:
pulumi.set(__self__, "last_modified_at", last_modified_at)
if last_modified_by is not None:
pulumi.set(__self__, "last_modified_by", last_modified_by)
if last_modified_by_type is not None:
pulumi.set(__self__, "last_modified_by_type", last_modified_by_type)
@property
@pulumi.getter(name="createdAt")
def created_at(self) -> Optional[str]:
"""
The timestamp of resource creation (UTC)
"""
return pulumi.get(self, "created_at")
@property
@pulumi.getter(name="createdBy")
def METHOD_NAME(self) -> Optional[str]:
"""
An identifier for the identity that created the resource
"""
return pulumi.get(self, "created_by")
@property
@pulumi.getter(name="createdByType")
def created_by_type(self) -> Optional[str]:
"""
The type of identity that created the resource
"""
return pulumi.get(self, "created_by_type")
@property
@pulumi.getter(name="lastModifiedAt")
def last_modified_at(self) -> Optional[str]:
"""
The timestamp of resource last modification (UTC)
"""
return pulumi.get(self, "last_modified_at")
@property
@pulumi.getter(name="lastModifiedBy")
def last_modified_by(self) -> Optional[str]:
"""
An identifier for the identity that last modified the resource
"""
return pulumi.get(self, "last_modified_by")
@property
@pulumi.getter(name="lastModifiedByType")
def last_modified_by_type(self) -> Optional[str]:
"""
The type of identity that last modified the resource
"""
return pulumi.get(self, "last_modified_by_type")
|
3,403 | test no interpreter set |
# (c) 2017, Toshio Kuratomi <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os.path
import pytest
import ansible.errors
from ansible.executor import module_common as amc
from ansible.executor.interpreter_discovery import InterpreterDiscoveryRequiredError
class TestStripComments:
def test_no_changes(self):
no_comments = u"""def some_code():
return False"""
assert amc._strip_comments(no_comments) == no_comments
def test_all_comments(self):
all_comments = u"""# This is a test
# Being as it is
# To be
"""
assert amc._strip_comments(all_comments) == u""
def test_all_whitespace(self):
all_whitespace = (
'\n'
' \n'
'\n'
' \n'
'\t\t\r\n'
'\n'
' '
)
assert amc._strip_comments(all_whitespace) == u""
def test_somewhat_normal(self):
mixed = u"""#!/usr/bin/python
# here we go
def test(arg):
# this is a thing
thing = '# test'
return thing
# End
"""
mixed_results = u"""def test(arg):
thing = '# test'
return thing"""
assert amc._strip_comments(mixed) == mixed_results
class TestSlurp:
def test_slurp_nonexistent(self, mocker):
mocker.patch('os.path.exists', side_effect=lambda x: False)
with pytest.raises(ansible.errors.AnsibleError):
amc._slurp('no_file')
def test_slurp_file(self, mocker):
mocker.patch('os.path.exists', side_effect=lambda x: True)
m = mocker.mock_open(read_data='This is a test')
mocker.patch('builtins.open', m)
assert amc._slurp('some_file') == 'This is a test'
def test_slurp_file_with_newlines(self, mocker):
mocker.patch('os.path.exists', side_effect=lambda x: True)
m = mocker.mock_open(read_data='#!/usr/bin/python\ndef test(args):\nprint("hi")\n')
mocker.patch('builtins.open', m)
assert amc._slurp('some_file') == '#!/usr/bin/python\ndef test(args):\nprint("hi")\n'
class TestGetShebang:
"""Note: We may want to change the API of this function in the future. It isn't a great API"""
def METHOD_NAME(self, templar):
# normally this would return /usr/bin/python, but so long as we're defaulting to auto python discovery, we'll get
# an InterpreterDiscoveryRequiredError here instead
with pytest.raises(InterpreterDiscoveryRequiredError):
amc._get_shebang(u'/usr/bin/python', {}, templar)
def test_python_interpreter(self, templar):
assert amc._get_shebang(u'/usr/bin/python3.8', {}, templar) == ('#!/usr/bin/python3.8', u'/usr/bin/python3.8')
def test_non_python_interpreter(self, templar):
assert amc._get_shebang(u'/usr/bin/ruby', {}, templar) == ('#!/usr/bin/ruby', u'/usr/bin/ruby')
def test_interpreter_set_in_task_vars(self, templar):
assert amc._get_shebang(u'/usr/bin/python', {u'ansible_python_interpreter': u'/usr/bin/pypy'}, templar) == \
(u'#!/usr/bin/pypy', u'/usr/bin/pypy')
def test_non_python_interpreter_in_task_vars(self, templar):
assert amc._get_shebang(u'/usr/bin/ruby', {u'ansible_ruby_interpreter': u'/usr/local/bin/ruby'}, templar) == \
(u'#!/usr/local/bin/ruby', u'/usr/local/bin/ruby')
def test_with_args(self, templar):
assert amc._get_shebang(u'/usr/bin/python', {u'ansible_python_interpreter': u'/usr/bin/python3'}, templar, args=('-tt', '-OO')) == \
(u'#!/usr/bin/python3 -tt -OO', u'/usr/bin/python3')
def test_python_via_env(self, templar):
assert amc._get_shebang(u'/usr/bin/python', {u'ansible_python_interpreter': u'/usr/bin/env python'}, templar) == \
(u'#!/usr/bin/env python', u'/usr/bin/env python')
class TestDetectionRegexes:
ANSIBLE_MODULE_UTIL_STRINGS = (
# Absolute collection imports
b'import ansible_collections.my_ns.my_col.plugins.module_utils.my_util',
b'from ansible_collections.my_ns.my_col.plugins.module_utils import my_util',
b'from ansible_collections.my_ns.my_col.plugins.module_utils.my_util import my_func',
# Absolute core imports
b'import ansible.module_utils.basic',
b'from ansible.module_utils import basic',
b'from ansible.module_utils.basic import AnsibleModule',
# Relative imports
b'from ..module_utils import basic',
b'from .. module_utils import basic',
b'from ....module_utils import basic',
b'from ..module_utils.basic import AnsibleModule',
)
NOT_ANSIBLE_MODULE_UTIL_STRINGS = (
b'from ansible import release',
b'from ..release import __version__',
b'from .. import release',
b'from ansible.modules.system import ping',
b'from ansible_collecitons.my_ns.my_col.plugins.modules import function',
)
OFFSET = os.path.dirname(os.path.dirname(amc.__file__))
CORE_PATHS = (
('%s/modules/from_role.py' % OFFSET, 'ansible/modules/from_role'),
('%s/modules/system/ping.py' % OFFSET, 'ansible/modules/system/ping'),
('%s/modules/cloud/amazon/s3.py' % OFFSET, 'ansible/modules/cloud/amazon/s3'),
)
COLLECTION_PATHS = (
('/root/ansible_collections/ns/col/plugins/modules/ping.py',
'ansible_collections/ns/col/plugins/modules/ping'),
('/root/ansible_collections/ns/col/plugins/modules/subdir/ping.py',
'ansible_collections/ns/col/plugins/modules/subdir/ping'),
)
@pytest.mark.parametrize('testcase', ANSIBLE_MODULE_UTIL_STRINGS)
def test_detect_new_style_python_module_re(self, testcase):
assert amc.NEW_STYLE_PYTHON_MODULE_RE.search(testcase)
@pytest.mark.parametrize('testcase', NOT_ANSIBLE_MODULE_UTIL_STRINGS)
def test_no_detect_new_style_python_module_re(self, testcase):
assert not amc.NEW_STYLE_PYTHON_MODULE_RE.search(testcase)
# pylint bug: https://github.com/PyCQA/pylint/issues/511
@pytest.mark.parametrize('testcase, result', CORE_PATHS) # pylint: disable=undefined-variable
def test_detect_core_library_path_re(self, testcase, result):
assert amc.CORE_LIBRARY_PATH_RE.search(testcase).group('path') == result
@pytest.mark.parametrize('testcase', (p[0] for p in COLLECTION_PATHS)) # pylint: disable=undefined-variable
def test_no_detect_core_library_path_re(self, testcase):
assert not amc.CORE_LIBRARY_PATH_RE.search(testcase)
@pytest.mark.parametrize('testcase, result', COLLECTION_PATHS) # pylint: disable=undefined-variable
def test_detect_collection_path_re(self, testcase, result):
assert amc.COLLECTION_PATH_RE.search(testcase).group('path') == result
@pytest.mark.parametrize('testcase', (p[0] for p in CORE_PATHS)) # pylint: disable=undefined-variable
def test_no_detect_collection_path_re(self, testcase):
assert not amc.COLLECTION_PATH_RE.search(testcase)
|
3,404 | nunit |
from align.cell_fabric import SingleGrid, EnclosureGrid
from align.cell_fabric import CenterLineGrid, UncoloredCenterLineGrid
from align.cell_fabric import Canvas as AbstractCanvas
from align.cell_fabric import Wire, Via, Region
class Canvas(AbstractCanvas):
def __init__( self):
super().__init__()
self.finsPerUnitCell = 14
self.m2PerUnitCell = 7
ndPitch = 360
pdPitch = 360
m2Pitch = 720
self.unitCellHeight = self.m2PerUnitCell*m2Pitch
pcPitch = self.unitCellHeight//2
m1Pitch = 864
m3Pitch = 720
self.unitCellWidth = 2*m1Pitch
plPitch = m1Pitch
plOffset = plPitch//2
dcPitch = m1Pitch
pcWidth = 200
m1Width = 400
m2Width = 400
m3Width = 400
dcWidth = 200
plWidth = 200
ndWidth = 120
ndPitch = 360
self.pl = self.addGen( Wire( 'pl', 'poly', 'v',
clg=CenterLineGrid(),
spg=EnclosureGrid( pitch=m2Pitch//2, stoppoint=16)))
for i in range(5):
self.pl.clg.addCenterLine( i*plPitch//2, plWidth, i % 2 == 1)
self.pl.clg.semantic()
self.nd = self.addGen( Region( 'nd', 'ndiff',
h_grid=SingleGrid( pitch=ndPitch),
v_grid=self.pl.clg))
self.pd = self.addGen( Region( 'pd', 'pdiff',
h_grid=SingleGrid( pitch=pdPitch),
v_grid=self.pl.clg))
self.pc = self.addGen( Wire( 'pc', 'polycon', 'h',
clg=UncoloredCenterLineGrid( width=pcWidth, pitch=pcPitch),
spg=EnclosureGrid( pitch=dcPitch, stoppoint=plOffset-plWidth//2)))
self.m1 = self.addGen( Wire( 'm1', 'M1', 'v',
clg=UncoloredCenterLineGrid( width=m1Width, pitch=m1Pitch, repeat=2),
spg=EnclosureGrid( pitch=m2Pitch, stoppoint=m2Width//2)))
self.m2 = self.addGen( Wire( 'm2', 'M2', 'h',
clg=UncoloredCenterLineGrid( width=m2Width, pitch=m2Pitch, repeat=self.m2PerUnitCell),
spg=EnclosureGrid( pitch=2*m1Pitch, stoppoint=m1Pitch//2)))
self.m3 = self.addGen( Wire( 'm3', 'M3', 'v',
clg=UncoloredCenterLineGrid( width=m3Width, pitch=m3Pitch),
spg=EnclosureGrid( pitch=self.unitCellHeight, stoppoint=self.unitCellHeight//2-m2Pitch)))
self.dc = self.addGen( Wire( 'dc', 'diffcon', 'v',
clg=CenterLineGrid(),
spg=EnclosureGrid( pitch=m2Pitch//2, stoppoint=0)))
for i in range(5):
self.dc.clg.addCenterLine( i*dcPitch//2, dcWidth, i % 2 == 0)
self.dc.clg.semantic()
self.v0 = self.addGen( Via( 'v0', 'via0', v_clg=self.m1.clg, h_clg=self.pc.clg))
self.v1 = self.addGen( Via( 'v1', 'via1', v_clg=self.m1.clg, h_clg=self.m2.clg))
self.v2 = self.addGen( Via( 'v2', 'via2', v_clg=self.m3.clg, h_clg=self.m2.clg))
def METHOD_NAME( self):
h = 2*self.m2PerUnitCell
(ds0, ds1) = ('s', 'd')
self.addRegion( self.nd, None, (0, -1), (0*h, 2), (1, 1), (0*h, 6))
self.addRegion( self.pd, None, (0, -1), (1*h, -6), (1, 1), (1*h, -2))
self.addWire( self.dc, ds0, (0, 0), (0*h + 2, -1), (0*h + 6, 1))
self.addWire( self.dc, ds0, (0, 0), (0*h + 8, -1), (0*h + 12, 1))
self.addWire( self.pl, None, (0,-1), (0*h + 2, -1), (0*h + 6, 1))
self.addWire( self.pl, 'g', (0, 1), (0*h + 2, -1), (0*h + 6, 1))
self.addWire( self.pl, 'g', (1,-1), (0*h + 2, -1), (0*h + 6, 1))
self.addWire( self.pl, None, (1, 1), (0*h + 2, -1), (0*h + 6, 1))
self.addWire( self.pl, None, (0,-1), (0*h + 8, -1), (0*h + 12, 1))
self.addWire( self.pl, 'g', (0, 1), (0*h + 8, -1), (0*h + 12, 1))
self.addWire( self.pl, 'g', (1,-1), (0*h + 8, -1), (0*h + 12, 1))
self.addWire( self.pl, None, (1, 1), (0*h + 8, -1), (0*h + 12, 1))
self.addWire( self.dc, ds1, (1, 0), (0*h + 2, -1), (0*h + 6, 1))
self.addWire( self.dc, ds1, (1, 0), (0*h + 8, -1), (0*h + 12, 1))
self.addWire( self.m1, ds0, (0, 0), (0*self.m2PerUnitCell + 1,-1), (1*self.m2PerUnitCell - 1, 1))
self.addWire( self.m1, 'g', (0, 1), (0*self.m2PerUnitCell + 1,-1), (1*self.m2PerUnitCell - 1, 1))
self.addWire( self.m1, ds1, (1, 0), (0*self.m2PerUnitCell + 1,-1), (1*self.m2PerUnitCell - 1, 1))
assert self.m2PerUnitCell % 2 == 1
for o in range(0,self.m2PerUnitCell+1):
self.addWire( self.m2, None, (0, o), (0, -1), (1, 1))
|
3,405 | deploy prereq |
"""
This module provides base class for OCP deployment.
"""
import logging
import os
import json
import pytest
import yaml
from ocs_ci.framework import config
from ocs_ci.ocs import constants
from ocs_ci.ocs.openshift_ops import OCP
from ocs_ci.utility import utils, templating, system
from ocs_ci.deployment.disconnected import (
get_ocp_release_image,
mirror_ocp_release_images,
)
logger = logging.getLogger(__name__)
class OCPDeployment:
def __init__(self):
"""
Constructor for OCPDeployment class
"""
self.pull_secret = {}
self.metadata = {}
self.deployment_platform = config.ENV_DATA["platform"].lower()
self.sno = config.ENV_DATA["sno"]
self.deployment_type = config.ENV_DATA["deployment_type"].lower()
if not hasattr(self, "flexy_deployment"):
self.flexy_deployment = False
ibmcloud_managed_deployment = (
self.deployment_platform == constants.IBMCLOUD_PLATFORM
and self.deployment_type == "managed"
)
if not self.flexy_deployment and not ibmcloud_managed_deployment:
self.installer = self.download_installer()
self.cluster_path = config.ENV_DATA["cluster_path"]
self.cluster_name = config.ENV_DATA["cluster_name"]
def download_installer(self):
"""
Method to download installer
Returns:
str: path to the installer
"""
force_download = (
config.RUN["cli_params"].get("deploy")
and config.DEPLOYMENT["force_download_installer"]
)
return utils.get_openshift_installer(
config.DEPLOYMENT["installer_version"], force_download=force_download
)
def get_pull_secret(self):
"""
Load pull secret file
Returns:
dict: content of pull secret
"""
pull_secret_path = os.path.join(constants.DATA_DIR, "pull-secret")
with open(pull_secret_path, "r") as f:
# Parse, then unparse, the JSON file.
# We do this for two reasons: to ensure it is well-formatted, and
# also to ensure it ends up as a single line.
return json.dumps(json.loads(f.read()))
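# Minimal sketch of the parse/unparse round trip described above (made-up secret):
# >>> json.dumps(json.loads('{\n  "auths": {}\n}'))
# '{"auths": {}}'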
def get_ssh_key(self):
"""
Loads public ssh to be used for deployment
Returns:
str: public ssh key or empty string if not found
"""
ssh_key = os.path.expanduser(config.DEPLOYMENT.get("ssh_key"))
if not os.path.isfile(ssh_key):
return ""
with open(ssh_key, "r") as fs:
lines = fs.readlines()
return lines[0].rstrip("\n") if lines else ""
def METHOD_NAME(self):
"""
Perform generic prereq before calling openshift-installer
This method performs all the basic steps necessary before invoking the
installer
"""
deploy = config.RUN["cli_params"]["deploy"]
teardown = config.RUN["cli_params"]["teardown"]
if teardown and not deploy:
msg = "Attempting teardown of non-accessible cluster: "
msg += f"{self.cluster_path}"
pytest.fail(msg)
elif not deploy and not teardown:
msg = "The given cluster can not be connected to: {}. ".format(
self.cluster_path
)
msg += (
"Provide a valid --cluster-path or use --deploy to "
"deploy a new cluster"
)
pytest.fail(msg)
elif not system.is_path_empty(self.cluster_path) and deploy:
msg = "The given cluster path is not empty: {}. ".format(self.cluster_path)
msg += (
"Provide an empty --cluster-path and --deploy to deploy "
"a new cluster"
)
pytest.fail(msg)
else:
logger.info(
"A testing cluster will be deployed and cluster information "
"stored at: %s",
self.cluster_path,
)
if not self.flexy_deployment and config.DEPLOYMENT.get("disconnected"):
ocp_release_image = get_ocp_release_image()
if constants.SHA_SEPARATOR in ocp_release_image:
ocp_image_path, ocp_version = ocp_release_image.split("@")
else:
ocp_image_path, ocp_version = ocp_release_image.split(":")
_, _, ics, _ = mirror_ocp_release_images(ocp_image_path, ocp_version)
config.RUN["imageContentSources"] = ics
if (
not self.flexy_deployment
and config.ENV_DATA["deployment_type"] != "managed"
):
self.create_config()
def create_config(self):
"""
Create the OCP deploy config, if something needs to be changed for
specific platform you can overload this method in child class.
"""
# Generate install-config from template
logger.info("Generating install-config")
_templating = templating.Templating()
ocp_install_template = (
f"install-config-{self.deployment_platform}-"
f"{self.deployment_type}.yaml.j2"
)
ocp_install_template_path = os.path.join("ocp-deployment", ocp_install_template)
install_config_str = _templating.render_template(
ocp_install_template_path, config.ENV_DATA
)
# Log the install config *before* adding the pull secret,
# so we don't leak sensitive data.
logger.info(f"Install config: \n{install_config_str}")
# Parse the rendered YAML so that we can manipulate the object directly
install_config_obj = yaml.safe_load(install_config_str)
install_config_obj["pullSecret"] = self.get_pull_secret()
ssh_key = self.get_ssh_key()
if ssh_key:
install_config_obj["sshKey"] = ssh_key
install_config_str = yaml.safe_dump(install_config_obj)
install_config = os.path.join(self.cluster_path, "install-config.yaml")
with open(install_config, "w") as f:
f.write(install_config_str)
def deploy(self, log_cli_level="DEBUG"):
"""
Implement ocp deploy in specific child class
"""
raise NotImplementedError("deploy_ocp functionality not implemented")
def test_cluster(self):
"""
Test if OCP cluster installed successfully
"""
# Test cluster access
if not OCP.set_kubeconfig(
os.path.join(
self.cluster_path,
config.RUN.get("kubeconfig_location"),
)
):
pytest.fail("Cluster is not available!")
def destroy(self, log_level="DEBUG"):
"""
Destroy OCP cluster specific
Args:
log_level (str): log level openshift-installer (default: DEBUG)
"""
# Retrieve cluster metadata
metadata_file = os.path.join(self.cluster_path, "metadata.json")
with open(metadata_file) as f:
self.metadata = json.loads(f.read())
utils.destroy_cluster(
installer=self.installer,
cluster_path=self.cluster_path,
log_level=log_level,
)
|
3,406 | extract |
# Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
import logging
from collections import namedtuple
from itertools import groupby
from typing import (
Any, Dict, Iterator, Union,
)
from pyhocon import ConfigFactory, ConfigTree
from databuilder import Scoped
from databuilder.extractor.base_extractor import Extractor
from databuilder.extractor.sql_alchemy_extractor import SQLAlchemyExtractor
from databuilder.models.table_metadata import ColumnMetadata, TableMetadata
TableKey = namedtuple('TableKey', ['schema', 'table_name'])
LOGGER = logging.getLogger(__name__)
class Db2MetadataExtractor(Extractor):
"""
Extracts Db2 table and column metadata from underlying meta store database using SQLAlchemyExtractor
"""
# SELECT statement from Db2 SYSIBM to extract table and column metadata
SQL_STATEMENT = """
SELECT
{cluster_source} as cluster, c.TABSCHEMA as schema, c.TABNAME as name, t.REMARKS as description,
c.COLNAME as col_name,
CASE WHEN c.TYPENAME='VARCHAR' OR c.TYPENAME='CHARACTER' THEN
TRIM (TRAILING FROM c.TYPENAME) concat '(' concat c.LENGTH concat ')'
WHEN c.TYPENAME='DECIMAL' THEN
TRIM (TRAILING FROM c.TYPENAME) concat '(' concat c.LENGTH concat ',' concat c.SCALE concat ')'
ELSE TRIM (TRAILING FROM c.TYPENAME) END as col_type,
c.REMARKS as col_description, c.COLNO as col_sort_order
FROM SYSCAT.COLUMNS c
INNER JOIN
SYSCAT.TABLES as t on c.TABSCHEMA=t.TABSCHEMA and c.TABNAME=t.TABNAME
{where_clause_suffix}
ORDER by cluster, schema, name, col_sort_order ;
"""
# CONFIG KEYS
WHERE_CLAUSE_SUFFIX_KEY = 'where_clause_suffix'
CLUSTER_KEY = 'cluster_key'
DATABASE_KEY = 'database_key'
# Default values
DEFAULT_CLUSTER_NAME = 'master'
DEFAULT_CONFIG = ConfigFactory.from_dict(
{WHERE_CLAUSE_SUFFIX_KEY: ' ', CLUSTER_KEY: DEFAULT_CLUSTER_NAME}
)
def init(self, conf: ConfigTree) -> None:
conf = conf.with_fallback(Db2MetadataExtractor.DEFAULT_CONFIG)
self._cluster = conf.get_string(Db2MetadataExtractor.CLUSTER_KEY)
cluster_source = f"'{self._cluster}'"
self._database = conf.get_string(Db2MetadataExtractor.DATABASE_KEY, default='db2')
self.sql_stmt = Db2MetadataExtractor.SQL_STATEMENT.format(
where_clause_suffix=conf.get_string(Db2MetadataExtractor.WHERE_CLAUSE_SUFFIX_KEY),
cluster_source=cluster_source
)
self._alchemy_extractor = SQLAlchemyExtractor()
sql_alch_conf = Scoped.get_scoped_conf(conf, self._alchemy_extractor.get_scope())\
.with_fallback(ConfigFactory.from_dict({SQLAlchemyExtractor.EXTRACT_SQL: self.sql_stmt}))
self.sql_stmt = sql_alch_conf.get_string(SQLAlchemyExtractor.EXTRACT_SQL)
LOGGER.info('SQL for Db2 metadata: %s', self.sql_stmt)
self._alchemy_extractor.init(sql_alch_conf)
self._extract_iter: Union[None, Iterator] = None
def METHOD_NAME(self) -> Union[TableMetadata, None]:
if not self._extract_iter:
self._extract_iter = self._get_extract_iter()
try:
return next(self._extract_iter)
except StopIteration:
return None
def get_scope(self) -> str:
return 'extractor.db2_metadata'
def _get_extract_iter(self) -> Iterator[TableMetadata]:
"""
Using itertools.groupby over the raw level iterator, group rows by table and yield TableMetadata
:return:
"""
for key, group in groupby(self._get_raw_extract_iter(), self._get_table_key):
columns = []
for row in group:
last_row = row
columns.append(ColumnMetadata(row['col_name'], row['col_description'],
row['col_type'], row['col_sort_order']))
yield TableMetadata(self._database, last_row['cluster'],
last_row['schema'],
last_row['name'],
last_row['description'],
columns)
def _get_raw_extract_iter(self) -> Iterator[Dict[str, Any]]:
"""
Provides iterator of result row from SQLAlchemy extractor
:return:
"""
row = self._alchemy_extractor.METHOD_NAME()
while row:
yield row
row = self._alchemy_extractor.METHOD_NAME()
def _get_table_key(self, row: Dict[str, Any]) -> Union[TableKey, None]:
"""
Table key consists of schema and table name
:param row:
:return:
"""
if row:
return TableKey(schema=row['schema'], table_name=row['name'])
return None
|
3,407 | reparent image |
import toga
from toga.constants import CENTER, COLUMN, HIDDEN, ROW, VISIBLE
from toga.style import Pack
class ExampleLayoutApp(toga.App):
def startup(self):
self.button_hide = toga.Button(
text="Hide label",
style=Pack(padding=10, width=120),
on_press=self.hide_label,
)
self.button_add = toga.Button(
text="Add image",
style=Pack(padding=10, width=120),
on_press=self.add_image,
)
self.button_remove = toga.Button(
text="Remove image",
style=Pack(padding=10, width=120),
on_press=self.remove_image,
enabled=False,
)
self.button_insert = toga.Button(
text="Insert image",
style=Pack(padding=10, width=120),
on_press=self.insert_image,
)
self.button_reparent = toga.Button(
text="Reparent image",
style=Pack(padding=10, width=120),
on_press=self.METHOD_NAME,
enabled=False,
)
self.button_add_to_scroll = toga.Button(
text="Add new label",
style=Pack(padding=10, width=120),
on_press=self.add_label,
)
self.content_box = toga.Box(
children=[], style=Pack(direction=COLUMN, padding=10, flex=1)
)
image = toga.Image("resources/tiberius.png")
self.image_view = toga.ImageView(
image, style=Pack(padding=10, width=60, height=60)
)
# this tests adding children during init, before we have an implementation
self.button_box = toga.Box(
children=[
self.button_hide,
self.button_add,
self.button_insert,
self.button_reparent,
self.button_remove,
self.button_add_to_scroll,
],
style=Pack(direction=COLUMN),
)
self.box = toga.Box(
children=[], style=Pack(direction=ROW, padding=10, alignment=CENTER, flex=1)
)
# this tests adding children when we already have an impl but no window or app
self.box.add(self.button_box)
self.box.add(self.content_box)
# add a couple of labels to get us started
self.labels = []
for i in range(3):
self.add_label()
self.main_window = toga.MainWindow()
self.main_window.content = self.box
self.main_window.show()
def hide_label(self, sender):
if self.labels[0].style.visibility == HIDDEN:
self.labels[0].style.visibility = VISIBLE
self.button_hide.text = "Hide label"
else:
self.labels[0].style.visibility = HIDDEN
self.button_hide.text = "Show label"
def add_image(self, sender):
self.content_box.add(self.image_view)
self.button_reparent.enabled = True
self.button_remove.enabled = True
self.button_add.enabled = False
self.button_insert.enabled = False
def insert_image(self, sender):
self.content_box.insert(1, self.image_view)
self.button_reparent.enabled = True
self.button_remove.enabled = True
self.button_add.enabled = False
self.button_insert.enabled = False
def remove_image(self, sender):
self.image_view.parent.remove(self.image_view)
self.button_reparent.enabled = False
self.button_remove.enabled = False
self.button_add.enabled = True
self.button_insert.enabled = True
def METHOD_NAME(self, sender):
if self.image_view.parent is self.button_box:
self.content_box.insert(0, self.image_view)
elif self.image_view.parent is self.content_box:
self.button_box.add(self.image_view)
def add_label(self, sender=None):
# this tests adding children when we already have an impl, window and app
new_label = toga.Label(
f"Label {len(self.content_box.children)}", style=Pack(padding=2, width=70)
)
self.content_box.add(new_label)
self.labels.append(new_label)
def main():
return ExampleLayoutApp("Layout", "org.beeware.widgets.layout")
if __name__ == "__main__":
app = main()
app.main_loop()
|
3,408 | get columns |
# Copyright (c) 2013, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
import copy
import frappe
from frappe import _
from frappe.query_builder.functions import IfNull
from frappe.utils import date_diff, flt, getdate
def execute(filters=None):
if not filters:
return [], []
validate_filters(filters)
columns = METHOD_NAME(filters)
data = get_data(filters)
if not data:
return [], [], None, []
data, chart_data = prepare_data(data, filters)
return columns, data, None, chart_data
def validate_filters(filters):
from_date, to_date = filters.get("from_date"), filters.get("to_date")
if not from_date or not to_date:
frappe.throw(_("From and To Dates are required."))
elif date_diff(to_date, from_date) < 0:
frappe.throw(_("To Date cannot be before From Date."))
def get_data(filters):
po = frappe.qb.DocType("Purchase Order")
po_item = frappe.qb.DocType("Purchase Order Item")
pi_item = frappe.qb.DocType("Purchase Invoice Item")
query = (
frappe.qb.from_(po)
.from_(po_item)
.left_join(pi_item)
.on(pi_item.po_detail == po_item.name)
.select(
po.transaction_date.as_("date"),
po_item.schedule_date.as_("required_date"),
po_item.project,
po.name.as_("purchase_order"),
po.status,
po.supplier,
po_item.item_code,
po_item.qty,
po_item.received_qty,
(po_item.qty - po_item.received_qty).as_("pending_qty"),
IfNull(pi_item.qty, 0).as_("billed_qty"),
po_item.base_amount.as_("amount"),
(po_item.received_qty * po_item.base_rate).as_("received_qty_amount"),
(po_item.billed_amt * IfNull(po.conversion_rate, 1)).as_("billed_amount"),
(po_item.base_amount - (po_item.billed_amt * IfNull(po.conversion_rate, 1))).as_(
"pending_amount"
),
po.set_warehouse.as_("warehouse"),
po.company,
po_item.name,
)
.where(
(po_item.parent == po.name) & (po.status.notin(("Stopped", "Closed"))) & (po.docstatus == 1)
)
.groupby(po_item.name)
.orderby(po.transaction_date)
)
for field in ("company", "name"):
if filters.get(field):
query = query.where(po[field] == filters.get(field))
if filters.get("from_date") and filters.get("to_date"):
query = query.where(
po.transaction_date.between(filters.get("from_date"), filters.get("to_date"))
)
if filters.get("status"):
query = query.where(po.status.isin(filters.get("status")))
if filters.get("project"):
query = query.where(po_item.project == filters.get("project"))
data = query.run(as_dict=True)
return data
def prepare_data(data, filters):
completed, pending = 0, 0
pending_field = "pending_amount"
completed_field = "billed_amount"
if filters.get("group_by_po"):
purchase_order_map = {}
for row in data:
# sum data for chart
completed += row[completed_field]
pending += row[pending_field]
# prepare data for report view
row["qty_to_bill"] = flt(row["qty"]) - flt(row["billed_qty"])
if filters.get("group_by_po"):
po_name = row["purchase_order"]
if not po_name in purchase_order_map:
# create an entry
row_copy = copy.deepcopy(row)
purchase_order_map[po_name] = row_copy
else:
# update existing entry
po_row = purchase_order_map[po_name]
po_row["required_date"] = min(getdate(po_row["required_date"]), getdate(row["required_date"]))
# sum numeric columns
fields = [
"qty",
"received_qty",
"pending_qty",
"billed_qty",
"qty_to_bill",
"amount",
"received_qty_amount",
"billed_amount",
"pending_amount",
]
for field in fields:
po_row[field] = flt(row[field]) + flt(po_row[field])
chart_data = prepare_chart_data(pending, completed)
if filters.get("group_by_po"):
data = []
for po in purchase_order_map:
data.append(purchase_order_map[po])
return data, chart_data
return data, chart_data
def prepare_chart_data(pending, completed):
labels = ["Amount to Bill", "Billed Amount"]
return {
"data": {"labels": labels, "datasets": [{"values": [pending, completed]}]},
"type": "donut",
"height": 300,
}
def METHOD_NAME(filters):
columns = [
{"label": _("Date"), "fieldname": "date", "fieldtype": "Date", "width": 90},
{"label": _("Required By"), "fieldname": "required_date", "fieldtype": "Date", "width": 90},
{
"label": _("Purchase Order"),
"fieldname": "purchase_order",
"fieldtype": "Link",
"options": "Purchase Order",
"width": 160,
},
{"label": _("Status"), "fieldname": "status", "fieldtype": "Data", "width": 130},
{
"label": _("Supplier"),
"fieldname": "supplier",
"fieldtype": "Link",
"options": "Supplier",
"width": 130,
},
{
"label": _("Project"),
"fieldname": "project",
"fieldtype": "Link",
"options": "Project",
"width": 130,
},
]
if not filters.get("group_by_po"):
columns.append(
{
"label": _("Item Code"),
"fieldname": "item_code",
"fieldtype": "Link",
"options": "Item",
"width": 100,
}
)
columns.extend(
[
{
"label": _("Qty"),
"fieldname": "qty",
"fieldtype": "Float",
"width": 120,
"convertible": "qty",
},
{
"label": _("Received Qty"),
"fieldname": "received_qty",
"fieldtype": "Float",
"width": 120,
"convertible": "qty",
},
{
"label": _("Pending Qty"),
"fieldname": "pending_qty",
"fieldtype": "Float",
"width": 80,
"convertible": "qty",
},
{
"label": _("Billed Qty"),
"fieldname": "billed_qty",
"fieldtype": "Float",
"width": 80,
"convertible": "qty",
},
{
"label": _("Qty to Bill"),
"fieldname": "qty_to_bill",
"fieldtype": "Float",
"width": 80,
"convertible": "qty",
},
{
"label": _("Amount"),
"fieldname": "amount",
"fieldtype": "Currency",
"width": 110,
"options": "Company:company:default_currency",
"convertible": "rate",
},
{
"label": _("Billed Amount"),
"fieldname": "billed_amount",
"fieldtype": "Currency",
"width": 110,
"options": "Company:company:default_currency",
"convertible": "rate",
},
{
"label": _("Pending Amount"),
"fieldname": "pending_amount",
"fieldtype": "Currency",
"width": 130,
"options": "Company:company:default_currency",
"convertible": "rate",
},
{
"label": _("Received Qty Amount"),
"fieldname": "received_qty_amount",
"fieldtype": "Currency",
"width": 130,
"options": "Company:company:default_currency",
"convertible": "rate",
},
{
"label": _("Warehouse"),
"fieldname": "warehouse",
"fieldtype": "Link",
"options": "Warehouse",
"width": 100,
},
{
"label": _("Company"),
"fieldname": "company",
"fieldtype": "Link",
"options": "Company",
"width": 100,
},
]
)
return columns
|
3,409 | name |
# This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2007-2020 Dirk Beyer <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0
import re
import benchexec.result as result
import benchexec.tools.template
from benchexec.tools.sv_benchmarks_util import get_data_model_from_task, ILP32, LP64
class Tool(benchexec.tools.template.BaseTool2):
"""
Tool info for witness2test
(https://github.com/diffblue/cprover-sv-comp/pull/14).
"""
def executable(self, tool_locator):
"""
Find the path to the executable file that will get executed.
@return a string pointing to an executable file
"""
return tool_locator.find_executable("test-gen.sh")
def version(self, executable):
return self._version_from_tool(executable)
def METHOD_NAME(self):
"""
Return the name of the tool, formatted for humans.
@return a non-empty string
"""
return "CProver witness2test"
def cmdline(self, executable, options, task, rlimits):
"""
Compose the command line to execute from the name of the executable,
the user-specified options, and the inputfile to analyze.
All paths passed to this method (executable, tasks, and propertyfile)
are either absolute or have been made relative to the designated working directory.
@param executable: the path to the executable of the tool (typically the result of executable())
@param options: a list of options, in the same order as given in the XML-file.
@param tasks: a list of tasks, that should be analysed with the tool in one run.
A typical run has only one input file, but there can be more than one.
@param propertyfile: contains a specification for the verifier (optional, not always present).
@param rlimits: This dictionary contains resource-limits for a run,
for example: time-limit, soft-time-limit, hard-time-limit, memory-limit, cpu-core-limit.
All entries in rlimits are optional, so check for existence before usage!
@return a list of strings that represent the command line to execute
"""
if task.property_file:
options = options + ["--propertyfile", task.property_file]
data_model_param = get_data_model_from_task(task, {ILP32: "-m32", LP64: "-m64"})
if data_model_param and data_model_param not in options:
options += [data_model_param]
return [executable] + options + list(task.input_files_or_identifier)
def determine_result(self, run):
"""
Parse the output of the tool and extract the verification result.
This method always needs to be overridden.
If the tool gave a result, this method needs to return one of the
benchexec.result.RESULT_* strings.
Otherwise an arbitrary string can be returned that will be shown to the user
and should give some indication of the failure reason
(e.g., "CRASH", "OUT_OF_MEMORY", etc.).
@param run.exit_code.value: the exit code of the program, None if the program was killed
@param run.exit_code.signal: the signal that killed the program, None if program exited itself
@param output: a list of strings of output lines of the tool (both stdout and stderr)
@param isTimeout: whether the result is a timeout
(useful to distinguish between program killed because of error and timeout)
@return a non-empty string, usually one of the benchexec.result.RESULT_* constants
"""
output = run.output
status = result.RESULT_ERROR
if run.exit_code.value == 0:
if output:
result_str = output[-1].strip()
if result_str == "TRUE":
status = result.RESULT_TRUE_PROP
elif "FALSE" in result_str:
if result_str == "FALSE(valid-memtrack)":
status = result.RESULT_FALSE_MEMTRACK
elif result_str == "FALSE(valid-deref)":
status = result.RESULT_FALSE_DEREF
elif result_str == "FALSE(valid-free)":
status = result.RESULT_FALSE_FREE
elif result_str == "FALSE(no-overflow)":
status = result.RESULT_FALSE_OVERFLOW
else:
status = result.RESULT_FALSE_REACH
elif "UNKNOWN" in output:
status = result.RESULT_UNKNOWN
elif (
output
and re.match(r"^INVALID WITNESS FILE", output[-1].strip()) is not None
):
status += " (invalid witness file)"
return status
|
3,410 | es format datetime |
import json
import time
from datetime import datetime, timedelta, timezone
from django.conf import settings
from django.core.management.base import CommandError
from django.db.backends.base.creation import TEST_DATABASE_PREFIX
from corehq.apps.es.exceptions import TaskError, TaskMissing
from corehq.util.es.elasticsearch import SerializationError
from corehq.util.json import CommCareJSONEncoder
TASK_POLL_DELAY = 10 # number of seconds to sleep between polling for task info
class ElasticJSONSerializer(object):
"""Modified version of ``elasticsearch.serializer.JSONSerializer``
that uses the CommCareJSONEncoder for serializing to JSON.
"""
mimetype = 'application/json'
def loads(self, s):
try:
return json.loads(s)
except (ValueError, TypeError) as e:
raise SerializationError(s, e)
def dumps(self, data):
# don't serialize strings
if isinstance(data, str):
return data
try:
return json.dumps(data, cls=CommCareJSONEncoder)
except (ValueError, TypeError) as e:
raise SerializationError(data, e)
def values_list(hits, *fields, **kwargs):
"""modeled after django's QuerySet.values_list"""
flat = kwargs.pop('flat', False)
if kwargs:
raise TypeError('Unexpected keyword arguments to values_list: %s'
% (list(kwargs),))
if flat and len(fields) > 1:
raise TypeError("'flat' is not valid when values_list is called with more than one field.")
if not fields:
raise TypeError('must be called with at least one field')
if flat:
field, = fields
return [hit.get(field) for hit in hits]
else:
return [tuple(hit.get(field) for field in fields) for hit in hits]
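# Illustrative usage with made-up hit dicts, mirroring Django's values_list:
# >>> hits = [{'_id': 1, 'name': 'a'}, {'_id': 2, 'name': 'b'}]
# >>> values_list(hits, 'name', flat=True)
# ['a', 'b']
# >>> values_list(hits, '_id', 'name')
# [(1, 'a'), (2, 'b')]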
def flatten_field_dict(results, fields_property='fields'):
"""
In ElasticSearch 1.3, the return format was changed such that field
values are always returned as lists, where as previously they would
be returned as scalars if the field had a single value, and returned
as lists if the field had multiple values.
This method restores the behavior of 0.90.
https://www.elastic.co/guide/en/elasticsearch/reference/1.3/_return_values.html
"""
field_dict = results.get(fields_property, {})
for key, val in field_dict.items():
new_val = val
if type(val) == list and len(val) == 1:
new_val = val[0]
field_dict[key] = new_val
return field_dict
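# Illustrative example of the single-value unwrapping described above (made-up hit):
# >>> flatten_field_dict({'fields': {'owner': ['alice'], 'tags': ['a', 'b']}})
# {'owner': 'alice', 'tags': ['a', 'b']}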
def METHOD_NAME(val):
"""
Takes a date or datetime object and converts it to a format ES can read
(see DATE_FORMATS_ARR). Strings are returned unmodified.
"""
if isinstance(val, str):
return val
elif isinstance(val, datetime) and val.microsecond and val.tzinfo:
# We don't support microsec precision with timezones
return val.astimezone(timezone.utc).replace(tzinfo=None).isoformat()
else:
return val.isoformat()
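# Illustrative examples (the masked name is presumably an "es format datetime"
# helper, per the row label; values are made up):
# >>> METHOD_NAME(datetime(2020, 1, 1, 12, 30, 45, 123456, tzinfo=timezone.utc))
# '2020-01-01T12:30:45.123456'
# >>> METHOD_NAME(datetime(2020, 1, 1, 12, 30, 45))
# '2020-01-01T12:30:45'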
def check_task_progress(task_id, just_once=False):
"""
A util to be used in management commands to check the state of a task in ES.
If just_once is set to False it will continuously poll for task stats until the task is completed.
"""
from corehq.apps.es.client import manager
node_id = task_id.split(':')[0]
node_name = manager.get_node_info(node_id, metric="name")
print(f"Looking for task with ID '{task_id}' running on '{node_name}'")
progress_data = []
while True:
try:
task_details = manager.get_task(task_id=task_id)
except TaskMissing:
if not just_once:
return # task completed
raise CommandError(f"Task with id {task_id} not found")
except TaskError as err:
raise CommandError(f"Fetching task failed: {err}")
status = task_details["status"]
total = status["total"]
if total: # total can be 0 initially
created, updated, deleted = status["created"], status["updated"], status["deleted"]
progress = created + updated + deleted
progress_percent = progress / total * 100
running_time_nanos = task_details["running_time_in_nanos"]
run_time = timedelta(microseconds=running_time_nanos / 1000)
remaining_time_absolute = 'unknown'
remaining_time_relative = ''
if progress:
progress_data.append({
"progress": progress,
"time": time.monotonic() * 1000000000
})
remaining = total - progress
# estimate based on progress since beginning of task
remaining_nanos_absolute = running_time_nanos / progress * remaining
remaining_time_absolute = timedelta(microseconds=remaining_nanos_absolute / 1000)
if len(progress_data) > 1:
# estimate based on last 12 loops of data
progress_nanos = progress_data[-1]["time"] - progress_data[0]["time"]
progress_diff = progress_data[-1]["progress"] - progress_data[0]["progress"]
progress_data = progress_data[-12:] # truncate progress data
if progress_diff:
remaining_nanos = progress_nanos / progress_diff * remaining
remaining_time_relative = timedelta(microseconds=remaining_nanos / 1000)
else:
# avoid ZeroDivisionError
remaining_time_relative = ''
print(f"Progress {progress_percent:.2f}% ({progress} / {total}). "
f"Elapsed time: {_format_timedelta(run_time)}. "
f"Estimated remaining time: "
f"(average since start = {_format_timedelta(remaining_time_absolute)}) "
f"(recent average = {_format_timedelta(remaining_time_relative)})")
if just_once:
return
time.sleep(TASK_POLL_DELAY)
def _format_timedelta(td):
out = str(td)
return out.split(".")[0]
def sorted_mapping(mapping):
"""Return a recursively sorted Elastic mapping."""
if isinstance(mapping, dict):
mapping_ = {}
for key, value in sorted(mapping.items(), key=mapping_sort_key):
mapping_[key] = sorted_mapping(value)
return mapping_
if isinstance(mapping, (tuple, list)):
return [sorted_mapping(item) for item in mapping]
return mapping
def mapping_sort_key(item):
key, value = item
return 1 if key == "properties" else 0, key, value
def index_runtime_name(name):
# transform the name if testing
return f"{TEST_DATABASE_PREFIX}{name}" if settings.UNIT_TESTING else name
|
3,411 | extract candidates |
from typing import List
from fastapi import HTTPException
from fastapi.encoders import jsonable_encoder
from pydantic import BaseModel, StrictFloat, StrictStr, ValidationError, validator
from mlflow.gateway.config import MlflowModelServingConfig, RouteConfig
from mlflow.gateway.constants import MLFLOW_SERVING_RESPONSE_KEY
from mlflow.gateway.providers.base import BaseProvider
from mlflow.gateway.providers.utils import send_request
from mlflow.gateway.schemas import chat, completions, embeddings
class ServingTextResponse(BaseModel):
predictions: List[StrictStr]
@validator("predictions", pre=True)
def METHOD_NAME(cls, predictions):
if isinstance(predictions, list) and not predictions:
raise ValueError("The input list is empty")
if isinstance(predictions, dict):
if "candidates" not in predictions and len(predictions) > 1:
raise ValueError(
"The dict format is invalid for this route type. Ensure the served model "
"returns a dict key containing 'candidates'"
)
if len(predictions) == 1:
predictions = next(iter(predictions.values()))
else:
predictions = predictions.get("candidates", predictions)
if not predictions:
raise ValueError("The input list is empty")
return predictions
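# Illustrative behaviour of the validator above (hypothetical served-model payloads):
#   ["a", "b"]                          -> ["a", "b"]
#   {"candidates": ["a", "b"]}          -> ["a", "b"]
#   {"generated_text": ["a"]}           -> ["a"]   (single-key dict is unwrapped)
#   {"x": [...], "y": [...]}            -> ValueError (ambiguous multi-key dict)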
class EmbeddingsResponse(BaseModel):
predictions: List[List[StrictFloat]]
@validator("predictions", pre=True)
def validate_predictions(cls, predictions):
if isinstance(predictions, list) and not predictions:
raise ValueError("The input list is empty")
if isinstance(predictions, list) and all(
isinstance(item, list) and not item for item in predictions
):
raise ValueError("One or more lists in the returned prediction response are empty")
elif all(isinstance(item, float) for item in predictions):
return [predictions]
else:
return predictions
class MlflowModelServingProvider(BaseProvider):
def __init__(self, config: RouteConfig) -> None:
super().__init__(config)
if config.model.config is None or not isinstance(
config.model.config, MlflowModelServingConfig
):
raise TypeError(f"Invalid config type {config.model.config}")
self.mlflow_config: MlflowModelServingConfig = config.model.config
self.headers = {"Content-Type": "application/json"}
@staticmethod
def _extract_mlflow_response_key(response):
if MLFLOW_SERVING_RESPONSE_KEY not in response:
raise HTTPException(
status_code=502,
detail=f"The response is missing the required key: {MLFLOW_SERVING_RESPONSE_KEY}.",
)
return response[MLFLOW_SERVING_RESPONSE_KEY]
@staticmethod
def _process_payload(payload, key):
payload = jsonable_encoder(payload, exclude_none=True)
input_data = payload.pop(key, None)
request_payload = {"inputs": input_data if isinstance(input_data, list) else [input_data]}
if payload:
request_payload["params"] = payload
return request_payload
@staticmethod
def _process_completions_response_for_mlflow_serving(response):
try:
validated_response = ServingTextResponse(**response)
inference_data = validated_response.predictions
except ValidationError as e:
raise HTTPException(status_code=502, detail=str(e))
return [{"text": entry, "metadata": {}} for entry in inference_data]
async def completions(self, payload: completions.RequestPayload) -> completions.ResponsePayload:
# Example request to MLflow REST API server for completions:
# {
# "inputs": ["hi", "hello", "bye"],
# "params": {
# "temperature": 0.5,
# "top_k": 3,
# }
# }
resp = await send_request(
headers=self.headers,
base_url=self.mlflow_config.model_server_url,
path="invocations",
payload=self._process_payload(payload, "prompt"),
)
# Example response:
# {"predictions": ["hello", "hi", "goodbye"]}
return completions.ResponsePayload(
**{
"candidates": self._process_completions_response_for_mlflow_serving(resp),
"metadata": {
"model": self.config.model.name,
"route_type": self.config.route_type,
},
}
)
def _process_chat_response_for_mlflow_serving(self, response):
try:
validated_response = ServingTextResponse(**response)
inference_data = validated_response.predictions
except ValidationError as e:
raise HTTPException(status_code=502, detail=str(e))
return [
{"message": {"role": "assistant", "content": entry}, "metadata": {}}
for entry in inference_data
]
async def chat(self, payload: chat.RequestPayload) -> chat.ResponsePayload:
# Example request to MLflow REST API for chat:
# {
# "inputs": ["question"],
# "params": ["temperature": 0.2],
# }
payload = self._process_payload(payload, "messages")
query_count = len(payload["inputs"])
if query_count > 1:
raise HTTPException(
status_code=422,
detail="MLflow chat models are only capable of processing a single query at a "
f"time. The request submitted consists of {query_count} queries.",
)
payload["inputs"] = [payload["inputs"][0]["content"]]
resp = await send_request(
headers=self.headers,
base_url=self.mlflow_config.model_server_url,
path="invocations",
payload=payload,
)
# Example response:
# {"predictions": ["answer"]}
return chat.ResponsePayload(
**{
"candidates": self._process_chat_response_for_mlflow_serving(resp),
"metadata": {
"model": self.config.model.name,
"route_type": self.config.route_type,
},
}
)
def _process_embeddings_response_for_mlflow_serving(self, response):
try:
validated_response = EmbeddingsResponse(**response)
inference_data = validated_response.predictions
except ValidationError as e:
raise HTTPException(status_code=502, detail=str(e))
return inference_data
async def embeddings(self, payload: embeddings.RequestPayload) -> embeddings.ResponsePayload:
# Example request to MLflow REST API server for embeddings:
# {
# "inputs": ["a sentence", "another sentence"],
# "params": {
# "output_value": "token_embeddings",
# }
# }
resp = await send_request(
headers=self.headers,
base_url=self.mlflow_config.model_server_url,
path="invocations",
payload=self._process_payload(payload, "text"),
)
# Example response:
# {"predictions": [[0.100, -0.234, 0.002, ...], [0.222, -0.111, 0.134, ...]]}
return embeddings.ResponsePayload(
**{
"embeddings": self._process_embeddings_response_for_mlflow_serving(resp),
"metadata": {
"model": self.config.model.name,
"route_type": self.config.route_type,
},
}
)
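# Hedged usage sketch (illustrative only; the payload below is hypothetical):
# _process_payload moves the route-specific field into "inputs" and keeps the
# remaining fields as "params", e.g.
#
#   MlflowModelServingProvider._process_payload(
#       {"prompt": "hi", "temperature": 0.5}, "prompt")
#   # -> {"inputs": ["hi"], "params": {"temperature": 0.5}}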
|
3,412 |
pytest generate tests
|
# Copyright (c) 2011 Florian Mounier
# Copyright (c) 2011 Anshuman Bhaduri
# Copyright (c) 2014 Sean Vig
# Copyright (c) 2014-2015 Tycho Andersen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import logging.handlers
import multiprocessing
import pytest
from libqtile.backend.base import drawer
from test.helpers import BareConfig, TestManager
def pytest_addoption(parser):
parser.addoption("--debuglog", action="store_true", default=False, help="enable debug output")
parser.addoption(
"--backend",
action="append",
choices=("x11", "wayland"),
help="Test a specific backend. Can be passed more than once.",
)
def pytest_cmdline_main(config):
if not config.option.backend:
config.option.backend = ["x11"]
ignore = config.option.ignore or []
if "wayland" not in config.option.backend:
ignore.append("test/backend/wayland")
if "x11" not in config.option.backend:
ignore.append("test/backend/x11")
config.option.ignore = ignore
def METHOD_NAME(metafunc):
if "backend" in metafunc.fixturenames:
backends = metafunc.config.option.backend
metafunc.parametrize("backend_name", backends)
@pytest.fixture(scope="session", params=[1])
def outputs(request):
return request.param
dualmonitor = pytest.mark.parametrize("outputs", [2], indirect=True)
multimonitor = pytest.mark.parametrize("outputs", [1, 2], indirect=True)
@pytest.fixture(scope="session")
def xephyr(request, outputs):
if "x11" not in request.config.option.backend:
yield
return
from test.backend.x11.conftest import x11_environment
kwargs = getattr(request, "param", {})
with x11_environment(outputs, **kwargs) as x:
yield x
@pytest.fixture(scope="session")
def wayland_session(request, outputs):
if "wayland" not in request.config.option.backend:
yield
return
from test.backend.wayland.conftest import wayland_environment
with wayland_environment(outputs) as w:
yield w
@pytest.fixture(scope="function")
def backend(request, backend_name, xephyr, wayland_session):
if backend_name == "x11":
from test.backend.x11.conftest import XBackend
yield XBackend({"DISPLAY": xephyr.display}, args=[xephyr.display])
elif backend_name == "wayland":
from test.backend.wayland.conftest import WaylandBackend
yield WaylandBackend(wayland_session)
@pytest.fixture(scope="function")
def log_queue():
"""Creates a new Queue for logging messages run in a backend process."""
yield multiprocessing.Queue(-1)
@pytest.fixture(scope="function")
def logger(caplog, log_queue):
"""
Connects logging messages in the backend to a logger in the main thread
and returns a caplog fixture which can access those messages.
"""
root = logging.getLogger()
listener = logging.handlers.QueueListener(log_queue, *root.handlers)
listener.start()
yield caplog
listener.stop()
@pytest.fixture(scope="function")
def manager_nospawn(request, backend, log_queue):
with TestManager(backend, request.config.getoption("--debuglog")) as manager:
manager.log_queue = log_queue
yield manager
@pytest.fixture(scope="function")
def manager(request, manager_nospawn):
config = getattr(request, "param", BareConfig)
manager_nospawn.start(config)
yield manager_nospawn
@pytest.fixture(scope="function")
def fake_window():
"""
A fake window that can provide a fake drawer to test widgets.
"""
class FakeWindow:
class _NestedWindow:
wid = 10
window = _NestedWindow()
def create_drawer(self, width, height):
return drawer.Drawer(None, self, width, height)
return FakeWindow()
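# Hedged usage sketch (hypothetical test module, not part of this conftest):
# a test can request the parametrized "manager" fixture and pick the backend
# via the --backend command line option added above, e.g.
#
#   @pytest.mark.parametrize("manager", [BareConfig], indirect=True)
#   def test_manager_starts(manager):
#       assert manager is not None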
|
3,413 |
get api operation policy
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetApiOperationPolicyResult',
'AwaitableGetApiOperationPolicyResult',
'get_api_operation_policy',
'get_api_operation_policy_output',
]
@pulumi.output_type
class GetApiOperationPolicyResult:
"""
Policy Contract details.
"""
def __init__(__self__, format=None, id=None, name=None, type=None, value=None):
if format and not isinstance(format, str):
raise TypeError("Expected argument 'format' to be a str")
pulumi.set(__self__, "format", format)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if value and not isinstance(value, str):
raise TypeError("Expected argument 'value' to be a str")
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def format(self) -> Optional[str]:
"""
Format of the policyContent.
"""
return pulumi.get(self, "format")
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def value(self) -> str:
"""
Contents of the Policy as defined by the format.
"""
return pulumi.get(self, "value")
class AwaitableGetApiOperationPolicyResult(GetApiOperationPolicyResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetApiOperationPolicyResult(
format=self.format,
id=self.id,
name=self.name,
type=self.type,
value=self.value)
def METHOD_NAME(api_id: Optional[str] = None,
format: Optional[str] = None,
operation_id: Optional[str] = None,
policy_id: Optional[str] = None,
resource_group_name: Optional[str] = None,
service_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetApiOperationPolicyResult:
"""
Get the policy configuration at the API Operation level.
:param str api_id: API revision identifier. Must be unique in the current API Management service instance. Non-current revision has ;rev=n as a suffix where n is the revision number.
:param str format: Policy Export Format.
:param str operation_id: Operation identifier within an API. Must be unique in the current API Management service instance.
:param str policy_id: The identifier of the Policy.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str service_name: The name of the API Management service.
"""
__args__ = dict()
__args__['apiId'] = api_id
__args__['format'] = format
__args__['operationId'] = operation_id
__args__['policyId'] = policy_id
__args__['resourceGroupName'] = resource_group_name
__args__['serviceName'] = service_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:apimanagement/v20230301preview:getApiOperationPolicy', __args__, opts=opts, typ=GetApiOperationPolicyResult).value
return AwaitableGetApiOperationPolicyResult(
format=pulumi.get(__ret__, 'format'),
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
type=pulumi.get(__ret__, 'type'),
value=pulumi.get(__ret__, 'value'))
@_utilities.lift_output_func(METHOD_NAME)
def get_api_operation_policy_output(api_id: Optional[pulumi.Input[str]] = None,
format: Optional[pulumi.Input[Optional[str]]] = None,
operation_id: Optional[pulumi.Input[str]] = None,
policy_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
service_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetApiOperationPolicyResult]:
"""
Get the policy configuration at the API Operation level.
:param str api_id: API revision identifier. Must be unique in the current API Management service instance. Non-current revision has ;rev=n as a suffix where n is the revision number.
:param str format: Policy Export Format.
:param str operation_id: Operation identifier within an API. Must be unique in the current API Management service instance.
:param str policy_id: The identifier of the Policy.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str service_name: The name of the API Management service.
"""
...
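# Hedged usage sketch (resource names below are placeholders, not real resources):
#
#   policy = get_api_operation_policy_output(
#       api_id="echo-api", operation_id="get", policy_id="policy",
#       resource_group_name="rg1", service_name="apim-service-1")
#   pulumi.export("policyFormat", policy.format)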
|
3,414 |
load selective
|
""" Module to load Family into project """
#pylint: disable=import-error,invalid-name,broad-except,superfluous-parens
import os
import re
from pyrevit.framework import clr
from pyrevit import forms, revit, DB, script
logger = script.get_logger()
class FamilyLoader:
"""
Enables loading a family from an absolute path.
Attributes
----------
path : str
Absolute path to family .rfa file
name : str
File name
is_loaded : bool
Checks if family name already exists in project
Methods
-------
get_symbols()
Loads family in a fake transaction to return all symbols
load_selective()
Loads the family and selected symbols
load_all()
Loads family and all its symbols
Credit
------
    Based on Ehsan Iran-Nejad's 'Load More Types'
"""
def __init__(self, path):
"""
Parameters
----------
path : str
Absolute path to family .rfa file
"""
self.path = path
self.name = os.path.basename(path).replace(".rfa", "")
@property
def is_loaded(self):
"""
Checks if family name already exists in project
Returns
-------
bool
Flag indicating if family is already loaded
"""
collector = DB.FilteredElementCollector(revit.doc).OfClass(DB.Family)
condition = (x for x in collector if x.Name == self.name)
return next(condition, None) is not None
def get_symbols(self):
"""
Loads family in a fake transaction to return all symbols.
Returns
-------
set()
Set of family symbols
Remark
------
Uses SmartSortableFamilySymbol for effective sorting
"""
logger.debug('Fake loading family: {}'.format(self.name))
symbol_set = set()
with revit.ErrorSwallower():
# DryTransaction will rollback all the changes
with revit.DryTransaction('Fake load'):
ret_ref = clr.Reference[DB.Family]()
revit.doc.LoadFamily(self.path, ret_ref)
loaded_fam = ret_ref.Value
# Get the symbols
for symbol_id in loaded_fam.GetFamilySymbolIds():
symbol = revit.doc.GetElement(symbol_id)
symbol_name = revit.query.get_name(symbol)
sortable_sym = SmartSortableFamilySymbol(symbol_name)
logger.debug('Importable Symbol: {}'.format(sortable_sym))
symbol_set.add(sortable_sym)
return sorted(symbol_set)
def METHOD_NAME(self):
""" Loads the family and selected symbols. """
symbols = self.get_symbols()
        # Don't prompt if only 1 symbol is available
if len(symbols) == 1:
self.load_all()
return
# User input -> Select family symbols
selected_symbols = forms.SelectFromList.show(
symbols,
title=self.name,
button_name="Load type(s)",
multiselect=True)
if selected_symbols is None:
logger.debug('No family symbols selected.')
return
logger.debug('Selected symbols are: {}'.format(selected_symbols))
# Load family with selected symbols
with revit.Transaction('Loaded {}'.format(self.name)):
try:
for symbol in selected_symbols:
logger.debug('Loading symbol: {}'.format(symbol))
revit.doc.LoadFamilySymbol(self.path, symbol.symbol_name)
logger.debug('Successfully loaded all selected symbols')
except Exception as load_err:
logger.error(
'Error loading family symbol from {} | {}'
.format(self.path, load_err))
raise load_err
def load_all(self):
""" Loads family and all its symbols. """
with revit.Transaction('Loaded {}'.format(self.name)):
try:
revit.doc.LoadFamily(self.path)
logger.debug(
'Successfully loaded family: {}'.format(self.name))
except Exception as load_err:
logger.error(
'Error loading family symbol from {} | {}'
.format(self.path, load_err))
raise load_err
class SmartSortableFamilySymbol:
"""
Enables smart sorting of family symbols.
Attributes
----------
symbol_name : str
name of the family symbol
Example
-------
symbol_set = set()
    for family_symbol in family_symbols:
family_symbol_name = revit.query.get_name(family_symbol)
sortable_sym = SmartSortableFamilySymbol(family_symbol_name)
symbol_set.add(sortable_sym)
sorted_symbols = sorted(symbol_set)
Credit
------
    Copied from Ehsan Iran-Nejad's SmartSortableFamilyType
in 'Load More Types'.
"""
def __init__(self, symbol_name):
self.symbol_name = symbol_name
self.sort_alphabetically = False
self.number_list = [
int(x)
for x in re.findall(r'\d+', self.symbol_name)]
if not self.number_list:
self.sort_alphabetically = True
def __str__(self):
return self.symbol_name
def __repr__(self):
return '<SmartSortableFamilySymbol Name:{} Values:{} StringSort:{}>'\
.format(self.symbol_name,
self.number_list,
self.sort_alphabetically)
def __eq__(self, other):
return self.symbol_name == other.symbol_name
def __hash__(self):
return hash(self.symbol_name)
def __lt__(self, other):
if self.sort_alphabetically or other.sort_alphabetically:
return self.symbol_name < other.symbol_name
else:
return self.number_list < other.number_list
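# Hedged usage sketch (the .rfa path is hypothetical and requires a running
# Revit/pyRevit session):
#
#   loader = FamilyLoader(r"C:\Families\Generic Model.rfa")
#   if not loader.is_loaded:
#       loader.load_selective()  # prompts for symbols; masked as METHOD_NAME above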
|
3,415 |
update to one
|
from inkex import errormsg
from .elements import EmbroideryElement
from .i18n import _
from .metadata import InkStitchMetadata
from .svg import PIXELS_PER_MM
from .svg.tags import EMBROIDERABLE_TAGS, INKSTITCH_ATTRIBS
INKSTITCH_SVG_VERSION = 1
def update_inkstitch_document(svg, selection=None):
document = svg.getroot()
# get the inkstitch svg version from the document
search_string = "//*[local-name()='inkstitch_svg_version']//text()"
file_version = document.findone(search_string)
try:
file_version = int(file_version)
except (TypeError, ValueError):
file_version = 0
if file_version == INKSTITCH_SVG_VERSION:
return
if file_version > INKSTITCH_SVG_VERSION:
errormsg(_("This document was created with a newer Version of Ink/Stitch. "
"It is possible that not everything works as expected.\n\n"
"Please update your Ink/Stitch version: https://inkstitch.org/docs/install/"))
        # they may not want to be bothered with this info every time they call an inkstitch extension
        # let's downgrade the file version number
_update_inkstitch_svg_version(svg)
else:
# this document is either a new document or it is outdated
# if we cannot find any inkstitch attribute in the document, we assume that this is a new document which doesn't need to be updated
search_string = "//*[namespace-uri()='http://inkstitch.org/namespace' or " \
"@*[namespace-uri()='http://inkstitch.org/namespace'] or " \
"@*[starts-with(name(), 'embroider_')]]"
inkstitch_element = document.findone(search_string)
if inkstitch_element is None:
_update_inkstitch_svg_version(svg)
return
# update elements
if selection:
# this comes from the updater extension where we only update selected elements
for element in selection:
update_legacy_params(EmbroideryElement(element), file_version, INKSTITCH_SVG_VERSION)
else:
# this is the automatic update when a legacy inkstitch svg version was recognized
for element in document.iterdescendants():
if element.tag in EMBROIDERABLE_TAGS:
update_legacy_params(EmbroideryElement(element), file_version, INKSTITCH_SVG_VERSION)
_update_inkstitch_svg_version(svg)
def _update_inkstitch_svg_version(svg):
# set inkstitch svg version
metadata = InkStitchMetadata(svg.getroot())
metadata['inkstitch_svg_version'] = INKSTITCH_SVG_VERSION
def update_legacy_params(element, file_version, inkstitch_svg_version):
for version in range(file_version + 1, inkstitch_svg_version + 1):
_update_to(version, element)
def _update_to(version, element):
if version == 1:
METHOD_NAME(element)
def METHOD_NAME(element): # noqa: C901
# update legacy embroider_ attributes to namespaced attributes
legacy_attribs = False
for attrib in element.node.attrib:
if attrib.startswith('embroider_'):
_replace_legacy_embroider_param(element, attrib)
legacy_attribs = True
# convert legacy tie setting
legacy_tie = element.get_param('ties', None)
if legacy_tie == "True":
element.set_param('ties', 0)
elif legacy_tie == "False":
element.set_param('ties', 3)
# convert legacy fill_method
legacy_fill_method = element.get_int_param('fill_method', None)
if legacy_fill_method == 0:
element.set_param('fill_method', 'auto_fill')
elif legacy_fill_method == 1:
element.set_param('fill_method', 'contour_fill')
elif legacy_fill_method == 2:
element.set_param('fill_method', 'guided_fill')
elif legacy_fill_method == 3:
element.set_param('fill_method', 'legacy_fill')
underlay_angle = element.get_param('fill_underlay_angle', None)
if underlay_angle and ',' in underlay_angle:
element.set_param('fill_underlay_angle', underlay_angle.replace(',', ' '))
# legacy satin method
if element.get_boolean_param('e_stitch', False) is True:
element.remove_param('e_stitch')
element.set_param('satin_method', 'e_stitch')
if element.get_boolean_param('satin_column', False) or element.get_int_param('stroke_method', 0) == 1:
# reverse_rails defaults to Automatic, but we should never reverse an
# old satin automatically, only new ones
element.set_param('reverse_rails', 'none')
# default setting for fill_underlay has changed
if legacy_attribs and not element.get_param('fill_underlay', ""):
element.set_param('fill_underlay', False)
# default setting for running stitch length has changed (fills and strokes, not satins)
if not element.get_boolean_param('satin_column', False) and element.get_float_param('running_stitch_length_mm', None) is None:
element.set_param('running_stitch_length_mm', 1.5)
# convert legacy stroke_method
if element.get_style("stroke") and not element.node.get('inkscape:connection-start', None):
# manual stitch
legacy_manual_stitch = element.get_boolean_param('manual_stitch', False)
if legacy_manual_stitch is True:
element.remove_param('manual_stitch')
element.set_param('stroke_method', 'manual_stitch')
# stroke_method
legacy_stroke_method = element.get_int_param('stroke_method', None)
if legacy_stroke_method == 0:
element.set_param('stroke_method', 'running_stitch')
elif legacy_stroke_method == 1:
element.set_param('stroke_method', 'ripple_stitch')
if (not element.get_param('stroke_method', None) and
element.get_param('satin_column', False) is False and
not element.node.style('stroke-dasharray')):
element.set_param('stroke_method', 'zigzag_stitch')
# grid_size was supposed to be mm, but it was in pixels
grid_size = element.get_float_param('grid_size', None)
if grid_size:
size = grid_size / PIXELS_PER_MM
size = "{:.2f}".format(size)
element.set_param('grid_size_mm', size)
element.remove_param('grid_size')
def _replace_legacy_embroider_param(element, param):
# remove "embroider_" prefix
new_param = param[10:]
if new_param in INKSTITCH_ATTRIBS:
value = element.node.get(param, "").strip()
element.set_param(param[10:], value)
del element.node.attrib[param]
|
3,416 |
id
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetNetworkExperimentProfileResult',
'AwaitableGetNetworkExperimentProfileResult',
'get_network_experiment_profile',
'get_network_experiment_profile_output',
]
@pulumi.output_type
class GetNetworkExperimentProfileResult:
"""
    Defines a Network Experiment Profile and lists of Experiments
"""
def __init__(__self__, enabled_state=None, etag=None, METHOD_NAME=None, location=None, name=None, resource_state=None, tags=None, type=None):
if enabled_state and not isinstance(enabled_state, str):
raise TypeError("Expected argument 'enabled_state' to be a str")
pulumi.set(__self__, "enabled_state", enabled_state)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", METHOD_NAME)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if resource_state and not isinstance(resource_state, str):
raise TypeError("Expected argument 'resource_state' to be a str")
pulumi.set(__self__, "resource_state", resource_state)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="enabledState")
def enabled_state(self) -> Optional[str]:
"""
The state of the Experiment
"""
return pulumi.get(self, "enabled_state")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
Gets a unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="resourceState")
def resource_state(self) -> str:
"""
Resource status.
"""
return pulumi.get(self, "resource_state")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetNetworkExperimentProfileResult(GetNetworkExperimentProfileResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetNetworkExperimentProfileResult(
enabled_state=self.enabled_state,
etag=self.etag,
METHOD_NAME=self.METHOD_NAME,
location=self.location,
name=self.name,
resource_state=self.resource_state,
tags=self.tags,
type=self.type)
def get_network_experiment_profile(profile_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNetworkExperimentProfileResult:
"""
    Defines a Network Experiment Profile and lists of Experiments
Azure REST API version: 2019-11-01.
:param str profile_name: The Profile identifier associated with the Tenant and Partner
:param str resource_group_name: Name of the Resource group within the Azure subscription.
"""
__args__ = dict()
__args__['profileName'] = profile_name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:network:getNetworkExperimentProfile', __args__, opts=opts, typ=GetNetworkExperimentProfileResult).value
return AwaitableGetNetworkExperimentProfileResult(
enabled_state=pulumi.get(__ret__, 'enabled_state'),
etag=pulumi.get(__ret__, 'etag'),
METHOD_NAME=pulumi.get(__ret__, 'id'),
location=pulumi.get(__ret__, 'location'),
name=pulumi.get(__ret__, 'name'),
resource_state=pulumi.get(__ret__, 'resource_state'),
tags=pulumi.get(__ret__, 'tags'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_network_experiment_profile)
def get_network_experiment_profile_output(profile_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetNetworkExperimentProfileResult]:
"""
    Defines a Network Experiment Profile and lists of Experiments
Azure REST API version: 2019-11-01.
:param str profile_name: The Profile identifier associated with the Tenant and Partner
:param str resource_group_name: Name of the Resource group within the Azure subscription.
"""
...
|
3,417 |
append random iv
|
import hashlib
import json
import random
from base64 import decodebytes, encodebytes
from pubnub.crypto_core import PubNubCrypto
from Cryptodome.Cipher import AES
from Cryptodome.Util.Padding import pad, unpad
Initial16bytes = '0123456789012345'
class PubNubCryptodome(PubNubCrypto):
mode = AES.MODE_CBC
fallback_mode = None
def __init__(self, pubnub_config):
super().__init__(pubnub_config)
self.mode = pubnub_config.cipher_mode
self.fallback_mode = pubnub_config.fallback_cipher_mode
def encrypt(self, key, msg, use_random_iv=False):
secret = self.get_secret(key)
initialization_vector = self.get_initialization_vector(use_random_iv)
cipher = AES.new(bytes(secret[0:32], 'utf-8'), self.mode, bytes(initialization_vector, 'utf-8'))
encrypted_message = cipher.encrypt(self.pad(msg.encode('utf-8')))
msg_with_iv = self.METHOD_NAME(encrypted_message, use_random_iv, bytes(initialization_vector, "utf-8"))
return encodebytes(msg_with_iv).decode('utf-8').replace("\n", "")
def decrypt(self, key, msg, use_random_iv=False):
secret = self.get_secret(key)
decoded_message = decodebytes(msg.encode("utf-8"))
initialization_vector, extracted_message = self.extract_random_iv(decoded_message, use_random_iv)
cipher = AES.new(bytes(secret[0:32], "utf-8"), self.mode, initialization_vector)
try:
plain = self.depad((cipher.decrypt(extracted_message)).decode('utf-8'))
except UnicodeDecodeError as e:
if not self.fallback_mode:
raise e
cipher = AES.new(bytes(secret[0:32], "utf-8"), self.fallback_mode, initialization_vector)
plain = self.depad((cipher.decrypt(extracted_message)).decode('utf-8'))
try:
return json.loads(plain)
except Exception:
return plain
def METHOD_NAME(self, message, use_random_iv, initialization_vector):
if self.pubnub_configuration.use_random_initialization_vector or use_random_iv:
return initialization_vector + message
else:
return message
def extract_random_iv(self, message, use_random_iv):
if self.pubnub_configuration.use_random_initialization_vector or use_random_iv:
return message[0:16], message[16:]
else:
return bytes(Initial16bytes, "utf-8"), message
def get_initialization_vector(self, use_random_iv):
if self.pubnub_configuration.use_random_initialization_vector or use_random_iv:
return "{0:016}".format(random.randint(0, 9999999999999999))
else:
return Initial16bytes
def pad(self, msg, block_size=16):
padding = block_size - (len(msg) % block_size)
return msg + (chr(padding) * padding).encode('utf-8')
def depad(self, msg):
return msg[0:-ord(msg[-1])]
def get_secret(self, key):
return hashlib.sha256(key.encode("utf-8")).hexdigest()
class PubNubFileCrypto(PubNubCryptodome):
def encrypt(self, key, file):
secret = self.get_secret(key)
initialization_vector = self.get_initialization_vector(use_random_iv=True)
cipher = AES.new(bytes(secret[0:32], "utf-8"), self.mode, bytes(initialization_vector, 'utf-8'))
initialization_vector = bytes(initialization_vector, 'utf-8')
return self.METHOD_NAME(
cipher.encrypt(pad(file, 16)),
use_random_iv=True,
initialization_vector=initialization_vector
)
def decrypt(self, key, file):
secret = self.get_secret(key)
initialization_vector, extracted_file = self.extract_random_iv(file, use_random_iv=True)
try:
cipher = AES.new(bytes(secret[0:32], "utf-8"), self.mode, initialization_vector)
result = unpad(cipher.decrypt(extracted_file), 16)
except ValueError:
cipher = AES.new(bytes(secret[0:32], "utf-8"), self.fallback_mode, initialization_vector)
result = unpad(cipher.decrypt(extracted_file), 16)
return result
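# Hedged round-trip sketch (assumes a pubnub configuration object exposing
# cipher_mode / fallback_cipher_mode / use_random_initialization_vector, as used
# above; the key is a placeholder):
#
#   crypto = PubNubCryptodome(pubnub_config)
#   token = crypto.encrypt("my-cipher-key", '"hello"', use_random_iv=True)
#   assert crypto.decrypt("my-cipher-key", token, use_random_iv=True) == "hello"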
|
3,418 |
disable permute
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import logging
from .... import FastDeployModel, ModelFormat
from .... import c_lib_wrap as C
class SCRFD(FastDeployModel):
def __init__(self,
model_file,
params_file="",
runtime_option=None,
model_format=ModelFormat.ONNX):
"""Load a SCRFD model exported by SCRFD.
        :param model_file: (str)Path of model file, e.g. ./scrfd.onnx
        :param params_file: (str)Path of parameters file, e.g. yolox/model.pdiparams; if the model_format is ModelFormat.ONNX, this param will be ignored and can be set as an empty string
        :param runtime_option: (fastdeploy.RuntimeOption)RuntimeOption for inference of this model, if it's None, will use the default backend on CPU
        :param model_format: (fastdeploy.ModelFormat)Model format of the loaded model
"""
        # Call the base class constructor to initialize backend_option
        # The initialized option is stored in self._runtime_option
super(SCRFD, self).__init__(runtime_option)
self._model = C.vision.facedet.SCRFD(
model_file, params_file, self._runtime_option, model_format)
        # Use self.initialized to check whether the whole model was initialized successfully
assert self.initialized, "SCRFD initialize failed."
def predict(self, input_image, conf_threshold=0.7, nms_iou_threshold=0.3):
"""Detect the location and key points of human faces from an input image
:param input_image: (numpy.ndarray)The input image data, 3-D array with layout HWC, BGR format
        :param conf_threshold: confidence threshold for postprocessing, default is 0.7
        :param nms_iou_threshold: iou threshold for NMS, default is 0.3
:return: FaceDetectionResult
"""
return self._model.predict(input_image, conf_threshold,
nms_iou_threshold)
def disable_normalize(self):
"""
This function will disable normalize in preprocessing step.
"""
self._model.disable_normalize()
def METHOD_NAME(self):
"""
This function will disable hwc2chw in preprocessing step.
"""
self._model.METHOD_NAME()
    # Properties wrapping SCRFD model attributes
    # Most are preprocessing related; e.g. modifying model.size = [640, 640] changes the resize size used in preprocessing (provided the model supports it)
@property
def size(self):
"""
Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default (640, 640)
"""
return self._model.size
@property
def padding_value(self):
# padding value, size should be the same as channels
return self._model.padding_value
@property
def is_no_pad(self):
        # when is_mini_pad = false and is_no_pad = true, the image will be resized to the set size
return self._model.is_no_pad
@property
def is_mini_pad(self):
        # only pad to the minimum rectangle whose height and width are multiples of stride
return self._model.is_mini_pad
@property
def is_scale_up(self):
        # if is_scale_up is false, the input image can only be zoomed out; the maximum resize scale cannot exceed 1.0
return self._model.is_scale_up
@property
def stride(self):
# padding stride, for is_mini_pad
return self._model.stride
@property
def downsample_strides(self):
"""
Argument for image postprocessing step,
downsample strides (namely, steps) for SCRFD to generate anchors,
will take (8,16,32) as default values
"""
return self._model.downsample_strides
@property
def landmarks_per_face(self):
"""
Argument for image postprocessing step, landmarks_per_face, default 5 in SCRFD
"""
return self._model.landmarks_per_face
@property
def use_kps(self):
"""
Argument for image postprocessing step,
the outputs of onnx file with key points features or not, default true
"""
return self._model.use_kps
@property
def max_nms(self):
"""
        Argument for image postprocessing step, the upper bound on the number of boxes processed by nms, default 30000
"""
return self._model.max_nms
@property
def num_anchors(self):
"""
Argument for image postprocessing step, anchor number of each stride, default 2
"""
return self._model.num_anchors
@size.setter
def size(self, wh):
assert isinstance(wh, (list, tuple)),\
"The value to set `size` must be type of tuple or list."
assert len(wh) == 2,\
"The value to set `size` must contatins 2 elements means [width, height], but now it contains {} elements.".format(
len(wh))
self._model.size = wh
@padding_value.setter
def padding_value(self, value):
assert isinstance(
value,
list), "The value to set `padding_value` must be type of list."
self._model.padding_value = value
@is_no_pad.setter
def is_no_pad(self, value):
assert isinstance(
value, bool), "The value to set `is_no_pad` must be type of bool."
self._model.is_no_pad = value
@is_mini_pad.setter
def is_mini_pad(self, value):
assert isinstance(
value,
bool), "The value to set `is_mini_pad` must be type of bool."
self._model.is_mini_pad = value
@is_scale_up.setter
def is_scale_up(self, value):
assert isinstance(
value,
bool), "The value to set `is_scale_up` must be type of bool."
self._model.is_scale_up = value
@stride.setter
def stride(self, value):
assert isinstance(
value, int), "The value to set `stride` must be type of int."
self._model.stride = value
@downsample_strides.setter
def downsample_strides(self, value):
assert isinstance(
value,
list), "The value to set `downsample_strides` must be type of list."
self._model.downsample_strides = value
@landmarks_per_face.setter
def landmarks_per_face(self, value):
assert isinstance(
value,
int), "The value to set `landmarks_per_face` must be type of int."
self._model.landmarks_per_face = value
@use_kps.setter
def use_kps(self, value):
assert isinstance(
value, bool), "The value to set `use_kps` must be type of bool."
self._model.use_kps = value
@max_nms.setter
def max_nms(self, value):
assert isinstance(
value, int), "The value to set `max_nms` must be type of int."
self._model.max_nms = value
@num_anchors.setter
def num_anchors(self, value):
assert isinstance(
value, int), "The value to set `num_anchors` must be type of int."
self._model.num_anchors = value
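# Hedged usage sketch (model and image paths are placeholders; requires the
# fastdeploy runtime and an SCRFD ONNX export):
#
#   import cv2
#   model = SCRFD("scrfd_500m_bnkps_shape640x640.onnx")
#   model.size = [640, 640]
#   faces = model.predict(cv2.imread("face.jpg"), conf_threshold=0.7)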
|
3,419 |
assert equivalent
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import abc
import numpy as np
import pandas as pd
import nevergrad.common.typing as tp
from nevergrad.common import testing
class Selector(pd.DataFrame): # type: ignore
"""Pandas dataframe class with a simplified selection function"""
@property
def _constructor_expanddim(self) -> tp.Type["Selector"]:
return Selector
@property
def _constructor(self) -> tp.Type["Selector"]:
return Selector
# pylint: disable=arguments-differ
def select(self, **kwargs: tp.Union[str, tp.Sequence[str], tp.Callable[[tp.Any], bool]]) -> "Selector":
"""Select rows based on a value, a sequence of values or a discriminating function
Parameters
----------
kwargs: str, list or callable
selects values in the column provided as keyword, based on str matching, or
presence in the list, or callable returning non-False on the values
Example
-------
df.select(column1=["a", "b"])
will return a new Selector with rows having either "a" or "b" as value in column1
"""
df = self
for name, criterion in kwargs.items():
if isinstance(criterion, abc.Iterable) and not isinstance(criterion, str):
selected = df.loc[:, name].isin(criterion)
elif callable(criterion):
selected = [bool(criterion(x)) for x in df.loc[:, name]]
else:
selected = df.loc[:, name].isin([criterion])
df = df.loc[selected, :]
return Selector(df)
def select_and_drop(
self, **kwargs: tp.Union[str, tp.Sequence[str], tp.Callable[[tp.Any], bool]]
) -> "Selector":
"""Same as select, but drops the columns used for selection"""
df = self.select(**kwargs)
columns = [x for x in df.columns if x not in kwargs]
return Selector(df.loc[:, columns])
def unique(
self, column_s: tp.Union[str, tp.Sequence[str]]
) -> tp.Union[tp.Tuple[tp.Any, ...], tp.Set[tp.Tuple[tp.Any, ...]]]:
"""Returns the set of unique values or set of values for a column or columns
Parameter
---------
column_s: str or tp.Sequence[str]
a column name, or list of column names
Returns
-------
set
a set of values if the input was a column name, or a set of tuple of values
if the name was a list of columns
"""
if isinstance(column_s, str):
return set(self.loc[:, column_s]) # equivalent to df.<name>.unique()
elif isinstance(column_s, (list, tuple)):
testing.assert_set_equal(set(column_s) - set(self.columns), {}, err_msg="Unknown column(s)")
df = self.loc[:, column_s]
assert not df.isnull().values.any(), "Cannot work with NaN values"
return set(tuple(row) for row in df.itertuples(index=False))
else:
raise NotImplementedError("Only strings, lists and tuples are allowed")
@classmethod
def read_csv(cls, path: tp.PathLike) -> "Selector":
return cls(pd.read_csv(str(path)))
def METHOD_NAME(self, other: pd.DataFrame, err_msg: str = "") -> None:
"""Asserts that two selectors are equal, up to row and column permutations
Note
----
Use sparsely, since it is quite slow to test
"""
testing.assert_set_equal(other.columns, self.columns, f"Different columns\n{err_msg}")
        np.testing.assert_equal(len(other), len(self), f"Different number of rows\n{err_msg}")
other_df = other.loc[:, self.columns]
df_rows: tp.List[tp.List[tp.Tuple[tp.Any, ...]]] = [[], []]
for k, df in enumerate([self, other_df]):
for row in df.itertuples(index=False):
df_rows[k].append(tuple(row))
df_rows[k].sort()
for row1, row2 in zip(*df_rows):
np.testing.assert_array_equal(row1, row2, err_msg=err_msg)
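# Hedged usage sketch (toy data, illustrative only): select rows by a value list
# and by a predicate, then inspect the unique values of a column.
if __name__ == "__main__":  # pragma: no cover - example only
    df = Selector({"optimizer": ["OnePlusOne", "CMA", "CMA"], "budget": [100, 200, 300]})
    sub = df.select(optimizer=["CMA"], budget=lambda b: b > 150)
    print(sub.unique("optimizer"))  # {'CMA'}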
|
3,420 |
test multiple region three sets of positions
|
import json
import unittest
from io import StringIO
from micall.utils.projects_dump import check_key_positions
class CheckKeyPositionsTest(unittest.TestCase):
def setUp(self):
self.warningIO = StringIO()
def testSingleRegion(self):
projects = json.loads("""\
{
"R1": {
"max_variants": 5,
"regions": [
{
"coordinate_region": "R1",
"key_positions": [
{
"end_pos": null,
"start_pos": 42
}
],
"seed_region_names": [
"R1-seed"
]
}
]
}
}
""")
expected_warnings = ""
check_key_positions(projects, self.warningIO)
self.assertMultiLineEqual(expected_warnings, self.warningIO.getvalue())
def testMultipleRegionSingleSetOfPositions(self):
projects = json.loads("""\
{
"R1": {
"max_variants": 5,
"regions": [
{
"coordinate_region": "R1",
"key_positions": [
{
"end_pos": null,
"start_pos": 42
}
],
"seed_region_names": [
"R1a-seed"
]
},
{
"coordinate_region": "R1",
"key_positions": [],
"seed_region_names": [
"R1b-seed"
]
}
]
}
}
""")
expected_warnings = ""
check_key_positions(projects, self.warningIO)
self.assertMultiLineEqual(expected_warnings, self.warningIO.getvalue())
def testMultipleRegionTwoSetsOfPositions(self):
projects = json.loads("""\
{
"R1": {
"max_variants": 5,
"regions": [
{
"coordinate_region": "R1",
"key_positions": [
{
"end_pos": null,
"start_pos": 42
}
],
"seed_region_names": [
"R1a-seed"
]
},
{
"coordinate_region": "R1",
"key_positions": [
{
"end_pos": 99,
"start_pos": 1
}
],
"seed_region_names": [
"R1b-seed"
]
}
]
}
}
""")
expected_warnings = (
"WARNING: project R1 has multiple sets of key positions for " +
"coordinate region R1.\n")
check_key_positions(projects, self.warningIO)
self.assertMultiLineEqual(expected_warnings, self.warningIO.getvalue())
def METHOD_NAME(self):
projects = json.loads("""\
{
"R1": {
"max_variants": 5,
"regions": [
{
"coordinate_region": "R1",
"key_positions": [
{
"end_pos": null,
"start_pos": 42
}
],
"seed_region_names": [
"R1a-seed"
]
},
{
"coordinate_region": "R1",
"key_positions": [
{
"end_pos": 99,
"start_pos": 1
}
],
"seed_region_names": [
"R1b-seed"
]
},
{
"coordinate_region": "R1",
"key_positions": [
{
"end_pos": 14,
"start_pos": 3
}
],
"seed_region_names": [
"R1c-seed"
]
}
]
}
}
""")
expected_warnings = (
"WARNING: project R1 has multiple sets of key positions for " +
"coordinate region R1.\n")
check_key_positions(projects, self.warningIO)
self.assertMultiLineEqual(expected_warnings, self.warningIO.getvalue())
def testDuplicateSeedAndCoordinate(self):
projects = json.loads("""\
{
"R1": {
"max_variants": 5,
"regions": [
{
"coordinate_region": "R1",
"key_positions": [
{
"end_pos": null,
"start_pos": 42
}
],
"seed_region_names": [
"R1-seed"
]
},
{
"coordinate_region": "R1",
"key_positions": [],
"seed_region_names": [
"R1-seed"
]
}
]
}
}
""")
expected_warnings = (
"WARNING: project R1 has duplicate seed and coordinate: R1-seed, R1\n")
check_key_positions(projects, self.warningIO)
self.assertMultiLineEqual(expected_warnings, self.warningIO.getvalue())
|
3,421 |
on reset
|
from __future__ import annotations
import random
from typing import Optional
from src import users, config
from src.cats import role_order, Win_Stealer
from src.containers import UserDict
from src.events import Event, event_listener
from src.functions import get_all_players, change_role
from src.messages import messages
from src.gamestate import GameState
from src.users import User
__all__ = ["get_blacklist", "get_stats_flag"]
ROLES: UserDict[users.User, str] = UserDict()
STATS_FLAG = False # if True, we begin accounting for amnesiac in update_stats
def get_blacklist(var: GameState):
return var.current_mode.SECONDARY_ROLES.keys() | Win_Stealer | {"villager", "cultist", "amnesiac"}
def get_stats_flag(var):
return STATS_FLAG
@event_listener("transition_night_begin")
def on_transition_night_begin(evt: Event, var: GameState):
global STATS_FLAG
if var.night_count == config.Main.get("gameplay.safes.amnesiac_night"):
amnesiacs = get_all_players(var, ("amnesiac",))
if amnesiacs and not config.Main.get("gameplay.hidden.amnesiac"):
STATS_FLAG = True
for amn in amnesiacs:
change_role(var, amn, "amnesiac", ROLES[amn], message="amnesia_clear")
@event_listener("spy")
def on_investigate(evt: Event, var: GameState, actor: User, target: User, spy_role: str):
if evt.data["role"] == "amnesiac" and spy_role in ("augur", "detective", "investigator", "sorcerer"):
evt.data["role"] = ROLES[target]
@event_listener("new_role", priority=1) # Exchange, clone, etc. - assign the amnesiac's final role
def update_amnesiac(evt: Event, var: GameState, player: User, old_role: Optional[str]):
# FIXME: exchange totem messes with gameplay.hidden.amnesiac (the new amnesiac is no longer hidden should they die)
if evt.params.inherit_from is not None and evt.data["role"] == "amnesiac" and old_role != "amnesiac":
evt.data["role"] = ROLES[evt.params.inherit_from]
@event_listener("new_role")
def on_new_role(evt: Event, var: GameState, player: User, old_role: Optional[str]):
if evt.params.inherit_from is None and evt.data["role"] == "amnesiac":
roles = set(role_order()) - get_blacklist(var)
ROLES[player] = random.choice(list(roles))
@event_listener("role_revealed")
def on_revealing_totem(evt: Event, var: GameState, user: User, role: str):
if role not in get_blacklist(var) and not config.Main.get("gameplay.hidden.amnesiac") and var.original_roles["amnesiac"]:
global STATS_FLAG
STATS_FLAG = True
if role == "amnesiac":
user.send(messages["amnesia_clear"].format(ROLES[user]))
change_role(var, user, "amnesiac", ROLES[user])
@event_listener("get_reveal_role")
def on_reveal_role(evt: Event, var: GameState, user: User):
if config.Main.get("gameplay.hidden.amnesiac") and var.original_main_roles[user] == "amnesiac":
evt.data["role"] = "amnesiac"
@event_listener("get_endgame_message")
def on_get_endgame_message(evt: Event, var: GameState, player: User, role: str, is_main_role: bool):
if role == "amnesiac":
evt.data["message"].append(messages["amnesiac_endgame"].format(ROLES[player]))
@event_listener("revealroles_role")
def on_revealroles_role(evt: Event, var: GameState, user: User, role: str):
if role == "amnesiac":
evt.data["special_case"].append(messages["amnesiac_revealroles"].format(ROLES[user]))
@event_listener("update_stats")
def on_update_stats(evt: Event, var: GameState, player: User, mainrole: str, revealrole: str, allroles: set[str]):
if STATS_FLAG and not get_blacklist(var) & {mainrole, revealrole}:
evt.data["possible"].add("amnesiac")
@event_listener("reset")
def METHOD_NAME(evt: Event, var: GameState):
global STATS_FLAG
ROLES.clear()
STATS_FLAG = False
@event_listener("get_role_metadata")
def on_get_role_metadata(evt: Event, var: Optional[GameState], kind: str):
if kind == "role_categories":
evt.data["amnesiac"] = {"Hidden", "Team Switcher"}
|
3,422 |
remove
|
import sys
import time
from multiprocessing import Process, SimpleQueue, set_start_method
from avocado.core.nrunner.app import BaseRunnerApp
from avocado.core.nrunner.runner import RUNNER_RUN_STATUS_INTERVAL, BaseRunner
from avocado.core.utils import messages
from avocado.utils.software_manager.main import MESSAGES
from avocado.utils.software_manager.manager import SoftwareManager
class PackageRunner(BaseRunner):
"""Runner for dependencies of type package
    This runner handles the installation, verification and removal of
packages using the avocado-software-manager.
Runnable attributes usage:
* kind: 'package'
* uri: not used
* args: not used
* kwargs:
- name: the package name (required)
- action: one of 'install', 'check', or 'remove' (optional, defaults
to 'install')
"""
name = "package"
description = "Runner for dependencies of type package"
@staticmethod
def _check(software_manager, package):
if software_manager.check_installed(package):
result = "pass"
stdout = MESSAGES["check-installed"]["success"] % package
stderr = ""
else:
result = "error"
stdout = ""
stderr = MESSAGES["check-installed"]["fail"] % package
return result, stdout, stderr
@staticmethod
def _install(software_manager, cmd, package):
result = "pass"
stderr = ""
if not software_manager.check_installed(package):
if software_manager.install(package):
stdout = MESSAGES[cmd]["success"] % package
else:
# check if the error is a false negative because of package
# installation collision
if software_manager.check_installed(package):
stdout = MESSAGES[cmd]["success"] % package
else:
result = "error"
stdout = ""
stderr = MESSAGES[cmd]["fail"] % package
else:
stdout = MESSAGES["check-installed"]["success"] % package
return result, stdout, stderr
@staticmethod
def METHOD_NAME(software_manager, cmd, package):
result = "pass"
stderr = ""
if software_manager.check_installed(package):
if software_manager.remove(package):
stdout = MESSAGES[cmd]["success"] % package
else:
# check if the error is a false negative because of package
# installation collision
if not software_manager.check_installed(package):
stdout = MESSAGES[cmd]["success"] % package
else:
result = "error"
stdout = ""
stderr = MESSAGES[cmd]["fail"] % package
else:
stdout = MESSAGES["check-installed"]["fail"] % package
return result, stdout, stderr
def _run_software_manager(self, cmd, package, queue):
software_manager = SoftwareManager()
if not software_manager.is_capable():
output = {
"result": "error",
"stdout": "",
"stderr": ("Package manager not supported or not available."),
}
queue.put(output)
return
if cmd == "install":
result, stdout, stderr = self._install(software_manager, cmd, package)
elif cmd == "remove":
result, stdout, stderr = self.METHOD_NAME(software_manager, cmd, package)
elif cmd == "check":
result, stdout, stderr = self._check(software_manager, package)
output = {"result": result, "stdout": stdout, "stderr": stderr}
queue.put(output)
def run(self, runnable):
# pylint: disable=W0201
self.runnable = runnable
yield messages.StartedMessage.get()
# check if there is a valid 'action' argument
cmd = self.runnable.kwargs.get("action", "install")
# avoid invalid arguments
if cmd not in ["install", "check", "remove"]:
stderr = (
f"Invalid action {cmd}. Use one of 'install', 'check' " f"or 'remove'"
)
yield messages.StderrMessage.get(stderr.encode())
yield messages.FinishedMessage.get("error")
return
package = self.runnable.kwargs.get("name")
# if package was passed correctly, run avocado-software-manager
if package is not None:
# let's spawn it to another process to be able to update the
# status messages and avoid the software-manager to lock this
# process
queue = SimpleQueue()
process = Process(
target=self._run_software_manager, args=(cmd, package, queue)
)
process.start()
while queue.empty():
time.sleep(RUNNER_RUN_STATUS_INTERVAL)
yield messages.RunningMessage.get()
output = queue.get()
result = output["result"]
stdout = output["stdout"]
stderr = output["stderr"]
else:
# Otherwise, log the missing package name
result = "error"
stdout = ""
stderr = (
'Package name should be passed as kwargs using name="package_name".'
)
yield messages.StdoutMessage.get(stdout.encode())
yield messages.StderrMessage.get(stderr.encode())
yield messages.FinishedMessage.get(result)
class RunnerApp(BaseRunnerApp):
PROG_NAME = "avocado-runner-package"
PROG_DESCRIPTION = "nrunner application for dependencies of type package"
RUNNABLE_KINDS_CAPABLE = ["package"]
def main():
if sys.platform == "darwin":
set_start_method("fork")
app = RunnerApp(print)
app.run()
if __name__ == "__main__":
main()
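# Hedged usage sketch (assumes avocado's Runnable API; the package name is only
# an example):
#
#   from avocado.core.nrunner.runnable import Runnable
#   runnable = Runnable("package", "", name="bash", action="check")
#   for message in PackageRunner().run(runnable):
#       print(message)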
|
3,423 |
agent
|
from __future__ import annotations
import traceback
from copy import deepcopy
import cv2
import numpy as np
from matplotlib import pyplot as plt
from PIL import Image, ImageDraw, ImageFont
from .. import __rootdir__
from ..data import agent_list,ocr_error
from . import segment
from .image import saveimg
from .log import logger
from .recognize import RecognizeError
from ..ocr import ocrhandle
def poly_center(poly):
return (np.average([x[0] for x in poly]), np.average([x[1] for x in poly]))
def in_poly(poly, p):
return poly[0, 0] <= p[0] <= poly[2, 0] and poly[0, 1] <= p[1] <= poly[2, 1]
char_map = {}
agent_sorted = sorted(deepcopy(agent_list), key=len)
origin = origin_kp = origin_des = None
FLANN_INDEX_KDTREE = 0
GOOD_DISTANCE_LIMIT = 0.7
SIFT = cv2.SIFT_create()
def agent_sift_init():
global origin, origin_kp, origin_des
if origin is None:
logger.debug('agent_sift_init')
height = width = 2000
lnum = 25
cell = height // lnum
img = np.zeros((height, width, 3), dtype=np.uint8)
img = Image.fromarray(img)
font = ImageFont.truetype(
f'{__rootdir__}/fonts/SourceHanSansSC-Bold.otf', size=30, encoding='utf-8'
)
chars = sorted(list(set(''.join([x for x in agent_list]))))
assert len(chars) <= (lnum - 2) * (lnum - 2)
for idx, char in enumerate(chars):
x, y = idx % (lnum - 2) + 1, idx // (lnum - 2) + 1
char_map[(x, y)] = char
ImageDraw.Draw(img).text(
(x * cell, y * cell), char, (255, 255, 255), font=font
)
origin = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2GRAY)
origin_kp, origin_des = SIFT.detectAndCompute(origin, None)
def sift_recog(query, resolution, draw=False,bigfont = False):
"""
    Recognize operator names by extracting SIFT feature points
"""
agent_sift_init()
    # Adjust parameters for large fonts
if bigfont:
SIFT = cv2.SIFT_create(
contrastThreshold=0.1,
edgeThreshold=20
)
else:
SIFT = cv2.SIFT_create()
query = cv2.cvtColor(np.array(query), cv2.COLOR_RGB2GRAY)
# the height & width of query image
height, width = query.shape
multi = 2 * (resolution / 1080)
query = cv2.resize(query, (int(width * multi), int(height * multi)))
query_kp, query_des = SIFT.detectAndCompute(query, None)
# build FlannBasedMatcher
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(query_des, origin_des, k=2)
# store all the good matches as per Lowe's ratio test
good = []
for x, y in matches:
if x.distance < GOOD_DISTANCE_LIMIT * y.distance:
good.append(x)
if draw:
result = cv2.drawMatches(query, query_kp, origin, origin_kp, good, None)
plt.imshow(result, 'gray')
plt.show()
count = {}
for x in good:
x, y = origin_kp[x.trainIdx].pt
c = char_map[(int(x) // 80, int(y) // 80)]
count[c] = count.get(c, 0) + 1
best = None
best_score = 0
for x in agent_sorted:
score = 0
for c in set(x):
score += count.get(c, -1)
if score > best_score:
best = x
best_score = score
logger.debug(f'segment.sift_recog: {count}, {best}')
return best
def METHOD_NAME(img, draw=False):
"""
识别干员总览界面的干员名称
"""
try:
height, width, _ = img.shape
resolution = height
left, right = 0, width
        # Adapt to irregular (notched) screens
while np.max(img[:, right - 1]) < 100:
right -= 1
while np.max(img[:, left]) < 100:
left += 1
        # Remove the operator detail panel on the left side
x0 = left + 1
while not (
img[height - 10, x0 - 1, 0] > img[height - 10, x0, 0] + 10
and abs(int(img[height - 10, x0, 0]) - int(img[height - 10, x0 + 1, 0])) < 5
):
x0 += 1
        # Get the segmentation result
ret, ocr = segment.METHOD_NAME(img, draw)
        # After the positions are determined, start precise recognition
ret_succ = []
ret_fail = []
ret_agent = []
for poly in ret:
found_ocr, fx = None, 0
for x in ocr:
cx, cy = poly_center(x[2])
if in_poly(poly, (cx + x0, cy)) and cx > fx:
fx = cx
found_ocr = x
__img = img[poly[0, 1]: poly[2, 1], poly[0, 0]: poly[2, 0]]
try:
if found_ocr is not None:
x = found_ocr
                    if x[1] in agent_list and x[1] not in ['砾', '陈']:  # OCR often confuses these two
ret_agent.append(x[1])
ret_succ.append(poly)
continue
res = sift_recog(__img, resolution, draw)
if (res is not None) and res in agent_list:
ret_agent.append(res)
ret_succ.append(poly)
continue
logger.debug(
                        f'Operator name recognition error: {x[1]} is not in the data; please report at https://github.com/Konano/arknights-mower/issues'
)
saveimg(__img, 'failure_agent')
raise Exception(x[1])
else:
if 80 <= np.min(__img):
continue
res = sift_recog(__img, resolution, draw)
if res is not None:
ret_agent.append(res)
ret_succ.append(poly)
continue
                    logger.warning(f'Operator name recognition error: region {poly.tolist()}')
saveimg(__img, 'failure_agent')
raise Exception("启动 Plan B")
ret_fail.append(poly)
raise Exception("启动 Plan B")
except Exception as e:
                # Primary recognition failed, fall back to the backup approach
_msg = str(e)
ret_fail.append(poly)
if "Plan B" not in _msg:
if _msg in ocr_error.keys():
name = ocr_error[_msg]
elif "Off" in _msg:
name = 'U-Official'
else:
continue
ret_agent.append(name)
ret_succ.append(poly)
continue
if len(ret_fail):
saveimg(img, 'failure')
if draw:
__img = img.copy()
cv2.polylines(__img, ret_fail, True, (255, 0, 0), 3, cv2.LINE_AA)
plt.imshow(__img)
plt.show()
logger.debug(f'character_recognize.agent: {ret_agent}')
logger.debug(f'character_recognize.agent: {[x.tolist() for x in ret]}')
return list(zip(ret_agent, ret_succ))
except Exception as e:
logger.debug(traceback.format_exc())
saveimg(img, 'failure_agent')
raise RecognizeError(e)
def agent_name(__img, height, draw: bool = False):
query = cv2.cvtColor(np.array(__img), cv2.COLOR_RGB2GRAY)
    h, w = query.shape
    dim = (w * 4, h * 4)
# resize image
resized = cv2.resize(__img, dim, interpolation=cv2.INTER_AREA)
ocr = ocrhandle.predict(resized)
name = ''
try:
if len(ocr) > 0 and ocr[0][1] in agent_list and ocr[0][1] not in ['砾', '陈']:
name = ocr[0][1]
elif len(ocr) > 0 and ocr[0][1] in ocr_error.keys():
name = ocr_error[ocr[0][1]]
else:
            res = sift_recog(__img, height, draw, bigfont=True)
if (res is not None) and res in agent_list:
name = res
else:
raise Exception("识别错误")
except Exception as e:
saveimg(__img, 'failure_agent')
return name
|
3,424 |
print
|
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import cProfile
import functools
import os
import pstats
import timeit
from contextlib import contextmanager
from io import StringIO
from prettytable import ALL, PrettyTable
FILE = os.path.abspath(__file__)
PROF_DIR = os.path.join(os.path.dirname(FILE), "data")
if not os.path.exists(PROF_DIR):
os.makedirs(PROF_DIR)
class ProfilePrinter:
def __init__(self, column_widths=None, field_format=None, template="column"):
assert template in ("column", "row")
self._template = template
self._column_widths = column_widths
self._field_format = field_format
self._header = None
if template == "column":
self.table = PrettyTable(header=False, hrules=ALL)
else:
self.table = PrettyTable(header=False, hrules=ALL)
def _formatted_values(self, values):
if self._field_format is not None:
assert len(self._field_format) == len(values)
return [
f.format(val) if f else str(val)
for f, val in zip(self._field_format, values)
]
return values
def _add_using_row_format(self, values):
assert len(self._header) == len(values)
formatted_vals = self._formatted_values(values)
for i in range(len(self._header)):
self.table.add_row([self._header[i], formatted_vals[i]])
def _add_using_column_format(self, values):
formatted_vals = self._formatted_values(values)
self.table.add_row(formatted_vals)
def push(self, values):
if self._template == "column":
self._add_using_column_format(values)
else:
self._add_using_row_format(values)
def header(self, values):
self._header = values
if self._template == "column":
field_names = values
self.table.add_row(values)
else:
field_names = ["KEY", "VALUE"]
self.table.field_names = field_names
for i in range(len(field_names)):
self.table.align[field_names[i]] = "l"
if self._column_widths:
self.table.max_width[field_names[i]] = self._column_widths[i]
def METHOD_NAME(self):
METHOD_NAME(self.table)
@contextmanager
def profile_print(column_widths=None, field_format=None, template="column"):
out_buffer = ProfilePrinter(column_widths, field_format, template)
try:
yield out_buffer
finally:
out_buffer.METHOD_NAME()
def profile_timeit(fn_callable, repeat=1):
ret = fn_callable()
return ret, min(timeit.repeat(fn_callable, repeat=repeat, number=1))
def profile_cprofile(fn_callable, prof_file):
prof = cProfile.Profile()
ret = prof.runcall(fn_callable)
prof.dump_stats(prof_file)
prof_stats = StringIO()
p = pstats.Stats(prof_file, stream=prof_stats)
p.strip_dirs().sort_stats("cumulative").print_stats(0.5)
return ret, prof_stats.getvalue()
class Profile:
def __init__(self, tool, tool_cfg, fn_id):
self.tool = tool
self.tool_cfg = tool_cfg
self.fn_id = fn_id
def _set_decorator_params(self):
if callable(self.tool):
self.tool = self.tool()
if callable(self.tool_cfg):
self.tool_cfg = self.tool_cfg()
def __call__(self, fn):
def wrapped_fn(*args, **kwargs):
self._set_decorator_params()
fn_callable = functools.partial(fn, *args, **kwargs)
if self.tool == "timeit":
return profile_timeit(fn_callable, **self.tool_cfg)
elif self.tool == "cprofile":
prof_file = os.path.join(PROF_DIR, self.fn_id(*args, **kwargs))
return profile_cprofile(fn_callable, prof_file=prof_file)
else:
raise ValueError(
"Invalid profiling tool specified: {}.".format(self.tool)
)
return wrapped_fn
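# A minimal usage sketch, assuming prettytable is installed; the benchmarked
# function, repeat count, and column widths below are illustrative choices,
# not values taken from this module.
if __name__ == "__main__":
    @Profile(tool="timeit", tool_cfg={"repeat": 3}, fn_id=lambda *a, **kw: "summation.prof")
    def summation(n):
        return sum(range(n))

    result, elapsed = summation(1_000_000)
    with profile_print(column_widths=[20, 12], field_format=[None, "{:.6f}"]) as table:
        table.header(["BENCHMARK", "TIME (S)"])
        table.push(["summation(1e6)", elapsed])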
|
3,425 |
from json dict convenience
|
from __future__ import annotations
from dataclasses import dataclass
from typing import Dict, Generic, List, Optional, Tuple, TypeVar
from chia.consensus.coinbase import farmer_parent_id, pool_parent_id
from chia.types.blockchain_format.coin import Coin
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.types.mempool_inclusion_status import MempoolInclusionStatus
from chia.types.spend_bundle import SpendBundle
from chia.util.bech32m import decode_puzzle_hash, encode_puzzle_hash
from chia.util.errors import Err
from chia.util.ints import uint8, uint32, uint64
from chia.util.streamable import Streamable, streamable
from chia.wallet.util.transaction_type import TransactionType
T = TypeVar("T")
minimum_send_attempts = 6
@dataclass
class ItemAndTransactionRecords(Generic[T]):
item: T
transaction_records: List["TransactionRecord"]
@streamable
@dataclass(frozen=True)
class TransactionRecord(Streamable):
"""
Used for storing transaction data and status in wallets.
"""
confirmed_at_height: uint32
created_at_time: uint64
to_puzzle_hash: bytes32
amount: uint64
fee_amount: uint64
confirmed: bool
sent: uint32
spend_bundle: Optional[SpendBundle]
additions: List[Coin]
removals: List[Coin]
wallet_id: uint32
# Represents the list of peers that we sent the transaction to, whether each one
# included it in the mempool, and what the error message (if any) was
sent_to: List[Tuple[str, uint8, Optional[str]]]
trade_id: Optional[bytes32]
type: uint32 # TransactionType
# name is also called bundle_id and tx_id
name: bytes32
memos: List[Tuple[bytes32, List[bytes]]]
def is_in_mempool(self) -> bool:
# If one of the nodes we sent it to responded with success, we set it to success
for _, mis, _ in self.sent_to:
if MempoolInclusionStatus(mis) == MempoolInclusionStatus.SUCCESS:
return True
# Note, transactions pending inclusion (pending) return false
return False
def height_farmed(self, genesis_challenge: bytes32) -> Optional[uint32]:
if not self.confirmed:
return None
if self.type == TransactionType.FEE_REWARD or self.type == TransactionType.COINBASE_REWARD:
for block_index in range(self.confirmed_at_height, self.confirmed_at_height - 100, -1):
if block_index < 0:
return None
pool_parent = pool_parent_id(uint32(block_index), genesis_challenge)
farmer_parent = farmer_parent_id(uint32(block_index), genesis_challenge)
if pool_parent == self.additions[0].parent_coin_info:
return uint32(block_index)
if farmer_parent == self.additions[0].parent_coin_info:
return uint32(block_index)
return None
def get_memos(self) -> Dict[bytes32, List[bytes]]:
return {coin_id: ms for coin_id, ms in self.memos}
@classmethod
def METHOD_NAME(cls, modified_tx_input: Dict):
modified_tx = modified_tx_input.copy()
if "to_address" in modified_tx:
modified_tx["to_puzzle_hash"] = decode_puzzle_hash(modified_tx["to_address"]).hex()
if "to_address" in modified_tx:
del modified_tx["to_address"]
# Converts memos from a flat dict into a nested list
memos_dict: Dict[str, List[str]] = {}
memos_list: List = []
if "memos" in modified_tx:
for coin_id, memo in modified_tx["memos"].items():
if coin_id not in memos_dict:
memos_dict[coin_id] = []
memos_dict[coin_id].append(memo)
for coin_id, memos in memos_dict.items():
memos_list.append((coin_id, memos))
modified_tx["memos"] = memos_list
return cls.from_json_dict(modified_tx)
def to_json_dict_convenience(self, config: Dict) -> Dict:
selected = config["selected_network"]
prefix = config["network_overrides"]["config"][selected]["address_prefix"]
formatted = self.to_json_dict()
formatted["to_address"] = encode_puzzle_hash(self.to_puzzle_hash, prefix)
formatted["memos"] = {
coin_id.hex(): memo.hex()
for coin_id, memos in self.get_memos().items()
for memo in memos
if memo is not None
}
return formatted
def is_valid(self) -> bool:
if len(self.sent_to) < minimum_send_attempts:
# we haven't tried enough peers yet
return True
if any(x[1] == MempoolInclusionStatus.SUCCESS for x in self.sent_to):
# we managed to push it to mempool at least once
return True
if any(x[2] in (Err.INVALID_FEE_LOW_FEE.name, Err.INVALID_FEE_TOO_CLOSE_TO_ZERO.name) for x in self.sent_to):
# we tried to push it to mempool and got a fee error so it's a temporary error
return True
return False
def hint_dict(self) -> Dict[bytes32, bytes32]:
return {coin_id: bytes32(memos[0]) for coin_id, memos in self.memos if len(memos) > 0 and len(memos[0]) == 32}
|
3,426 |
list share subscription source share synchronization settings
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'ListShareSubscriptionSourceShareSynchronizationSettingsResult',
'AwaitableListShareSubscriptionSourceShareSynchronizationSettingsResult',
'list_share_subscription_source_share_synchronization_settings',
'list_share_subscription_source_share_synchronization_settings_output',
]
@pulumi.output_type
class ListShareSubscriptionSourceShareSynchronizationSettingsResult:
"""
List response for get source share Synchronization settings
"""
def __init__(__self__, next_link=None, value=None):
if next_link and not isinstance(next_link, str):
raise TypeError("Expected argument 'next_link' to be a str")
pulumi.set(__self__, "next_link", next_link)
if value and not isinstance(value, list):
raise TypeError("Expected argument 'value' to be a list")
pulumi.set(__self__, "value", value)
@property
@pulumi.getter(name="nextLink")
def next_link(self) -> Optional[str]:
"""
The Url of next result page.
"""
return pulumi.get(self, "next_link")
@property
@pulumi.getter
def value(self) -> Sequence['outputs.ScheduledSourceSynchronizationSettingResponse']:
"""
Collection of items of type DataTransferObjects.
"""
return pulumi.get(self, "value")
class AwaitableListShareSubscriptionSourceShareSynchronizationSettingsResult(ListShareSubscriptionSourceShareSynchronizationSettingsResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListShareSubscriptionSourceShareSynchronizationSettingsResult(
next_link=self.next_link,
value=self.value)
def METHOD_NAME(account_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
share_subscription_name: Optional[str] = None,
skip_token: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListShareSubscriptionSourceShareSynchronizationSettingsResult:
"""
Get synchronization settings set on a share
Azure REST API version: 2021-08-01.
:param str account_name: The name of the share account.
:param str resource_group_name: The resource group name.
:param str share_subscription_name: The name of the shareSubscription.
:param str skip_token: Continuation token
"""
__args__ = dict()
__args__['accountName'] = account_name
__args__['resourceGroupName'] = resource_group_name
__args__['shareSubscriptionName'] = share_subscription_name
__args__['skipToken'] = skip_token
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:datashare:listShareSubscriptionSourceShareSynchronizationSettings', __args__, opts=opts, typ=ListShareSubscriptionSourceShareSynchronizationSettingsResult).value
return AwaitableListShareSubscriptionSourceShareSynchronizationSettingsResult(
next_link=pulumi.get(__ret__, 'next_link'),
value=pulumi.get(__ret__, 'value'))
@_utilities.lift_output_func(METHOD_NAME)
def list_share_subscription_source_share_synchronization_settings_output(account_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
share_subscription_name: Optional[pulumi.Input[str]] = None,
skip_token: Optional[pulumi.Input[Optional[str]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[ListShareSubscriptionSourceShareSynchronizationSettingsResult]:
"""
Get synchronization settings set on a share
Azure REST API version: 2021-08-01.
:param str account_name: The name of the share account.
:param str resource_group_name: The resource group name.
:param str share_subscription_name: The name of the shareSubscription.
:param str skip_token: Continuation token
"""
...
|
3,427 |
logs tags
|
import abc
import os
import time
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import TYPE_CHECKING
from ddtrace.constants import SPAN_MEASURED_KEY
from ddtrace.contrib.trace_utils import int_service
from ddtrace.internal.dogstatsd import get_dogstatsd_client
from ddtrace.internal.hostname import get_hostname
from ddtrace.internal.log_writer import V2LogWriter
from ddtrace.sampler import RateSampler
if TYPE_CHECKING:
from ddtrace import Pin
from ddtrace import Span
class BaseLLMIntegration:
_integration_name = "baseLLM"
def __init__(self, config, stats_url, site, api_key):
# FIXME: this currently does not consider if the tracer is configured to
# use a different hostname. eg. tracer.configure(host="new-hostname")
# Ideally the metrics client should live on the tracer or some other core
# object that is strongly linked with configuration.
self._statsd = get_dogstatsd_client(stats_url, namespace=self._integration_name)
self._config = config
self._log_writer = V2LogWriter(
site=site,
api_key=api_key,
interval=float(os.getenv("_DD_%s_LOG_WRITER_INTERVAL" % self._integration_name.upper(), "1.0")),
timeout=float(os.getenv("_DD_%s_LOG_WRITER_TIMEOUT" % self._integration_name.upper(), "2.0")),
)
self._span_pc_sampler = RateSampler(sample_rate=config.span_prompt_completion_sample_rate)
self._log_pc_sampler = RateSampler(sample_rate=config.log_prompt_completion_sample_rate)
def is_pc_sampled_span(self, span):
# type: (Span) -> bool
if not span.sampled:
return False
return self._span_pc_sampler.sample(span)
def is_pc_sampled_log(self, span):
# type: (Span) -> bool
if not self._config.logs_enabled or not span.sampled:
return False
return self._log_pc_sampler.sample(span)
def start_log_writer(self):
# type: (...) -> None
self._log_writer.start()
@abc.abstractmethod
def _set_base_span_tags(self, span, **kwargs):
# type: (Span, Dict[str, Any]) -> None
"""Set default LLM span attributes when possible."""
pass
def trace(self, pin, operation_id, **kwargs):
# type: (Pin, str, Dict[str, Any]) -> Span
"""
Start a LLM request span.
Reuse the service of the application since we'll tag downstream request spans with the LLM name.
Eventually those should also be internal service spans once peer.service is implemented.
"""
span = pin.tracer.trace(
"%s.request" % self._integration_name, resource=operation_id, service=int_service(pin, self._config)
)
# Enable trace metrics for these spans so users can see per-service openai usage in APM.
span.set_tag(SPAN_MEASURED_KEY)
self._set_base_span_tags(span, **kwargs)
return span
@classmethod
@abc.abstractmethod
def METHOD_NAME(cls, span):
# type: (Span) -> str
"""Generate ddtags from the corresponding span."""
pass
def log(self, span, level, msg, attrs):
# type: (Span, str, str, Dict[str, Any]) -> None
if not self._config.logs_enabled:
return
tags = self.METHOD_NAME(span)
log = {
"timestamp": time.time() * 1000,
"message": msg,
"hostname": get_hostname(),
"ddsource": self._integration_name,
"service": span.service or "",
"status": level,
"ddtags": tags,
}
if span is not None:
log["dd.trace_id"] = str(span.trace_id)
log["dd.span_id"] = str(span.span_id)
log.update(attrs)
self._log_writer.enqueue(log)
@classmethod
@abc.abstractmethod
def _metrics_tags(cls, span):
# type: (Span) -> list
"""Generate a list of metrics tags from a given span."""
return []
def metric(self, span, kind, name, val, tags=None):
# type: (Span, str, str, Any, Optional[List[str]]) -> None
"""Set a metric using the context from the given span."""
if not self._config.metrics_enabled:
return
metric_tags = self._metrics_tags(span)
if tags:
metric_tags += tags
if kind == "dist":
self._statsd.distribution(name, val, tags=metric_tags)
elif kind == "incr":
self._statsd.increment(name, val, tags=metric_tags)
elif kind == "gauge":
self._statsd.gauge(name, val, tags=metric_tags)
else:
raise ValueError("Unexpected metric type %r" % kind)
def trunc(self, text):
# type: (str) -> str
"""Truncate the given text.
Use to avoid attaching too much data to spans.
"""
if not text:
return text
text = text.replace("\n", "\\n").replace("\t", "\\t")
if len(text) > self._config.span_char_limit:
text = text[: self._config.span_char_limit] + "..."
return text
|
3,428 |
write application
|
###############################################################################
#
# App - A class for writing the Excel XLSX App file.
#
# Copyright 2013-2017, John McNamara, [email protected]
#
# Package imports.
from . import xmlwriter
class App(xmlwriter.XMLwriter):
"""
A class for writing the Excel XLSX App file.
"""
###########################################################################
#
# Public API.
#
###########################################################################
def __init__(self):
"""
Constructor.
"""
super(App, self).__init__()
self.part_names = []
self.heading_pairs = []
self.properties = {}
def _add_part_name(self, part_name):
# Add the name of a workbook Part such as 'Sheet1' or 'Print_Titles'.
self.part_names.append(part_name)
def _add_heading_pair(self, heading_pair):
# Add the name of a workbook Heading Pair such as 'Worksheets',
# 'Charts' or 'Named Ranges'.
# Ignore empty pairs such as chartsheets.
if not heading_pair[1]:
return
self.heading_pairs.append(('lpstr', heading_pair[0]))
self.heading_pairs.append(('i4', heading_pair[1]))
def _set_properties(self, properties):
# Set the document properties.
self.properties = properties
###########################################################################
#
# Private API.
#
###########################################################################
def _assemble_xml_file(self):
# Assemble and write the XML file.
# Write the XML declaration.
self._xml_declaration()
self._write_properties()
self.METHOD_NAME()
self._write_doc_security()
self._write_scale_crop()
self._write_heading_pairs()
self._write_titles_of_parts()
self._write_manager()
self._write_company()
self._write_links_up_to_date()
self._write_shared_doc()
self._write_hyperlink_base()
self._write_hyperlinks_changed()
self._write_app_version()
self._xml_end_tag('Properties')
# Close the file.
self._xml_close()
###########################################################################
#
# XML methods.
#
###########################################################################
def _write_properties(self):
# Write the <Properties> element.
schema = 'http://schemas.openxmlformats.org/officeDocument/2006/'
xmlns = schema + 'extended-properties'
xmlns_vt = schema + 'docPropsVTypes'
attributes = [
('xmlns', xmlns),
('xmlns:vt', xmlns_vt),
]
self._xml_start_tag('Properties', attributes)
def METHOD_NAME(self):
# Write the <Application> element.
self._xml_data_element('Application', 'Microsoft Excel')
def _write_doc_security(self):
# Write the <DocSecurity> element.
self._xml_data_element('DocSecurity', '0')
def _write_scale_crop(self):
# Write the <ScaleCrop> element.
self._xml_data_element('ScaleCrop', 'false')
def _write_heading_pairs(self):
# Write the <HeadingPairs> element.
self._xml_start_tag('HeadingPairs')
self._write_vt_vector('variant', self.heading_pairs)
self._xml_end_tag('HeadingPairs')
def _write_titles_of_parts(self):
# Write the <TitlesOfParts> element.
parts_data = []
self._xml_start_tag('TitlesOfParts')
for part_name in self.part_names:
parts_data.append(('lpstr', part_name))
self._write_vt_vector('lpstr', parts_data)
self._xml_end_tag('TitlesOfParts')
def _write_vt_vector(self, base_type, vector_data):
# Write the <vt:vector> element.
attributes = [
('size', len(vector_data)),
('baseType', base_type),
]
self._xml_start_tag('vt:vector', attributes)
for vt_data in vector_data:
if base_type == 'variant':
self._xml_start_tag('vt:variant')
self._write_vt_data(vt_data)
if base_type == 'variant':
self._xml_end_tag('vt:variant')
self._xml_end_tag('vt:vector')
def _write_vt_data(self, vt_data):
        # Write the <vt:*> elements such as <vt:lpstr> and <vt:i4>.
self._xml_data_element("vt:%s" % vt_data[0], vt_data[1])
def _write_company(self):
company = self.properties.get('company', '')
self._xml_data_element('Company', company)
def _write_manager(self):
# Write the <Manager> element.
if 'manager' not in self.properties:
return
self._xml_data_element('Manager', self.properties['manager'])
def _write_links_up_to_date(self):
# Write the <LinksUpToDate> element.
self._xml_data_element('LinksUpToDate', 'false')
def _write_shared_doc(self):
# Write the <SharedDoc> element.
self._xml_data_element('SharedDoc', 'false')
def _write_hyperlink_base(self):
# Write the <HyperlinkBase> element.
hyperlink_base = self.properties.get('hyperlink_base')
if hyperlink_base is None:
return
self._xml_data_element('HyperlinkBase', hyperlink_base)
def _write_hyperlinks_changed(self):
# Write the <HyperlinksChanged> element.
self._xml_data_element('HyperlinksChanged', 'false')
def _write_app_version(self):
# Write the <AppVersion> element.
self._xml_data_element('AppVersion', '12.0000')
|
3,429 |
make pixel extractor
|
"""Helper methods for accessing single pixel from a rasterio file object.
"""
import rasterio
import rasterio.crs
import rasterio.warp
from typing import Iterable, List, Optional, Tuple, Union
RowCol = Tuple[int, int]
XY = Tuple[float, float]
LonLat = Tuple[float, float]
SomeCoord = Union[RowCol, XY, LonLat]
PixelValue = Union[float, int]
NOTSET = object()
def METHOD_NAME(
mode="pixel",
band=1,
src_nodata_fallback=None,
src_nodata_override=None,
dst_nodata=NOTSET,
):
"""Returns function that can extract single pixel from opened rasterio file.
Signature of the returned function is:
`src, coordinate_tuple, [band] -> pixel`
Where coordinate_tuple is interpreted according to `mode`
mode - How to interpret coordinate:
- pixel: (row, col)
- native: (x, y) in file native coordinate space
- lonlat: (lon, lat) (specifically EPSG:4326)
band - Default band to read, can be over-written on read
dst_nodata - when set use that instead of defaulting to src nodata value,
can be set to `None` to remap to `None`
src_nodata_fallback - nodata value to use if src file is missing nodata value
src_nodata_override - when set use that instead of what's in the file,
useful when nodata metadata is incorrect in the file
but correct value is available out of band.
"""
default_band = band
if dst_nodata is NOTSET:
def _dst_nodata(src_nodata):
return src_nodata
else:
def _dst_nodata(src_nodata):
return dst_nodata
def remap_pix(pix, src_nodata, dst_nodata):
# TODO: special case src_nodata is nan case
return dst_nodata if pix == src_nodata else pix
def extract_pixel(src, coord, band=default_band):
ri, ci = coord
src_nodata = _resolve_nodata(
src, band, fallback=src_nodata_fallback, override=src_nodata_override
)
dst_nodata = _dst_nodata(src_nodata)
if 0 <= ri < src.height and 0 <= ci < src.width:
window = ((ri, ri + 1), (ci, ci + 1))
pix = src.read(band, window=window)
# TODO: support band being a list of bands
return remap_pix(pix[0][0], src_nodata, dst_nodata)
else:
return dst_nodata
def extract_native(src, coord, band=default_band):
return extract_pixel(src, src.index(*coord), band=band)
def extract_lonlat(src, coord, band=default_band):
lon, lat = coord
x, y = rasterio.warp.transform(
rasterio.crs.CRS.from_epsg(4326), src.crs, [lon], [lat]
)
xy = (x[0], y[0])
return extract_native(src, xy, band=band)
extractors = dict(pixel=extract_pixel, native=extract_native, lonlat=extract_lonlat)
extractor = extractors.get(mode)
if extractor is None:
raise ValueError("Only support mode=<pixel|native|lonlat>")
return extractor
def _resolve_nodata(src, band, fallback=None, override=None):
"""Figure out what value to use for nodata given a band and fallback/override
settings
:param src: Rasterio file
"""
if override is not None:
return override
band0 = band if isinstance(band, int) else band[0]
nodata = src.nodatavals[band0 - 1]
if nodata is None:
return fallback
return nodata
def _mode_value(
pixel: Optional[RowCol] = None,
xy: Optional[XY] = None,
lonlat: Optional[LonLat] = None,
) -> Union[Tuple[str, SomeCoord], Tuple[None, None]]:
if pixel is not None:
return "pixel", pixel
if xy is not None:
return "native", xy
if lonlat is not None:
return "lonlat", lonlat
return (None, None)
def read_pixels(
urls: Iterable[str],
pixel: Optional[RowCol] = None,
xy: Optional[XY] = None,
lonlat: Optional[LonLat] = None,
band: int = 1,
**kwargs,
) -> List[PixelValue]:
"""Read a single pixel at the same location from a bunch of different files.
Location can be specified in 3 different ways:
pixel (row: int, column: int) -- in pixel coords
xy (X: float, Y: float) -- in Projected coordinates of the native CRS of the image
lonlat (lon: float, lat: float) -- in EPSG:4326
"""
mode, coord = _mode_value(pixel=pixel, xy=xy, lonlat=lonlat)
if mode is None:
raise ValueError("Have to supply one of: pixel, xy, or lonlat.")
extractor = METHOD_NAME(mode=mode, band=band, **kwargs)
def read_from_url(url):
url = rasterio.parse_path(url)
with rasterio.DatasetReader(url, sharing=False) as src:
return extractor(src, coord=coord)
return [read_from_url(url) for url in urls]
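# A minimal usage sketch, assuming two local GeoTIFF files exist at the paths
# below; the file names, coordinates, and nodata fallback are placeholders.
if __name__ == "__main__":
    urls = ["scene_2020_band1.tif", "scene_2021_band1.tif"]
    # Same (row, col) pixel from every file
    by_pixel = read_pixels(urls, pixel=(100, 200))
    # Same geographic location (EPSG:4326), with a fallback nodata value
    by_lonlat = read_pixels(urls, lonlat=(149.1, -35.3), src_nodata_fallback=-9999)
    print(by_pixel, by_lonlat)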
|
3,430 |
decimation
|
#
# Copyright 2008 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from gnuradio import gr
from gnuradio import blocks
import sys
import math
from . import fft_python as fft
from . import fft_vfc, fft_vcc
from .fft_python import window
try:
from gnuradio import filter
except ImportError:
    sys.stderr.write('fft.logpwrfft requires gr-filter.\n')
sys.exit(1)
class _logpwrfft_base(gr.hier_block2):
"""
Create a log10(abs(fft)) stream chain, with real or complex input.
"""
def __init__(self, sample_rate, fft_size, ref_scale, frame_rate, avg_alpha, average, win=None, shift=False):
"""
Create an log10(abs(fft)) stream chain.
Provide access to the setting the filter and sample rate.
Args:
sample_rate: Incoming stream sample rate
fft_size: Number of FFT bins
ref_scale: Sets 0 dB value input amplitude
frame_rate: Output frame rate
avg_alpha: FFT averaging (over time) constant [0.0-1.0]
average: Whether to average [True, False]
win: the window taps generation function
shift: shift zero-frequency component to center of spectrum
"""
gr.hier_block2.__init__(self, self._name,
# Input signature
gr.io_signature(1, 1, self._item_size),
gr.io_signature(1, 1, gr.sizeof_float * fft_size)) # Output signature
self._sd = blocks.stream_to_vector_decimator(item_size=self._item_size,
sample_rate=sample_rate,
vec_rate=frame_rate,
vec_len=fft_size)
if win is None:
win = window.blackmanharris
fft_window = win(fft_size)
fft = self._fft_block[0](fft_size, True, fft_window, shift=shift)
window_power = sum([x * x for x in fft_window])
c2magsq = blocks.complex_to_mag_squared(fft_size)
self._avg = filter.single_pole_iir_filter_ff(1.0, fft_size)
self._log = blocks.nlog10_ff(10, fft_size,
# Adjust for number of bins
-20 * math.log10(fft_size) -
# Adjust for windowing loss
10 * math.log10(float(window_power) / fft_size) -
20 * math.log10(float(ref_scale) / 2)) # Adjust for reference scale
self.connect(self, self._sd, fft, c2magsq, self._avg, self._log, self)
self._average = average
self._avg_alpha = avg_alpha
self.set_avg_alpha(avg_alpha)
self.set_average(average)
def set_decimation(self, decim):
"""
Set the decimation on stream decimator.
Args:
decim: the new decimation
"""
self._sd.set_decimation(decim)
def set_vec_rate(self, vec_rate):
"""
Set the vector rate on stream decimator.
Args:
vec_rate: the new vector rate
"""
self._sd.set_vec_rate(vec_rate)
def set_sample_rate(self, sample_rate):
"""
Set the new sampling rate
Args:
sample_rate: the new rate
"""
self._sd.set_sample_rate(sample_rate)
def set_average(self, average):
"""
Set the averaging filter on/off.
Args:
average: true to set averaging on
"""
self._average = average
if self._average:
self._avg.set_taps(self._avg_alpha)
else:
self._avg.set_taps(1.0)
def set_avg_alpha(self, avg_alpha):
"""
Set the average alpha and set the taps if average was on.
Args:
avg_alpha: the new iir filter tap
"""
self._avg_alpha = avg_alpha
self.set_average(self._average)
def sample_rate(self):
"""
Return the current sample rate.
"""
return self._sd.sample_rate()
def METHOD_NAME(self):
"""
Return the current decimation.
"""
return self._sd.METHOD_NAME()
def frame_rate(self):
"""
Return the current frame rate.
"""
return self._sd.frame_rate()
def average(self):
"""
Return whether or not averaging is being performed.
"""
return self._average
def avg_alpha(self):
"""
Return averaging filter constant.
"""
return self._avg_alpha
class logpwrfft_f(_logpwrfft_base):
"""
Create an fft block chain, with real input.
"""
_name = "logpwrfft_f"
_item_size = gr.sizeof_float
_fft_block = (fft_vfc, )
class logpwrfft_c(_logpwrfft_base):
"""
Create an fft block chain, with complex input.
"""
_name = "logpwrfft_c"
_item_size = gr.sizeof_gr_complex
_fft_block = (fft_vcc, )
|
3,431 |
to str
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.28
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1beta3Subject(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'group': 'V1beta3GroupSubject',
'kind': 'str',
'service_account': 'V1beta3ServiceAccountSubject',
'user': 'V1beta3UserSubject'
}
attribute_map = {
'group': 'group',
'kind': 'kind',
'service_account': 'serviceAccount',
'user': 'user'
}
def __init__(self, group=None, kind=None, service_account=None, user=None, local_vars_configuration=None): # noqa: E501
"""V1beta3Subject - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._group = None
self._kind = None
self._service_account = None
self._user = None
self.discriminator = None
if group is not None:
self.group = group
self.kind = kind
if service_account is not None:
self.service_account = service_account
if user is not None:
self.user = user
@property
def group(self):
"""Gets the group of this V1beta3Subject. # noqa: E501
:return: The group of this V1beta3Subject. # noqa: E501
:rtype: V1beta3GroupSubject
"""
return self._group
@group.setter
def group(self, group):
"""Sets the group of this V1beta3Subject.
:param group: The group of this V1beta3Subject. # noqa: E501
:type: V1beta3GroupSubject
"""
self._group = group
@property
def kind(self):
"""Gets the kind of this V1beta3Subject. # noqa: E501
`kind` indicates which one of the other fields is non-empty. Required # noqa: E501
:return: The kind of this V1beta3Subject. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1beta3Subject.
`kind` indicates which one of the other fields is non-empty. Required # noqa: E501
:param kind: The kind of this V1beta3Subject. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and kind is None: # noqa: E501
raise ValueError("Invalid value for `kind`, must not be `None`") # noqa: E501
self._kind = kind
@property
def service_account(self):
"""Gets the service_account of this V1beta3Subject. # noqa: E501
:return: The service_account of this V1beta3Subject. # noqa: E501
:rtype: V1beta3ServiceAccountSubject
"""
return self._service_account
@service_account.setter
def service_account(self, service_account):
"""Sets the service_account of this V1beta3Subject.
:param service_account: The service_account of this V1beta3Subject. # noqa: E501
:type: V1beta3ServiceAccountSubject
"""
self._service_account = service_account
@property
def user(self):
"""Gets the user of this V1beta3Subject. # noqa: E501
:return: The user of this V1beta3Subject. # noqa: E501
:rtype: V1beta3UserSubject
"""
return self._user
@user.setter
def user(self, user):
"""Sets the user of this V1beta3Subject.
:param user: The user of this V1beta3Subject. # noqa: E501
:type: V1beta3UserSubject
"""
self._user = user
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def METHOD_NAME(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.METHOD_NAME()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta3Subject):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta3Subject):
return True
return self.to_dict() != other.to_dict()
|
3,432 |
test max request size per handler
|
"""Tests for the CherryPy configuration system."""
import os
import cherrypy
from cherrypy.test import helper
localDir = os.path.join(os.getcwd(), os.path.dirname(__file__))
# Client-side code #
class ServerConfigTests(helper.CPWebCase):
@staticmethod
def setup_server():
class Root:
@cherrypy.expose
def index(self):
return cherrypy.request.wsgi_environ['SERVER_PORT']
@cherrypy.expose
def upload(self, file):
return 'Size: %s' % len(file.file.read())
@cherrypy.expose
@cherrypy.config(**{'request.body.maxbytes': 100})
def tinyupload(self):
return cherrypy.request.body.read()
cherrypy.tree.mount(Root())
cherrypy.config.update({
'server.socket_host': '0.0.0.0',
'server.socket_port': 9876,
'server.max_request_body_size': 200,
'server.max_request_header_size': 500,
'server.socket_timeout': 0.5,
# Test explicit server.instance
'server.2.instance': 'cherrypy._cpwsgi_server.CPWSGIServer',
'server.2.socket_port': 9877,
# Test non-numeric <servername>
# Also test default server.instance = builtin server
'server.yetanother.socket_port': 9878,
})
PORT = 9876
def testBasicConfig(self):
self.getPage('/')
self.assertBody(str(self.PORT))
def testAdditionalServers(self):
if self.scheme == 'https':
return self.skip('not available under ssl')
self.PORT = 9877
self.getPage('/')
self.assertBody(str(self.PORT))
self.PORT = 9878
self.getPage('/')
self.assertBody(str(self.PORT))
def METHOD_NAME(self):
if getattr(cherrypy.server, 'using_apache', False):
return self.skip('skipped due to known Apache differences... ')
self.getPage('/tinyupload', method='POST',
headers=[('Content-Type', 'text/plain'),
('Content-Length', '100')],
body='x' * 100)
self.assertStatus(200)
self.assertBody('x' * 100)
self.getPage('/tinyupload', method='POST',
headers=[('Content-Type', 'text/plain'),
('Content-Length', '101')],
body='x' * 101)
self.assertStatus(413)
def testMaxRequestSize(self):
if getattr(cherrypy.server, 'using_apache', False):
return self.skip('skipped due to known Apache differences... ')
for size in (500, 5000, 50000):
self.getPage('/', headers=[('From', 'x' * 500)])
self.assertStatus(413)
# Test for https://github.com/cherrypy/cherrypy/issues/421
# (Incorrect border condition in readline of SizeCheckWrapper).
# This hangs in rev 891 and earlier.
lines256 = 'x' * 248
self.getPage('/',
headers=[('Host', '%s:%s' % (self.HOST, self.PORT)),
('From', lines256)])
# Test upload
cd = (
'Content-Disposition: form-data; '
'name="file"; '
'filename="hello.txt"'
)
body = '\r\n'.join([
'--x',
cd,
'Content-Type: text/plain',
'',
'%s',
'--x--'])
partlen = 200 - len(body)
b = body % ('x' * partlen)
h = [('Content-type', 'multipart/form-data; boundary=x'),
('Content-Length', '%s' % len(b))]
self.getPage('/upload', h, 'POST', b)
self.assertBody('Size: %d' % partlen)
b = body % ('x' * 200)
h = [('Content-type', 'multipart/form-data; boundary=x'),
('Content-Length', '%s' % len(b))]
self.getPage('/upload', h, 'POST', b)
self.assertStatus(413)
|
3,433 |
update items
|
#!/usr/bin/env python
# --!-- coding: utf8 --!--
from PyQt5.QtCore import Qt, QModelIndex, QMimeData
from PyQt5.QtGui import QBrush, QColor
from PyQt5.QtWidgets import QTreeWidget, QTreeWidgetItem
from lxml import etree as ET
from manuskript import settings
from manuskript.enums import Plot, Outline, PlotStep
from manuskript.models import references as Ref
from manuskript.ui import style as S
class plotTreeView(QTreeWidget):
def __init__(self, parent=None):
QTreeWidget.__init__(self, parent)
self._model = None
self._catRow = [-1, -1, -1]
self._filter = ""
self._lastID = -1
self._updating = False
self._showSubPlot = False
self.setRootIsDecorated(False)
self.setIndentation(10)
self.setColumnCount(1)
self._rootItem = QTreeWidgetItem()
self.insertTopLevelItem(0, self._rootItem)
# self.currentItemChanged.connect(self._currentItemChanged)
###############################################################################
# SETTERS
###############################################################################
def setShowSubPlot(self, v):
self._showSubPlot = v
self.METHOD_NAME()
def setPlotModel(self, model):
self._model = model
self._model.dataChanged.connect(self.updateMaybe)
self._model.rowsInserted.connect(self.updateMaybe2)
self._model.rowsRemoved.connect(self.updateMaybe2)
self.METHOD_NAME()
def setFilter(self, text):
self._filter = text
self.METHOD_NAME()
###############################################################################
# GETTERS
###############################################################################
def getItemByID(self, ID):
"Recursively search items to find one whose data is ``ID``."
def find(item, ID):
if item.data(0, Qt.UserRole) == ID:
return item
for i in range(item.childCount()):
r = find(item.child(i), ID)
if r:
return r
return find(self.invisibleRootItem(), ID)
def currentPlotIndex(self):
"Returns index of the current item in plot model."
return self._model.getIndexFromID(self.currentPlotID())
def currentPlotID(self):
"Returns ID of the current item in plot model."
ID = None
if self.currentItem():
ID = self.currentItem().data(0, Qt.UserRole)
return ID
###############################################################################
# UPDATES
###############################################################################
def updateMaybe(self, topLeft, bottomRight):
if topLeft.parent() != QModelIndex() and \
topLeft.column() <= PlotStep.name <= bottomRight.column() and \
self._showSubPlot:
# Name's of Step has been updated, we update Items if showing
# subplots.
self.METHOD_NAME()
elif topLeft.parent() != QModelIndex():
return
if topLeft.column() <= Plot.name <= bottomRight.column():
# Update name
self.updateNames()
elif topLeft.column() <= Plot.importance <= bottomRight.column():
# Importance changed
self.METHOD_NAME()
def updateMaybe2(self, parent, first, last):
"Rows inserted or removed"
if parent == QModelIndex():
self.METHOD_NAME()
elif self._showSubPlot:
self.METHOD_NAME()
def updateNames(self):
for i in range(self.topLevelItemCount()):
item = self.topLevelItem(i)
for c in range(item.childCount()):
sub = item.child(c)
ID = sub.data(0, Qt.UserRole)
if ID:
name = self._model.getPlotNameByID(ID)
sub.setText(0, name)
def METHOD_NAME(self):
if not self._model:
return
if self.currentItem():
self._lastID = self.currentItem().data(0, Qt.UserRole)
self._updating = True
self.clear()
plots = self._model.getPlotsByImportance()
h = [self.tr("Main"), self.tr("Secondary"), self.tr("Minor")]
for i in range(3):
cat = QTreeWidgetItem(self, [h[i]])
cat.setBackground(0, QBrush(QColor(S.highlightLight)))
cat.setForeground(0, QBrush(QColor(S.highlightedTextDark)))
cat.setTextAlignment(0, Qt.AlignCenter)
f = cat.font(0)
f.setBold(True)
cat.setFont(0, f)
cat.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled)
self.addTopLevelItem(cat)
# cat.setChildIndicatorPolicy(cat.DontShowIndicator)
for ID in plots[i]:
name = self._model.getPlotNameByID(ID)
if not self._filter.lower() in name.lower():
continue
item = QTreeWidgetItem(cat, [name])
item.setData(0, Qt.UserRole, ID)
item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled)
if self._showSubPlot:
f = item.font(0)
f.setBold(True)
item.setFont(0, f)
for subID, name, summary in self._model.getSubPlotsByID(ID):
sub = QTreeWidgetItem(item, [name])
# sub.setData(0, Qt.UserRole, "{}:{}".format(ID, subID))
sub.setData(0, Qt.UserRole, ID)
if ID == self._lastID:
self.setCurrentItem(item)
self.expandAll()
self._updating = False
###############################################################################
# DRAG N DROP
###############################################################################
def mimeTypes(self):
return ["application/xml"]
def mimeData(self, items):
mimeData = QMimeData()
encodedData = ""
root = ET.Element("outlineItems")
for item in items:
plotID = item.data(0, Qt.UserRole)
subplotRaw = item.parent().indexOfChild(item)
_id, name, summary = self._model.getSubPlotsByID(plotID)[subplotRaw]
sub = ET.Element("outlineItem")
sub.set(Outline.title.name, name)
sub.set(Outline.type.name, settings.defaultTextType)
sub.set(Outline.summaryFull.name, summary)
sub.set(Outline.notes.name, self.tr("**Plot:** {}").format(
Ref.plotReference(plotID)))
root.append(sub)
encodedData = ET.tostring(root)
mimeData.setData("application/xml", encodedData)
return mimeData
###############################################################################
# EVENTS
###############################################################################
def mouseDoubleClickEvent(self, event):
item = self.currentItem()
if not item:
return
# Catching double clicks to forbid collapsing of toplevel items
if item.parent():
QTreeWidget.mouseDoubleClickEvent(self, event)
|
3,434 |
flushheaders
|
"""Generic MIME writer.
This module defines the class MimeWriter. The MimeWriter class implements
a basic formatter for creating MIME multi-part files. It doesn't seek around
the output file nor does it use large amounts of buffer space. You must write
the parts out in the order that they should occur in the final file.
MimeWriter does buffer the headers you add, allowing you to rearrange their
order.
"""
import mimetools
__all__ = ["MimeWriter"]
import warnings
warnings.warn("the MimeWriter module is deprecated; use the email package instead",
DeprecationWarning, 2)
class MimeWriter:
"""Generic MIME writer.
Methods:
__init__()
addheader()
flushheaders()
startbody()
startmultipartbody()
nextpart()
lastpart()
A MIME writer is much more primitive than a MIME parser. It
doesn't seek around on the output file, and it doesn't use large
amounts of buffer space, so you have to write the parts in the
order they should occur on the output file. It does buffer the
headers you add, allowing you to rearrange their order.
General usage is:
f = <open the output file>
w = MimeWriter(f)
...call w.addheader(key, value) 0 or more times...
followed by either:
f = w.startbody(content_type)
...call f.write(data) for body data...
or:
w.startmultipartbody(subtype)
for each part:
subwriter = w.nextpart()
...use the subwriter's methods to create the subpart...
w.lastpart()
The subwriter is another MimeWriter instance, and should be
treated in the same way as the toplevel MimeWriter. This way,
writing recursive body parts is easy.
Warning: don't forget to call lastpart()!
XXX There should be more state so calls made in the wrong order
are detected.
Some special cases:
- startbody() just returns the file passed to the constructor;
but don't use this knowledge, as it may be changed.
- startmultipartbody() actually returns a file as well;
this can be used to write the initial 'if you can read this your
mailer is not MIME-aware' message.
- If you call flushheaders(), the headers accumulated so far are
written out (and forgotten); this is useful if you don't need a
body part at all, e.g. for a subpart of type message/rfc822
that's (mis)used to store some header-like information.
- Passing a keyword argument 'prefix=<flag>' to addheader(),
start*body() affects where the header is inserted; 0 means
append at the end, 1 means insert at the start; default is
append for addheader(), but insert for start*body(), which use
it to determine where the Content-Type header goes.
"""
def __init__(self, fp):
self._fp = fp
self._headers = []
def addheader(self, key, value, prefix=0):
"""Add a header line to the MIME message.
The key is the name of the header, where the value obviously provides
the value of the header. The optional argument prefix determines
where the header is inserted; 0 means append at the end, 1 means
insert at the start. The default is to append.
"""
lines = value.split("\n")
while lines and not lines[-1]: del lines[-1]
while lines and not lines[0]: del lines[0]
for i in range(1, len(lines)):
lines[i] = " " + lines[i].strip()
value = "\n".join(lines) + "\n"
line = key + ": " + value
if prefix:
self._headers.insert(0, line)
else:
self._headers.append(line)
def METHOD_NAME(self):
"""Writes out and forgets all headers accumulated so far.
This is useful if you don't need a body part at all; for example,
for a subpart of type message/rfc822 that's (mis)used to store some
header-like information.
"""
self._fp.writelines(self._headers)
self._headers = []
def startbody(self, ctype, plist=[], prefix=1):
"""Returns a file-like object for writing the body of the message.
The content-type is set to the provided ctype, and the optional
parameter, plist, provides additional parameters for the
content-type declaration. The optional argument prefix determines
where the header is inserted; 0 means append at the end, 1 means
insert at the start. The default is to insert at the start.
"""
for name, value in plist:
ctype = ctype + ';\n %s=\"%s\"' % (name, value)
self.addheader("Content-Type", ctype, prefix=prefix)
self.METHOD_NAME()
self._fp.write("\n")
return self._fp
def startmultipartbody(self, subtype, boundary=None, plist=[], prefix=1):
"""Returns a file-like object for writing the body of the message.
Additionally, this method initializes the multi-part code, where the
subtype parameter provides the multipart subtype, the boundary
parameter may provide a user-defined boundary specification, and the
plist parameter provides optional parameters for the subtype. The
optional argument, prefix, determines where the header is inserted;
0 means append at the end, 1 means insert at the start. The default
is to insert at the start. Subparts should be created using the
nextpart() method.
"""
self._boundary = boundary or mimetools.choose_boundary()
return self.startbody("multipart/" + subtype,
[("boundary", self._boundary)] + plist,
prefix=prefix)
def nextpart(self):
"""Returns a new instance of MimeWriter which represents an
individual part in a multipart message.
This may be used to write the part as well as used for creating
recursively complex multipart messages. The message must first be
initialized with the startmultipartbody() method before using the
nextpart() method.
"""
self._fp.write("\n--" + self._boundary + "\n")
return self.__class__(self._fp)
def lastpart(self):
"""This is used to designate the last part of a multipart message.
It should always be used when writing multipart messages.
"""
self._fp.write("\n--" + self._boundary + "--\n")
if __name__ == '__main__':
import test.test_MimeWriter
|
3,435 |
test is iterable
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.contrib.graph_editor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import graph_editor as ge
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class UtilTest(test.TestCase):
def test_list_view(self):
"""Test for ge.util.ListView."""
l = [0, 1, 2]
lv = ge.util.ListView(l)
# Should not be the same id.
self.assertIsNot(l, lv)
# Should behave the same way than the original list.
self.assertTrue(len(lv) == 3 and lv[0] == 0 and lv[1] == 1 and lv[2] == 2)
# Should be read only.
with self.assertRaises(TypeError):
lv[0] = 0
def METHOD_NAME(self):
"""Test for ge.util.is_iterable."""
self.assertTrue(ge.util.is_iterable([0, 1, 2]))
self.assertFalse(ge.util.is_iterable(3))
def test_unique_graph(self):
"""Test for ge.util.check_graphs and ge.util.get_unique_graph."""
g0 = ops.Graph()
with g0.as_default():
a0 = constant_op.constant(1)
b0 = constant_op.constant(2)
g1 = ops.Graph()
with g1.as_default():
a1 = constant_op.constant(1)
b1 = constant_op.constant(2)
# Same graph, should be fine.
self.assertIsNone(ge.util.check_graphs(a0, b0))
# Two different graphs, should assert.
with self.assertRaises(ValueError):
ge.util.check_graphs(a0, b0, a1, b1)
# a0 and b0 belongs to the same graph, should be fine.
self.assertEqual(ge.util.get_unique_graph([a0, b0]), g0)
# Different graph, should raise an error.
with self.assertRaises(ValueError):
ge.util.get_unique_graph([a0, b0, a1, b1])
def test_make_list_of_op(self):
"""Test for ge.util.make_list_of_op."""
g0 = ops.Graph()
with g0.as_default():
a0 = constant_op.constant(1)
b0 = constant_op.constant(2)
# Should extract the ops from the graph.
self.assertEqual(len(ge.util.make_list_of_op(g0)), 2)
# Should extract the ops from the tuple.
self.assertEqual(len(ge.util.make_list_of_op((a0.op, b0.op))), 2)
def test_make_list_of_t(self):
"""Test for ge.util.make_list_of_t."""
g0 = ops.Graph()
with g0.as_default():
a0 = constant_op.constant(1)
b0 = constant_op.constant(2)
c0 = math_ops.add(a0, b0) # pylint: disable=unused-variable
    # Should extract the tensors from the graph.
self.assertEqual(len(ge.util.make_list_of_t(g0)), 3)
# Should extract the tensors from the tuple
self.assertEqual(len(ge.util.make_list_of_t((a0, b0))), 2)
# Should extract the tensors and ignore the ops.
self.assertEqual(
len(ge.util.make_list_of_t(
(a0, a0.op, b0), ignore_ops=True)), 2)
def test_get_generating_consuming(self):
"""Test for ge.util.get_generating_ops and ge.util.get_generating_ops."""
g0 = ops.Graph()
with g0.as_default():
a0 = constant_op.constant(1)
b0 = constant_op.constant(2)
c0 = math_ops.add(a0, b0)
self.assertEqual(len(ge.util.get_generating_ops([a0, b0])), 2)
self.assertEqual(len(ge.util.get_consuming_ops([a0, b0])), 1)
self.assertEqual(len(ge.util.get_generating_ops([c0])), 1)
self.assertEqual(ge.util.get_consuming_ops([c0]), [])
def test_control_outputs(self):
"""Test for the ge.util.ControlOutputs class."""
g0 = ops.Graph()
with g0.as_default():
a0 = constant_op.constant(1)
b0 = constant_op.constant(2)
x0 = constant_op.constant(3)
with ops.control_dependencies([x0.op]):
c0 = math_ops.add(a0, b0) # pylint: disable=unused-variable
control_outputs = ge.util.ControlOutputs(g0).get_all()
self.assertEqual(len(control_outputs), 1)
self.assertEqual(len(control_outputs[x0.op]), 1)
self.assertIs(list(control_outputs[x0.op])[0], c0.op)
def test_scope(self):
"""Test simple path scope functionalities."""
self.assertEqual(ge.util.scope_finalize("foo/bar"), "foo/bar/")
self.assertEqual(ge.util.scope_dirname("foo/bar/op"), "foo/bar/")
self.assertEqual(ge.util.scope_basename("foo/bar/op"), "op")
def test_placeholder(self):
"""Test placeholder functionalities."""
g0 = ops.Graph()
with g0.as_default():
a0 = constant_op.constant(1, name="foo")
# Test placeholder name.
self.assertEqual(ge.util.placeholder_name(a0), "geph__foo_0")
self.assertEqual(ge.util.placeholder_name(None), "geph")
self.assertEqual(
ge.util.placeholder_name(
a0, scope="foo/"), "foo/geph__foo_0")
self.assertEqual(
ge.util.placeholder_name(
a0, scope="foo"), "foo/geph__foo_0")
self.assertEqual(ge.util.placeholder_name(None, scope="foo/"), "foo/geph")
self.assertEqual(ge.util.placeholder_name(None, scope="foo"), "foo/geph")
# Test placeholder creation.
g0 = ops.Graph()
with g0.as_default():
a0 = constant_op.constant(1, dtype=dtypes.float32, name="a0")
c0 = math_ops.add(
ge.util.make_placeholder_from_tensor(a0),
ge.util.make_placeholder_from_dtype_and_shape(dtype=dtypes.float32))
self.assertEqual(c0.op.inputs[0].op.name, "geph__a0_0")
self.assertEqual(c0.op.inputs[1].op.name, "geph")
if __name__ == "__main__":
test.main()
|
3,436 |
download
|
import os
import os.path as osp
import networkx as nx
import numpy as np
import requests
import scipy.sparse as sp
from sklearn.model_selection import train_test_split
from spektral.data import Dataset, Graph
from spektral.datasets.utils import DATASET_FOLDER
from spektral.utils.io import load_binary
class Citation(Dataset):
"""
The citation datasets Cora, Citeseer and Pubmed.
Node attributes are bag-of-words vectors representing the most common words
in the text document associated to each node.
Two papers are connected if either one cites the other.
Labels represent the subject area of the paper.
The train, test, and validation splits are given as binary masks and are
accessible via the `mask_tr`, `mask_va`, and `mask_te` attributes.
**Arguments**
- `name`: name of the dataset to load (`'cora'`, `'citeseer'`, or
`'pubmed'`);
- `random_split`: if True, return a randomized split (20 nodes per class
for training, 30 nodes per class for validation and the remaining nodes for
testing, as recommended by [Shchur et al. (2018)](https://arxiv.org/abs/1811.05868)).
If False (default), return the "Planetoid" public splits defined by
[Yang et al. (2016)](https://arxiv.org/abs/1603.08861).
- `normalize_x`: if True, normalize the features.
- `dtype`: numpy dtype of graph data.
"""
url = "https://github.com/tkipf/gcn/raw/master/gcn/data/{}"
suffixes = ["x", "y", "tx", "ty", "allx", "ally", "graph", "test.index"]
def __init__(
self, name, random_split=False, normalize_x=False, dtype=np.float32, **kwargs
):
if hasattr(dtype, "as_numpy_dtype"):
# support tf.dtypes
dtype = dtype.as_numpy_dtype
if name.lower() not in self.available_datasets():
raise ValueError(
"Unknown dataset {}. See {}.available_datasets() for a complete list of"
"available datasets.".format(name, self.__class__.__name__)
)
self.name = name.lower()
self.random_split = random_split
self.normalize_x = normalize_x
self.mask_tr = self.mask_va = self.mask_te = None
self.dtype = dtype
super().__init__(**kwargs)
@property
def path(self):
return osp.join(DATASET_FOLDER, "Citation", self.name)
def read(self):
objects = [_read_file(self.path, self.name, s) for s in self.suffixes]
objects = [o.A if sp.issparse(o) else o for o in objects]
x, y, tx, ty, allx, ally, graph, idx_te = objects
# Public Planetoid splits. This is the default
idx_tr = np.arange(y.shape[0])
idx_va = np.arange(y.shape[0], y.shape[0] + 500)
idx_te = idx_te.astype(int)
idx_te_sort = np.sort(idx_te)
# Fix disconnected nodes in Citeseer
if self.name == "citeseer":
idx_te_len = idx_te.max() - idx_te.min() + 1
tx_ext = np.zeros((idx_te_len, x.shape[1]))
tx_ext[idx_te_sort - idx_te.min(), :] = tx
tx = tx_ext
ty_ext = np.zeros((idx_te_len, y.shape[1]))
ty_ext[idx_te_sort - idx_te.min(), :] = ty
ty = ty_ext
x = np.vstack((allx, tx))
y = np.vstack((ally, ty))
x[idx_te, :] = x[idx_te_sort, :]
y[idx_te, :] = y[idx_te_sort, :]
# Row-normalize the features
if self.normalize_x:
print("Pre-processing node features")
x = _preprocess_features(x)
if self.random_split:
# Throw away public splits and compute random ones like Shchur et al.
indices = np.arange(y.shape[0])
n_classes = y.shape[1]
idx_tr, idx_te, _, y_te = train_test_split(
indices, y, train_size=20 * n_classes, stratify=y
)
idx_va, idx_te = train_test_split(
idx_te, train_size=30 * n_classes, stratify=y_te
)
# Adjacency matrix
a = nx.adjacency_matrix(nx.from_dict_of_lists(graph)) # CSR
a.setdiag(0)
a.eliminate_zeros()
# Train/valid/test masks
self.mask_tr = _idx_to_mask(idx_tr, y.shape[0])
self.mask_va = _idx_to_mask(idx_va, y.shape[0])
self.mask_te = _idx_to_mask(idx_te, y.shape[0])
return [
Graph(
x=x.astype(self.dtype),
a=a.astype(self.dtype),
y=y.astype(self.dtype),
)
]
def METHOD_NAME(self):
print("Downloading {} dataset.".format(self.name))
os.makedirs(self.path, exist_ok=True)
for n in self.suffixes:
f_name = "ind.{}.{}".format(self.name, n)
req = requests.get(self.url.format(f_name))
if req.status_code == 404:
raise ValueError(
"Cannot download dataset ({} returned 404).".format(
self.url.format(f_name)
)
)
with open(os.path.join(self.path, f_name), "wb") as out_file:
out_file.write(req.content)
@staticmethod
def available_datasets():
return ["cora", "citeseer", "pubmed"]
class Cora(Citation):
"""
Alias for `Citation('cora')`.
"""
def __init__(self, random_split=False, normalize_x=False, **kwargs):
super().__init__(
"cora", random_split=random_split, normalize_x=normalize_x, **kwargs
)
class Citeseer(Citation):
"""
Alias for `Citation('citeseer')`.
"""
def __init__(self, random_split=False, normalize_x=False, **kwargs):
super().__init__(
"citeseer", random_split=random_split, normalize_x=normalize_x, **kwargs
)
class Pubmed(Citation):
"""
Alias for `Citation('pubmed')`.
"""
def __init__(self, random_split=False, normalize_x=False, **kwargs):
super().__init__(
"pubmed", random_split=random_split, normalize_x=normalize_x, **kwargs
)
def _read_file(path, name, suffix):
full_fname = os.path.join(path, "ind.{}.{}".format(name, suffix))
if suffix == "test.index":
return np.loadtxt(full_fname)
return load_binary(full_fname)
def _idx_to_mask(idx, l):
mask = np.zeros(l)
mask[idx] = 1
return np.array(mask, dtype=bool)
def _preprocess_features(features):
rowsum = np.array(features.sum(1))
r_inv = np.power(rowsum, -1).flatten()
r_inv[np.isinf(r_inv)] = 0.0
r_mat_inv = sp.diags(r_inv)
features = r_mat_inv.dot(features)
return features
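if __name__ == "__main__":
    # Minimal usage sketch (added for illustration, not part of the original
    # module): load Cora with the public Planetoid split. Running this
    # downloads the raw files on first use; the printed sizes are the commonly
    # reported ones for this split.
    dataset = Cora()
    graph = dataset[0]
    print(graph.n_nodes, graph.n_node_features)  # 2708 1433
    print(int(dataset.mask_tr.sum()))  # 140 training nodes (20 per class)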
|
3,437 |
add item
|
import Screens.InfoBar
from enigma import eServiceReference, eTimer
from Screens.Screen import Screen
from Components.ServiceScan import ServiceScan as CScan
from Components.ProgressBar import ProgressBar
from Components.Label import Label
from Components.ActionMap import ActionMap
from Components.MenuList import MenuList
from Components.Sources.FrontendInfo import FrontendInfo
from Components.config import config
class FIFOList(MenuList):
def __init__(self, len=10):
self.len = len
self.list = []
MenuList.__init__(self, self.list)
def METHOD_NAME(self, item):
self.list.append(item)
self.l.setList(self.list[-self.len:])
def clear(self):
del self.list[:]
self.l.setList(self.list)
def getCurrentSelection(self):
return self.list and self.getCurrent() or None
def listAll(self):
self.l.setList(self.list)
self.selectionEnabled(True)
class ServiceScanSummary(Screen):
skin = """
<screen position="0,0" size="132,64">
<widget name="Title" position="6,4" size="120,42" font="Regular;16" transparent="1" />
<widget name="scan_progress" position="6,50" zPosition="1" borderWidth="1" size="56,12" backgroundColor="dark" />
<widget name="Service" position="6,22" size="120,26" font="Regular;12" transparent="1" />
</screen>"""
def __init__(self, session, parent, showStepSlider=True):
Screen.__init__(self, session, parent)
self["Title"] = Label(parent.title or _("Service scan"))
self["Service"] = Label(_("No service"))
self["scan_progress"] = ProgressBar()
def updateProgress(self, value):
self["scan_progress"].setValue(value)
def updateService(self, name):
self["Service"].setText(name)
class ServiceScan(Screen):
def up(self):
self["servicelist"].up()
selectedService = self["servicelist"].getCurrentSelection()
if selectedService:
self.session.summary.updateService(selectedService[0])
def down(self):
self["servicelist"].down()
selectedService = self["servicelist"].getCurrentSelection()
if selectedService:
self.session.summary.updateService(selectedService[0])
def ok(self):
if self["scan"].isDone():
try:
from Plugins.SystemPlugins.LCNScanner.plugin import LCNBuildHelper
lcn = LCNBuildHelper()
lcn.buildAfterScan()
except Exception as e:
print(e)
if self.currentInfobar.__class__.__name__ == "InfoBar":
selectedService = self["servicelist"].getCurrentSelection()
if selectedService and self.currentServiceList is not None:
self.currentServiceList.setTvMode()
bouquets = self.currentServiceList.getBouquetList()
last_scanned_bouquet = bouquets and next((x[1] for x in bouquets if x[0] == "Last Scanned"), None)
if last_scanned_bouquet:
self.currentServiceList.enterUserbouquet(last_scanned_bouquet)
self.currentServiceList.setCurrentSelection(eServiceReference(selectedService[1]))
service = self.currentServiceList.getCurrentSelection()
if not self.session.postScanService or service != self.session.postScanService:
self.session.postScanService = service
self.currentServiceList.addToHistory(service)
config.servicelist.lastmode.save()
self.currentServiceList.saveChannel(service)
self.doCloseRecursive()
self.cancel()
def cancel(self):
self.exit(False)
def doCloseRecursive(self):
self.exit(True)
def exit(self, returnValue):
if self.currentInfobar.__class__.__name__ == "InfoBar":
self.close(returnValue)
self.close()
def __init__(self, session, scanList):
Screen.__init__(self, session)
self["Title"] = Label(_("Scanning..."))
self.scanList = scanList
if hasattr(session, 'infobar'):
self.currentInfobar = Screens.InfoBar.InfoBar.instance
if self.currentInfobar:
self.currentServiceList = self.currentInfobar.servicelist
if self.session.pipshown and self.currentServiceList:
if self.currentServiceList.dopipzap:
self.currentServiceList.togglePipzap()
if hasattr(self.session, 'pip'):
del self.session.pip
self.session.pipshown = False
else:
self.currentInfobar = None
self.session.nav.stopService()
self["scan_progress"] = ProgressBar()
self["scan_state"] = Label(_("scan state"))
self["network"] = Label()
self["transponder"] = Label()
self["pass"] = Label("")
self["servicelist"] = FIFOList(len=10)
self["FrontendInfo"] = FrontendInfo()
self["key_red"] = Label(_("Cancel"))
self["key_green"] = Label(_("OK"))
self["actions"] = ActionMap(["SetupActions", "MenuActions"],
{
"up": self.up,
"down": self.down,
"ok": self.ok,
"save": self.ok,
"cancel": self.cancel,
"menu": self.doCloseRecursive
}, -2)
self.setTitle(_("Service scan"))
self.onFirstExecBegin.append(self.doServiceScan)
self.scanTimer = eTimer()
self.scanTimer.callback.append(self.scanPoll)
def scanPoll(self):
if self["scan"].isDone():
self.scanTimer.stop()
self["servicelist"].moveToIndex(0)
selectedService = self["servicelist"].getCurrentSelection()
if selectedService:
self.session.summary.updateService(selectedService[0])
def doServiceScan(self):
self["servicelist"].len = self["servicelist"].instance.size().height() // self["servicelist"].l.getItemSize().height()
self["scan"] = CScan(self["scan_progress"], self["scan_state"], self["servicelist"], self["pass"], self.scanList, self["network"], self["transponder"], self["FrontendInfo"], self.session.summary)
self.scanTimer.start(250)
def createSummary(self):
return ServiceScanSummary
|
3,438 |
draw 4
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
# You can run this .tac file directly with:
# twistd -ny demo_insults.tac
#
# Re-using a private key is dangerous, generate one.
#
# For this example you can use:
#
# $ ckeygen -t rsa -f ssh-keys/ssh_host_rsa_key
"""
Various simple terminal manipulations using the insults module.
This demo sets up two listening ports: one on 6022 which accepts ssh
connections; one on 6023 which accepts telnet connections. No login
for the telnet server is required; for the ssh server, \"username\" is
the username and \"password\" is the password.
The TerminalProtocol subclass defined here ignores most user input
(except to print it out to the server log) and spends the duration of
the connection drawing (the author's humble approximation of)
raindrops at random locations on the client's terminal. +, -, *, and
/ are respected and each adjusts an aspect of the timing of the
animation process.
"""
import random
import string
from twisted.application import internet, service
from twisted.conch.insults import insults
from twisted.conch.manhole_ssh import ConchFactory, TerminalRealm
from twisted.conch.ssh import keys
from twisted.conch.telnet import TelnetBootstrapProtocol, TelnetTransport
from twisted.cred import checkers, portal
from twisted.internet import protocol, task
from twisted.python import log
class DrawingFinished(Exception):
"""Sentinel exception, raised when no \"frames\" for a particular
\"animation\" remain to be drawn.
"""
class Drawable:
"""Representation of an animation.
Constructed with a protocol instance and a coordinate on the
screen, waits for invocations of iterate() at which point it
erases the previous frame of the animation and draws the next one,
using its protocol instance and always placing the upper left hand
corner of the frame at the given coordinates.
Frames are defined with draw_ prefixed methods. Erasure is
performed by erase_ prefixed methods.
"""
n = 0
def __init__(self, proto, col, line):
self.proto = proto
self.col = col
self.line = line
def drawLines(self, s):
lines = s.splitlines()
c = self.col
line = self.line
for l in lines:
self.proto.cursorPosition(c - len(lines) / 2, line)
self.proto.write(l)
line += 1
def iterate(self):
getattr(self, "erase_" + str(self.n))()
self.n += 1
f = getattr(self, "draw_" + str(self.n), None)
if f is None:
raise DrawingFinished()
f()
def erase_0(self):
pass
class Splat(Drawable):
HEIGHT = 5
WIDTH = 11
def draw_1(self):
# . .
# . . .
# . .
self.drawLines(" . .\n. . .\n . .")
def erase_1(self):
self.drawLines(" \n \n ")
def draw_2(self):
# . . . .
# . o o o .
# . o o o o .
# . o o o .
# . . . .
self.drawLines(" . . . .\n . o o o .\n. o o o o .\n . o o o .\n . . . .")
def erase_2(self):
self.drawLines(" \n \n \n \n ")
def draw_3(self):
# o o o o
# o O O O o
# o O O O O o
# o O O O o
# o o o o
self.drawLines(" o o o o\n o O O O o\no O O O O o\n o O O O o\n o o o o")
erase_3 = erase_2
def METHOD_NAME(self):
# O O O O
# O . . . O
# O . . . . O
# O . . . O
# O O O O
self.drawLines(" O O O O\n O . . . O\nO . . . . O\n O . . . O\n O O O O")
erase_4 = erase_3
def draw_5(self):
# . . . .
# . .
# . .
# . .
# . . . .
self.drawLines(" . . . .\n . .\n. .\n . .\n . . . .")
erase_5 = erase_4
class Drop(Drawable):
WIDTH = 3
HEIGHT = 4
def draw_1(self):
# o
self.drawLines(" o")
def erase_1(self):
self.drawLines(" ")
def draw_2(self):
# _
# / \
# \./
self.drawLines(" _ \n/ \\\n\\./")
def erase_2(self):
self.drawLines(" \n \n ")
def draw_3(self):
# O
self.drawLines(" O")
def erase_3(self):
self.drawLines(" ")
class DemoProtocol(insults.TerminalProtocol):
"""Draws random things at random places on the screen."""
width = 80
height = 24
interval = 0.1
rate = 0.05
def connectionMade(self):
self.run()
def connectionLost(self, reason):
self._call.stop()
del self._call
def run(self):
# Clear the screen, matey
self.terminal.eraseDisplay()
self._call = task.LoopingCall(self._iterate)
self._call.start(self.interval)
def _iterate(self):
cls = random.choice((Splat, Drop))
# Move to a random location on the screen
col = random.randrange(self.width - cls.WIDTH) + cls.WIDTH
line = random.randrange(self.height - cls.HEIGHT) + cls.HEIGHT
s = cls(self.terminal, col, line)
c = task.LoopingCall(s.iterate)
c.start(self.rate).addErrback(lambda f: f.trap(DrawingFinished)).addErrback(
log.err
)
# ITerminalListener
def terminalSize(self, width, height):
self.width = width
self.height = height
def unhandledControlSequence(self, seq):
log.msg(f"Client sent something weird: {seq!r}")
def keystrokeReceived(self, keyID, modifier):
if keyID == "+":
self.interval /= 1.1
elif keyID == "-":
self.interval *= 1.1
elif keyID == "*":
self.rate /= 1.1
elif keyID == "/":
self.rate *= 1.1
else:
log.msg(f"Client sent: {keyID!r}")
return
self._call.stop()
self._call = task.LoopingCall(self._iterate)
self._call.start(self.interval)
def makeService(args):
checker = checkers.InMemoryUsernamePasswordDatabaseDontUse(username=b"password")
f = protocol.ServerFactory()
f.protocol = lambda: TelnetTransport(
TelnetBootstrapProtocol,
insults.ServerProtocol,
args["protocolFactory"],
*args.get("protocolArgs", ()),
**args.get("protocolKwArgs", {}),
)
tsvc = internet.TCPServer(args["telnet"], f)
def chainProtocolFactory():
return insults.ServerProtocol(
args["protocolFactory"],
*args.get("protocolArgs", ()),
**args.get("protocolKwArgs", {}),
)
rlm = TerminalRealm()
rlm.chainedProtocolFactory = chainProtocolFactory
ptl = portal.Portal(rlm, [checker])
f = ConchFactory(ptl)
f.publicKeys[b"ssh-rsa"] = keys.Key.fromFile("ssh-keys/ssh_host_rsa_key.pub")
f.privateKeys[b"ssh-rsa"] = keys.Key.fromFile("ssh-keys/ssh_host_rsa_key")
csvc = internet.TCPServer(args["ssh"], f)
m = service.MultiService()
tsvc.setServiceParent(m)
csvc.setServiceParent(m)
return m
application = service.Application("Insults Demo App")
makeService(
{"protocolFactory": DemoProtocol, "telnet": 6023, "ssh": 6022}
).setServiceParent(application)
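# Example session (derived from the module docstring above): after starting the
# service with `twistd -ny demo_insults.tac`, connect with either
#   telnet localhost 6023
# or
#   ssh -p 6022 username@localhost    (password: "password")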
|
3,439 |
test custom packagers
|
# Copyright 2023 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from types import FunctionType
import pytest
import mlrun
from mlrun import MLClientCtx
from mlrun.package import ContextHandler
from mlrun.runtimes import RunError
def test_init():
"""
During the context handler's initialization, it collects the default packagers found in the class variables
    `_MLRUN_REQUIREMENTS_PACKAGERS`, `_EXTENDED_PACKAGERS` and `_MLRUN_FRAMEWORKS_PACKAGERS`, so this test makes sure
    that no error is raised during the initial collection of packagers when new ones are added.
"""
ContextHandler()
def _look_for_context_via_get_or_create(not_a_context=None):
assert not isinstance(not_a_context, MLClientCtx)
context_handler = ContextHandler()
context_handler.look_for_context(args=(), kwargs={})
return context_handler.is_context_available()
def _look_for_context_via_header(context: MLClientCtx):
context_handler = ContextHandler()
context_handler.look_for_context(args=(), kwargs={"context": context})
return context_handler.is_context_available()
@pytest.mark.parametrize(
"func",
[_look_for_context_via_get_or_create, _look_for_context_via_header],
)
@pytest.mark.parametrize("run_via_mlrun", [True, False])
def test_look_for_context(rundb_mock, func: FunctionType, run_via_mlrun: bool):
"""
Test the `look_for_context` method of the context handler. The method should find or create a context only when it
is being run through MLRun.
:param rundb_mock: A runDB mock fixture.
:param func: The function to run in the test.
:param run_via_mlrun: Boolean flag to expect to find a context (run via MLRun) as True and to not find a context
as False.
"""
if not run_via_mlrun:
assert not func(None)
return
run = mlrun.new_function().run(handler=func, returns=["result:result"])
assert run.status.results["result"]
def collect_custom_packagers():
return
@pytest.mark.parametrize(
"packager, expected_result",
[
("tests.package.test_packagers_manager.PackagerA", True),
("tests.package.packagers_testers.default_packager_tester.SomeClass", False),
],
)
@pytest.mark.parametrize("is_mandatory", [True, False])
def METHOD_NAME(
rundb_mock, packager: str, expected_result: bool, is_mandatory: bool
):
"""
    Test the collection of custom packagers from a project during the `look_for_context` method.
:param rundb_mock: A runDB mock fixture.
:param packager: The custom packager to collect.
:param expected_result: Whether the packager collection should succeed.
:param is_mandatory: If the packager is mandatory for the run or not. Mandatory packagers will always raise
                         an exception if they cannot be collected.
"""
project = mlrun.get_or_create_project(name="default")
project.add_custom_packager(
packager=packager,
is_mandatory=is_mandatory,
)
project.save_to_db()
mlrun_function = project.set_function(
func=__file__, name="test_custom_packagers", image="mlrun/mlrun"
)
if expected_result or not is_mandatory:
mlrun_function.run(handler="collect_custom_packagers", local=True)
return
try:
mlrun_function.run(handler="collect_custom_packagers", local=True)
assert False
except RunError:
pass
@pytest.mark.parametrize(
"host, is_logging_worker", [("test-worker-0", True), ("test-worker-1", False)]
)
def test_is_logging_worker(host: str, is_logging_worker: bool):
"""
Test the `_is_logging_worker` method of the context handler.
:param host: The pod's name where the worker's rank is expected to be.
:param is_logging_worker: The expected result.
"""
context_handler = ContextHandler()
context_handler._context = MLClientCtx()
context_handler._context.set_label("kind", "mpijob")
context_handler._context.set_label("host", host)
assert context_handler._is_logging_worker() is is_logging_worker
|
3,440 |
test bbox json yes
|
"""Tests for `$ fio cat`."""
from click.testing import CliRunner
from fiona.fio.main import main_group
def test_one(path_coutwildrnp_shp):
runner = CliRunner()
result = runner.invoke(main_group, ['cat', path_coutwildrnp_shp])
assert result.exit_code == 0
assert result.output.count('"Feature"') == 67
def test_two(path_coutwildrnp_shp):
runner = CliRunner()
result = runner.invoke(main_group, ['cat', path_coutwildrnp_shp, path_coutwildrnp_shp])
assert result.exit_code == 0
assert result.output.count('"Feature"') == 134
def test_bbox_no(path_coutwildrnp_shp):
runner = CliRunner()
result = runner.invoke(
main_group,
['cat', path_coutwildrnp_shp, '--bbox', '0,10,80,20'],
catch_exceptions=False)
assert result.exit_code == 0
assert result.output == ""
def test_bbox_yes(path_coutwildrnp_shp):
runner = CliRunner()
result = runner.invoke(
main_group,
['cat', path_coutwildrnp_shp, '--bbox', '-109,37,-107,39'],
catch_exceptions=False)
assert result.exit_code == 0
assert result.output.count('"Feature"') == 19
def test_bbox_yes_two_files(path_coutwildrnp_shp):
runner = CliRunner()
result = runner.invoke(
main_group,
['cat', path_coutwildrnp_shp, path_coutwildrnp_shp, '--bbox', '-109,37,-107,39'],
catch_exceptions=False)
assert result.exit_code == 0
assert result.output.count('"Feature"') == 38
def METHOD_NAME(path_coutwildrnp_shp):
runner = CliRunner()
result = runner.invoke(
main_group,
['cat', path_coutwildrnp_shp, '--bbox', '[-109,37,-107,39]'],
catch_exceptions=False)
assert result.exit_code == 0
assert result.output.count('"Feature"') == 19
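# Observation based on the two tests above (not a statement from the original
# file): --bbox is accepted either as a comma-separated "minx,miny,maxx,maxy"
# string or as a JSON array such as "[minx,miny,maxx,maxy]".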
def test_bbox_where(path_coutwildrnp_shp):
runner = CliRunner()
result = runner.invoke(
main_group,
['cat', path_coutwildrnp_shp, '--bbox', '-120,40,-100,50',
'--where', "NAME LIKE 'Mount%'"],
catch_exceptions=False)
assert result.exit_code == 0
assert result.output.count('"Feature"') == 4
def test_where_no(path_coutwildrnp_shp):
runner = CliRunner()
result = runner.invoke(
main_group,
['cat', path_coutwildrnp_shp, '--where', "STATE LIKE '%foo%'"],
catch_exceptions=False)
assert result.exit_code == 0
assert result.output == ""
def test_where_yes(path_coutwildrnp_shp):
runner = CliRunner()
result = runner.invoke(
main_group,
['cat', path_coutwildrnp_shp, '--where', "NAME LIKE 'Mount%'"],
catch_exceptions=False)
assert result.exit_code == 0
assert result.output.count('"Feature"') == 9
def test_where_yes_two_files(path_coutwildrnp_shp):
runner = CliRunner()
result = runner.invoke(
main_group,
['cat', path_coutwildrnp_shp, path_coutwildrnp_shp,
'--where', "NAME LIKE 'Mount%'"],
catch_exceptions=False)
assert result.exit_code == 0
assert result.output.count('"Feature"') == 18
def test_where_fail(data_dir):
runner = CliRunner()
result = runner.invoke(main_group, ['cat', '--where', "NAME=3",
data_dir])
assert result.exit_code != 0
def test_multi_layer(data_dir):
layerdef = "1:coutwildrnp,1:coutwildrnp"
runner = CliRunner()
result = runner.invoke(
main_group, ['cat', '--layer', layerdef, data_dir])
assert result.output.count('"Feature"') == 134
def test_multi_layer_fail(data_dir):
runner = CliRunner()
result = runner.invoke(main_group, ['cat', '--layer', '200000:coutlildrnp',
data_dir])
assert result.exit_code != 0
def test_vfs(path_coutwildrnp_zip):
runner = CliRunner()
result = runner.invoke(main_group, [
'cat', f'zip://{path_coutwildrnp_zip}'])
assert result.exit_code == 0
assert result.output.count('"Feature"') == 67
def test_dst_crs_epsg3857(path_coutwildrnp_shp):
"""Confirm fix of issue #952"""
runner = CliRunner()
result = runner.invoke(
main_group, ["cat", "--dst-crs", "EPSG:3857", path_coutwildrnp_shp]
)
assert result.exit_code == 0
assert result.output.count('"Feature"') == 67
|
3,441 |
train
|
"""
[Graph Attention Networks]
(https://arxiv.org/abs/1710.10903)
"""
import dgl.sparse as dglsp
import torch
import torch.nn as nn
import torch.nn.functional as F
from dgl.data import CoraGraphDataset
from torch.optim import Adam
class GATConv(nn.Module):
def __init__(self, in_size, out_size, num_heads, dropout):
super().__init__()
self.out_size = out_size
self.num_heads = num_heads
self.dropout = nn.Dropout(dropout)
self.W = nn.Linear(in_size, out_size * num_heads)
self.a_l = nn.Parameter(torch.zeros(1, out_size, num_heads))
self.a_r = nn.Parameter(torch.zeros(1, out_size, num_heads))
self.reset_parameters()
def reset_parameters(self):
gain = nn.init.calculate_gain("relu")
nn.init.xavier_normal_(self.W.weight, gain=gain)
nn.init.xavier_normal_(self.a_l, gain=gain)
nn.init.xavier_normal_(self.a_r, gain=gain)
###########################################################################
    # (HIGHLIGHT) Take advantage of DGL sparse APIs to implement
# multihead attention.
###########################################################################
def forward(self, A_hat, Z):
Z = self.dropout(Z)
Z = self.W(Z).view(Z.shape[0], self.out_size, self.num_heads)
# a^T [Wh_i || Wh_j] = a_l Wh_i + a_r Wh_j
e_l = (Z * self.a_l).sum(dim=1)
e_r = (Z * self.a_r).sum(dim=1)
e = e_l[A_hat.row] + e_r[A_hat.col]
a = F.leaky_relu(e)
A_atten = dglsp.val_like(A_hat, a).softmax()
a_drop = self.dropout(A_atten.val)
A_atten = dglsp.val_like(A_atten, a_drop)
return dglsp.bspmm(A_atten, Z)
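# Shape sketch for GATConv.forward above (editor's annotation; N nodes, E edges
# in A_hat, assuming the DGL sparse API semantics used here):
#   Z:        [N, out_size, num_heads] after the linear projection and view
#   e_l, e_r: [N, num_heads]; gathered per edge into e: [E, num_heads]
#   A_atten:  sparse [N, N] with per-head edge weights, softmaxed over each row
#   output:   [N, out_size, num_heads] from the batched sparse-dense matmul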
class GAT(nn.Module):
def __init__(
self, in_size, out_size, hidden_size=8, num_heads=8, dropout=0.6
):
super().__init__()
self.in_conv = GATConv(
in_size, hidden_size, num_heads=num_heads, dropout=dropout
)
self.out_conv = GATConv(
hidden_size * num_heads, out_size, num_heads=1, dropout=dropout
)
def forward(self, A_hat, X):
# Flatten the head and feature dimension.
Z = F.elu(self.in_conv(A_hat, X)).flatten(1)
# Average over the head dimension.
Z = self.out_conv(A_hat, Z).mean(-1)
return Z
def evaluate(g, pred):
label = g.ndata["label"]
val_mask = g.ndata["val_mask"]
test_mask = g.ndata["test_mask"]
# Compute accuracy on validation/test set.
val_acc = (pred[val_mask] == label[val_mask]).float().mean()
test_acc = (pred[test_mask] == label[test_mask]).float().mean()
return val_acc, test_acc
def METHOD_NAME(model, g, A_hat, X):
label = g.ndata["label"]
train_mask = g.ndata["train_mask"]
optimizer = Adam(model.parameters(), lr=1e-2, weight_decay=5e-4)
for epoch in range(50):
# Forward.
model.METHOD_NAME()
logits = model(A_hat, X)
# Compute loss with nodes in training set.
loss = F.cross_entropy(logits[train_mask], label[train_mask])
# Backward.
optimizer.zero_grad()
loss.backward()
optimizer.step()
# Compute prediction.
model.eval()
logits = model(A_hat, X)
pred = logits.argmax(dim=1)
# Evaluate the prediction.
val_acc, test_acc = evaluate(g, pred)
print(
f"In epoch {epoch}, loss: {loss:.3f}, val acc: {val_acc:.3f}, test"
f" acc: {test_acc:.3f}"
)
if __name__ == "__main__":
# If CUDA is available, use GPU to accelerate the training, use CPU
# otherwise.
dev = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Load graph from the existing dataset.
dataset = CoraGraphDataset()
g = dataset[0].to(dev)
# Create the sparse adjacency matrix A.
indices = torch.stack(g.edges())
N = g.num_nodes()
A = dglsp.spmatrix(indices, shape=(N, N))
# Add self-loops.
I = dglsp.identity(A.shape, device=dev)
A_hat = A + I
# Create GAT model.
X = g.ndata["feat"]
in_size = X.shape[1]
out_size = dataset.num_classes
model = GAT(in_size, out_size).to(dev)
# Kick off training.
METHOD_NAME(model, g, A_hat, X)
|
3,442 |
build test graph
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.ops.control_flow_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import gen_control_flow_ops
from tensorflow.python.platform import test
class ControlFlowUtilTest(test.TestCase):
@test_util.run_v1_only("b/120545219")
def testIsSwitch(self):
switch_false, _ = control_flow_ops.switch(1, True)
switch = switch_false.op
self.assertTrue(control_flow_util.IsSwitch(switch))
ref_switch_false, _ = control_flow_ops.ref_switch(test_ops.ref_output(),
True)
ref_switch = ref_switch_false.op
self.assertTrue(control_flow_util.IsSwitch(ref_switch))
self.assertFalse(control_flow_util.IsSwitch(test_ops.int_output().op))
@test_util.run_v1_only("b/120545219")
def testIsLoopEnter(self):
enter = gen_control_flow_ops.enter(1, frame_name="name").op
self.assertTrue(control_flow_util.IsLoopEnter(enter))
self.assertFalse(control_flow_util.IsLoopConstantEnter(enter))
ref_enter = gen_control_flow_ops.ref_enter(test_ops.ref_output(),
frame_name="name").op
self.assertTrue(control_flow_util.IsLoopEnter(ref_enter))
self.assertFalse(control_flow_util.IsLoopConstantEnter(ref_enter))
const_enter = gen_control_flow_ops.enter(1, frame_name="name",
is_constant=True).op
self.assertTrue(control_flow_util.IsLoopEnter(const_enter))
self.assertTrue(control_flow_util.IsLoopConstantEnter(const_enter))
self.assertFalse(control_flow_util.IsLoopEnter(test_ops.int_output().op))
@test_util.run_v1_only("b/120545219")
def testIsLoopExit(self):
exit_op = control_flow_ops.exit(1).op
self.assertTrue(control_flow_util.IsLoopExit(exit_op))
ref_exit = control_flow_ops.exit(test_ops.ref_output()).op
self.assertTrue(control_flow_util.IsLoopExit(ref_exit))
self.assertFalse(control_flow_util.IsLoopExit(test_ops.int_output().op))
def METHOD_NAME(self):
g = ops.Graph()
with g.as_default():
def while_loop(x):
def b(x):
with ops.name_scope("NestedCond"):
return control_flow_ops.cond(
math_ops.less(x, 100), lambda: math_ops.add(x, 1),
lambda: math_ops.add(x, 2))
c = lambda x: math_ops.less(x, 10000)
with ops.name_scope("OuterWhile"):
return control_flow_ops.while_loop(c, b, [x])
x = array_ops.placeholder(dtypes.int32)
with ops.name_scope("OuterCond"):
control_flow_ops.cond(
math_ops.less(x, 1000), lambda: while_loop(x),
lambda: math_ops.add(x, 2))
return g
def testIsCondSwitch(self):
g = self.METHOD_NAME()
cond_switch = [
"OuterCond/cond/Switch",
"OuterCond/cond/OuterWhile/while/Switch",
"OuterCond/cond/OuterWhile/while/NestedCond/cond/Switch",
"OuterCond/cond/OuterWhile/while/NestedCond/cond/Add/Switch",
"OuterCond/cond/OuterWhile/while/NestedCond/cond/Add_1/Switch",
"OuterCond/cond/Add/Switch",
]
for n in g.get_operations():
if control_flow_util.IsSwitch(n):
self.assertTrue(
control_flow_util.IsCondSwitch(n) != control_flow_util.IsLoopSwitch(
n))
if n.name in cond_switch:
self.assertTrue(control_flow_util.IsSwitch(n))
self.assertTrue(
control_flow_util.IsCondSwitch(n),
msg="Mismatch for {}".format(n.name))
self.assertFalse(
control_flow_util.IsLoopSwitch(n),
msg="Mismatch for {}".format(n.name))
else:
self.assertFalse(
control_flow_util.IsCondSwitch(n),
msg="Mismatch for {}".format(n.name))
def testIsLoopSwitch(self):
g = self.METHOD_NAME()
loop_switch = ["OuterCond/cond/OuterWhile/while/Switch_1"]
for n in g.get_operations():
if control_flow_util.IsSwitch(n):
self.assertTrue(
control_flow_util.IsCondSwitch(n) != control_flow_util.IsLoopSwitch(
n))
if n.name in loop_switch:
self.assertTrue(control_flow_util.IsSwitch(n))
self.assertFalse(
control_flow_util.IsCondSwitch(n),
msg="Mismatch for {}".format(n.name))
self.assertTrue(
control_flow_util.IsLoopSwitch(n),
msg="Mismatch for {}".format(n.name))
else:
self.assertFalse(
control_flow_util.IsLoopSwitch(n),
msg="Mismatch for {}".format(n.name))
def testIsCondMerge(self):
g = self.METHOD_NAME()
cond_merges = [
"OuterCond/cond/OuterWhile/while/NestedCond/cond/Merge",
"OuterCond/cond/Merge"
]
for n in g.get_operations():
if n.name in cond_merges:
self.assertTrue(control_flow_util.IsMerge(n))
self.assertTrue(control_flow_util.IsCondMerge(n))
self.assertFalse(control_flow_util.IsLoopMerge(n))
else:
self.assertFalse(control_flow_util.IsCondMerge(n))
self.assertTrue(not control_flow_util.IsMerge(n) or
control_flow_util.IsLoopMerge(n))
def testIsLoopMerge(self):
g = self.METHOD_NAME()
loop_merges = [
"OuterCond/cond/OuterWhile/while/Merge",
]
for n in g.get_operations():
if n.name in loop_merges:
self.assertTrue(control_flow_util.IsMerge(n))
self.assertFalse(control_flow_util.IsCondMerge(n))
self.assertTrue(control_flow_util.IsLoopMerge(n))
else:
self.assertFalse(control_flow_util.IsLoopMerge(n))
self.assertTrue(not control_flow_util.IsMerge(n) or
control_flow_util.IsCondMerge(n))
if __name__ == "__main__":
test.main()
|
3,443 |
get termcolor opts
|
#!/usr/bin/env python
"""
Larch show() function
"""
import os
import sys
import types
import numpy
from larch import Group
TERMCOLOR_COLORS = ('grey', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white')
def get(sym=None, _larch=None):
"""get object from symbol table from symbol name:
>>> g = group(a = 1, b=2.3, z = 'a string')
>>> print(get('g.z'))
'a string'
"""
if sym is None:
sym = _larch.symtable
group = None
symtable = _larch.symtable
if symtable.isgroup(sym):
group = sym
elif isinstance(sym, types.ModuleType):
group = sym
elif isinstance(sym, str):
group = symtable._lookup(sym, create=False)
return group
def show_tree(group, indent=0, groups_shown=None, _larch=None):
"""show members of a Group, with a tree structure for sub-groups
> show_tree(group1)
"""
if groups_shown is None:
groups_shown = []
for item in dir(group):
if (item.startswith('__') and item.endswith('__')):
continue
obj = getattr(group, item)
dval = None
if _larch.symtable.isgroup(obj):
_larch.writer.write('%s %s: %s\n' % (indent*' ', item, obj))
if id(obj) in groups_shown:
_larch.writer.write('%s (shown above)\n' % (indent*' '))
else:
groups_shown.append(id(obj))
show_tree(obj, indent=indent+3, _larch=_larch, groups_shown=groups_shown)
else:
dval = repr(obj)
if isinstance(obj, numpy.ndarray):
if len(obj) > 10 or len(obj.shape)>1:
dval = "array<shape=%s, type=%s>" % (repr(obj.shape),
repr(obj.dtype))
_larch.writer.write('%s %s: %s\n' % (indent*' ', item, dval))
def show(sym=None, with_private=False, with_color=True, color=None,
color2=None, truncate=True, with_methods=True, _larch=None):
"""show group members:
Options
-------
with_private: show 'private' members ('__private__') if True
with_color: show alternating lines in color if True and color is available.
truncate: truncate representation of lengthy lists and tuples if True
with_methods: suppress display of methods if False
"""
if sym is None:
sym = _larch.symtable
group = None
symtable = _larch.symtable
display = symtable._sys.display
with_color = with_color and display.use_color
title = sym
if symtable.isgroup(sym):
group = sym
title = repr(sym)[1:-1]
elif isinstance(sym, types.ModuleType):
group = sym
title = sym.__name__
if group is None:
_larch.writer.write("%s\n" % repr(sym))
return
if title.startswith(symtable.top_group):
title = title[6:]
if group == symtable:
title = 'Group _main'
## set colors for output
colopts1 = display.colors['text']
colopts2 = display.colors['text2']
if with_color:
if color is not None:
colopts1['color'] = color
if color2 is not None:
colopts2['color'] = color2
_copts = {1: colopts1, 0: colopts2}
members = dir(group)
dmembers = []
nmethods = 0
for item in members:
if (item.startswith('__') and item.endswith('__') and
not with_private):
continue
obj = getattr(group, item)
if callable(obj):
nmethods +=1
if not with_methods:
continue
dmembers.append((item, obj))
write = _larch.writer.write
color_output = hasattr(_larch.writer, 'set_textstyle')
title_fmt = '== %s: %i methods, %i attributes ==\n'
write(title_fmt % (title, nmethods, len(dmembers)-nmethods))
count = 0
for item, obj in dmembers:
if (isinstance(obj, numpy.ndarray) and
(len(obj) > 10 or len(obj.shape)>1)):
dval = "array<shape=%s, type=%s>" % (repr(obj.shape),
repr(obj.dtype))
elif isinstance(obj, (list, tuple)) and truncate and len(obj) > 5:
dval = "[%s, %s, ... %s, %s]" % (repr(obj[0]), repr(obj[1]),
repr(obj[-2]), repr(obj[-1]))
else:
try:
dval = repr(obj)
except:
dval = obj
if color_output:
_larch.writer.set_textstyle({True:'text', False:'text2'}[(count%2)==1])
count += 1
write(' %s: %s\n' % (item, dval))
if color_output:
_larch.writer.set_textstyle('text')
_larch.writer.flush()
def METHOD_NAME(dtype, _larch=None):
""" get color options suitable for passing to
larch's writer.write() for color output
first argument should be string of
'text', 'text2', 'error', 'comment'"""
out = {'color': None}
display = _larch.symtable._sys.display
if display.use_color:
out = getattr(display.colors, dtype, out)
return out
_larch_builtins = dict(show=show, show_tree=show_tree, get=get,
METHOD_NAME= METHOD_NAME)
|
3,444 |
test multiple renders multiple unique slugs
|
import os
from unittest.mock import patch
from django.http import HttpRequest
from django.template import engines
from django.test import SimpleTestCase, TestCase, override_settings
from core.tests.templatetags.test_svg_icon import VALID_SVG
@override_settings(
STATICFILES_DIRS=[
os.path.join(os.path.dirname(__file__), "staticfiles"),
]
)
class SvgIconTests(TestCase):
def setUp(self):
self.jinja_engine = engines["wagtail-env"]
def test_jinja_tag(self):
template = self.jinja_engine.from_string('{{ svg_icon("test") }}')
self.assertEqual(template.render(), VALID_SVG)
@patch("core.templatetags.svg_icon.FALLBACK_ICON_NAME", "test")
def test_jinja_tag_fallback(self):
template = self.jinja_engine.from_string('{{ svg_icon("invalid") }}')
self.assertEqual(template.render(), VALID_SVG)
@patch("core.templatetags.svg_icon.FALLBACK_ICON_NAME", "missing")
def test_jinja_tag_fallback_not_found_error(self):
template = self.jinja_engine.from_string('{{ svg_icon("missing") }}')
with self.assertRaises(FileNotFoundError):
template.render()
@patch("core.templatetags.svg_icon.FALLBACK_ICON_NAME", "invalid")
def test_jinja_tag_fallback_invalid_error(self):
template = self.jinja_engine.from_string('{{ svg_icon("invalid") }}')
with self.assertRaises(ValueError):
template.render()
@override_settings(FLAGS={"MY_FLAG": [("boolean", True)]})
class FeatureFlagTests(TestCase):
def setUp(self):
self.jinja_engine = engines["wagtail-env"]
def test_flag_enabled_tag(self):
template = self.jinja_engine.from_string(
'{{ flag_enabled("MY_FLAG") }}'
)
self.assertEqual(template.render({"request": None}), "True")
def test_flag_disabled_tag(self):
template = self.jinja_engine.from_string(
'{{ flag_disabled("MY_FLAG") }}'
)
self.assertEqual(template.render({"request": None}), "False")
class SlugifyUniqueTests(SimpleTestCase):
def setUp(self):
self.engine = engines["wagtail-env"]
self.template = '{{ "Some text" | slugify_unique }}'
def render(self, template, context=None):
return self.engine.from_string(template).render(context=context)
def test_no_context(self):
self.assertEqual(self.render(self.template), "some-text")
def test_no_request_in_context(self):
self.assertEqual(self.render(self.template, {}), "some-text")
def test_render_with_request_in_context(self):
self.assertEqual(
self.render(self.template, {"request": HttpRequest()}), "some-text"
)
def test_render_uses_request_to_make_multiple_unique_slugs(self):
request = HttpRequest()
template = " and ".join([self.template, self.template])
self.assertEqual(
self.render(template, {"request": request}),
"some-text and some-text-1",
)
def test_render_without_request_repeats_slugs(self):
template = " and ".join([self.template, self.template])
self.assertEqual(self.render(template), "some-text and some-text")
def METHOD_NAME(self):
request = HttpRequest()
rendered = [
self.render(self.template, {"request": request}) for _ in range(5)
]
self.assertEqual(
rendered,
[
"some-text",
"some-text-1",
"some-text-2",
"some-text-3",
"some-text-4",
],
)
def test_different_requests_allow_repeats(self):
for _ in range(5):
self.assertEqual(
self.render(self.template, {"request": HttpRequest()}),
"some-text",
)
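# Purely illustrative sketch (NOT the project's actual filter): one way to get
# the request-scoped behaviour asserted by SlugifyUniqueTests is to cache seen
# slugs on the request object and append a counter for repeats. The name and
# signature below are hypothetical.
def _slugify_unique_sketch(value, request=None):
    from django.utils.text import slugify
    slug = slugify(value)
    if request is None:
        # Without a request there is nothing to scope uniqueness to.
        return slug
    seen = getattr(request, "_seen_slugs", None)
    if seen is None:
        seen = request._seen_slugs = {}
    count = seen.get(slug, 0)
    seen[slug] = count + 1
    return slug if count == 0 else "{}-{}".format(slug, count)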
class LanguageTagTests(SimpleTestCase):
def setUp(self):
self.engine = engines["wagtail-env"]
def render(self, template):
return self.engine.from_string(template).render()
def test_english_translation(self):
self.assertEqual(
self.render(
"{% language 'en' %}{{ _( 'English' ) }}{% endlanguage %}"
),
"English",
)
def test_spanish_translation(self):
self.assertEqual(
self.render(
"{% language 'es' %}{{ _( 'English' ) }}{% endlanguage %}"
),
"Inglés",
)
|
3,445 |
on open shell
|
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import json
import re
from ansible.errors import AnsibleConnectionFailure
from ansible.module_utils.common.text.converters import to_text, to_bytes
from ansible.plugins.terminal import TerminalBase
from ansible.utils.display import Display
display = Display()
class TerminalModule(TerminalBase):
terminal_stdout_re = [
re.compile(br"[\r\n]?[\w\+\-\.:\/\[\]]+(?:\([^\)]+\)){0,3}(?:[>#]) ?$")
]
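    # Illustrative examples (not from the original source): prompts such as
    # b"Router>" or b"Switch(config-if)#" are matched by terminal_stdout_re.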
terminal_stderr_re = [
re.compile(br"% ?Error"),
# re.compile(br"^% \w+", re.M),
re.compile(br"% ?Bad secret"),
re.compile(br"[\r\n%] Bad passwords"),
re.compile(br"invalid input", re.I),
re.compile(br"(?:incomplete|ambiguous) command", re.I),
re.compile(br"connection timed out", re.I),
re.compile(br"[^\r\n]+ not found"),
re.compile(br"'[^']' +returned error code: ?\d+"),
re.compile(br"Bad mask", re.I),
re.compile(br"% ?(\S+) ?overlaps with ?(\S+)", re.I),
re.compile(br"[%\S] ?Error: ?[\s]+", re.I),
re.compile(br"[%\S] ?Informational: ?[\s]+", re.I),
re.compile(br"Command authorization failed"),
]
def METHOD_NAME(self):
try:
self._exec_cli_command(b"terminal length 0")
except AnsibleConnectionFailure:
raise AnsibleConnectionFailure("unable to set terminal parameters")
try:
self._exec_cli_command(b"terminal width 512")
try:
self._exec_cli_command(b"terminal width 0")
except AnsibleConnectionFailure:
pass
except AnsibleConnectionFailure:
display.display(
"WARNING: Unable to set terminal width, command responses may be truncated"
)
def on_become(self, passwd=None):
if self._get_prompt().endswith(b"#"):
return
cmd = {u"command": u"enable"}
if passwd:
# Note: python-3.5 cannot combine u"" and r"" together. Thus make
# an r string and use to_text to ensure it's text on both py2 and py3.
cmd[u"prompt"] = to_text(
r"[\r\n]?(?:.*)?[Pp]assword: ?$", errors="surrogate_or_strict"
)
cmd[u"answer"] = passwd
cmd[u"prompt_retry_check"] = True
try:
self._exec_cli_command(
to_bytes(json.dumps(cmd), errors="surrogate_or_strict")
)
prompt = self._get_prompt()
if prompt is None or not prompt.endswith(b"#"):
raise AnsibleConnectionFailure(
"failed to elevate privilege to enable mode still at prompt [%s]"
% prompt
)
except AnsibleConnectionFailure as e:
prompt = self._get_prompt()
raise AnsibleConnectionFailure(
"unable to elevate privilege to enable mode, at prompt [%s] with error: %s"
% (prompt, e.message)
)
def on_unbecome(self):
prompt = self._get_prompt()
if prompt is None:
            # if prompt is None, the terminal is most likely hung at a prompt
return
if b"(config" in prompt:
self._exec_cli_command(b"end")
self._exec_cli_command(b"disable")
elif prompt.endswith(b"#"):
self._exec_cli_command(b"disable")
|
3,446 |
all hamming
|
import sys
"""
reference: dict_trie
"""
if sys.version_info.major < 3:
from itertools import imap as map
def _add(root, word, count):
"""Add a word to a trie.
:arg dict root: Root of the trie.
:arg str word: A word.
:arg int count: Multiplicity of `word`.
"""
node = root
for char in word:
if char not in node:
node[char] = {}
node = node[char]
if '' not in node:
node[''] = 0
node[''] += count
def _find(root, word):
"""Find the node after following the path in a trie given by {word}.
:arg dict root: Root of the trie.
:arg str word: A word.
:returns dict: The node if found, {} otherwise.
"""
node = root
for char in word:
if char not in node:
return {}
node = node[char]
return node
def _remove(node, word, count):
"""Remove a word from a trie.
:arg dict node: Current node.
:arg str word: Word to be removed.
:arg int count: Multiplicity of `word`, force remove if this is -1.
:returns bool: True if the last occurrence of `word` is removed.
"""
if not word:
if '' in node:
node[''] -= count
if node[''] < 1 or count == -1:
node.pop('')
return True
return False
car, cdr = word[0], word[1:]
if car not in node:
return False
result = _remove(node[car], cdr, count)
if result:
if not node[car]:
node.pop(car)
return result
def _iterate(path, node, unique):
"""Convert a trie into a list.
:arg str path: Path taken so far to reach the current node.
:arg dict node: Current node.
:arg bool unique: Do not list multiplicities.
:returns iter: All words in a trie.
"""
if '' in node:
if not unique:
for _ in range(1, node['']):
yield path
yield path
for char in node:
if char:
for result in _iterate(path + char, node[char], unique):
yield result
def _fill(node, alphabet, length):
"""Make a full trie using the characters in {alphabet}.
:arg dict node: Current node.
:arg tuple alphabet: Used alphabet.
:arg int length: Length of the words to be generated.
:returns iter: Trie containing all words of length {length} over alphabet
{alphabet}.
"""
if not length:
node[''] = 1
return
for char in alphabet:
node[char] = {}
_fill(node[char], alphabet, length - 1)
def _hamming(path, node, word, distance, cigar):
"""Find all paths in a trie that are within a certain hamming distance of
{word}.
:arg str path: Path taken so far to reach the current node.
:arg dict node: Current node.
:arg str word: Query word.
:arg int distance: Amount of allowed errors.
:returns iter: All words in a trie that have Hamming distance of at most
{distance} to {word}.
"""
if distance < 0:
return
if not word:
if '' in node:
yield (path, distance, cigar)
return
car, cdr = word[0], word[1:]
for char in node:
if char:
if char == car:
penalty = 0
operation = '='
else:
penalty = 1
operation = 'X'
for result in _hamming(
path + char, node[char], cdr, distance - penalty,
cigar + operation):
yield result
def _levenshtein(path, node, word, distance, cigar):
"""Find all paths in a trie that are within a certain Levenshtein
distance of {word}.
:arg str path: Path taken so far to reach the current node.
:arg dict node: Current node.
:arg str word: Query word.
:arg int distance: Amount of allowed errors.
    :returns iter: All words in a trie that have Levenshtein distance of at most
{distance} to {word}.
"""
if distance < 0:
return
if not word:
if '' in node:
yield (path, distance, cigar)
car, cdr = '', ''
else:
car, cdr = word[0], word[1:]
# Deletion.
for result in _levenshtein(path, node, cdr, distance - 1, cigar + 'D'):
yield result
for char in node:
if char:
# Substitution.
if car:
if char == car:
penalty = 0
operation = '='
else:
penalty = 1
operation = 'X'
for result in _levenshtein(
path + char, node[char], cdr, distance - penalty,
cigar + operation):
yield result
# Insertion.
for result in _levenshtein(
path + char, node[char], word, distance - 1, cigar + 'I'):
yield result
class Trie(object):
def __init__(self, words=None):
"""Initialise the class.
:arg list words: List of words.
"""
self.root = {}
if words:
for word in words:
self.add(word)
def __contains__(self, word):
return '' in _find(self.root, word)
def __iter__(self):
return _iterate('', self.root, True)
def list(self, unique=True):
return _iterate('', self.root, unique)
def add(self, word, count=1):
_add(self.root, word, count)
def get(self, word):
node = _find(self.root, word)
if '' in node:
return node['']
return None
def remove(self, word, count=1):
return _remove(self.root, word, count)
def has_prefix(self, word):
return _find(self.root, word) != {}
def fill(self, alphabet, length):
_fill(self.root, alphabet, length)
def all_hamming_(self, word, distance):
return map(
lambda x: (x[0], distance - x[1], x[2]),
_hamming('', self.root, word, distance, ''))
def METHOD_NAME(self, word, distance):
return map(
lambda x: x[0], _hamming('', self.root, word, distance, ''))
def hamming(self, word, distance):
try:
return next(self.METHOD_NAME(word, distance))
except StopIteration:
return None
def best_hamming(self, word, distance):
"""Find the best match with {word} in a trie.
:arg str word: Query word.
:arg int distance: Maximum allowed distance.
:returns str: Best match with {word}.
"""
if self.get(word):
return word
for i in range(1, distance + 1):
result = self.hamming(word, i)
if result is not None:
return result
return None
def all_levenshtein_(self, word, distance):
return map(
lambda x: (x[0], distance - x[1], x[2]),
_levenshtein('', self.root, word, distance, ''))
def all_levenshtein(self, word, distance):
return map(
lambda x: x[0], _levenshtein('', self.root, word, distance, ''))
def levenshtein(self, word, distance):
try:
return next(self.all_levenshtein(word, distance))
except StopIteration:
return None
def best_levenshtein(self, word, distance):
"""Find the best match with {word} in a trie.
:arg str word: Query word.
:arg int distance: Maximum allowed distance.
:returns str: Best match with {word}.
"""
if self.get(word):
return word
for i in range(1, distance + 1):
result = self.levenshtein(word, i)
if result is not None:
return result
return None
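if __name__ == '__main__':
    # Small usage sketch (added for illustration, not part of the original
    # module): build a trie and query it with exact, Hamming and Levenshtein
    # matching.
    trie = Trie(['abc', 'abd', 'test'])
    print('abc' in trie)  # True
    print(trie.hamming('abx', 1))  # 'abc' (first word within distance 1)
    print(list(trie.all_levenshtein('tst', 1)))  # ['test']
    print(trie.best_hamming('abd', 1))  # 'abd' (exact match is returned first)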
|
3,447 |
build arguments schema
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"connectedmachine private-link-resource show",
)
class Show(AAZCommand):
"""Get the private link resources that need to be created for an Azure Monitor PrivateLinkScope.
:example: Sample command for private-link-resource show
az connectedmachine private-link-resource show --group-name hybridcompute --resource-group myResourceGroup --scope-name myPrivateLinkScope
"""
_aaz_info = {
"version": "2022-12-27",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.hybridcompute/privatelinkscopes/{}/privatelinkresources/{}", "2022-12-27"],
]
}
def _handler(self, command_args):
super()._handler(command_args)
self._execute_operations()
return self._output()
_args_schema = None
@classmethod
def METHOD_NAME(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super().METHOD_NAME(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.group_name = AAZStrArg(
options=["-n", "--name", "--group-name"],
help="The name of the private link resource.",
required=True,
id_part="child_name_1",
)
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
_args_schema.scope_name = AAZStrArg(
options=["--scope-name"],
help="The name of the Azure Arc PrivateLinkScope resource.",
required=True,
id_part="name",
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
self.PrivateLinkResourcesGet(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
def _output(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
return result
class PrivateLinkResourcesGet(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [200]:
return self.on_200(session)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HybridCompute/privateLinkScopes/{scopeName}/privateLinkResources/{groupName}",
**self.url_parameters
)
@property
def method(self):
return "GET"
@property
def error_format(self):
return "MgmtErrorFormat"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"groupName", self.ctx.args.group_name,
required=True,
),
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"scopeName", self.ctx.args.scope_name,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2022-12-27",
required=True,
),
}
return parameters
@property
def header_parameters(self):
parameters = {
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
def on_200(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
)
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZObjectType()
_schema_on_200 = cls._schema_on_200
_schema_on_200.id = AAZStrType(
flags={"read_only": True},
)
_schema_on_200.name = AAZStrType(
flags={"read_only": True},
)
_schema_on_200.properties = AAZObjectType()
_schema_on_200.system_data = AAZObjectType(
serialized_name="systemData",
flags={"read_only": True},
)
_schema_on_200.type = AAZStrType(
flags={"read_only": True},
)
properties = cls._schema_on_200.properties
properties.group_id = AAZStrType(
serialized_name="groupId",
flags={"read_only": True},
)
properties.required_members = AAZListType(
serialized_name="requiredMembers",
flags={"read_only": True},
)
properties.required_zone_names = AAZListType(
serialized_name="requiredZoneNames",
flags={"read_only": True},
)
required_members = cls._schema_on_200.properties.required_members
required_members.Element = AAZStrType()
required_zone_names = cls._schema_on_200.properties.required_zone_names
required_zone_names.Element = AAZStrType()
system_data = cls._schema_on_200.system_data
system_data.created_at = AAZStrType(
serialized_name="createdAt",
)
system_data.created_by = AAZStrType(
serialized_name="createdBy",
)
system_data.created_by_type = AAZStrType(
serialized_name="createdByType",
)
system_data.last_modified_at = AAZStrType(
serialized_name="lastModifiedAt",
)
system_data.last_modified_by = AAZStrType(
serialized_name="lastModifiedBy",
)
system_data.last_modified_by_type = AAZStrType(
serialized_name="lastModifiedByType",
)
return cls._schema_on_200
class _ShowHelper:
"""Helper class for Show"""
__all__ = ["Show"]
|
3,448 |
get funding rate history
|
"""
Base class for FuturesExchangeInterfaces.
Provides common functionality for Futures Exchanges.
Copyright (C) 2022 Matias Kotlik
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
import abc
from typing import Union, Optional, List
import blankly.utils.utils as utils
from blankly.enums import MarginType, HedgeMode, PositionMode, OrderType, Side, TimeInForce, ContractType
from blankly.exchanges.interfaces.abc_base_exchange_interface import ABCBaseExchangeInterface
from blankly.exchanges.orders.futures.futures_order import FuturesOrder
class FuturesExchangeInterface(ABCBaseExchangeInterface, abc.ABC):
exchange_name: str
def __init__(self,
exchange_name,
authenticated_api,
preferences_path=None):
self.exchange_name = exchange_name
self.calls = authenticated_api
self.user_preferences = utils.load_user_preferences(preferences_path)
self.exchange_properties = None
self.available_currencies = {}
if self.user_preferences['settings']['test_connectivity_on_auth']:
self.init_exchange()
@staticmethod
def to_blankly_symbol(symbol: str):
return symbol
@staticmethod
def to_exchange_symbol(symbol: str):
return symbol
def get_exchange_type(self) -> str:
"""Returns the exchange type (ex. 'binance', 'coinbase', 'alpaca')"""
return self.exchange_name
@abc.abstractmethod
def init_exchange(self):
"""Initializes the exchange"""
pass
@abc.abstractmethod
def get_products(self, symbol: str = None) -> dict:
"""Returns a list of all products traded on this exchange"""
pass
@abc.abstractmethod
def get_account(self, symbol: str = None) -> dict:
"""Returns account information, or information for only one `symbol` if one is given."""
pass
    # TODO: this method name might need to change to get_position?
@abc.abstractmethod
def get_position(self, symbol: str = None) -> Optional[dict]:
"""Returns position information, or information for only one `symbol` if one is given"""
pass
@abc.abstractmethod
def market_order(self,
symbol: str,
side: Side,
size: float,
position: PositionMode = None,
reduce_only: bool = None) -> FuturesOrder:
"""Places a market order"""
pass
@abc.abstractmethod
def limit_order(self,
symbol: str,
side: Side,
price: float,
size: float,
position: PositionMode = None,
reduce_only: bool = None,
time_in_force: TimeInForce = None) -> FuturesOrder:
"""Places a limit order"""
pass
@abc.abstractmethod
def take_profit_order(self,
symbol: str,
side: Side,
price: float,
size: float,
position: PositionMode = None) -> FuturesOrder:
"""Place a take-profit order for a position"""
pass
@abc.abstractmethod
def stop_loss_order(self,
symbol: str,
side: Side,
price: float,
size: float,
position: PositionMode = None) -> FuturesOrder:
"""Place a stop-loss order for a position"""
pass
@abc.abstractmethod
def set_hedge_mode(self, hedge_mode: HedgeMode):
pass
@abc.abstractmethod
def get_hedge_mode(self):
pass
@abc.abstractmethod
def set_leverage(self, leverage: float, symbol: str = None):
pass
@abc.abstractmethod
def get_leverage(self, symbol: str = None) -> float:
pass
@abc.abstractmethod
def set_margin_type(self, symbol: str, type: MarginType):
pass
@abc.abstractmethod
def get_margin_type(self, symbol: str):
pass
@abc.abstractmethod
def cancel_order(self, symbol: str, order_id: int) -> FuturesOrder:
"""Cancels an order"""
pass
@abc.abstractmethod
def get_open_orders(self, symbol: str = None) -> List[FuturesOrder]:
"""Returns all currently open orders, filtered by `symbol` if one is provided."""
pass
@abc.abstractmethod
def get_order(self, symbol: str, order_id: int) -> FuturesOrder:
"""Returns information for the order corresponding to `order_id`"""
pass
@abc.abstractmethod
def get_price(self, symbol: str) -> float:
"""Returns the current price of an asset"""
pass
@property
def account(self) -> dict:
"""Account information"""
return self.get_account()
@property
def positions(self) -> dict:
"""Position information"""
return self.get_position()
@property
def orders(self) -> list:
"""All currently open orders"""
return self.get_open_orders()
@property
def cash(self) -> float:
"""The amount of cash in a portfolio. The currency for 'cash' is set in settings.json"""
using_setting = self.user_preferences['settings'][
self.exchange_name]['cash']
return self.get_account(using_setting)['available']
@abc.abstractmethod
def METHOD_NAME(self, symbol: str, epoch_start: int,
epoch_stop: int) -> list:
"""
        Get the funding rate history between `epoch_start` and `epoch_stop`.
Returns a list of {'rate': int, 'time': int}
"""
pass
@abc.abstractmethod
def get_funding_rate(self, symbol: str) -> float:
pass
@abc.abstractmethod
def get_maker_fee(self) -> float:
pass
@abc.abstractmethod
def get_taker_fee(self) -> float:
pass
@abc.abstractmethod
def get_funding_rate_resolution(self) -> int:
pass
@property
def should_auto_trunc(self):
return self.user_preferences['settings'].get('auto_truncate', False)
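# Illustrative usage sketch (not part of this module): how a concrete subclass of
# FuturesExchangeInterface is typically consumed once every abstract method above is
# implemented. The class name `MyFuturesInterface` and the symbol 'BTC-PERP' are
# assumptions made only for this example.
#
#     interface = MyFuturesInterface('my_exchange', authenticated_api)
#     order = interface.market_order('BTC-PERP', Side.BUY, size=0.01)
#     print(interface.get_position('BTC-PERP'))
#     print(interface.get_open_orders('BTC-PERP'))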
|
3,449 |
check platform type
|
import os
import shutil
import sys
import time
from typing import Any, NoReturn, Optional
from .setting import (
CompilerType,
LOG_DIR,
PROFILE_DIR,
TestList,
TestPlatform,
TestType,
)
def convert_time(seconds: float) -> str:
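    # Format a duration in seconds as "H:MM:SS" (whole days are discarded),
    # e.g. convert_time(3661.4) -> "1:01:01".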
seconds = int(round(seconds))
seconds = seconds % (24 * 3600)
hour = seconds // 3600
seconds %= 3600
minutes = seconds // 60
seconds %= 60
return "%d:%02d:%02d" % (hour, minutes, seconds)
def print_time(message: str, start_time: float, summary_time: bool = False) -> None:
with open(os.path.join(LOG_DIR, "log.txt"), "a+") as log_file:
end_time = time.time()
print(message, convert_time(end_time - start_time), file=log_file)
if summary_time:
print("\n", file=log_file)
def print_log(*args: Any) -> None:
with open(os.path.join(LOG_DIR, "log.txt"), "a+") as log_file:
print(f"[LOG] {' '.join(args)}", file=log_file)
def print_error(*args: Any) -> None:
with open(os.path.join(LOG_DIR, "log.txt"), "a+") as log_file:
print(f"[ERROR] {' '.join(args)}", file=log_file)
def remove_file(path: str) -> None:
if os.path.exists(path):
os.remove(path)
def remove_folder(path: str) -> None:
shutil.rmtree(path)
def create_folder(*paths: Any) -> None:
for path in paths:
os.makedirs(path, exist_ok=True)
# clean up all the files generated by coverage tool
def clean_up() -> None:
# remove profile folder
remove_folder(PROFILE_DIR)
sys.exit("Clean Up Successfully!")
def convert_to_relative_path(whole_path: str, base_path: str) -> str:
# ("profile/raw", "profile") -> "raw"
if base_path not in whole_path:
raise RuntimeError(base_path + " is not in " + whole_path)
return whole_path[len(base_path) + 1 :]
def replace_extension(filename: str, ext: str) -> str:
return filename[: filename.rfind(".")] + ext
# a file is related if it's in one of the test_list folder
def related_to_test_list(file_name: str, test_list: TestList) -> bool:
for test in test_list:
if test.name in file_name:
return True
return False
def get_raw_profiles_folder() -> str:
return os.environ.get("RAW_PROFILES_FOLDER", os.path.join(PROFILE_DIR, "raw"))
def detect_compiler_type(platform: TestPlatform) -> CompilerType:
if platform == TestPlatform.OSS:
from package.oss.utils import ( # type: ignore[assignment, import, misc]
detect_compiler_type,
)
cov_type = detect_compiler_type() # type: ignore[call-arg]
else:
from caffe2.fb.code_coverage.tool.package.fbcode.utils import ( # type: ignore[import]
detect_compiler_type,
)
cov_type = detect_compiler_type()
check_compiler_type(cov_type)
return cov_type # type: ignore[no-any-return]
def get_test_name_from_whole_path(path: str) -> str:
# code_coverage_tool/profile/merged/haha.merged -> haha
start = path.rfind("/")
end = path.rfind(".")
assert start >= 0 and end >= 0
return path[start + 1 : end]
def check_compiler_type(cov_type: Optional[CompilerType]) -> None:
if cov_type is not None and cov_type in [CompilerType.GCC, CompilerType.CLANG]:
return
raise Exception(
f"Can't parse compiler type: {cov_type}.",
" Please set environment variable COMPILER_TYPE as CLANG or GCC",
)
def METHOD_NAME(platform_type: TestPlatform) -> None:
if platform_type in [TestPlatform.OSS, TestPlatform.FBCODE]:
return
raise Exception(
f"Can't parse platform type: {platform_type}.",
" Please set environment variable COMPILER_TYPE as OSS or FBCODE",
)
def check_test_type(test_type: str, target: str) -> None:
if test_type in [TestType.CPP.value, TestType.PY.value]:
return
raise Exception(
f"Can't parse test type: {test_type}.",
f" Please check the type of buck target: {target}",
)
def raise_no_test_found_exception(
cpp_binary_folder: str, python_binary_folder: str
) -> NoReturn:
raise RuntimeError(
f"No cpp and python tests found in folder **{cpp_binary_folder} and **{python_binary_folder}**"
)
|
3,450 |
pytest runtest makereport
|
# Copyright (C) 2015-2022, Wazuh Inc.
# Created by Wazuh, Inc. <[email protected]>.
# This program is free software; you can redistribute it and/or modify it under the terms of GPLv2
import pytest
from py.xml import html
import re
from numpydoc.docscrape import FunctionDoc
def pytest_addoption(parser):
# Get command line options
parser.addoption(
"--artifacts_path",
action="store",
type=str,
help="Path where information of all cluster nodes can be found (logs, stats CSVs, etc)."
)
# Fixtures
@pytest.fixture()
def artifacts_path(pytestconfig):
return pytestconfig.getoption("artifacts_path")
# HTML report
class HTMLStyle(html):
class body(html.body):
style = html.Style(background_color='#F0F0EE')
class table(html.table):
style = html.Style(border='2px solid #005E8C', margin='16px 0px', color='#005E8C',
font_size='15px')
class colored_td(html.td):
style = html.Style(color='#005E8C', padding='5px', border='2px solid #005E8C', text_align='center',
white_space='pre-wrap', font_size='14px')
class td(html.td):
style = html.Style(padding='5px', border='2px solid #005E8C', text_align='left',
white_space='pre-wrap', font_size='14px')
class th(html.th):
style = html.Style(color='#0094ce', padding='5px', border='2px solid #005E8C', text_align='center',
font_weight='bold', font_size='15px')
class h1(html.h1):
style = html.Style(color='#0094ce')
    class h2(html.h2):
        style = html.Style(color='#0094ce')
    # h3 and b are referenced below when restyling the pytest-html report; their
    # styles here are assumed to simply mirror the headings above.
    class h3(html.h3):
        style = html.Style(color='#0094ce')
    class b(html.b):
        style = html.Style(color='#0094ce')
def pytest_html_report_title(report):
report.title = 'Wazuh cluster reliability tests'
def pytest_html_results_table_header(cells):
cells.insert(2, html.th('Description'))
cells.pop()
def pytest_html_results_table_row(report, cells):
try:
cells.insert(2, html.td(report.description))
cells.pop()
except AttributeError:
pass
@pytest.hookimpl(hookwrapper=True)
def METHOD_NAME(item, call):
def atoi(text):
return int(text) if text.isdigit() else text
# Define HTML style
pytest_html = item.config.pluginmanager.getplugin('html')
pytest_html.html.body = HTMLStyle.body
pytest_html.html.table = HTMLStyle.table
pytest_html.html.th = HTMLStyle.th
pytest_html.html.td = HTMLStyle.td
pytest_html.html.h1 = HTMLStyle.h1
pytest_html.html.h2 = HTMLStyle.h2
pytest_html.html.h3 = HTMLStyle.h3
pytest_html.html.p = HTMLStyle.b
documentation = FunctionDoc(item.function)
outcome = yield
report = outcome.get_result()
extra = getattr(report, 'extra', [])
report.description = '. '.join(documentation["Summary"])
if report.when == 'teardown':
# Attach error logs per each node in the 'test_cluster_error_logs' test.
if report.head_line == 'test_cluster_error_logs' and item.module.nodes_with_errors:
extra.append(pytest_html.extras.html("<h2>Error logs</h2>"))
# Keys are human/natural sorted.
for node, logs in sorted(item.module.nodes_with_errors.items(),
key=lambda d: [atoi(c) for c in re.split(r'(\d+)', d[0])]):
extra.append(pytest_html.extras.html(f'<p><b>{node}:</b>\n' + '\n'.join(
log_line.decode() for log_line in logs) + '</p>'))
extra.append(pytest_html.extras.html("</p><h2>Test output</h2>"))
# Attach wrong order logs per each node in the 'test_check_logs_order' tests (both master's and workers').
        elif (report.head_line == 'test_check_logs_order_workers'
              or report.head_line == 'test_check_logs_order_master') \
                and item.module.incorrect_order:
extra.append(pytest_html.extras.html("<h2>Wrong worker logs order</h2>" if 'workers' in report.head_line
else "<h2>Wrong master logs order</h2>"))
# Keys are human/natural sorted.
for key in sorted(item.module.incorrect_order.keys(),
key=lambda d: [atoi(c) for c in re.split(r'(\d+)', d)]):
extra.append(pytest_html.extras.html(f"<p><b>{key}:</b>\n"))
for failed_task in item.module.incorrect_order[key]:
extra.append(pytest_html.extras.html('<b> - Log type:</b> {log_type}\n'
'<b> Expected logs:</b> {expected_logs}\n'
'<b> Found log:</b> {found_log}'.format(**failed_task)))
extra.append(pytest_html.extras.html("</p><h2>Test output</h2>"))
# Attach repeated Integrity synchronizations per each node in the 'test_cluster_sync' test.
elif report.head_line == 'test_cluster_sync' and item.module.repeated_syncs:
extra.append(pytest_html.extras.html("<h2>Repeated Integrity synchronizations</h2>"))
output = []
# Keys are human/natural sorted.
for worker, values in sorted(item.module.repeated_syncs.items(),
key=lambda d: [atoi(c) for c in re.split(r'(\d+)', d[0])]):
output.append('<b>{worker} - Log found {repeat_counter} times in a row:</b>\n'
'{log}'.format(**values, worker=worker))
extra.append(pytest_html.extras.html('<p>' + '\n\n'.join(output) + '</p>'))
extra.append(pytest_html.extras.html("</p><h2>Test output</h2>"))
        # Attach nodes where some tasks were repeated or not completed in the requested order from the
# 'test_cluster_task_order' test.
elif report.head_line == 'test_cluster_task_order' and item.module.incorrect_order:
for key in item.module.incorrect_order:
extra.append(pytest_html.extras.html("<h2>Wrong task order.</h2>"))
extra.append(pytest_html.extras.html(f"<p><b>Concatenated tasks '{key}' and "
f"'{item.module.incorrect_order[key]['child_task']}'"
f" failed due to {item.module.incorrect_order[key]['status']}"
f" logs:\n\t{item.module.incorrect_order[key]['log']}</b>"))
extra.append(pytest_html.extras.html("</p><h2>Test output</h2>"))
report.extra = extra
|
3,451 |
get sampled df
|
"""
This module provides the `SparkDataFrameConverter` class,
which allows converting a `pyspark` `DataFrame`
into a list of dictionaries representing series.
"""
from types import ModuleType
from typing import List, Tuple
from ipyvizzu.data.converters.defaults import NAN_DIMENSION, NAN_MEASURE
from ipyvizzu.data.converters.df.defaults import MAX_ROWS
from ipyvizzu.data.converters.df.converter import DataFrameConverter
from ipyvizzu.data.infer_type import InferType
from ipyvizzu.data.type_alias import (
DimensionValue,
MeasureValue,
SeriesValues,
)
class SparkDataFrameConverter(DataFrameConverter):
"""
Converts a `pyspark` `DataFrame` into a list of dictionaries representing series.
Each dictionary contains information about the series `name`, `values` and `type`.
Parameters:
df: The `pyspark` `DataFrame` to convert.
default_measure_value:
Default value to use for missing measure values. Defaults to 0.
default_dimension_value:
Default value to use for missing dimension values. Defaults to an empty string.
max_rows: The maximum number of rows to include in the converted series list.
If the `df` contains more rows,
a random sample of the given number of rows (approximately) will be taken.
Example:
Get series list from `DataFrame` columns:
converter = SparkDataFrameConverter(df)
series_list = converter.get_series_list()
"""
# pylint: disable=too-few-public-methods
def __init__(
self,
df: "pyspark.sql.DataFrame", # type: ignore
default_measure_value: MeasureValue = NAN_MEASURE,
default_dimension_value: DimensionValue = NAN_DIMENSION,
max_rows: int = MAX_ROWS,
) -> None:
super().__init__(default_measure_value, default_dimension_value, max_rows)
self._pyspark, self._pyspark_func = self._get_pyspark()
self._df = self.METHOD_NAME(df)
def _get_pyspark(self) -> Tuple[ModuleType, ModuleType]:
try:
import pyspark # pylint: disable=import-outside-toplevel
from pyspark.sql import functions # pylint: disable=import-outside-toplevel
return pyspark, functions
except ImportError as error:
raise ImportError(
"pyspark is not available. Please install pyspark to use this feature."
) from error
def METHOD_NAME(
self, df: "pyspark.sql.DataFrame" # type: ignore
) -> "pyspark.sql.DataFrame": # type: ignore
row_number = df.count()
if self._is_max_rows_exceeded(row_number):
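            # sample() keeps each row with probability `fraction`, so its size is only
            # approximate; the limit() below enforces the hard cap of max_rows.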
fraction = self._max_rows / row_number
sample_df = df.sample(withReplacement=False, fraction=fraction, seed=42)
return sample_df.limit(self._max_rows)
return df
def _get_columns(self) -> List[str]:
return self._df.columns
def _convert_to_series_values_and_type(
self, obj: str
) -> Tuple[SeriesValues, InferType]:
column_name = obj
column = self._df.select(column_name)
integer_type = self._pyspark.sql.types.IntegerType
double_type = self._pyspark.sql.types.DoubleType
if isinstance(column.schema[column_name].dataType, (integer_type, double_type)):
return self._convert_to_measure_values(column_name), InferType.MEASURE
return self._convert_to_dimension_values(column_name), InferType.DIMENSION
def _convert_to_measure_values(self, obj: str) -> List[MeasureValue]:
column_name = obj
func = self._pyspark_func
df = self._df.withColumn(
column_name,
func.when(
func.col(column_name).isNull(), self._default_measure_value
).otherwise(func.col(column_name)),
)
df_rdd = (
df.withColumn(column_name, func.col(column_name).cast("float"))
.select(column_name)
.rdd
)
return df_rdd.flatMap(list).collect()
def _convert_to_dimension_values(self, obj: str) -> List[DimensionValue]:
column_name = obj
func = self._pyspark_func
df = self._df.withColumn(
column_name,
func.when(
func.col(column_name).isNull(), self._default_dimension_value
).otherwise(func.col(column_name)),
)
df_rdd = (
df.withColumn(column_name, func.col(column_name).cast("string"))
.select(column_name)
.rdd
)
return df_rdd.flatMap(list).collect()
|
3,452 |
parse default
|
from testfixtures import LogCapture
from twisted.internet import defer
from twisted.trial.unittest import TestCase
from scrapy.http import Request
from scrapy.utils.test import get_crawler
from tests.mockserver import MockServer
from tests.spiders import MockServerSpider
class InjectArgumentsDownloaderMiddleware:
"""
Make sure downloader middlewares are able to update the keyword arguments
"""
def process_request(self, request, spider):
if request.callback.__name__ == "parse_downloader_mw":
request.cb_kwargs["from_process_request"] = True
return None
def process_response(self, request, response, spider):
if request.callback.__name__ == "parse_downloader_mw":
request.cb_kwargs["from_process_response"] = True
return response
class InjectArgumentsSpiderMiddleware:
"""
Make sure spider middlewares are able to update the keyword arguments
"""
def process_start_requests(self, start_requests, spider):
for request in start_requests:
if request.callback.__name__ == "parse_spider_mw":
request.cb_kwargs["from_process_start_requests"] = True
yield request
def process_spider_input(self, response, spider):
request = response.request
if request.callback.__name__ == "parse_spider_mw":
request.cb_kwargs["from_process_spider_input"] = True
return None
def process_spider_output(self, response, result, spider):
for element in result:
if (
isinstance(element, Request)
and element.callback.__name__ == "parse_spider_mw_2"
):
element.cb_kwargs["from_process_spider_output"] = True
yield element
class KeywordArgumentsSpider(MockServerSpider):
name = "kwargs"
custom_settings = {
"DOWNLOADER_MIDDLEWARES": {
InjectArgumentsDownloaderMiddleware: 750,
},
"SPIDER_MIDDLEWARES": {
InjectArgumentsSpiderMiddleware: 750,
},
}
checks = []
def start_requests(self):
data = {"key": "value", "number": 123, "callback": "some_callback"}
yield Request(self.mockserver.url("/first"), self.parse_first, cb_kwargs=data)
yield Request(
self.mockserver.url("/general_with"), self.parse_general, cb_kwargs=data
)
yield Request(self.mockserver.url("/general_without"), self.parse_general)
yield Request(self.mockserver.url("/no_kwargs"), self.parse_no_kwargs)
yield Request(
self.mockserver.url("/default"), self.METHOD_NAME, cb_kwargs=data
)
yield Request(
self.mockserver.url("/takes_less"), self.parse_takes_less, cb_kwargs=data
)
yield Request(
self.mockserver.url("/takes_more"), self.parse_takes_more, cb_kwargs=data
)
yield Request(self.mockserver.url("/downloader_mw"), self.parse_downloader_mw)
yield Request(self.mockserver.url("/spider_mw"), self.parse_spider_mw)
def parse_first(self, response, key, number):
self.checks.append(key == "value")
self.checks.append(number == 123)
self.crawler.stats.inc_value("boolean_checks", 2)
yield response.follow(
self.mockserver.url("/two"),
self.parse_second,
cb_kwargs={"new_key": "new_value"},
)
def parse_second(self, response, new_key):
self.checks.append(new_key == "new_value")
self.crawler.stats.inc_value("boolean_checks")
def parse_general(self, response, **kwargs):
if response.url.endswith("/general_with"):
self.checks.append(kwargs["key"] == "value")
self.checks.append(kwargs["number"] == 123)
self.checks.append(kwargs["callback"] == "some_callback")
self.crawler.stats.inc_value("boolean_checks", 3)
elif response.url.endswith("/general_without"):
self.checks.append(
kwargs == {} # pylint: disable=use-implicit-booleaness-not-comparison
)
self.crawler.stats.inc_value("boolean_checks")
def parse_no_kwargs(self, response):
self.checks.append(response.url.endswith("/no_kwargs"))
self.crawler.stats.inc_value("boolean_checks")
def METHOD_NAME(self, response, key, number=None, default=99):
self.checks.append(response.url.endswith("/default"))
self.checks.append(key == "value")
self.checks.append(number == 123)
self.checks.append(default == 99)
self.crawler.stats.inc_value("boolean_checks", 4)
def parse_takes_less(self, response, key, callback):
"""
Should raise
TypeError: parse_takes_less() got an unexpected keyword argument 'number'
"""
def parse_takes_more(self, response, key, number, callback, other):
"""
Should raise
TypeError: parse_takes_more() missing 1 required positional argument: 'other'
"""
def parse_downloader_mw(
self, response, from_process_request, from_process_response
):
self.checks.append(bool(from_process_request))
self.checks.append(bool(from_process_response))
self.crawler.stats.inc_value("boolean_checks", 2)
def parse_spider_mw(
self, response, from_process_spider_input, from_process_start_requests
):
self.checks.append(bool(from_process_spider_input))
self.checks.append(bool(from_process_start_requests))
self.crawler.stats.inc_value("boolean_checks", 2)
return Request(self.mockserver.url("/spider_mw_2"), self.parse_spider_mw_2)
def parse_spider_mw_2(self, response, from_process_spider_output):
self.checks.append(bool(from_process_spider_output))
self.crawler.stats.inc_value("boolean_checks", 1)
class CallbackKeywordArgumentsTestCase(TestCase):
maxDiff = None
def setUp(self):
self.mockserver = MockServer()
self.mockserver.__enter__()
def tearDown(self):
self.mockserver.__exit__(None, None, None)
@defer.inlineCallbacks
def test_callback_kwargs(self):
crawler = get_crawler(KeywordArgumentsSpider)
with LogCapture() as log:
yield crawler.crawl(mockserver=self.mockserver)
self.assertTrue(all(crawler.spider.checks))
self.assertEqual(
len(crawler.spider.checks), crawler.stats.get_value("boolean_checks")
)
# check exceptions for argument mismatch
exceptions = {}
for line in log.records:
for key in ("takes_less", "takes_more"):
if key in line.getMessage():
exceptions[key] = line
self.assertEqual(exceptions["takes_less"].exc_info[0], TypeError)
self.assertTrue(
str(exceptions["takes_less"].exc_info[1]).endswith(
"parse_takes_less() got an unexpected keyword argument 'number'"
),
msg="Exception message: " + str(exceptions["takes_less"].exc_info[1]),
)
self.assertEqual(exceptions["takes_more"].exc_info[0], TypeError)
self.assertTrue(
str(exceptions["takes_more"].exc_info[1]).endswith(
"parse_takes_more() missing 1 required positional argument: 'other'"
),
msg="Exception message: " + str(exceptions["takes_more"].exc_info[1]),
)
|
3,453 |
calc new mode
|
#
# This file is part of the CernVM File System.
#
import argparse
class TreeNode:
def __init__(self, mode):
self.children = {}
self.mode = mode
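    # Mode characters and the spec-file lines they produce in getString():
    #   '!'  ->  "!<path>"    (exclusion)
    #   '^'  ->  "^<path>*"
    #   '*'  ->  "<path>/*"   (wildcard subtree)
    #   '/'  ->  "^<path>"
    #   '_' and '-' are internal passthrough markers that emit nothing themselves.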
def getString(self, prefix, wildcard):
stringRes = ''
if self.mode == '!':
stringRes = '!' + prefix + '\n'
if not wildcard:
if self.mode == '^':
stringRes = '^'+prefix+'*\n'
elif self.mode == '*':
stringRes = prefix + '/*\n'
            elif self.mode == '/':
                if len(prefix) == 0:
                    stringRes = "^/\n"
                else:
                    stringRes = '^' + prefix + '\n'
for key, val in self.children.items():
stringRes+=val.getString(prefix+'/'+key, self.mode == '*')
return stringRes
def __str__(self):
return self.getString("", False)
class DiffBuilder:
def __init__(self, args):
self.infiles = args.infiles
self.outfile = args.outfile
self.depth = args.depth
self.root = TreeNode('/')
def build_diff(self):
with open(self.infiles[0], 'r') as specFile0:
for curLine in specFile0:
(curLine, mode) = self.get_info(curLine)
path_parts = curLine.split('/')
curNode = self.add_node(path_parts, mode)
curNode.mode = self.METHOD_NAME(curNode.mode, mode)
for curfile in self.infiles[1:]:
with open(curfile, 'r') as curSpecFile:
for curLine in curSpecFile:
(curLine, mode) = self.get_info(curLine)
path_parts = curLine.split('/')
if (mode == '!'):
curNode = self.add_node(path_parts, mode)
curNode.mode = self.METHOD_NAME(curNode.mode, mode)
else:
curNode = self.root
passthrough = '-' if mode=='!' else '_'
curDepth = 0
mergeable = True
for part in path_parts:
curDepth+=1
if not part in curNode.children\
and curDepth > self.depth\
and mergeable:
print("Found mergeable")
curNode.mode = self.METHOD_NAME(curNode.mode, '*')
break
elif not part in curNode.children:
mergeable = False
curNode.children[part] = TreeNode(passthrough)
curNode = curNode.children[part]
curNode.mode = self.METHOD_NAME(curNode.mode, passthrough)
curNode.mode = self.METHOD_NAME(curNode.mode, mode)
with open(self.outfile, "w") as specFile:
specFile.write(str(self.root))
def add_node(self, path_parts, mode):
curNode = self.root
passthrough = '-' if mode=='!' else '_'
for part in path_parts:
if not part in curNode.children:
curNode.children[part] = TreeNode(passthrough)
curNode = curNode.children[part]
curNode.mode = self.METHOD_NAME(curNode.mode, passthrough)
return curNode
def METHOD_NAME(self, old, update):
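        # Merge two mode characters, keeping the "stronger" one: '!' always wins,
        # '-' always loses, '_' loses to any real mode, and '/' < '^' < '*'
        # (a wider wildcard overrides a narrower direct entry).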
if update == '!':
return update
if old == '-':
return update
if update == '-':
return old
if old == '_':
return update
if old == '/' and update in ['^', '*']:
return update
if old == '^' and update == '*':
return update
return old
def get_info(self, curLine):
curLine = curLine.strip()
mode = curLine[0]
wildcard = False
if (curLine[-1] == '*'):
wildcard = True
curLine = curLine[:-1]
if (mode == '/'):
if (wildcard):
mode = '*'
curLine = curLine[1:]
else:
if not wildcard and mode=='^':
mode = '/'
curLine = curLine[2:]
return (curLine, mode)
def parse_args():
argparser = argparse.ArgumentParser()
argparser.add_argument("depth",
type=int,
help="The trace log file")
argparser.add_argument("infiles",
type=str,
nargs="+",
help="The trace log file")
argparser.add_argument("outfile",
type=str,
help="The output file")
return argparser.parse_args()
def main():
args = parse_args()
diffBuilder = DiffBuilder(args)
diffBuilder.build_diff()
if __name__ == "__main__":
    main()
|
3,454 |
deps
|
# Copyright (C) 2017 - Daniele Forghieri
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
"""gvsbuild deps print / .gv graph."""
from typing import List
import typer
# Verify we can import from the script directory
try:
import gvsbuild.utils.utils
except ImportError:
# We are probably using an embedded installation
print(
"Error importing utility (running the embedded interpreter ?), fixing paths ..."
)
import os
import sys
# Get the script dir
script_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
# and add it at the beginning, emulating the standard python startup
sys.path.insert(0, script_dir)
import gvsbuild.groups # noqa: F401
import gvsbuild.projects # noqa: F401
import gvsbuild.tools # noqa: F401
from gvsbuild.utils.base_project import Project, ProjectType
from gvsbuild.utils.utils import ordered_set
def print_deps(flatten=False, add_all=False):
done = []
def dump_single_dep(st, name, flatten):
if flatten:
if not st:
done.append(name)
else:
if st:
# dependency
print(f"{st}{name}")
else:
print(f" > {name}")
st = " "
done.append(name)
rt = False
p = Project._dict[name]
if p.dependencies:
for d in p.dependencies:
add = True
if not add_all:
ty = Project._dict[d].type
if ty != ProjectType.PROJECT:
add = False
if add:
rt = True
if d in done:
if not flatten:
print(f"{st} {d} *")
else:
done.append(d)
dump_single_dep(f"{st} ", d, flatten)
return rt
prj = [x.name for x in Project._projects if x.type == ProjectType.PROJECT]
print("Projects dependencies:")
for n in prj:
done = []
if flatten:
print(f"> {n}")
if dump_single_dep("", n, flatten):
if flatten:
done.remove(n)
for t in sorted(done):
print(f" {t}")
else:
print("")
def make_graph(
out_file,
put_all=False,
invert_dep=False,
add_tools=False,
add_groups=False,
skip=None,
):
gr_colors = [
0x000080,
0x008000,
0x008080,
0x800000,
0x800080,
0x808000,
0x808080,
0x0000F0,
0x00F000,
0x00F0F0,
0xF00000,
0xF000F0,
0xF0F000,
0xF00080,
0xF08000,
0xF08080,
0x80F000,
0x80F080,
0x00F080,
0x0080F0,
0x8000F0,
0x8080F0,
]
gr_index = 0
to_skip = set(skip)
with open(out_file, "w", encoding="utf-8") as fo:
print(f"Writing file {out_file}")
used = set()
fo.write("digraph gtk3dep {\n")
for n in Project._names:
if n not in to_skip:
t = Project._dict[n]
add = True
if t.type == ProjectType.TOOL:
add = add_tools
elif t.type == ProjectType.GROUP:
add = add_groups
else:
add = True
if add:
if t.dependencies:
gr_index += 1
gr_index %= len(gr_colors)
for d in t.dependencies:
if d in to_skip:
print(f"Skip '{d}' for '{n}'")
else:
if invert_dep:
fo.write(
f' "{d}" -> "{n}" [color="#{gr_colors[gr_index]:06x}"];\n'
)
else:
fo.write(
f' "{n}" -> "{d}" [color="#{gr_colors[gr_index]:06x}"];\n'
)
used.add(d)
else:
used.add(t.name)
if put_all:
# Puts all projects that are not referenced from others
for n in Project._names:
if n not in used:
fo.write(f' "BUILD" -> "{n}" [color="#c00080"];\n')
fo.write("};\n")
def compute_deps(proj):
if hasattr(proj, "all_dependencies"):
return
dependencies = ordered_set()
for dep in proj.dependencies:
compute_deps(dep)
for p in dep.all_dependencies:
dependencies.add(p)
dependencies.add(dep)
proj.all_dependencies = dependencies
def METHOD_NAME(
flatten: bool = typer.Option(False, help="Flatten the dependencies"),
dep_tools: bool = typer.Option(
False,
help="Include tools in the dependencies",
),
graph: bool = typer.Option(
False, help="Generate a graphviz file", rich_help_panel="Graphing Options"
),
graph_all: bool = typer.Option(
False,
help="Also include unreferenced projects to the graph",
rich_help_panel="Graphing Options",
),
add_tools: bool = typer.Option(
False, help="Include tools in the graph", rich_help_panel="Graphing Options"
),
add_groups: bool = typer.Option(
False,
help="Include group projects in the graph",
rich_help_panel="Graphing Options",
),
gv_file: str = typer.Option(
"wingtk.gv", help="Graphviz output file", rich_help_panel="Graphing Options"
),
invert: bool = typer.Option(
False, help="Invert the dependencies", rich_help_panel="Graphing Options"
),
skip: List[str] = typer.Option(
None,
help="A comma separated list of projects not to graph",
rich_help_panel="Graphing Options",
),
):
Project.add_all()
# do what's asked
if graph:
# .gv graph
make_graph(
out_file=gv_file,
put_all=graph_all,
invert_dep=invert,
add_tools=add_tools,
add_groups=add_groups,
skip=skip,
)
else:
# simple dep print
print_deps(flatten=flatten, add_all=dep_tools)
|
3,455 |
cancel
|
#!/usr/bin/env python
#
# Copyright (c) Greenplum Inc 2010. All Rights Reserved.
#
#
# THIS IMPORT MUST COME FIRST
# import mainUtils FIRST to get python version check
#
from qautils.gppylib.mainUtils import *
import os, sys, traceback
gProgramName = os.path.split(sys.argv[0])[-1]
from qautils.gppylib.commands.base import setExecutionContextFactory, ExecutionContext,CommandResult
from qautils.gppylib import gplog
from qautils.gppylib.commands import unix
from qautils.gppylib.system import configurationInterface as configInterface
from qautils.gppylib.system import configurationImplGpdb as systemConf
# todo: find proper home for this
gCommandLineToCommandSimulator = {}
def clearCommandSimulators():
    global gCommandLineToCommandSimulator
    gCommandLineToCommandSimulator = {}
def addCommandSimulator(commandLine, simulator):
    gCommandLineToCommandSimulator[commandLine] = simulator
class TestExecutionContext(ExecutionContext):
# todo: clean this up (make private), but only when completed in LocalExecutionContext is inspected
completed = False
halt = False
def __init__(self, execution_context_id, remoteHost, stdin):
self.execution_context_id = execution_context_id
self.remoteHost = remoteHost
self.stdin = stdin
def execute(self,cmd):
testOutput("exec %s" % cmd.cmdStr)
simulator = gCommandLineToCommandSimulator.get(cmd.cmdStr)
if simulator is None:
(rc,stdoutValue,stderrValue) = (0, [], [])
else:
(rc,stdoutValue,stderrValue) = simulator.simulate(cmd.cmdStr)
self.completed=True
result = CommandResult(rc,"".join(stdoutValue),"".join(stderrValue), self.completed, self.halt)
cmd.set_results(result)
def interrupt(self):
raise Exception("not implemented") # implement this when needed for testing
def METHOD_NAME(self):
raise Exception("not implemented") # implement this when needed for testing
class TestExecutionContextFactory:
def createExecutionContext(self,execution_context_id, remoteHost, stdin):
return TestExecutionContext(execution_context_id, remoteHost, stdin)
gTestResults = []
gTestOutput = None
def testOutput(o) :
global gTestOutput
if gTestOutput is not None:
gTestOutput.append(str(o))
def finishTest(expectedOutputStr):
global gTestOutput
global gTestName
output = "\n".join(gTestOutput)
if output == expectedOutputStr:
gTestResults.append((gTestName, True, None))
else:
# todo: on diff, produce a nicer diff output for large strings!
msg = "Test %s failed. EXPECTED OUTPUT (surrounding triple quotes added by this output):\n\"\"\"%s\"\"\"\n\n" \
"ACTUAL OUTPUT (surrounding triple quotes added by this output):\n\"\"\"%s\"\"\"" % (gTestName, expectedOutputStr, output)
gTestResults.append((gTestName, False,msg))
gTestOutput = None
gTestName = None
def startTest(testName):
global gTestOutput
global gTestName
gTestOutput = []
gTestName = testName
def printTestResults():
global gTestResults
numFailures = 0
numSuccesses = 0
for test in gTestResults:
if ( test[1]):
numSuccesses += 1
print >> sys.stderr, "SUCCESS: %s passed" % test[0]
else:
numFailures += 1
print >> sys.stderr, "FAILURE: %s failed\n%s\n\n" % (test[0], test[2])
if numFailures == 0:
print >> sys.stderr, "ALL %s TESTS SUCCEEDED" % numSuccesses
else:
print >> sys.stderr, "%s tests succeeded" % numSuccesses
print >> sys.stderr, "%s tests FAILED" % numFailures
def resetTestResults():
global gTestResults
gTestResults = []
def test_main( testName, newProgramArgs, createOptionParserFn, createCommandFn, extraOutputGenerators, expectedOutput) :
global gTestOutput
# update args
previousArgs = sys.argv
sys.argv = []
sys.argv.append(getProgramName())
sys.argv.extend(newProgramArgs)
# register command factory
setExecutionContextFactory(TestExecutionContextFactory())
commandObject=None
parser = None
startTest(testName)
try:
gplog.setup_tool_logging(gProgramName,unix.getLocalHostname(),unix.getUserName(),nonuser=False)
parser = createOptionParserFn()
(options, args) = parser.parse_args()
gplog.enable_verbose_logging()
commandObject = createCommandFn(options, args)
exitCode = commandObject.run()
testOutput("sys.exit %s" % exitCode)
except ProgramArgumentValidationException, e:
testOutput( "Validation error: %s" % e.getMessage())
except ExceptionNoStackTraceNeeded, e:
testOutput( str(e))
except Exception, e:
testOutput( "EXCEPTION: %s\n%s" % (e, traceback.format_exc()))
except KeyboardInterrupt:
sys.exit('\nUser Interrupted')
finally:
if commandObject:
commandObject.cleanup()
# clean up test settings
sys.argv = previousArgs
setExecutionContextFactory(None)
if extraOutputGenerators is not None:
for gen in extraOutputGenerators:
gen.generate()
finishTest(expectedOutput)
def simple_test(testname, fnToCall, argsToFn, expectedOutput):
startTest(testname)
try:
fnToCall(argsToFn)
except Exception, e:
testOutput( "EXCEPTION: %s\n%s" % (e, traceback.format_exc()))
finishTest(expectedOutput)
def testTableOutput(lines):
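    # Render rows as a " | "-separated fixed-width table: a first pass records the
    # widest cell in each column, a second pass left-justifies every cell to that width.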
lineWidth = []
for line in lines:
while len(lineWidth) < len(line):
lineWidth.append(0)
for i, field in enumerate(line):
lineWidth[i] = max(len(field), lineWidth[i])
# now print it all!
for line in lines:
outLine = []
for i, field in enumerate(line):
outLine.append(field.ljust(lineWidth[i] + 1))
msg = " | ".join(outLine)
testOutput(msg.strip())
def testOutputGpArray(gpArray):
segs = gpArray.getDbList()
def compareByDbId(left,right):
if left.getSegmentDbId() < right.getSegmentDbId(): return -1
elif left.getSegmentDbId() > right.getSegmentDbId(): return 1
else: return 0
segs.sort(compareByDbId)
lines = []
lines.append([
"dbid", "content", "role", "preferred_role", "mode", "status",
"hostname", "address", "port", "datadir", "replication_port"
])
for seg in segs:
line = [
str(seg.getSegmentDbId()),
str(seg.getSegmentContentId()),
str(seg.getSegmentRole()),
str(seg.getSegmentPreferredRole()),
str(seg.getSegmentMode()),
str(seg.getSegmentStatus()),
str(seg.getSegmentHostName()),
str(seg.getSegmentAddress()),
str(seg.getSegmentPort()),
str(seg.getSegmentDataDirectory()),
str(seg.getSegmentReplicationPort()),
]
lines.append(line)
testTableOutput(lines)
|
3,456 |
run bpython
|
import fcntl
import os
import pty
import struct
import sys
import termios
import textwrap
import unittest
from bpython.test import TEST_CONFIG
from bpython.config import getpreferredencoding
try:
from twisted.internet import reactor
from twisted.internet.defer import Deferred
from twisted.internet.protocol import ProcessProtocol
from twisted.trial.unittest import TestCase as TrialTestCase
except ImportError:
class TrialTestCase: # type: ignore [no-redef]
pass
reactor = None # type: ignore
try:
import urwid
have_urwid = True
except ImportError:
have_urwid = False
def set_win_size(fd, rows, columns):
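    # Resize the pseudo-terminal: TIOCSWINSZ expects a struct winsize packed as four
    # unsigned shorts (rows, cols, xpixel, ypixel); the pixel sizes are left at 0 here.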
s = struct.pack("HHHH", rows, columns, 0, 0)
fcntl.ioctl(fd, termios.TIOCSWINSZ, s)
class CrashersTest:
backend = "cli"
def METHOD_NAME(self, input):
"""
Run bpython (with `backend` as backend) in a subprocess and
enter the given input. Uses a test config that disables the
paste detection.
Returns bpython's output.
"""
result = Deferred()
encoding = getpreferredencoding()
class Protocol(ProcessProtocol):
STATES = (SEND_INPUT, COLLECT) = range(2)
def __init__(self):
self.data = ""
self.delayed_call = None
self.states = iter(self.STATES)
self.state = next(self.states)
def outReceived(self, data):
self.data += data.decode(encoding)
if self.delayed_call is not None:
self.delayed_call.cancel()
self.delayed_call = reactor.callLater(0.5, self.next)
def next(self):
self.delayed_call = None
if self.state == self.SEND_INPUT:
index = self.data.find(">>> ")
if index >= 0:
self.data = self.data[index + 4 :]
self.transport.write(input.encode(encoding))
self.state = next(self.states)
elif self.data == "\x1b[6n":
# this is a cursor position query
# respond that cursor is on row 2, column 1
self.transport.write("\x1b[2;1R".encode(encoding))
else:
self.transport.closeStdin()
if self.transport.pid is not None:
self.delayed_call = None
self.transport.signalProcess("TERM")
def processExited(self, reason):
if self.delayed_call is not None:
self.delayed_call.cancel()
result.callback(self.data)
(master, slave) = pty.openpty()
set_win_size(slave, 25, 80)
reactor.spawnProcess(
Protocol(),
sys.executable,
(
sys.executable,
"-m",
f"bpython.{self.backend}",
"--config",
str(TEST_CONFIG),
"-q", # prevents version greeting
),
env={
"TERM": "vt100",
"LANG": os.environ.get("LANG", "C.UTF-8"),
},
usePTY=(master, slave, os.ttyname(slave)),
)
return result
def test_issue108(self):
input = textwrap.dedent(
"""\
def spam():
u"y\\xe4y"
\b
spam("""
)
deferred = self.METHOD_NAME(input)
return deferred.addCallback(self.check_no_traceback)
def test_issue133(self):
input = textwrap.dedent(
"""\
def spam(a, (b, c)):
pass
\b
spam(1"""
)
return self.METHOD_NAME(input).addCallback(self.check_no_traceback)
def check_no_traceback(self, data):
self.assertNotIn("Traceback", data)
@unittest.skipIf(reactor is None, "twisted is not available")
class CurtsiesCrashersTest(TrialTestCase, CrashersTest):
backend = "curtsies"
@unittest.skipIf(reactor is None, "twisted is not available")
class CursesCrashersTest(TrialTestCase, CrashersTest):
backend = "cli"
@unittest.skipUnless(have_urwid, "urwid is required")
@unittest.skipIf(reactor is None, "twisted is not available")
class UrwidCrashersTest(TrialTestCase, CrashersTest):
backend = "urwid"
if __name__ == "__main__":
unittest.main()
|
3,457 |
test readonly list del
|
import re
from typing import Any, Callable, Dict, List, Union
from pytest import mark, param, raises
from omegaconf import DictConfig, ListConfig, OmegaConf, ReadonlyConfigError
@mark.parametrize(
"src, func, expectation",
[
param(
{},
lambda c: c.__setitem__("a", 1),
raises(ReadonlyConfigError, match="a"),
id="dict_setitem",
),
param(
{"a": None},
lambda c: c.__setitem__("a", {"b": 10}),
raises(ReadonlyConfigError, match="a"),
id="dict_setitem",
),
param(
{"a": {"b": {"c": 1}}},
lambda c: c.__getattr__("a").__getattr__("b").__setitem__("c", 1),
raises(ReadonlyConfigError, match="a.b.c"),
id="dict_nested_setitem",
),
param(
{},
lambda c: OmegaConf.update(c, "a.b", 10),
raises(ReadonlyConfigError, match="a"),
id="dict_update",
),
param(
{"a": 10},
lambda c: c.__setattr__("a", 1),
raises(ReadonlyConfigError, match="a"),
id="dict_setattr",
),
param(
{"a": 10},
lambda c: c.pop("a"),
raises(ReadonlyConfigError, match="a"),
id="dict_pop",
),
param(
{"a": 10},
lambda c: c.__delitem__("a"),
raises(ReadonlyConfigError, match="a"),
id="dict_delitem",
),
param(
{"a": 10},
lambda c: c.__delattr__("a"),
raises(ReadonlyConfigError, match="a"),
id="dict_delattr",
),
# list
param(
[],
lambda c: c.__setitem__(0, 1),
raises(ReadonlyConfigError, match="0"),
id="list_setitem",
),
param(
[],
lambda c: OmegaConf.update(c, "0.b", 10),
raises(ReadonlyConfigError, match="[0]"),
id="list_update",
),
param([10], lambda c: c.pop(), raises(ReadonlyConfigError), id="list_pop"),
param(
[0],
lambda c: c.__delitem__(0),
raises(ReadonlyConfigError, match="[0]"),
id="list_delitem",
),
],
)
def test_readonly(
src: Union[Dict[str, Any], List[Any]], func: Callable[[Any], Any], expectation: Any
) -> None:
c = OmegaConf.create(src)
OmegaConf.set_readonly(c, True)
with expectation:
func(c)
assert c == src
@mark.parametrize("src", [{}, []])
def test_readonly_flag(src: Union[Dict[str, Any], List[Any]]) -> None:
c = OmegaConf.create(src)
assert not OmegaConf.is_readonly(c)
OmegaConf.set_readonly(c, True)
assert OmegaConf.is_readonly(c)
OmegaConf.set_readonly(c, False)
assert not OmegaConf.is_readonly(c)
OmegaConf.set_readonly(c, None)
assert not OmegaConf.is_readonly(c)
def test_readonly_nested_list() -> None:
c = OmegaConf.create([[1]])
assert isinstance(c, ListConfig)
assert not OmegaConf.is_readonly(c)
assert not OmegaConf.is_readonly(c[0])
OmegaConf.set_readonly(c, True)
assert OmegaConf.is_readonly(c)
assert OmegaConf.is_readonly(c[0])
OmegaConf.set_readonly(c, False)
assert not OmegaConf.is_readonly(c)
assert not OmegaConf.is_readonly(c[0])
OmegaConf.set_readonly(c, None)
assert not OmegaConf.is_readonly(c)
assert not OmegaConf.is_readonly(c[0])
OmegaConf.set_readonly(c[0], True)
assert not OmegaConf.is_readonly(c)
assert OmegaConf.is_readonly(c[0])
def test_readonly_list_insert() -> None:
c = OmegaConf.create([])
OmegaConf.set_readonly(c, True)
with raises(ReadonlyConfigError, match="[0]"):
c.insert(0, 10)
assert c == []
def test_readonly_list_insert_deep() -> None:
src: List[Dict[str, Any]] = [dict(a=[dict(b=[])])]
c = OmegaConf.create(src)
assert isinstance(c, ListConfig)
OmegaConf.set_readonly(c, True)
with raises(ReadonlyConfigError, match=re.escape("[0].a[0].b[0]")):
c[0].a[0].b.insert(0, 10)
assert c == src
def test_readonly_list_append() -> None:
c = OmegaConf.create([])
OmegaConf.set_readonly(c, True)
with raises(ReadonlyConfigError, match="[0]"):
c.append(10)
assert c == []
def test_readonly_list_change_item() -> None:
c = OmegaConf.create([1, 2, 3])
assert isinstance(c, ListConfig)
OmegaConf.set_readonly(c, True)
with raises(ReadonlyConfigError, match="[1]"):
c[1] = 10
assert c == [1, 2, 3]
def test_readonly_list_pop() -> None:
c = OmegaConf.create([1, 2, 3])
assert isinstance(c, ListConfig)
OmegaConf.set_readonly(c, True)
with raises(ReadonlyConfigError, match="[1]"):
c.pop(1)
assert c == [1, 2, 3]
def METHOD_NAME() -> None:
c = OmegaConf.create([1, 2, 3])
assert isinstance(c, ListConfig)
OmegaConf.set_readonly(c, True)
with raises(ReadonlyConfigError, match="[1]"):
del c[1]
assert c == [1, 2, 3]
def test_readonly_list_sort() -> None:
c = OmegaConf.create([3, 1, 2])
assert isinstance(c, ListConfig)
OmegaConf.set_readonly(c, True)
with raises(ReadonlyConfigError):
c.sort()
assert c == [3, 1, 2]
def test_readonly_from_cli() -> None:
c = OmegaConf.create({"foo": {"bar": [1]}})
assert isinstance(c, DictConfig)
OmegaConf.set_readonly(c, True)
cli = OmegaConf.from_dotlist(["foo.bar=[2]"])
cfg2 = OmegaConf.merge(c, cli)
assert OmegaConf.is_readonly(c)
assert OmegaConf.is_readonly(cfg2)
@mark.parametrize(
"cfg1, cfg2",
[
param({"foo": {"bar": 10}}, {"foo": {"bar": 20}}, id="override_value"),
param({"foo": {"bar": 10}}, {"foo": {"yup": 20}}, id="adding_key"),
param({"a": 1}, {"b": 2}, id="adding_key"),
param({"a": 1}, OmegaConf.create({"b": 2}), id="adding_key"),
],
)
def test_merge_with_readonly(cfg1: Dict[str, Any], cfg2: Dict[str, Any]) -> None:
c = OmegaConf.create(cfg1)
OmegaConf.set_readonly(c, True)
with raises(ReadonlyConfigError):
c.merge_with(cfg2)
@mark.parametrize(
"readonly_key, cfg1, cfg2, expected",
[
param(
"",
{"foo": {"bar": 10}},
{"foo": {}},
{"foo": {"bar": 10}},
id="merge_empty_dict",
),
param(
"foo",
{"foo": {"bar": 10}},
{"xyz": 10},
{"foo": {"bar": 10}, "xyz": 10},
id="merge_different_node",
),
],
)
def test_merge_with_readonly_nop(
readonly_key: str,
cfg1: Dict[str, Any],
cfg2: Dict[str, Any],
expected: Dict[str, Any],
) -> None:
c = OmegaConf.create(cfg1)
OmegaConf.set_readonly(OmegaConf.select(c, readonly_key), True)
c.merge_with(cfg2)
assert c == OmegaConf.create(expected)
|
3,458 |
process option executable
|
"""
Config module.
"""
from importlib import import_module
from typing import Any, List
from rebulk import Rebulk
_regex_prefix = 're:'
_import_prefix = 'import:'
_import_cache = {}
_eval_prefix = 'eval:'
_eval_cache = {}
_pattern_types = ('regex', 'string')
_default_module_names = {
'validator': 'guessit.rules.common.validators',
'formatter': 'guessit.rules.common.formatters'
}
def _process_option(name: str, value: Any):
if name in ('validator', 'conflict_solver', 'formatter'):
if isinstance(value, dict):
return {item_key: _process_option(name, item_value) for item_key, item_value in value.items()}
if value is not None:
return METHOD_NAME(value, _default_module_names.get(name))
return value
def _import(value: str, default_module_name=None):
if '.' in value:
module_name, target = value.rsplit(':', 1)
else:
module_name = default_module_name
target = value
import_id = module_name + ":" + target
if import_id in _import_cache:
return _import_cache[import_id]
mod = import_module(module_name)
imported = mod
for item in target.split("."):
imported = getattr(imported, item)
_import_cache[import_id] = imported
return imported
def _eval(value: str):
    compiled = _eval_cache.get(value)
    if not compiled:
        compiled = compile(value, '<string>', 'eval')
        _eval_cache[value] = compiled
    return eval(compiled)  # pylint:disable=eval-used
def METHOD_NAME(value: str, default_module_name=None):
if value.startswith(_import_prefix):
value = value[len(_import_prefix):]
return _import(value, default_module_name)
if value.startswith(_eval_prefix):
value = value[len(_eval_prefix):]
return _eval(value)
if value.startswith('lambda ') or value.startswith('lambda:'):
return _eval(value)
return value
def _process_callable_entry(callable_spec: str, rebulk: Rebulk, entry: dict):
METHOD_NAME(callable_spec)(rebulk, **entry)
def _build_entry_decl(entry, options, value):
entry_decl = dict(options.get(None, {}))
if not value.startswith('_'):
entry_decl['value'] = value
if isinstance(entry, str):
if entry.startswith(_regex_prefix):
entry_decl["regex"] = [entry[len(_regex_prefix):]]
else:
entry_decl["string"] = [entry]
else:
entry_decl.update(entry)
if "pattern" in entry_decl:
legacy_pattern = entry.pop("pattern")
if legacy_pattern.startswith(_regex_prefix):
entry_decl["regex"] = [legacy_pattern[len(_regex_prefix):]]
else:
entry_decl["string"] = [legacy_pattern]
return entry_decl
def load_patterns(rebulk: Rebulk,
pattern_type: str,
patterns: List[str],
options: dict = None):
"""
Load patterns for a prepared config entry
:param rebulk: Rebulk builder to use.
:param pattern_type: Pattern type.
:param patterns: Patterns
:param options: kwargs options to pass to rebulk pattern function.
:return:
"""
default_options = options.get(None) if options else None
item_options = dict(default_options) if default_options else {}
pattern_type_option = options.get(pattern_type)
if pattern_type_option:
item_options.update(pattern_type_option)
item_options = {name: _process_option(name, value) for name, value in item_options.items()}
getattr(rebulk, pattern_type)(*patterns, **item_options)
def load_config_patterns(rebulk: Rebulk,
config: dict,
options: dict = None):
"""
Load patterns defined in given config.
:param rebulk: Rebulk builder to use.
:param config: dict containing pattern definition.
:param options: Additional pattern options to use.
:type options: Dict[Dict[str, str]] A dict where key is the pattern type (regex, string, functional) and value is
the default kwargs options to pass.
:return:
"""
if options is None:
options = {}
for value, raw_entries in config.items():
entries = raw_entries if isinstance(raw_entries, list) else [raw_entries]
for entry in entries:
if isinstance(entry, dict) and "callable" in entry.keys():
_process_callable_entry(entry.pop("callable"), rebulk, entry)
continue
entry_decl = _build_entry_decl(entry, options, value)
for pattern_type in _pattern_types:
patterns = entry_decl.get(pattern_type)
if not patterns:
continue
if not isinstance(patterns, list):
patterns = [patterns]
patterns_entry_decl = dict(entry_decl)
for pattern_type_to_remove in _pattern_types:
patterns_entry_decl.pop(pattern_type_to_remove, None)
current_pattern_options = dict(options)
current_pattern_options[None] = patterns_entry_decl
load_patterns(rebulk, pattern_type, patterns, current_pattern_options)
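# Illustrative sketch (assumed values, not taken from the real guessit configuration):
# a `config` mapping accepted by load_config_patterns. Each key is the value produced
# on a match; entries may be plain strings, "re:"-prefixed regexes, or dicts whose
# extra keys are passed through to rebulk as keyword arguments.
#
#     load_config_patterns(rebulk, {
#         "1080p": ["1080p", "re:1080[pi]"],
#         "HDTV": {"string": ["hdtv"], "tags": ["source"]},
#     })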
|
3,459 |
listdir ifcfg2
|
import errno
import textwrap
from os.path import basename
import mock
import six
from leapp.libraries.actor import ifcfgscanner
from leapp.libraries.common.testutils import make_OSError, produce_mocked
from leapp.libraries.stdlib import api
from leapp.models import IfCfg
_builtins_open = "builtins.open" if six.PY3 else "__builtin__.open"
def _listdir_ifcfg(path):
if path == ifcfgscanner.SYSCONFIG_DIR:
return ["ifcfg-net0"]
raise make_OSError(errno.ENOENT)
def METHOD_NAME(path):
if path == ifcfgscanner.SYSCONFIG_DIR:
return ["ifcfg-net0", "ifcfg-net1"]
raise make_OSError(errno.ENOENT)
def _exists_ifcfg(filename):
return basename(filename).startswith("ifcfg-")
def _exists_keys(filename):
if _exists_ifcfg(filename):
return True
return basename(filename).startswith("keys-")
def test_no_conf(monkeypatch):
"""
No report if there are no ifcfg files.
"""
monkeypatch.setattr(ifcfgscanner, "listdir", lambda _: ())
monkeypatch.setattr(api, "produce", produce_mocked())
ifcfgscanner.process()
assert not api.produce.called
def test_ifcfg1(monkeypatch):
"""
Parse a single ifcfg file.
"""
ifcfg_file = textwrap.dedent("""
TYPE=Wireless # Some comment
# Another comment
ESSID=wep1
NAME="wep1"
MODE='Managed' # comment
WEP_KEY_FLAGS=ask
SECURITYMODE=open
DEFAULTKEY=1
KEY_TYPE=key
""")
mock_config = mock.mock_open(read_data=ifcfg_file)
with mock.patch(_builtins_open, mock_config):
monkeypatch.setattr(ifcfgscanner, "listdir", _listdir_ifcfg)
monkeypatch.setattr(ifcfgscanner.path, "exists", _exists_ifcfg)
monkeypatch.setattr(api, "produce", produce_mocked())
ifcfgscanner.process()
assert api.produce.called == 1
assert len(api.produce.model_instances) == 1
ifcfg = api.produce.model_instances[0]
assert isinstance(ifcfg, IfCfg)
assert ifcfg.filename == "/etc/sysconfig/network-scripts/ifcfg-net0"
assert ifcfg.secrets is None
assert len(ifcfg.properties) == 8
assert ifcfg.properties[0].name == "TYPE"
assert ifcfg.properties[0].value == "Wireless"
assert ifcfg.properties[1].name == "ESSID"
assert ifcfg.properties[1].value == "wep1"
assert ifcfg.properties[2].name == "NAME"
assert ifcfg.properties[2].value == "wep1"
assert ifcfg.properties[3].name == "MODE"
assert ifcfg.properties[3].value == "Managed"
def test_ifcfg2(monkeypatch):
"""
Parse two ifcfg files.
"""
mock_config = mock.mock_open(read_data="TYPE=Ethernet")
with mock.patch(_builtins_open, mock_config):
monkeypatch.setattr(ifcfgscanner, "listdir", METHOD_NAME)
monkeypatch.setattr(ifcfgscanner.path, "exists", _exists_ifcfg)
monkeypatch.setattr(api, "produce", produce_mocked())
ifcfgscanner.process()
assert api.produce.called == 2
assert len(api.produce.model_instances) == 2
ifcfg = api.produce.model_instances[0]
assert isinstance(ifcfg, IfCfg)
def test_ifcfg_key(monkeypatch):
"""
Report ifcfg secrets from keys- file.
"""
mock_config = mock.mock_open(read_data="KEY_PASSPHRASE1=Hell0")
with mock.patch(_builtins_open, mock_config):
monkeypatch.setattr(ifcfgscanner, "listdir", _listdir_ifcfg)
monkeypatch.setattr(ifcfgscanner.path, "exists", _exists_keys)
monkeypatch.setattr(api, "produce", produce_mocked())
ifcfgscanner.process()
assert api.produce.called == 1
assert len(api.produce.model_instances) == 1
ifcfg = api.produce.model_instances[0]
assert isinstance(ifcfg, IfCfg)
assert ifcfg.filename == "/etc/sysconfig/network-scripts/ifcfg-net0"
assert len(ifcfg.secrets) == 1
assert ifcfg.secrets[0].name == "KEY_PASSPHRASE1"
assert ifcfg.secrets[0].value is None
|
3,460 |
execute update cache
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the Apache 2.0 License.
import requests
import grpc
from loguru import logger as LOG
import kv_pb2 as KV
import http_pb2 as HTTP
import kv_pb2_grpc as Service
from google.protobuf.empty_pb2 import Empty as Empty
class WikiCacherExecutor:
API_VERSION = "v1"
PROJECT = "wikipedia"
LANGUAGE = "en"
CACHE_TABLE = "wiki_descriptions"
supported_endpoints = None
def __init__(
self,
node_public_rpc_address,
credentials,
base_url="https://api.wikimedia.org",
label=None,
):
self.node_public_rpc_address = node_public_rpc_address
self.base_url = base_url
if label is not None:
self.prefix = f"[{label}] "
else:
self.prefix = ""
self.credentials = credentials
self.handled_requests_count = 0
@staticmethod
def get_supported_endpoints(topics):
endpoints = []
for topic in topics:
endpoints.append(("POST", "/update_cache/" + topic))
endpoints.append(("GET", "/article_description/" + topic))
return endpoints
def _api_base(self):
return "/".join(
(
self.base_url,
"core",
self.API_VERSION,
self.PROJECT,
self.LANGUAGE,
)
)
def _get_description(self, title):
url = "/".join((self._api_base(), "page", title, "description"))
LOG.debug(f"{self.prefix}Requesting {url}")
r = requests.get(url, timeout=3)
if r.status_code == 200:
return r.json()["description"]
LOG.error(f"{self.prefix}{r}")
def METHOD_NAME(self, kv_stub, request, response):
prefix = "/update_cache/"
title = request.uri[len(prefix) :]
description = self._get_description(title)
if description is None:
response.status_code = HTTP.HttpStatusCode.BAD_GATEWAY
response.body = f"Error when fetching article with title '{title}'".encode(
"utf-8"
)
else:
kv_stub.Put(
KV.KVKeyValue(
table=self.CACHE_TABLE,
key=title.encode("utf-8"),
value=description.encode("utf-8"),
)
)
response.status_code = HTTP.HttpStatusCode.OK
response.body = f"Successfully updated cache with description of '{title}':\n\n{description}".encode(
"utf-8"
)
def _execute_get_description(self, kv_stub, request, response):
prefix = "/article_description/"
title = request.uri[len(prefix) :]
result = kv_stub.Get(
KV.KVKey(table=self.CACHE_TABLE, key=title.encode("utf-8"))
)
if not result.HasField("optional"):
response.status_code = HTTP.HttpStatusCode.NOT_FOUND
response.body = f"No description for '{title}' in cache".encode("utf-8")
else:
response.status_code = HTTP.HttpStatusCode.OK
response.body = result.optional.value
def run_loop(self, activated_event=None):
LOG.info(f"{self.prefix}Beginning executor loop")
with grpc.secure_channel(
target=self.node_public_rpc_address,
credentials=self.credentials,
) as channel:
stub = Service.KVStub(channel)
for work in stub.Activate(Empty()):
if work.HasField("activated"):
if activated_event is not None:
activated_event.set()
continue
if work.HasField("work_done"):
break
assert work.HasField("request_description")
request = work.request_description
self.handled_requests_count += 1
response = KV.ResponseDescription(
status_code=HTTP.HttpStatusCode.NOT_FOUND
)
if request.method == "POST" and request.uri.startswith(
"/update_cache/"
):
LOG.info(f"{self.prefix}Updating article in cache: {request.uri}")
self.METHOD_NAME(stub, request, response)
elif request.method == "GET" and request.uri.startswith(
"/article_description/"
):
LOG.info(
f"{self.prefix}Retrieving description from cache: {request.uri}"
)
self._execute_get_description(stub, request, response)
else:
LOG.error(
f"{self.prefix}Unhandled request: {request.method} {request.uri}"
)
response.status_code = HTTP.HttpStatusCode.NOT_FOUND
response.body = (
f"No resource found at {request.method} {request.uri}".encode(
"utf-8"
)
)
stub.EndTx(response)
LOG.info(f"{self.prefix}Ended executor loop")
def terminate(self, *args):
LOG.debug("Terminating...")
with grpc.secure_channel(
target=self.node_public_rpc_address,
credentials=self.credentials,
) as channel:
stub = Service.KVStub(channel)
stub.Deactivate(Empty())
LOG.info("Terminated")
|
3,461 |
expand tuples
|
# mako/pyparser.py
# Copyright 2006-2022 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Handles parsing of Python code.
Parsing to AST is done via the _ast module.
"""
import operator
import _ast
from docassemble.base.mako import _ast_util
from docassemble.base.mako import compat
from docassemble.base.mako import exceptions
from docassemble.base.mako import util
# words that cannot be assigned to (notably
# smaller than the total keys in __builtins__)
reserved = {"True", "False", "None", "print"}
# the "id" attribute on a function node
arg_id = operator.attrgetter("arg")
util.restore__ast(_ast)
def parse(code, mode="exec", **exception_kwargs):
"""Parse an expression into AST"""
try:
return _ast_util.parse(code, "<unknown>", mode)
except Exception as e:
raise exceptions.SyntaxException(
"(%s) %s (%r)"
% (
compat.exception_as().__class__.__name__,
compat.exception_as(),
code[0:50],
),
**exception_kwargs,
) from e
class FindIdentifiers(_ast_util.NodeVisitor):
def __init__(self, listener, **exception_kwargs):
self.in_function = False
self.in_assign_targets = False
self.local_ident_stack = set()
self.listener = listener
self.exception_kwargs = exception_kwargs
def _add_declared(self, name):
if not self.in_function:
self.listener.declared_identifiers.add(name)
else:
self.local_ident_stack.add(name)
def visit_ClassDef(self, node):
self._add_declared(node.name)
def visit_Assign(self, node):
# flip around the visiting of Assign so the expression gets
# evaluated first, in the case of a clause like "x=x+5" (x
# is undeclared)
self.visit(node.value)
in_a = self.in_assign_targets
self.in_assign_targets = True
for n in node.targets:
self.visit(n)
self.in_assign_targets = in_a
def visit_ExceptHandler(self, node):
if node.name is not None:
self._add_declared(node.name)
if node.type is not None:
self.visit(node.type)
for statement in node.body:
self.visit(statement)
def visit_Lambda(self, node, *args):
self._visit_function(node, True)
def visit_FunctionDef(self, node):
self._add_declared(node.name)
self._visit_function(node, False)
def METHOD_NAME(self, args):
for arg in args:
if isinstance(arg, _ast.Tuple):
yield from arg.elts
else:
yield arg
def _visit_function(self, node, islambda):
        # push function state onto stack. don't log any more
        # identifiers as "declared" until outside of the function,
        # but keep logging identifiers as "undeclared". track
        # argument names in each function header so they aren't
        # counted as "undeclared"
inf = self.in_function
self.in_function = True
local_ident_stack = self.local_ident_stack
self.local_ident_stack = local_ident_stack.union(
[arg_id(arg) for arg in self.METHOD_NAME(node.args.args)]
)
if islambda:
self.visit(node.body)
else:
for n in node.body:
self.visit(n)
self.in_function = inf
self.local_ident_stack = local_ident_stack
def visit_For(self, node):
# flip around visit
self.visit(node.iter)
self.visit(node.target)
for statement in node.body:
self.visit(statement)
for statement in node.orelse:
self.visit(statement)
def visit_Name(self, node):
if isinstance(node.ctx, _ast.Store):
            # this is equivalent to visit_AssName in
# compiler
self._add_declared(node.id)
elif (
node.id not in reserved
and node.id not in self.listener.declared_identifiers
and node.id not in self.local_ident_stack
):
self.listener.undeclared_identifiers.add(node.id)
def visit_Import(self, node):
for name in node.names:
if name.asname is not None:
self._add_declared(name.asname)
else:
self._add_declared(name.name.split(".")[0])
def visit_ImportFrom(self, node):
for name in node.names:
if name.asname is not None:
self._add_declared(name.asname)
elif name.name == "*":
raise exceptions.CompileException(
"'import *' is not supported, since all identifier "
"names must be explicitly declared. Please use the "
"form 'from <modulename> import <name1>, <name2>, "
"...' instead.",
**self.exception_kwargs,
)
else:
self._add_declared(name.name)
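# Hedged illustration (not part of Mako itself): a minimal stand-in listener showing
# how FindIdentifiers is typically driven. In Mako the listener is a wrapper object
# defined elsewhere; the _Listener and _example_find_identifiers names below are
# invented for this sketch only.
def _example_find_identifiers():
    class _Listener:
        def __init__(self):
            self.declared_identifiers = set()
            self.undeclared_identifiers = set()
    listener = _Listener()
    # "x" is assigned, so it becomes declared; "y" is only read, so it is undeclared.
    FindIdentifiers(listener).visit(parse("x = y + 5", "exec"))
    return listener.declared_identifiers, listener.undeclared_identifiers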
class FindTuple(_ast_util.NodeVisitor):
def __init__(self, listener, code_factory, **exception_kwargs):
self.listener = listener
self.exception_kwargs = exception_kwargs
self.code_factory = code_factory
def visit_Tuple(self, node):
for n in node.elts:
p = self.code_factory(n, **self.exception_kwargs)
self.listener.codeargs.append(p)
self.listener.args.append(ExpressionGenerator(n).value())
ldi = self.listener.declared_identifiers
self.listener.declared_identifiers = ldi.union(
p.declared_identifiers
)
lui = self.listener.undeclared_identifiers
self.listener.undeclared_identifiers = lui.union(
p.undeclared_identifiers
)
class ParseFunc(_ast_util.NodeVisitor):
def __init__(self, listener, **exception_kwargs):
self.listener = listener
self.exception_kwargs = exception_kwargs
def visit_FunctionDef(self, node):
self.listener.funcname = node.name
argnames = [arg_id(arg) for arg in node.args.args]
if node.args.vararg:
argnames.append(node.args.vararg.arg)
kwargnames = [arg_id(arg) for arg in node.args.kwonlyargs]
if node.args.kwarg:
kwargnames.append(node.args.kwarg.arg)
self.listener.argnames = argnames
self.listener.defaults = node.args.defaults # ast
self.listener.kwargnames = kwargnames
self.listener.kwdefaults = node.args.kw_defaults
self.listener.varargs = node.args.vararg
self.listener.kwargs = node.args.kwarg
class ExpressionGenerator:
def __init__(self, astnode):
self.generator = _ast_util.SourceGenerator(" " * 4)
self.generator.visit(astnode)
def value(self):
return "".join(self.generator.result)
|
3,462 |
test df to records mixed emoji type
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument, import-outside-toplevel
from datetime import datetime
import pytest
from pandas import Timestamp
from pandas._libs.tslibs import NaT
from superset.dataframe import df_to_records
from superset.superset_typing import DbapiDescription
def test_df_to_records() -> None:
from superset.db_engine_specs import BaseEngineSpec
from superset.result_set import SupersetResultSet
data = [("a1", "b1", "c1"), ("a2", "b2", "c2")]
cursor_descr: DbapiDescription = [
(column, "string", None, None, None, None, False) for column in ("a", "b", "c")
]
results = SupersetResultSet(data, cursor_descr, BaseEngineSpec)
df = results.to_pandas_df()
assert df_to_records(df) == [
{"a": "a1", "b": "b1", "c": "c1"},
{"a": "a2", "b": "b2", "c": "c2"},
]
def test_df_to_records_NaT_type() -> None:
from superset.db_engine_specs import BaseEngineSpec
from superset.result_set import SupersetResultSet
data = [(NaT,), (Timestamp("2023-01-06 20:50:31.749000+0000", tz="UTC"),)]
cursor_descr: DbapiDescription = [
("date", "timestamp with time zone", None, None, None, None, False)
]
results = SupersetResultSet(data, cursor_descr, BaseEngineSpec)
df = results.to_pandas_df()
assert df_to_records(df) == [
{"date": None},
{"date": "2023-01-06 20:50:31.749000+00:00"},
]
def METHOD_NAME() -> None:
from superset.db_engine_specs import BaseEngineSpec
from superset.result_set import SupersetResultSet
data = [
("What's up?", "This is a string text", 1),
("What's up?", "This is a string with an 😍 added", 2),
("What's up?", NaT, 3),
("What's up?", "Last emoji 😁", 4),
]
cursor_descr: DbapiDescription = [
("question", "varchar", None, None, None, None, False),
("response", "varchar", None, None, None, None, False),
("count", "integer", None, None, None, None, False),
]
results = SupersetResultSet(data, cursor_descr, BaseEngineSpec)
df = results.to_pandas_df()
assert df_to_records(df) == [
{"question": "What's up?", "response": "This is a string text", "count": 1},
{
"question": "What's up?",
"response": "This is a string with an 😍 added",
"count": 2,
},
{
"question": "What's up?",
"response": None,
"count": 3,
},
{
"question": "What's up?",
"response": "Last emoji 😁",
"count": 4,
},
]
def test_df_to_records_mixed_accent_type() -> None:
from superset.db_engine_specs import BaseEngineSpec
from superset.result_set import SupersetResultSet
data = [
("What's up?", "This is a string text", 1),
("What's up?", "This is a string with áccent", 2),
("What's up?", NaT, 3),
("What's up?", "móre áccent", 4),
]
cursor_descr: DbapiDescription = [
("question", "varchar", None, None, None, None, False),
("response", "varchar", None, None, None, None, False),
("count", "integer", None, None, None, None, False),
]
results = SupersetResultSet(data, cursor_descr, BaseEngineSpec)
df = results.to_pandas_df()
assert df_to_records(df) == [
{"question": "What's up?", "response": "This is a string text", "count": 1},
{
"question": "What's up?",
"response": "This is a string with áccent",
"count": 2,
},
{
"question": "What's up?",
"response": None,
"count": 3,
},
{
"question": "What's up?",
"response": "móre áccent",
"count": 4,
},
]
def test_js_max_int() -> None:
from superset.db_engine_specs import BaseEngineSpec
from superset.result_set import SupersetResultSet
data = [(1, 1239162456494753670, "c1"), (2, 100, "c2")]
cursor_descr: DbapiDescription = [
("a", "int", None, None, None, None, False),
("b", "int", None, None, None, None, False),
("c", "string", None, None, None, None, False),
]
results = SupersetResultSet(data, cursor_descr, BaseEngineSpec)
df = results.to_pandas_df()
assert df_to_records(df) == [
{"a": 1, "b": "1239162456494753670", "c": "c1"},
{"a": 2, "b": 100, "c": "c2"},
]
@pytest.mark.parametrize(
"input_, expected",
[
pytest.param(
[
(datetime.strptime("1677-09-22 00:12:43", "%Y-%m-%d %H:%M:%S"), 1),
(datetime.strptime("2262-04-11 23:47:17", "%Y-%m-%d %H:%M:%S"), 2),
],
[
{
"a": datetime.strptime("1677-09-22 00:12:43", "%Y-%m-%d %H:%M:%S"),
"b": 1,
},
{
"a": datetime.strptime("2262-04-11 23:47:17", "%Y-%m-%d %H:%M:%S"),
"b": 2,
},
],
id="timestamp conversion fail",
),
pytest.param(
[
(datetime.strptime("1677-09-22 00:12:44", "%Y-%m-%d %H:%M:%S"), 1),
(datetime.strptime("2262-04-11 23:47:16", "%Y-%m-%d %H:%M:%S"), 2),
],
[
{"a": Timestamp("1677-09-22 00:12:44"), "b": 1},
{"a": Timestamp("2262-04-11 23:47:16"), "b": 2},
],
id="timestamp conversion success",
),
],
)
def test_max_pandas_timestamp(input_, expected) -> None:
from superset.db_engine_specs import BaseEngineSpec
from superset.result_set import SupersetResultSet
cursor_descr: DbapiDescription = [
("a", "datetime", None, None, None, None, False),
("b", "int", None, None, None, None, False),
]
results = SupersetResultSet(input_, cursor_descr, BaseEngineSpec)
df = results.to_pandas_df()
assert df_to_records(df) == expected
|
3,463 |
check view call
|
#!/usr/bin/env python3
"""
Prober that is compatible with cloudprober.
The ProberSplit queries two nodes for blocks and chunks at random heights and
compares the results. The expectation is that the block and chunks at each
height will be identical even when fetched from two different nodes. It also
executes a contract view call on both nodes and compares the results.
The prober runs continuously for the duration specified in the command line
arguments. It runs at least one block and chunk check at a random height.
The intended goal of this prober is to ensure that a legacy archival node and a
split storage archival node contain the same data.
Run like this:
./prober_split.py --chain-id testnet --split-url http://split.archival.node:3030 --duration-ms 20000
"""
import argparse
import random
import subprocess
import sys
import time
from datetime import datetime, timedelta
from prober_util import *
def check_genesis(legacy_url: str, split_url: str) -> int:
legacy_genesis_height = get_genesis_height(legacy_url)
split_genesis_height = get_genesis_height(split_url)
if legacy_genesis_height != split_genesis_height:
        logger.error(
            f"The genesis height is different. "
            f"legacy: {legacy_genesis_height}, split: {split_genesis_height}")
sys.exit(1)
return legacy_genesis_height
def check_head(legacy_url: str, split_url: str, genesis_height: int) -> int:
legacy_head_height = get_head(legacy_url)
split_head_height = get_head(split_url)
if legacy_head_height <= genesis_height:
        logger.error(
            f'{legacy_url} head must be higher than genesis. '
            f'Got {legacy_head_height} and {genesis_height}')
sys.exit(1)
if split_head_height <= genesis_height:
        logger.error(
            f'{split_url} head must be higher than genesis. '
            f'Got {split_head_height} and {genesis_height}')
sys.exit(1)
return min(legacy_head_height, split_head_height)
def check_blocks(legacy_url: str, split_url: str, height: int):
logger.info(f"Checking blocks at height {height}.")
legacy_block = get_block(height, legacy_url)
split_block = get_block(height, split_url)
if legacy_block != split_block:
logger.error(
f"Block check failed, the legacy block and the split block are different",
f"\nlegacy block\n{pretty_print(legacy_block)}"
f"\nsplit block\n{pretty_print(split_block)}")
sys.exit(1)
return legacy_block
def check_chunks(legacy_url: str, split_url: str, block):
if block is None:
return
logger.info(f"Checking chunks.")
for chunk in block['chunks']:
legacy_chunk = get_chunk(chunk, legacy_url)
split_chunk = get_chunk(chunk, split_url)
if legacy_chunk != split_chunk:
logger.error(
f"Chunk check failed, the legacy chunk and the split chunk are different"
f"\nlegacy chunk\n{pretty_print(legacy_chunk)}"
f"\nsplit chunk\n{pretty_print(split_chunk)}")
sys.exit(1)
def METHOD_NAME(legacy_url, split_url):
logger.info(f"Checking view call.")
# This is the example contract function call from
# https://docs.near.org/api/rpc/contracts#call-a-contract-function
params = {
"request_type": "call_function",
"finality": "final",
"account_id": "dev-1588039999690",
"method_name": "get_num",
"args_base64": "e30="
}
legacy_resp = json_rpc('query', params, legacy_url)
split_resp = json_rpc('query', params, split_url)
if legacy_resp['result']['result'] != split_resp['result']['result']:
logger.error(
f'View call check failed, the legacy response and the split response are different'
f'\nlegacy response\n{legacy_resp}'
f'\nsplit response\n{split_resp}')
sys.exit(1)
# Query gcp for the archive nodes, pick a random one and return its url.
def get_random_legacy_url(chain_id):
cmd = [
'gcloud',
'compute',
'instances',
'list',
]
logger.info(" ".join(cmd))
result = subprocess.run(cmd, text=True, capture_output=True)
stdout = result.stdout
lines = stdout.split('\n')
pattern = f'{chain_id}-rpc-archive-public'
lines = list(filter(lambda line: pattern in line, lines))
line = random.choice(lines)
tokens = line.split()
external_ip = tokens[4]
logger.info(f'Selected random legacy node - {external_ip}')
return f'http://{external_ip}:3030'
def main():
start_time = datetime.now()
parser = argparse.ArgumentParser(
description='Run a prober for split archival nodes')
parser.add_argument('--chain-id', required=True, type=str)
parser.add_argument('--split-url', required=True, type=str)
parser.add_argument('--duration-ms', default=2000, type=int)
parser.add_argument('--log-level', default="INFO")
args = parser.parse_args()
logger.setLevel(args.log_level)
# log an empty line for cloudprober nice formatting
logger.info('')
logger.info('Running Prober Split')
legacy_url = get_random_legacy_url(args.chain_id)
split_url = args.split_url
duration = timedelta(milliseconds=args.duration_ms)
genesis_height = check_genesis(legacy_url, split_url)
head = check_head(legacy_url, split_url, genesis_height)
logger.info(f'The genesis height is {genesis_height}.')
logger.info(f'The head height is {head}')
METHOD_NAME(legacy_url, split_url)
# Verify multiple heights - optimization to allow the prober to verify
# multiple heights in a single run.
count = 0
none_count = 0
while True:
# Pick a random number and then check the block and chunks at that height.
height = random.randint(genesis_height, head)
block = check_blocks(legacy_url, split_url, height)
check_chunks(legacy_url, split_url, block)
count += 1
none_count += block is None
current_time = datetime.now()
current_duration = current_time - start_time
if current_duration >= duration:
break
time.sleep(0.200)
logger.info(
f"Success. Validated {count} blocks. There were {none_count} missing blocks."
)
if __name__ == '__main__':
main()
|
3,464 |
get option value from boolean
|
from __future__ import annotations
from collections import namedtuple
from typing import Any, Iterable, Mapping, Optional, Union
from sentry.models.user import User
from sentry.notifications.types import (
FineTuningAPIKey,
NotificationScopeType,
NotificationSettingOptionValues,
NotificationSettingTypes,
UserOptionsSettingsKey,
)
from sentry.services.hybrid_cloud.user.model import RpcUser
LegacyUserOptionClone = namedtuple(
"LegacyUserOptionClone",
[
"user",
"project_id",
"organization_id",
"key",
"value",
],
)
USER_OPTION_SETTINGS = {
UserOptionsSettingsKey.DEPLOY: {
"key": "deploy-emails",
"default": "3",
"type": int,
},
UserOptionsSettingsKey.SELF_ACTIVITY: {
"key": "self_notifications",
"default": "0",
"type": bool,
},
UserOptionsSettingsKey.SELF_ASSIGN: {
"key": "self_assign_issue",
"default": "0",
"type": bool,
},
UserOptionsSettingsKey.SUBSCRIBE_BY_DEFAULT: {
"key": "subscribe_by_default",
"default": "1",
"type": bool,
},
UserOptionsSettingsKey.WORKFLOW: {
"key": "workflow:notifications",
"default": "1",
"type": int,
},
}
KEYS_TO_LEGACY_KEYS = {
NotificationSettingTypes.DEPLOY: "deploy-emails",
NotificationSettingTypes.ISSUE_ALERTS: "mail:alert",
NotificationSettingTypes.WORKFLOW: "workflow:notifications",
}
KEY_VALUE_TO_LEGACY_VALUE = {
NotificationSettingTypes.DEPLOY: {
NotificationSettingOptionValues.ALWAYS: 2,
NotificationSettingOptionValues.COMMITTED_ONLY: 3,
NotificationSettingOptionValues.NEVER: 4,
},
NotificationSettingTypes.ISSUE_ALERTS: {
NotificationSettingOptionValues.ALWAYS: 1,
NotificationSettingOptionValues.NEVER: 0,
},
NotificationSettingTypes.WORKFLOW: {
NotificationSettingOptionValues.ALWAYS: 0,
NotificationSettingOptionValues.SUBSCRIBE_ONLY: 1,
NotificationSettingOptionValues.NEVER: 2,
},
}
LEGACY_VALUE_TO_KEY = {
NotificationSettingTypes.DEPLOY: {
-1: NotificationSettingOptionValues.DEFAULT,
2: NotificationSettingOptionValues.ALWAYS,
3: NotificationSettingOptionValues.COMMITTED_ONLY,
4: NotificationSettingOptionValues.NEVER,
},
NotificationSettingTypes.ISSUE_ALERTS: {
-1: NotificationSettingOptionValues.DEFAULT,
0: NotificationSettingOptionValues.NEVER,
1: NotificationSettingOptionValues.ALWAYS,
},
NotificationSettingTypes.WORKFLOW: {
-1: NotificationSettingOptionValues.DEFAULT,
0: NotificationSettingOptionValues.ALWAYS,
1: NotificationSettingOptionValues.SUBSCRIBE_ONLY,
2: NotificationSettingOptionValues.NEVER,
},
}
def get_legacy_key(type: NotificationSettingTypes, scope_type: NotificationScopeType) -> str | None:
"""Temporary mapping from new enum types to legacy strings."""
if scope_type == NotificationScopeType.USER and type == NotificationSettingTypes.ISSUE_ALERTS:
return "subscribe_by_default"
return KEYS_TO_LEGACY_KEYS.get(type)
def get_legacy_value(type: NotificationSettingTypes, value: NotificationSettingOptionValues) -> str:
"""
Temporary mapping from new enum types to legacy strings. Each type has a separate mapping.
"""
return str(KEY_VALUE_TO_LEGACY_VALUE.get(type, {}).get(value))
def METHOD_NAME(value: bool) -> NotificationSettingOptionValues:
if value:
return NotificationSettingOptionValues.ALWAYS
else:
return NotificationSettingOptionValues.NEVER
def get_option_value_from_int(
type: NotificationSettingTypes, value: int
) -> NotificationSettingOptionValues | None:
return LEGACY_VALUE_TO_KEY.get(type, {}).get(value)
def get_type_from_fine_tuning_key(key: FineTuningAPIKey) -> NotificationSettingTypes | None:
return {
FineTuningAPIKey.ALERTS: NotificationSettingTypes.ISSUE_ALERTS,
FineTuningAPIKey.DEPLOY: NotificationSettingTypes.DEPLOY,
FineTuningAPIKey.WORKFLOW: NotificationSettingTypes.WORKFLOW,
}.get(key)
def get_type_from_user_option_settings_key(
key: UserOptionsSettingsKey,
) -> NotificationSettingTypes | None:
return {
UserOptionsSettingsKey.DEPLOY: NotificationSettingTypes.DEPLOY,
UserOptionsSettingsKey.WORKFLOW: NotificationSettingTypes.WORKFLOW,
UserOptionsSettingsKey.SUBSCRIBE_BY_DEFAULT: NotificationSettingTypes.ISSUE_ALERTS,
}.get(key)
def get_key_from_legacy(key: str) -> NotificationSettingTypes | None:
return {
"deploy-emails": NotificationSettingTypes.DEPLOY,
"mail:alert": NotificationSettingTypes.ISSUE_ALERTS,
"subscribe_by_default": NotificationSettingTypes.ISSUE_ALERTS,
"workflow:notifications": NotificationSettingTypes.WORKFLOW,
}.get(key)
def get_key_value_from_legacy(
key: str, value: Any
) -> tuple[NotificationSettingTypes | None, NotificationSettingOptionValues | None]:
type = get_key_from_legacy(key)
if type not in LEGACY_VALUE_TO_KEY:
return None, None
option_value = LEGACY_VALUE_TO_KEY.get(type, {}).get(int(value))
return type, option_value
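# Worked illustration (comment only, not part of this module): with the tables above,
# get_key_value_from_legacy("mail:alert", "1") first resolves the key to
# NotificationSettingTypes.ISSUE_ALERTS and then maps the legacy value 1 to
# NotificationSettingOptionValues.ALWAYS via LEGACY_VALUE_TO_KEY.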
def get_legacy_object(
notification_setting: Any,
user_mapping: Optional[Mapping[int, Union[User, RpcUser]]] = None,
) -> Any:
type = NotificationSettingTypes(notification_setting.type)
value = NotificationSettingOptionValues(notification_setting.value)
scope_type = NotificationScopeType(notification_setting.scope_type)
key = get_legacy_key(type, scope_type)
data = {
"key": key,
"value": get_legacy_value(type, value),
"user": user_mapping.get(notification_setting.user_id) if user_mapping else None,
"project_id": None,
"organization_id": None,
}
if scope_type == NotificationScopeType.PROJECT:
data["project_id"] = notification_setting.scope_identifier
if scope_type == NotificationScopeType.ORGANIZATION:
data["organization_id"] = notification_setting.scope_identifier
return LegacyUserOptionClone(**data)
def map_notification_settings_to_legacy(
notification_settings: Iterable[Any],
user_mapping: Mapping[int, Union[User, RpcUser]],
) -> list[Any]:
"""A hack for legacy serializers. Pretend a list of NotificationSettings is a list of UserOptions."""
return [
get_legacy_object(notification_setting, user_mapping)
for notification_setting in notification_settings
]
def get_parent_mappings(
notification_settings: Iterable[Any],
) -> tuple[Mapping[int, Any], Mapping[int, Any]]:
"""Prefetch a list of Project or Organization objects for the Serializer."""
from sentry.models.organization import Organization
from sentry.models.project import Project
project_ids = []
organization_ids = []
for notification_setting in notification_settings:
if notification_setting.scope_type == NotificationScopeType.PROJECT.value:
project_ids.append(notification_setting.scope_identifier)
if notification_setting.scope_type == NotificationScopeType.ORGANIZATION.value:
organization_ids.append(notification_setting.scope_identifier)
projects = Project.objects.filter(id__in=project_ids)
organizations = Organization.objects.filter(id__in=organization_ids)
project_mapping = {project.id: project for project in projects}
organization_mapping = {organization.id: organization for organization in organizations}
return project_mapping, organization_mapping
|
3,465 |
test add repeated item
|
import pytest
from reconcile.utils.aggregated_list import (
AggregatedDiffRunner,
AggregatedList,
)
class TestAggregatedList:
@staticmethod
def test_add_item():
alist = AggregatedList()
params = {"a": 1, "b": 2}
items = ["qwerty"]
alist.add(params, items)
assert len(alist.dump()) == 1
assert alist.dump()[0]["items"] == items
assert alist.dump()[0]["params"] == params
@staticmethod
def METHOD_NAME():
alist = AggregatedList()
params = {"a": 1, "b": 2}
item = "qwerty"
items = [item, item]
alist.add(params, items)
assert len(alist.dump()) == 1
assert alist.dump()[0]["items"] == [item]
assert alist.dump()[0]["params"] == params
@staticmethod
def test_add_different_params():
alist = AggregatedList()
params1 = {"b": 1, "a": 2}
items1 = ["qwerty1"]
params2 = {"a": 1, "b": 3}
items2 = ["qwerty2"]
alist.add(params1, items1)
alist.add(params2, items2)
assert len(alist.dump()) == 2
hp1 = AggregatedList.hash_params(params1)
hp2 = AggregatedList.hash_params(params2)
assert alist.get_by_params_hash(hp1)["items"] == items1
assert alist.get_by_params_hash(hp2)["items"] == items2
@staticmethod
def test_get_py_params_hash():
alist = AggregatedList()
params1 = {"a": 1, "b": 2, "c": 3}
params2 = {"b": 2, "c": 3, "a": 1}
params3 = {"c": 3, "a": 1, "b": 2}
params4 = {"a": 1, "c": 3, "b": 2}
params5 = {"a": 1}
items1 = ["qwerty1"]
items2 = ["qwerty2"]
alist.add(params1, items1)
alist.add(params2, items1)
alist.add(params3, items1)
alist.add(params4, items1)
alist.add(params5, items2)
hp1 = AggregatedList.hash_params(params1)
hp2 = AggregatedList.hash_params(params2)
hp3 = AggregatedList.hash_params(params3)
hp4 = AggregatedList.hash_params(params4)
hp5 = AggregatedList.hash_params(params5)
assert hp1 == hp2
assert hp1 == hp3
assert hp1 == hp4
assert hp1 != hp5
assert alist.get_by_params_hash(hp1)["items"] == items1
assert alist.get_by_params_hash(hp5)["items"] == items2
@staticmethod
def test_diff_insert():
left = AggregatedList()
right = AggregatedList()
right.add({"a": 1}, ["qwerty"])
diff = left.diff(right)
assert not diff["delete"]
assert not diff["update-insert"]
assert not diff["update-delete"]
assert diff["insert"] == [{"params": {"a": 1}, "items": ["qwerty"]}]
@staticmethod
def test_diff_delete():
left = AggregatedList()
right = AggregatedList()
left.add({"a": 1}, ["qwerty"])
diff = left.diff(right)
assert not diff["insert"]
assert not diff["update-insert"]
assert not diff["update-delete"]
assert diff["delete"] == [{"params": {"a": 1}, "items": ["qwerty"]}]
@staticmethod
def test_diff_update_insert():
left = AggregatedList()
right = AggregatedList()
left.add({"a": 1}, ["qwerty1"])
right.add({"a": 1}, ["qwerty1", "qwerty2"])
diff = left.diff(right)
assert not diff["insert"]
assert not diff["delete"]
assert not diff["update-delete"]
assert diff["update-insert"] == [{"items": ["qwerty2"], "params": {"a": 1}}]
@staticmethod
def test_diff_update_delete():
left = AggregatedList()
right = AggregatedList()
left.add({"a": 1}, ["qwerty1", "qwerty2"])
right.add({"a": 1}, ["qwerty1"])
diff = left.diff(right)
assert diff["insert"] == []
assert diff["delete"] == []
assert not diff["update-insert"]
assert diff["update-delete"] == [{"items": ["qwerty2"], "params": {"a": 1}}]
class TestAggregatedDiffRunner:
@staticmethod
def test_run():
left = AggregatedList()
right = AggregatedList()
# test insert
right.add({"on": "insert"}, ["i"])
# test delete
left.add({"on": "delete"}, ["d"])
# test update-insert
left.add({"on": "update-insert"}, ["ui1"])
right.add({"on": "update-insert"}, ["ui1", "ui2"])
# test update-delete
left.add({"on": "update-delete"}, ["ud1", "ud2"])
right.add({"on": "update-delete"}, ["ud1"])
on_insert = []
on_delete = []
on_update_insert = []
on_update_delete = []
def recorder(ls):
return lambda p, i: ls.append([p, i])
runner = AggregatedDiffRunner(left.diff(right))
runner.register("insert", recorder(on_insert))
runner.register("delete", recorder(on_delete))
runner.register("update-insert", recorder(on_update_insert))
runner.register("update-delete", recorder(on_update_delete))
runner.run()
assert on_insert == [[{"on": "insert"}, ["i"]]]
assert on_delete == [[{"on": "delete"}, ["d"]]]
assert on_update_insert == [[{"on": "update-insert"}, ["ui2"]]]
assert on_update_delete == [[{"on": "update-delete"}, ["ud2"]]]
@staticmethod
def test_run_cond_true():
left = AggregatedList()
right = AggregatedList()
right.add({"on": "insert"}, ["qwerty"])
runner = AggregatedDiffRunner(left.diff(right))
recorder = []
runner.register("insert", lambda p, i: recorder.append("True"), lambda p: True)
runner.run()
assert recorder == ["True"]
@staticmethod
def test_run_cond_false():
left = AggregatedList()
right = AggregatedList()
right.add({"on": "insert"}, ["qwerty"])
runner = AggregatedDiffRunner(left.diff(right))
recorder = []
runner.register("insert", lambda p, i: recorder.append("True"), lambda p: False)
runner.run()
assert not recorder
@staticmethod
def test_unknown_diff_on():
left = AggregatedList()
right = AggregatedList()
runner = AggregatedDiffRunner(left.diff(right))
with pytest.raises(Exception):
runner.register("qwerty", lambda p, i: True, lambda p: True)
|
3,466 |
parse args
|
#!/usr/bin/python3
#
# Copyright (C) 2019 Mario Limonciello <[email protected]>
#
# SPDX-License-Identifier: LGPL-2.1+
import dbus
import os.path
import sys
import tempfile
import gi
try:
gi.require_version("Fwupd", "2.0")
except ValueError:
print("Missing gobject-introspection packages. Try to install gir1.2-fwupd-2.0.")
sys.exit(1)
from gi.repository import Fwupd # pylint: disable=wrong-import-position
from simple_client import get_daemon_property, install, check_exists, modify_config
from add_capsule_header import add_header
from firmware_packager import make_firmware_metainfo, create_firmware_cab
class Variables:
def __init__(self, device_guid, version):
self.device_guid = device_guid
self.developer_name = "Dell Inc"
self.firmware_name = "New firmware"
self.firmware_summary = "Unknown"
self.firmware_description = "Unknown"
self.firmware_homepage = "https://support.dell.com"
self.contact_info = "Unknown"
self.release_version = version
self.release_description = "Unknown"
self.update_protocol = "org.uefi.capsule"
self.version_format = "dell-bios"
def METHOD_NAME():
"""Parse arguments for this client"""
import argparse
parser = argparse.ArgumentParser(description="Interact with fwupd daemon")
parser.add_argument("exe", nargs="?", help="exe file")
parser.add_argument("deviceid", nargs="?", help="DeviceID to operate on(optional)")
args = parser.METHOD_NAME()
return args
def generate_cab(infile, directory, guid, version):
output = os.path.join(directory, "firmware.bin")
ret = add_header(infile, output, guid)
if ret:
sys.exit(ret)
variables = Variables(guid, version)
make_firmware_metainfo(variables, directory)
create_firmware_cab(variables, directory)
cab = os.path.join(directory, "firmware.cab")
print("Generated CAB file %s" % cab)
return cab
def find_uefi_device(client, deviceid):
devices = client.get_devices()
for item in devices:
# match the device we were given
if deviceid:
if item.get_id() != deviceid:
continue
# internal
if not item.has_flag(1 << 0):
continue
# needs reboot
if not item.has_flag(1 << 8):
continue
# return the first hit for UEFI plugin
if item.get_plugin() == "uefi" or item.get_plugin() == "uefi_capsule":
print("Installing to %s" % item.get_name())
return item.get_guid_default(), item.get_id(), item.get_version()
print("Couldn't find any UEFI devices")
sys.exit(1)
def set_conf_only_trusted(client, setval):
prop = "OnlyTrusted"
current_val = get_daemon_property(prop)
if current_val:
pass
elif setval:
pass
else:
return False
modify_config(client, prop, str(setval).lower())
return get_daemon_property(prop) == setval
def prompt_reboot():
print("An update requires a reboot to complete")
while True:
res = input("Restart now? (Y/N) ")
if res.lower() == "n":
print("Reboot your machine manually to finish the update.")
break
if res.lower() != "y":
continue
# reboot using logind
obj = dbus.SystemBus().get_object(
"org.freedesktop.login1", "/org/freedesktop/login1"
)
obj.Reboot(True, dbus_interface="org.freedesktop.login1.Manager")
if __name__ == "__main__":
ARGS = METHOD_NAME()
CLIENT = Fwupd.Client()
check_exists(ARGS.exe)
try:
is_restore_required = set_conf_only_trusted(CLIENT, False)
directory = tempfile.mkdtemp()
guid, deviceid, version = find_uefi_device(CLIENT, ARGS.deviceid)
cab = generate_cab(ARGS.exe, directory, guid, version)
install(CLIENT, cab, deviceid, True, True)
except Exception as e:
print(e)
if is_restore_required:
set_conf_only_trusted(CLIENT, True)
prompt_reboot()
|
3,467 |
on validation end
|
import warnings
from packaging import version
import optuna
from optuna.storages._cached_storage import _CachedStorage
from optuna.storages._rdb.storage import RDBStorage
# Define key names of `Trial.system_attrs`.
_EPOCH_KEY = "ddp_pl:epoch"
_INTERMEDIATE_VALUE = "ddp_pl:intermediate_value"
_PRUNED_KEY = "ddp_pl:pruned"
with optuna._imports.try_import() as _imports:
import pytorch_lightning as pl
from pytorch_lightning import LightningModule
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import Callback
if not _imports.is_successful():
Callback = object # type: ignore[assignment, misc] # NOQA[F811]
LightningModule = object # type: ignore[assignment, misc] # NOQA[F811]
Trainer = object # type: ignore[assignment, misc] # NOQA[F811]
class PyTorchLightningPruningCallback(Callback):
"""PyTorch Lightning callback to prune unpromising trials.
See `the example <https://github.com/optuna/optuna-examples/blob/
main/pytorch/pytorch_lightning_simple.py>`__
if you want to add a pruning callback which observes accuracy.
Args:
trial:
A :class:`~optuna.trial.Trial` corresponding to the current evaluation of the
objective function.
monitor:
An evaluation metric for pruning, e.g., ``val_loss`` or
``val_acc``. The metrics are obtained from the returned dictionaries from e.g.
``pytorch_lightning.LightningModule.training_step`` or
``pytorch_lightning.LightningModule.validation_epoch_end`` and the names thus depend on
how this dictionary is formatted.
.. note::
For the distributed data parallel training, the version of PyTorchLightning needs to be
higher than or equal to v1.6.0. In addition, :class:`~optuna.study.Study` should be
instantiated with RDB storage.
.. note::
If you would like to use PyTorchLightningPruningCallback in a distributed training
environment, you need to evoke `PyTorchLightningPruningCallback.check_pruned()`
manually so that :class:`~optuna.exceptions.TrialPruned` is properly handled.
"""
def __init__(self, trial: optuna.trial.Trial, monitor: str) -> None:
_imports.check()
super().__init__()
self._trial = trial
self.monitor = monitor
self.is_ddp_backend = False
def on_fit_start(self, trainer: Trainer, pl_module: "pl.LightningModule") -> None:
self.is_ddp_backend = trainer._accelerator_connector.is_distributed
if self.is_ddp_backend:
if version.parse(pl.__version__) < version.parse( # type: ignore[attr-defined]
"1.6.0"
):
raise ValueError("PyTorch Lightning>=1.6.0 is required in DDP.")
# If it were not for this block, fitting is started even if unsupported storage
# is used. Note that the ValueError is transformed into ProcessRaisedException inside
# torch.
if not (
isinstance(self._trial.study._storage, _CachedStorage)
and isinstance(self._trial.study._storage._backend, RDBStorage)
):
raise ValueError(
"optuna.integration.PyTorchLightningPruningCallback"
" supports only optuna.storages.RDBStorage in DDP."
)
# It is necessary to store intermediate values directly in the backend storage because
# they are not properly propagated to main process due to cached storage.
# TODO(Shinichi) Remove intermediate_values from system_attr after PR #4431 is merged.
if trainer.is_global_zero:
self._trial.storage.set_trial_system_attr(
self._trial._trial_id,
_INTERMEDIATE_VALUE,
dict(),
)
def METHOD_NAME(self, trainer: Trainer, pl_module: LightningModule) -> None:
# Trainer calls `on_validation_end` for sanity check. Therefore, it is necessary to avoid
# calling `trial.report` multiple times at epoch 0. For more details, see
# https://github.com/PyTorchLightning/pytorch-lightning/issues/1391.
if trainer.sanity_checking:
return
current_score = trainer.callback_metrics.get(self.monitor)
if current_score is None:
message = (
f"The metric '{self.monitor}' is not in the evaluation logs for pruning. "
"Please make sure you set the correct metric name."
)
warnings.warn(message)
return
epoch = pl_module.current_epoch
should_stop = False
# Determine if the trial should be terminated in a single process.
if not self.is_ddp_backend:
self._trial.report(current_score.item(), step=epoch)
if not self._trial.should_prune():
return
raise optuna.TrialPruned(f"Trial was pruned at epoch {epoch}.")
# Determine if the trial should be terminated in a DDP.
if trainer.is_global_zero:
self._trial.report(current_score.item(), step=epoch)
should_stop = self._trial.should_prune()
# Update intermediate value in the storage.
_trial_id = self._trial._trial_id
_study = self._trial.study
_trial_system_attrs = _study._storage.get_trial_system_attrs(_trial_id)
intermediate_values = _trial_system_attrs.get(_INTERMEDIATE_VALUE)
intermediate_values[epoch] = current_score.item() # type: ignore[index]
self._trial.storage.set_trial_system_attr(
self._trial._trial_id, _INTERMEDIATE_VALUE, intermediate_values
)
# Terminate every process if any world process decides to stop.
should_stop = trainer.strategy.broadcast(should_stop)
trainer.should_stop = trainer.should_stop or should_stop
if not should_stop:
return
if trainer.is_global_zero:
# Update system_attr from global zero process.
self._trial.storage.set_trial_system_attr(self._trial._trial_id, _PRUNED_KEY, True)
self._trial.storage.set_trial_system_attr(self._trial._trial_id, _EPOCH_KEY, epoch)
def check_pruned(self) -> None:
"""Raise :class:`optuna.TrialPruned` manually if pruned.
Currently, ``intermediate_values`` are not properly propagated between processes due to
storage cache. Therefore, necessary information is kept in trial_system_attrs when the
trial runs in a distributed situation. Please call this method right after calling
``pytorch_lightning.Trainer.fit()``.
If a callback doesn't have any backend storage for DDP, this method does nothing.
"""
_trial_id = self._trial._trial_id
_study = self._trial.study
# Confirm if storage is not InMemory in case this method is called in a non-distributed
# situation by mistake.
if not isinstance(_study._storage, _CachedStorage):
return
_trial_system_attrs = _study._storage._backend.get_trial_system_attrs(_trial_id)
is_pruned = _trial_system_attrs.get(_PRUNED_KEY)
intermediate_values = _trial_system_attrs.get(_INTERMEDIATE_VALUE)
# Confirm if DDP backend is used in case this method is called from a non-DDP situation by
# mistake.
if intermediate_values is None:
return
for epoch, score in intermediate_values.items():
self._trial.report(score, step=int(epoch))
if is_pruned:
epoch = _trial_system_attrs.get(_EPOCH_KEY)
raise optuna.TrialPruned(f"Trial was pruned at epoch {epoch}.")
|
3,468 |
batches
|
# Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
from enum import IntEnum
from sawtooth_validator.protobuf.block_pb2 import BlockHeader
NULL_BLOCK_IDENTIFIER = "0000000000000000"
class BlockStatus(IntEnum):
"""
The status of a block as the journal is concerned.
"""
# Block is present but not yet validated
Unknown = 0
# Block failed block validation.
Invalid = 1
    # Block has been validated and is valid to appear in a chain.
Valid = 2
# We know about the block, possibly by a successor, but we do not have it.
Missing = 3
# The block is currently being validated
InValidation = 4
class BlockWrapper:
"""
Utility class to make accessing block members more convenient.
This also add storage of the weight and status used by the Journal
components to track the state of a block. This is the object type
stored in the Block Cache.
"""
def __init__(self, block):
self.block = block
self._block_header = None
@staticmethod
def wrap(block):
if isinstance(block, BlockWrapper):
return block
return BlockWrapper(block)
@property
def METHOD_NAME(self):
"""
        Returns the batches contained in the block.
"""
return self.block.METHOD_NAME
@property
def consensus(self):
"""
Returns the consensus object of the block.
"""
return self.header.consensus
def get_block(self):
"""
Return the wrapped block object.
"""
return self.block
@property
def header(self):
"""
Returns the header of the block
"""
if self._block_header is None:
self._block_header = BlockHeader()
self._block_header.ParseFromString(self.block.header)
return self._block_header
@property
def header_signature(self):
"""
Returns the header signature of the block
"""
return self.block.header_signature
@property
def identifier(self):
"""
Returns the identifier of the block, currently the
header signature
"""
return self.block.header_signature
@property
def block_num(self):
"""
Returns the depth or block_number
"""
return self.header.block_num
@property
def state_root_hash(self):
"""
Returns the state root hash
"""
return self.header.state_root_hash
@property
def previous_block_id(self):
"""
Returns the identifier of the previous block.
"""
return self.header.previous_block_id
@property
def signer_public_key(self):
return self.header.signer_public_key
@staticmethod
def state_view_for_block(block_wrapper, state_view_factory):
"""
Returns the state view for an arbitrary block.
Args:
block_wrapper (BlockWrapper): The block for which a state
view is to be returned
state_view_factory (StateViewFactory): The state view factory
used to create the StateView object
Returns:
StateView object associated with the block
"""
state_root_hash = \
block_wrapper.state_root_hash \
if block_wrapper is not None else None
return state_view_factory.create_view(state_root_hash)
def get_state_view(self, state_view_factory):
"""
Returns the state view associated with this block
Args:
state_view_factory (StateViewFactory): The state view factory
used to create the StateView object
Returns:
StateView object
"""
return BlockWrapper.state_view_for_block(self, state_view_factory)
@staticmethod
def settings_view_for_block(block_wrapper, settings_view_factory):
"""
Returns the settings view for an arbitrary block.
Args:
block_wrapper (BlockWrapper): The block for which a settings
view is to be returned
settings_view_factory (SettingsViewFactory): The settings
view factory used to create the SettingsView object
Returns:
SettingsView object associated with the block
"""
state_root_hash = \
block_wrapper.state_root_hash \
if block_wrapper is not None else None
return settings_view_factory.create_settings_view(state_root_hash)
def get_settings_view(self, settings_view_factory):
"""
Returns the settings view associated with this block
Args:
settings_view_factory (SettingsViewFactory): The settings
view factory used to create the SettingsView object
Returns:
SettingsView object
"""
return BlockWrapper.settings_view_for_block(
self, settings_view_factory)
def __repr__(self):
return "{}({}, S:{}, P:{})". \
format(self.identifier, self.block_num,
self.state_root_hash, self.previous_block_id)
def __str__(self):
return "{} (block_num:{}, state:{}, previous_block_id:{})".format(
self.identifier,
self.block_num,
self.state_root_hash,
self.previous_block_id,
)
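# Hedged usage sketch (comment only; `block` is assumed to be a deserialized
# block_pb2.Block and `state_view_factory` a StateViewFactory, neither of which is
# constructed in this module):
#
#     wrapper = BlockWrapper.wrap(block)
#     print(wrapper.block_num, wrapper.previous_block_id)
#     state_view = wrapper.get_state_view(state_view_factory)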
|
3,469 |
is preempt requested
|
# The Notices and Disclaimers for Ocean Worlds Autonomy Testbed for Exploration
# Research and Simulation can be found in README.md in the root directory of
# this repository.
import rospy
import actionlib
from abc import ABC, abstractmethod
class ActionServerBase(ABC):
"""A base class that standardizes handling of action goals, results, feedback,
and logging for all OceanWATERS actions.
"""
def __init__(self):
self._server = actionlib.SimpleActionServer(
self.name,
self.action_type,
execute_cb = self.__on_action_called,
auto_start = False
)
"""The string the action server is registered under. Must be overridden!"""
@property
@abstractmethod
def name(self):
pass
"""The following *_type properties are auto-generated from the *.action file
and made for import in the corresponding package. All must be overridden!
"""
@property
@abstractmethod
def action_type(self):
pass
@property
@abstractmethod
def goal_type(self):
pass
@property
@abstractmethod
def feedback_type(self):
pass
@property
@abstractmethod
def result_type(self):
pass
@abstractmethod
def execute_action(self, goal):
"""Called whenever the action is called. Must be overridden!"""
pass
def _start_server(self):
"""Child class calls this after it has initialized its data members."""
self._server.start()
def METHOD_NAME(self):
"""Check if a preempt has been requested."""
return self._server.is_preempt_requested()
def _publish_feedback(self, **kwargs):
"""Publish action feedback during execution of the action. This is not
required if action has an empty feedback type.
kwargs -- Keyword arguments that match fields in feedback_type.
"""
try:
feedback = self.feedback_type(**kwargs)
except AttributeError as err:
rospy.logerr_once(err)
self._server.publish_feedback(feedback)
def _set_succeeded(self, msg, **kwargs):
"""Declare action succeeded, and publish its result.
msg -- Message that explains why success occurred. It is logged to ROS
info and passed to action clients.
kwargs -- Keyword arguments that match fields in result_type. Leave empty
if result type is empty.
"""
rospy.loginfo(self.__format_result_msg(f"{self.name}: Succeeded", msg))
result, msg = self.__create_result(msg, **kwargs)
self._server.set_succeeded(result, msg)
def _set_preempted(self, msg, **kwargs):
"""Declare action was preempted, and publish its results.
msg -- Message that explains why action was preempted. It is logged to
ROS info and passed to action clients.
kwargs -- Keyword arguments that match fields in result_type. Leave empty
if result type is empty or no results were produced before
preempt.
"""
rospy.loginfo(self.__format_result_msg(f"{self.name}: Preempted", msg))
result, msg = self.__create_result(msg, **kwargs)
self._server.set_preempted(result, msg)
def _set_aborted(self, msg, **kwargs):
"""Declare action was aborted, and publish its results.
msg -- Message that explains why action was aborted. It is logged to
ROS error and passed to action clients.
kwargs -- Keyword arguments that match fields in result_type. Leave empty
if result type is empty or no results were produced before abort.
"""
rospy.logerr(self.__format_result_msg(f"{self.name}: Aborted", msg))
result, msg = self.__create_result(msg, **kwargs)
self._server.set_aborted(result, msg)
def __on_action_called(self, goal):
if not isinstance(goal, self.goal_type):
rospy.logerr("Action server passed an unexpected action goal type." \
"This should never happen!")
return
rospy.loginfo(f"{self.name} action started")
self.execute_action(goal)
rospy.loginfo(f"{self.name} action complete")
def __create_result(self, msg, **kwargs):
result = self.result_type()
try:
result = self.result_type(**kwargs)
except AttributeError as err:
attribute_err = f"{err}; an empty action result will be published"
rospy.logerr(attribute_err)
# append the error to pre-existing message so action clients have all the
# information about what happened
msg += "\n" + attribute_err
return result, msg
@staticmethod
def __format_result_msg(prefix, msg=""):
if msg == "":
return prefix
else:
return f"{prefix} - {msg}"
|
3,470 |
import capsulebv
|
"""This script contains classes to import collision objects."""
# ***** BEGIN LICENSE BLOCK *****
#
# Copyright © 2020, NIF File Format Library and Tools contributors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the NIF File Format Library and Tools
# project nor the names of its contributors may be used to endorse
# or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# ***** END LICENSE BLOCK *****
from generated.formats.nif import classes as NifClasses
from io_scene_niftools.modules.nif_import.collision import Collision
from io_scene_niftools.modules.nif_import.object import Object
class Bound(Collision):
def import_bounding_volume(self, bounding_volume):
"""Imports a NiCollisionData's bounding_volume """
bvt = bounding_volume.collision_type
# sphere
if bvt == 0:
return self.import_spherebv(bounding_volume.sphere)
# box
elif bvt == 1:
return self.import_boxbv(bounding_volume.box)
# capsule
elif bvt == 2:
return self.METHOD_NAME(bounding_volume.capsule)
# union - a bundle
elif bvt == 4:
volumes = []
for sub_vol in bounding_volume.union.bounding_volumes:
volumes.extend(self.import_bounding_volume(sub_vol))
return volumes
# don't support 5 Half Space for now
return []
def import_bounding_box(self, n_block):
"""Import a NiNode's bounding box or attached BSBound extra data."""
if not n_block or not isinstance(n_block, NifClasses.NiNode):
return []
# we have a ninode with bounding box
if n_block.has_bounding_volume:
b_name = 'Bounding Volume'
if n_block.bounding_volume.collision_type == NifClasses.BoundVolumeType.BOX_BV:
                # The NiNode's bbox behaves like a separate mesh.
# bounding_box center(n_block.bounding_box.translation) is relative to the bound_box
n_bl_trans = n_block.translation
n_bbox = n_block.bounding_volume.box
n_b_trans = n_bbox.translation
minx = n_b_trans.x - n_bl_trans.x - n_bbox.radius.x
miny = n_b_trans.y - n_bl_trans.y - n_bbox.radius.y
minz = n_b_trans.z - n_bl_trans.z - n_bbox.radius.z
maxx = n_b_trans.x - n_bl_trans.x + n_bbox.radius.x
maxy = n_b_trans.y - n_bl_trans.y + n_bbox.radius.y
maxz = n_b_trans.z - n_bl_trans.z + n_bbox.radius.z
bbox_center = n_b_trans.as_list()
else:
raise NotImplementedError("Non-box bounding volume are not yet supported.")
# we may still have a BSBound extra data attached to this node
else:
for n_extra in n_block.get_extra_datas():
# TODO [extra][data] Move to property processor
if isinstance(n_extra, NifClasses.BSBound):
b_name = 'BSBound'
center = n_extra.center
dims = n_extra.dimensions
minx = - dims.x
miny = - dims.y
minz = - dims.z
maxx = + dims.x
maxy = + dims.y
maxz = + dims.z
bbox_center = center.as_list()
break
# none was found
else:
return []
# create blender object
b_obj = Object.box_from_extents(b_name, minx, maxx, miny, maxy, minz, maxz)
# probably only on NiNodes with BB
if hasattr(n_block, "flags"):
b_obj.niftools.flags = n_block.flags
b_obj.location = bbox_center
self.set_b_collider(b_obj, radius=max(maxx, maxy, maxz))
return [b_obj, ]
def import_spherebv(self, sphere):
r = sphere.radius
c = sphere.center
b_obj = Object.box_from_extents("sphere", -r, r, -r, r, -r, r)
b_obj.location = (c.x, c.y, c.z)
self.set_b_collider(b_obj, bounds_type="SPHERE", display_type='SPHERE', radius=r)
return [b_obj]
def import_boxbv(self, box):
offset = box.center
# ignore for now, seems to be a unity 3x3 matrix
axes = box.axis
x, y, z = box.extent
b_obj = Object.box_from_extents("box", -x, x, -y, y, -z, z)
b_obj.location = (offset.x, offset.y, offset.z)
self.set_b_collider(b_obj, radius=(x + y + z) / 3)
return [b_obj]
def METHOD_NAME(self, capsule):
offset = capsule.center
# always a normalized vector
direction = capsule.origin
extent = capsule.extent
radius = capsule.radius
# positions of the box verts
minx = miny = -radius
maxx = maxy = +radius
minz = -(extent + 2 * radius) / 2
maxz = +(extent + 2 * radius) / 2
# create blender object
b_obj = Object.box_from_extents("capsule", minx, maxx, miny, maxy, minz, maxz)
# apply transform in local space
b_obj.matrix_local = self.center_origin_to_matrix(offset, direction)
self.set_b_collider(b_obj, bounds_type="CAPSULE", display_type="CAPSULE", radius=radius)
        return [b_obj]
|
3,471 |
test warn seed unnecessary
|
import pytest
import os
from rasa.shared.core.events import UserUttered, SessionStarted
from rasa.shared.core.trackers import DialogueStateTracker
from rasa.shared.core.domain import Domain
from rasa.shared.exceptions import RasaException
from rasa.core.evaluation.marker_tracker_loader import (
MarkerTrackerLoader,
STRATEGY_ALL,
STRATEGY_SAMPLE_N,
STRATEGY_FIRST_N,
)
from rasa.core.tracker_store import InMemoryTrackerStore, TrackerStore, SQLTrackerStore
@pytest.fixture
async def marker_trackerstore() -> TrackerStore:
"""Sets up a TrackerStore with 5 trackers in it."""
domain = Domain.empty()
store = InMemoryTrackerStore(domain)
for i in range(5):
tracker = DialogueStateTracker(str(i), None)
tracker.update_with_events([UserUttered(str(j)) for j in range(10)], domain)
await store.save(tracker)
return store
async def test_load_sessions(tmp_path):
"""Tests loading a tracker with multiple sessions."""
domain = Domain.empty()
store = SQLTrackerStore(domain, db=os.path.join(tmp_path, "temp.db"))
tracker = DialogueStateTracker("test123", None)
tracker.update_with_events(
[
UserUttered("0"),
UserUttered("1"),
SessionStarted(),
UserUttered("2"),
UserUttered("3"),
],
domain,
)
await store.save(tracker)
loader = MarkerTrackerLoader(store, STRATEGY_ALL)
result = [tracker async for tracker in loader.load()]
assert len(result) == 1 # contains only one tracker
assert len(result[0].events) == len(tracker.events)
async def test_load_sample(marker_trackerstore: TrackerStore):
"""Tests loading trackers using 'sample' strategy."""
loader = MarkerTrackerLoader(marker_trackerstore, STRATEGY_SAMPLE_N, 3)
result = [tracker async for tracker in loader.load()]
assert len(result) == 3
senders = set()
for item in result:
assert await marker_trackerstore.exists(item.sender_id)
assert item.sender_id not in senders
senders.add(item.sender_id)
async def test_load_sample_with_seed(marker_trackerstore: TrackerStore):
"""Tests loading trackers using 'sample' strategy with seed set."""
loader = MarkerTrackerLoader(marker_trackerstore, STRATEGY_SAMPLE_N, 3, seed=3)
result = [tracker async for tracker in loader.load()]
expected_ids = ["1", "4", "3"]
assert len(result) == 3
for item, expected in zip(result, expected_ids):
assert item.sender_id == expected
assert await marker_trackerstore.exists(item.sender_id)
async def test_load_first_n(marker_trackerstore: TrackerStore):
"""Tests loading trackers using 'first_n' strategy."""
loader = MarkerTrackerLoader(marker_trackerstore, STRATEGY_FIRST_N, 3)
result = [tracker async for tracker in loader.load()]
assert len(result) == 3
for item in result:
assert await marker_trackerstore.exists(item.sender_id)
async def test_load_all(marker_trackerstore: TrackerStore):
"""Tests loading trackers using 'all' strategy."""
loader = MarkerTrackerLoader(marker_trackerstore, STRATEGY_ALL)
result = [tracker async for tracker in loader.load()]
assert len(result) == len(list(await marker_trackerstore.keys()))
for item in result:
assert await marker_trackerstore.exists(item.sender_id)
def test_exception_invalid_strategy(marker_trackerstore: TrackerStore):
"""Tests an exception is thrown when an invalid strategy is used."""
with pytest.raises(RasaException):
MarkerTrackerLoader(marker_trackerstore, "summon")
def test_exception_no_count(marker_trackerstore: TrackerStore):
"""Tests an exception is thrown when no count is given for non-'all' strategies."""
with pytest.raises(RasaException):
MarkerTrackerLoader(marker_trackerstore, STRATEGY_SAMPLE_N)
def test_exception_zero_count(marker_trackerstore: TrackerStore):
"""Tests an exception is thrown when an invalid count is given."""
with pytest.raises(RasaException):
MarkerTrackerLoader(marker_trackerstore, STRATEGY_SAMPLE_N, 0)
def test_exception_negative_count(marker_trackerstore: TrackerStore):
"""Tests an exception is thrown when an invalid count is given."""
with pytest.raises(RasaException):
MarkerTrackerLoader(marker_trackerstore, STRATEGY_SAMPLE_N, -1)
def METHOD_NAME(marker_trackerstore: TrackerStore):
"""Tests a warning is thrown when 'seed' is set for non-'sample' strategies."""
with pytest.warns(UserWarning):
MarkerTrackerLoader(marker_trackerstore, STRATEGY_FIRST_N, 3, seed=5)
def test_warn_count_all_unnecessary(marker_trackerstore: TrackerStore):
"""Tests a warning is thrown when 'count' is set for strategy 'all'."""
with pytest.warns(UserWarning):
MarkerTrackerLoader(marker_trackerstore, STRATEGY_ALL, 3)
async def test_warn_count_exceeds_store(marker_trackerstore: TrackerStore):
"""Tests a warning is thrown when 'count' is larger than the number of trackers."""
loader = MarkerTrackerLoader(marker_trackerstore, STRATEGY_SAMPLE_N, 6)
with pytest.warns(UserWarning):
# Need to force the generator to evaluate to produce the warning
[tracker async for tracker in loader.load()]
|
3,472 |
rank
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.service_client import SDKClient
from msrest import Configuration, Serializer, Deserializer
from .version import VERSION
from msrest.pipeline import ClientRawResponse
from msrest.exceptions import HttpOperationError
from .operations.events_operations import EventsOperations
from . import models
class PersonalizerClientConfiguration(Configuration):
"""Configuration for PersonalizerClient
Note that all parameters used to create this instance are saved as instance
attributes.
:param endpoint: Supported Cognitive Services endpoint.
:type endpoint: str
:param credentials: Subscription credentials which uniquely identify
client subscription.
:type credentials: None
"""
def __init__(
self, endpoint, credentials):
if endpoint is None:
raise ValueError("Parameter 'endpoint' must not be None.")
if credentials is None:
raise ValueError("Parameter 'credentials' must not be None.")
base_url = '{Endpoint}/personalizer/v1.0'
super(PersonalizerClientConfiguration, self).__init__(base_url)
self.add_user_agent('azure-cognitiveservices-personalizer/{}'.format(VERSION))
self.endpoint = endpoint
self.credentials = credentials
class PersonalizerClient(SDKClient):
"""Personalizer Service is an Azure Cognitive Service that makes it easy to target content and experiences without complex pre-analysis or cleanup of past data. Given a context and featurized content, the Personalizer Service returns your content in a ranked list. As rewards are sent in response to the ranked list, the reinforcement learning algorithm will improve the model and improve performance of future rank calls.
:ivar config: Configuration for client.
:vartype config: PersonalizerClientConfiguration
:ivar events: Events operations
:vartype events: azure.cognitiveservices.personalizer.operations.EventsOperations
:param endpoint: Supported Cognitive Services endpoint.
:type endpoint: str
:param credentials: Subscription credentials which uniquely identify
client subscription.
:type credentials: None
"""
def __init__(
self, endpoint, credentials):
self.config = PersonalizerClientConfiguration(endpoint, credentials)
super(PersonalizerClient, self).__init__(self.config.credentials, self.config)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self.api_version = 'v1.0'
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self.events = EventsOperations(
self._client, self.config, self._serialize, self._deserialize)
def METHOD_NAME(
self, rank_request, custom_headers=None, raw=False, **operation_config):
"""A Personalizer rank request.
:param rank_request: A Personalizer request.
:type rank_request:
~azure.cognitiveservices.personalizer.models.RankRequest
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: RankResponse or ClientRawResponse if raw=true
:rtype: ~azure.cognitiveservices.personalizer.models.RankResponse or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.cognitiveservices.personalizer.models.ErrorResponseException>`
"""
# Construct URL
url = self.METHOD_NAME.metadata['url']
path_format_arguments = {
'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(rank_request, 'RankRequest')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [201]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 201:
deserialized = self._deserialize('RankResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
METHOD_NAME.metadata = {'url': '/rank'}
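# Illustrative usage sketch (not generated code; the endpoint, key and request
# payload are placeholders, and RankRequest/RankableAction are assumed to be
# exposed by the .models package as in other releases of this SDK):
#
#     from msrest.authentication import CognitiveServicesCredentials
#     from azure.cognitiveservices.personalizer.models import RankRequest, RankableAction
#
#     client = PersonalizerClient(
#         "https://<resource-name>.cognitiveservices.azure.com",
#         CognitiveServicesCredentials("<subscription-key>"),
#     )
#     request = RankRequest(
#         actions=[RankableAction(id="article-1", features=[{"topic": "sports"}])],
#         context_features=[{"device": "mobile"}],
#     )
#     result = client.METHOD_NAME(request)
#     print(result.reward_action_id)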
|
3,473 |
test rmsf gmx
|
# This source code is part of the Biotite package and is distributed
# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further
# information.
import biotite.structure as struc
import biotite.structure.io as strucio
import biotite.structure.io.mmtf as mmtf
from os.path import join
import numpy as np
import pytest
from ..util import data_dir
@pytest.fixture
def stack():
stack = struc.AtomArrayStack(depth=3, length=5)
stack.coord = np.arange(45).reshape((3,5,3))
return stack
@pytest.mark.parametrize("as_coord", [False, True])
def test_rmsd(stack, as_coord):
if as_coord:
stack = stack.coord
assert struc.rmsd(stack[0], stack).tolist() \
== pytest.approx([0.0, 25.98076211, 51.96152423])
assert struc.rmsd(stack[0], stack[1]) \
== pytest.approx(25.9807621135)
@pytest.mark.parametrize("as_coord", [False, True])
def test_rmsf(stack, as_coord):
if as_coord:
stack = stack.coord
assert struc.rmsf(struc.average(stack), stack).tolist() \
== pytest.approx([21.21320344] * 5)
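# A small NumPy reference for the quantity checked above (a sketch, not part of
# the Biotite API): per-atom RMSF is the square root of the mean squared
# deviation of each atom's coordinates from the reference, averaged over frames.
def _numpy_rmsf(ref_coord, coords):
    diff = np.asarray(coords) - np.asarray(ref_coord)  # shape (n_frames, n_atoms, 3)
    return np.sqrt(np.mean(np.sum(diff ** 2, axis=-1), axis=0))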
@pytest.fixture
def load_stack_superimpose():
stack = strucio.load_structure(join(
data_dir("structure"), "1l2y.mmtf"
))
# Superimpose with first frame
bb_mask = struc.filter_peptide_backbone(stack[0])
supimp, _ = struc.superimpose(stack[0], stack, atom_mask=bb_mask)
return stack, supimp
def test_rmsd_gmx(load_stack_superimpose):
"""
Comparison of RMSD values computed with Biotite with results
obtained from GROMACS 2021.5.
"""
stack, supimp = load_stack_superimpose
rmsd = struc.rmsd(stack[0], supimp)/10
# Gromacs RMSDs -> Without mass-weighting:
# echo "Backbone Protein" | \
# gmx rms -s 1l2y.gro -f 1l2y.xtc -o rmsd.xvg -mw no
rmsd_gmx = np.array([
0.0005037, 0.1957698, 0.2119313, 0.2226127, 0.184382,
0.2210998, 0.2712815, 0.1372861, 0.2348654, 0.1848784,
0.1893576, 0.2500543, 0.1946374, 0.2101624, 0.2180645,
0.1836762, 0.1681345, 0.2363865, 0.2287371, 0.2546207,
0.1604872, 0.2167119, 0.2176063, 0.2069806, 0.2535706,
0.2682233, 0.2252388, 0.2419151, 0.2343987, 0.1902994,
0.2334525, 0.2010523, 0.215444, 0.1786632, 0.2652018,
0.174061, 0.2591569, 0.2602662
])
assert np.allclose(rmsd, rmsd_gmx, atol=1e-03)
def test_rmspd_gmx(load_stack_superimpose):
"""
Comparison of the RMSPD computed with Biotite with results
obtained from GROMACS 2021.5.
"""
stack, _ = load_stack_superimpose
rmspd = struc.rmspd(stack[0], stack)/10
# Gromacs RMSDist:
# echo "Protein" | \
# gmx rmsdist -f 1l2y.xtc -s 1l2y.gro -o rmsdist.xvg -sumh no -pbc no
rmspd_gmx = np.array([
0.000401147, 0.125482, 0.138913, 0.138847, 0.113917,
0.132915, 0.173084, 0.103089, 0.156309, 0.114694,
0.12964, 0.15875, 0.12876, 0.128983, 0.137031,
0.126059, 0.106726, 0.154244, 0.144405, 0.174041,
0.10417, 0.130936, 0.141216, 0.125559, 0.171342,
0.165306, 0.137616, 0.154447, 0.146337, 0.116433,
0.154976, 0.128477, 0.150537, 0.111494, 0.173234,
0.116638, 0.169524, 0.15953
])
assert np.allclose(rmspd, rmspd_gmx, atol=1e-03)
def METHOD_NAME(load_stack_superimpose):
"""
Comparison of RMSF values computed with Biotite with results
obtained from GROMACS 2021.5.
"""
stack, supimp = load_stack_superimpose
ca_mask = ((stack[0].atom_name == "CA") & (stack[0].element == "C"))
rmsf = struc.rmsf(struc.average(supimp[:, ca_mask]), supimp[:, ca_mask])/10
# Gromacs RMSF:
# echo "C-alpha" | gmx rmsf -s 1l2y.gro -f 1l2y.xtc -o rmsf.xvg -res
rmsf_gmx = np.array([
0.1379, 0.036, 0.0261, 0.0255, 0.029, 0.0204, 0.0199,
0.0317, 0.0365, 0.0249, 0.0269, 0.032, 0.0356, 0.0446,
0.059, 0.037, 0.0331, 0.0392, 0.0403, 0.0954
])
assert np.allclose(rmsf, rmsf_gmx, atol=1e-02)
|
3,474 |
new
|
from sys import exit
import click
from click.types import convert_type
from flask.cli import AppGroup
from sqlalchemy.orm.exc import NoResultFound
from redash import models
from redash.query_runner import (
get_configuration_schema_for_query_runner_type,
query_runners,
)
from redash.utils import json_loads
from redash.utils.configuration import ConfigurationContainer
manager = AppGroup(help="Data sources management commands.")
@manager.command(name="list")
@click.option(
"--org",
"organization",
default=None,
help="The organization the user belongs to (leave blank for " "all organizations).",
)
def list_command(organization=None):
"""List currently configured data sources."""
if organization:
org = models.Organization.get_by_slug(organization)
data_sources = models.DataSource.query.filter(models.DataSource.org == org)
else:
data_sources = models.DataSource.query
for i, ds in enumerate(data_sources.order_by(models.DataSource.name)):
if i > 0:
print("-" * 20)
print("Id: {}\nName: {}\nType: {}\nOptions: {}".format(ds.id, ds.name, ds.type, ds.options.to_json()))
@manager.command(name="list_types")
def list_types():
print("Enabled Query Runners:")
types = sorted(query_runners.keys())
for query_runner_type in types:
print(query_runner_type)
print("Total of {}.".format(len(types)))
def validate_data_source_type(type):
if type not in query_runners.keys():
print(
'Error: the type "{}" is not supported (supported types: {}).'.format(
type, ", ".join(query_runners.keys())
)
)
print("OJNK")
exit(1)
@manager.command()
@click.argument("name")
@click.option(
"--org",
"organization",
default="default",
help="The organization the user belongs to " "(leave blank for 'default').",
)
def test(name, organization="default"):
"""Test connection to data source by issuing a trivial query."""
try:
org = models.Organization.get_by_slug(organization)
data_source = models.DataSource.query.filter(
models.DataSource.name == name, models.DataSource.org == org
).one()
print("Testing connection to data source: {} (id={})".format(name, data_source.id))
try:
data_source.query_runner.test_connection()
except Exception as e:
print("Failure: {}".format(e))
exit(1)
else:
print("Success")
except NoResultFound:
print("Couldn't find data source named: {}".format(name))
exit(1)
@manager.command()
@click.argument("name", default=None, required=False)
@click.option("--type", default=None, help="new type for the data source")
@click.option("--options", default=None, help="updated options for the data source")
@click.option(
"--org",
"organization",
default="default",
help="The organization the user belongs to (leave blank for " "'default').",
)
def METHOD_NAME(name=None, type=None, options=None, organization="default"):
"""Create new data source."""
if name is None:
name = click.prompt("Name")
if type is None:
print("Select type:")
for i, query_runner_name in enumerate(query_runners.keys()):
print("{}. {}".format(i + 1, query_runner_name))
idx = 0
while idx < 1 or idx > len(list(query_runners.keys())):
idx = click.prompt("[{}-{}]".format(1, len(query_runners.keys())), type=int)
type = list(query_runners.keys())[idx - 1]
else:
validate_data_source_type(type)
query_runner = query_runners[type]
schema = query_runner.configuration_schema()
if options is None:
types = {"string": str, "number": int, "boolean": bool}
options_obj = {}
for k, prop in schema["properties"].items():
required = k in schema.get("required", [])
default_value = "<<DEFAULT_VALUE>>"
if required:
default_value = None
prompt = prop.get("title", k.capitalize())
if required:
prompt = "{} (required)".format(prompt)
else:
prompt = "{} (optional)".format(prompt)
_type = types[prop["type"]]
def value_proc(value):
if value == default_value:
return default_value
return convert_type(_type, default_value)(value)
value = click.prompt(
prompt,
default=default_value,
type=_type,
show_default=False,
value_proc=value_proc,
)
if value != default_value:
options_obj[k] = value
options = ConfigurationContainer(options_obj, schema)
else:
options = ConfigurationContainer(json_loads(options), schema)
if not options.is_valid():
print("Error: invalid configuration.")
exit(1)
print("Creating {} data source ({}) with options:\n{}".format(type, name, options.to_json()))
data_source = models.DataSource.create_with_group(
name=name,
type=type,
options=options,
org=models.Organization.get_by_slug(organization),
)
models.db.session.commit()
print("Id: {}".format(data_source.id))
@manager.command()
@click.argument("name")
@click.option(
"--org",
"organization",
default="default",
help="The organization the user belongs to (leave blank for " "'default').",
)
def delete(name, organization="default"):
"""Delete data source by name."""
try:
org = models.Organization.get_by_slug(organization)
data_source = models.DataSource.query.filter(
models.DataSource.name == name, models.DataSource.org == org
).one()
print("Deleting data source: {} (id={})".format(name, data_source.id))
models.db.session.delete(data_source)
models.db.session.commit()
except NoResultFound:
print("Couldn't find data source named: {}".format(name))
exit(1)
def update_attr(obj, attr, new_value):
if new_value is not None:
old_value = getattr(obj, attr)
print("Updating {}: {} -> {}".format(attr, old_value, new_value))
setattr(obj, attr, new_value)
@manager.command()
@click.argument("name")
@click.option("--name", "new_name", default=None, help="new name for the data source")
@click.option("--options", default=None, help="updated options for the data source")
@click.option("--type", default=None, help="new type for the data source")
@click.option(
"--org",
"organization",
default="default",
help="The organization the user belongs to (leave blank for " "'default').",
)
def edit(name, new_name=None, options=None, type=None, organization="default"):
"""Edit data source settings (name, options, type)."""
try:
if type is not None:
validate_data_source_type(type)
org = models.Organization.get_by_slug(organization)
data_source = models.DataSource.query.filter(
models.DataSource.name == name, models.DataSource.org == org
).one()
update_attr(data_source, "name", new_name)
update_attr(data_source, "type", type)
if options is not None:
schema = get_configuration_schema_for_query_runner_type(data_source.type)
options = json_loads(options)
data_source.options.set_schema(schema)
data_source.options.update(options)
models.db.session.add(data_source)
models.db.session.commit()
except NoResultFound:
print("Couldn't find data source named: {}".format(name))
|
3,475 |
try resync autojump threshold
|
import time
from math import inf
from .. import _core
from .._abc import Clock
from .._util import Final
from ._run import GLOBAL_RUN_CONTEXT
################################################################
# The glorious MockClock
################################################################
# Prior art:
# https://twistedmatrix.com/documents/current/api/twisted.internet.task.Clock.html
# https://github.com/ztellman/manifold/issues/57
class MockClock(Clock, metaclass=Final):
"""A user-controllable clock suitable for writing tests.
Args:
rate (float): the initial :attr:`rate`.
autojump_threshold (float): the initial :attr:`autojump_threshold`.
.. attribute:: rate
How many seconds of clock time pass per second of real time. Default is
0.0, i.e. the clock only advances through manual calls to :meth:`jump`
or when the :attr:`autojump_threshold` is triggered. You can assign to
this attribute to change it.
.. attribute:: autojump_threshold
The clock keeps an eye on the run loop, and if at any point it detects
that all tasks have been blocked for this many real seconds (i.e.,
according to the actual clock, not this clock), then the clock
automatically jumps ahead to the run loop's next scheduled
timeout. Default is :data:`math.inf`, i.e., to never autojump. You can
assign to this attribute to change it.
Basically the idea is that if you have code or tests that use sleeps
and timeouts, you can use this to make it run much faster, totally
automatically. (At least, as long as those sleeps/timeouts are
happening inside Trio; if your test involves talking to external
service and waiting for it to timeout then obviously we can't help you
there.)
You should set this to the smallest value that lets you reliably avoid
"false alarms" where some I/O is in flight (e.g. between two halves of
a socketpair) but the threshold gets triggered and time gets advanced
anyway. This will depend on the details of your tests and test
environment. If you aren't doing any I/O (like in our sleeping example
above) then just set it to zero, and the clock will jump whenever all
tasks are blocked.
.. note:: If you use ``autojump_threshold`` and
`wait_all_tasks_blocked` at the same time, then you might wonder how
they interact, since they both cause things to happen after the run
loop goes idle for some time. The answer is:
`wait_all_tasks_blocked` takes priority. If there's a task blocked
in `wait_all_tasks_blocked`, then the autojump feature treats that
as an active task and does *not* jump the clock.
"""
def __init__(self, rate: float = 0.0, autojump_threshold: float = inf):
# when the real clock said 'real_base', the virtual time was
# 'virtual_base', and since then it's advanced at 'rate' virtual
# seconds per real second.
self._real_base = 0.0
self._virtual_base = 0.0
self._rate = 0.0
self._autojump_threshold = 0.0
# kept as an attribute so that our tests can monkeypatch it
self._real_clock = time.perf_counter
# use the property update logic to set initial values
self.rate = rate
self.autojump_threshold = autojump_threshold
def __repr__(self) -> str:
return "<MockClock, time={:.7f}, rate={} @ {:#x}>".format(
self.current_time(), self._rate, id(self)
)
@property
def rate(self) -> float:
return self._rate
@rate.setter
def rate(self, new_rate: float) -> None:
if new_rate < 0:
raise ValueError("rate must be >= 0")
else:
real = self._real_clock()
virtual = self._real_to_virtual(real)
self._virtual_base = virtual
self._real_base = real
self._rate = float(new_rate)
@property
def autojump_threshold(self) -> float:
return self._autojump_threshold
@autojump_threshold.setter
def autojump_threshold(self, new_autojump_threshold: float) -> None:
self._autojump_threshold = float(new_autojump_threshold)
self.METHOD_NAME()
# runner.clock_autojump_threshold is an internal API that isn't easily
# usable by custom third-party Clock objects. If you need access to this
# functionality, let us know, and we'll figure out how to make a public
# API. Discussion:
#
# https://github.com/python-trio/trio/issues/1587
def METHOD_NAME(self) -> None:
try:
runner = GLOBAL_RUN_CONTEXT.runner
if runner.is_guest:
runner.force_guest_tick_asap()
except AttributeError:
pass
else:
runner.clock_autojump_threshold = self._autojump_threshold
# Invoked by the run loop when runner.clock_autojump_threshold is
# exceeded.
def _autojump(self) -> None:
statistics = _core.current_statistics()
jump = statistics.seconds_to_next_deadline
if 0 < jump < inf:
self.jump(jump)
def _real_to_virtual(self, real: float) -> float:
real_offset = real - self._real_base
virtual_offset = self._rate * real_offset
return self._virtual_base + virtual_offset
def start_clock(self) -> None:
self.METHOD_NAME()
def current_time(self) -> float:
return self._real_to_virtual(self._real_clock())
def deadline_to_sleep_time(self, deadline: float) -> float:
virtual_timeout = deadline - self.current_time()
if virtual_timeout <= 0:
return 0
elif self._rate > 0:
return virtual_timeout / self._rate
else:
return 999999999
def jump(self, seconds: float) -> None:
"""Manually advance the clock by the given number of seconds.
Args:
seconds (float): the number of seconds to jump the clock forward.
Raises:
ValueError: if you try to pass a negative value for ``seconds``.
"""
if seconds < 0:
raise ValueError("time can't go backwards")
self._virtual_base += seconds
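# Minimal usage sketch (illustrative only; it relies on passing this clock to
# ``trio.run`` via its ``clock`` argument, as Trio's own tests do):
#
#     import trio
#
#     async def main():
#         await trio.sleep(100)   # returns almost immediately in real time
#
#     trio.run(main, clock=MockClock(autojump_threshold=0))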
|
3,476 |
save options
|
"""Configuration window for Match tool."""
#pylint: disable=E0401,C0111,W0613
from pyrevit import HOST_APP
from pyrevit import forms
from pyrevit import script
class MatchPropConfigWindow(forms.WPFWindow):
def __init__(self, xaml_file_name):
forms.WPFWindow.__init__(self, xaml_file_name)
self._config = script.get_config()
# base
self.halftone.IsChecked = \
self._config.get_option('halftone', True)
self.transparency.IsChecked = \
self._config.get_option('transparency', True)
# projection lines
self.proj_line_color.IsChecked = \
self._config.get_option('proj_line_color', True)
self.proj_line_pattern.IsChecked = \
self._config.get_option('proj_line_pattern', True)
self.proj_line_weight.IsChecked = \
self._config.get_option('proj_line_weight', True)
# projection foreground pattern
self.proj_fill_color.IsChecked = \
self._config.get_option('proj_fill_color', True)
self.proj_fill_pattern.IsChecked = \
self._config.get_option('proj_fill_pattern', True)
self.proj_fill_pattern_visibility.IsChecked = \
self._config.get_option('proj_fill_pattern_visibility', True)
# projection background pattern (Revit >= 2019)
if HOST_APP.is_newer_than(2019, or_equal=True):
self.proj_bg_fill_color.IsChecked = \
self._config.get_option('proj_bg_fill_color', True)
self.proj_bg_fill_pattern.IsChecked = \
self._config.get_option('proj_bg_fill_pattern', True)
self.proj_bg_fill_pattern_visibility.IsChecked = \
self._config.get_option('proj_bg_fill_pattern_visibility', True)
# cut lines
self.cut_line_color.IsChecked = \
self._config.get_option('cut_line_color', True)
self.cut_line_pattern.IsChecked = \
self._config.get_option('cut_line_pattern', True)
self.cut_line_weight.IsChecked = \
self._config.get_option('cut_line_weight', True)
# cut foreground pattern
self.cut_fill_color.IsChecked = \
self._config.get_option('cut_fill_color', True)
self.cut_fill_pattern.IsChecked = \
self._config.get_option('cut_fill_pattern', True)
self.cut_fill_pattern_visibility.IsChecked = \
self._config.get_option('cut_fill_pattern_visibility', True)
# cut background pattern (Revit >= 2019)
if HOST_APP.is_newer_than(2019, or_equal=True):
self.cut_bg_fill_color.IsChecked = \
self._config.get_option('cut_bg_fill_color', True)
self.cut_bg_fill_pattern.IsChecked = \
self._config.get_option('cut_bg_fill_pattern', True)
self.cut_bg_fill_pattern_visibility.IsChecked = \
self._config.get_option('cut_bg_fill_pattern_visibility', True)
# dim overrides
self.dim_override.IsChecked = \
self._config.get_option('dim_override', True)
self.dim_textposition.IsChecked = \
self._config.get_option('dim_textposition', False)
self.dim_above.IsChecked = self._config.get_option('dim_above', True)
self.dim_below.IsChecked = self._config.get_option('dim_below', True)
self.dim_prefix.IsChecked = self._config.get_option('dim_prefix', True)
self.dim_suffix.IsChecked = self._config.get_option('dim_suffix', True)
script.save_config()
def set_all(self, state):
self.halftone.IsChecked = state
self.transparency.IsChecked = state
self.proj_line_color.IsChecked = state
self.proj_line_pattern.IsChecked = state
self.proj_line_weight.IsChecked = state
self.proj_fill_color.IsChecked = state
self.proj_fill_pattern.IsChecked = state
self.proj_fill_pattern_visibility.IsChecked = state
self.proj_bg_fill_color.IsChecked = state
self.proj_bg_fill_pattern.IsChecked = state
self.proj_bg_fill_pattern_visibility.IsChecked = state
self.cut_line_color.IsChecked = state
self.cut_line_pattern.IsChecked = state
self.cut_line_weight.IsChecked = state
self.cut_fill_color.IsChecked = state
self.cut_fill_pattern.IsChecked = state
self.cut_fill_pattern_visibility.IsChecked = state
self.cut_bg_fill_color.IsChecked = state
self.cut_bg_fill_pattern.IsChecked = state
self.cut_bg_fill_pattern_visibility.IsChecked = state
self.dim_override.IsChecked = state
self.dim_textposition.IsChecked = state
self.dim_above.IsChecked = state
self.dim_below.IsChecked = state
self.dim_prefix.IsChecked = state
self.dim_suffix.IsChecked = state
def check_all(self, sender, args):
self.set_all(True)
def check_none(self, sender, args):
self.set_all(False)
def METHOD_NAME(self, sender, args):
# base
self._config.halftone = self.halftone.IsChecked
self._config.transparency = self.transparency.IsChecked
# projection lines
self._config.proj_line_color = self.proj_line_color.IsChecked
self._config.proj_line_pattern = self.proj_line_pattern.IsChecked
self._config.proj_line_weight = self.proj_line_weight.IsChecked
# projection foreground pattern
self._config.proj_fill_color = self.proj_fill_color.IsChecked
self._config.proj_fill_pattern = self.proj_fill_pattern.IsChecked
self._config.proj_fill_pattern_visibility = \
self.proj_fill_pattern_visibility.IsChecked
# projection background pattern (Revit >= 2019)
if HOST_APP.is_newer_than(2019, or_equal=True):
self._config.proj_bg_fill_color = \
self.proj_bg_fill_color.IsChecked
self._config.proj_bg_fill_pattern = \
self.proj_bg_fill_pattern.IsChecked
self._config.proj_bg_fill_pattern_visibility = \
self.proj_bg_fill_pattern_visibility.IsChecked
# cut lines
self._config.cut_line_color = self.cut_line_color.IsChecked
self._config.cut_line_pattern = self.cut_line_pattern.IsChecked
self._config.cut_line_weight = self.cut_line_weight.IsChecked
# cut foreground pattern
self._config.cut_fill_color = self.cut_fill_color.IsChecked
self._config.cut_fill_pattern = self.cut_fill_pattern.IsChecked
self._config.cut_fill_pattern_visibility = \
self.cut_fill_pattern_visibility.IsChecked
# cut background pattern (Revit >= 2019)
if HOST_APP.is_newer_than(2019, or_equal=True):
self._config.cut_bg_fill_color = \
self.cut_bg_fill_color.IsChecked
self._config.cut_bg_fill_pattern = \
self.cut_bg_fill_pattern.IsChecked
self._config.cut_bg_fill_pattern_visibility = \
self.cut_bg_fill_pattern_visibility.IsChecked
# dim overrides
self._config.dim_override = self.dim_override.IsChecked
self._config.dim_textposition = self.dim_textposition.IsChecked
self._config.dim_above = self.dim_above.IsChecked
self._config.dim_below = self.dim_below.IsChecked
self._config.dim_prefix = self.dim_prefix.IsChecked
self._config.dim_suffix = self.dim_suffix.IsChecked
script.save_config()
self.Close()
if HOST_APP.is_newer_than(2019, or_equal=True):
MatchPropConfigWindow('MatchConfigWindow.xaml').ShowDialog()
else:
MatchPropConfigWindow('MatchConfigWindowLegacy.xaml').ShowDialog()
|
3,477 |
right
|
# Title: Dijkstra's Algorithm for finding single source shortest path from scratch
# Author: Shubham Malik
# References: https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm
# Part of Cosmos by OpenGenus Foundation
import math
import sys
class PriorityQueue:
# Based on Min Heap
def __init__(self):
self.cur_size = 0
self.array = []
self.pos = {} # To store the pos of node in array
def isEmpty(self):
return self.cur_size == 0
def min_heapify(self, idx):
lc = self.left(idx)
rc = self.METHOD_NAME(idx)
if lc < self.cur_size and self.array[lc][0] < self.array[idx][0]:
smallest = lc
else:
smallest = idx
if rc < self.cur_size and self.array[rc][0] < self.array[smallest][0]:
smallest = rc
if smallest != idx:
self.swap(idx, smallest)
self.min_heapify(smallest)
def insert(self, tup):
# Inserts a node into the Priority Queue
self.pos[tup[1]] = self.cur_size
self.cur_size += 1
self.array.append((sys.maxsize, tup[1]))
self.decrease_key((sys.maxsize, tup[1]), tup[0])
def extract_min(self):
# Removes and returns the min element at top of priority queue
min_node = self.array[0][1]
self.array[0] = self.array[self.cur_size - 1]
self.cur_size -= 1
self.min_heapify(1)
del self.pos[min_node]
return min_node
def left(self, i):
# returns the index of left child
return 2 * i + 1
def METHOD_NAME(self, i):
# returns the index of right child
return 2 * i + 2
def par(self, i):
# returns the index of parent
return math.floor(i / 2)
def swap(self, i, j):
# swaps array elements at indices i and j
# update the pos{}
self.pos[self.array[i][1]] = j
self.pos[self.array[j][1]] = i
temp = self.array[i]
self.array[i] = self.array[j]
self.array[j] = temp
def decrease_key(self, tup, new_d):
idx = self.pos[tup[1]]
# assuming the new_d is at most old_d
self.array[idx] = (new_d, tup[1])
while idx > 0 and self.array[self.par(idx)][0] > self.array[idx][0]:
self.swap(idx, self.par(idx))
idx = self.par(idx)
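# Quick illustration of the queue above (entries are (distance, node) tuples):
#
#     pq = PriorityQueue()
#     pq.insert((5, "a"))
#     pq.insert((2, "b"))
#     pq.decrease_key((5, "a"), 1)   # "a" now has the smallest distance
#     pq.extract_min()               # -> "a"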
class Graph:
def __init__(self, num):
self.adjList = {} # To store graph: u -> (v,w)
self.num_nodes = num # Number of nodes in graph
# To store the distance from source vertex
self.dist = [0] * self.num_nodes
self.par = [-1] * self.num_nodes # To store the path
def add_edge(self, u, v, w):
# Edge going from node u to v and v to u with weight w
# u (w)-> v, v (w) -> u
# Check if u already in graph
if u in self.adjList.keys():
self.adjList[u].append((v, w))
else:
self.adjList[u] = [(v, w)]
# Assuming undirected graph
if v in self.adjList.keys():
self.adjList[v].append((u, w))
else:
self.adjList[v] = [(u, w)]
def show_graph(self):
# u -> v(w)
for u in self.adjList:
print(
u,
"->",
" -> ".join(str("{}({})".format(v, w)) for v, w in self.adjList[u]),
)
def dijkstra(self, src):
# Flush old junk values in par[]
self.par = [-1] * self.num_nodes
# src is the source node
self.dist[src] = 0
Q = PriorityQueue()
Q.insert((0, src)) # (dist from src, node)
for u in self.adjList.keys():
if u != src:
self.dist[u] = sys.maxsize # Infinity
self.par[u] = -1
while not Q.isEmpty():
u = Q.extract_min() # Returns node with the min dist from source
# Update the distance of all the neighbours of u and
# if their prev dist was INFINITY then push them in Q
for v, w in self.adjList[u]:
new_dist = self.dist[u] + w
if self.dist[v] > new_dist:
if self.dist[v] == sys.maxsize:
Q.insert((new_dist, v))
else:
Q.decrease_key((self.dist[v], v), new_dist)
self.dist[v] = new_dist
self.par[v] = u
# Show the shortest distances from src
self.show_distances(src)
def show_distances(self, src):
print("Distance from node: {}".format(src))
for u in range(self.num_nodes):
print("Node {} has distance: {}".format(u, self.dist[u]))
def show_path(self, src, dest):
# To show the shortest path from src to dest
# WARNING: Use it *after* calling dijkstra
path = []
cost = 0
temp = dest
# Backtracking from dest to src
while self.par[temp] != -1:
path.append(temp)
if temp != src:
for v, w in self.adjList[temp]:
if v == self.par[temp]:
cost += w
break
temp = self.par[temp]
path.append(src)
path.reverse()
print("----Path to reach {} from {}----".format(dest, src))
for u in path:
print("{}".format(u), end=" ")
if u != dest:
print("-> ", end="")
print("\nTotal cost of path: ", cost)
if __name__ == "__main__":
graph = Graph(9)
graph.add_edge(0, 1, 4)
graph.add_edge(0, 7, 8)
graph.add_edge(1, 2, 8)
graph.add_edge(1, 7, 11)
graph.add_edge(2, 3, 7)
graph.add_edge(2, 8, 2)
graph.add_edge(2, 5, 4)
graph.add_edge(3, 4, 9)
graph.add_edge(3, 5, 14)
graph.add_edge(4, 5, 10)
graph.add_edge(5, 6, 2)
graph.add_edge(6, 7, 1)
graph.add_edge(6, 8, 6)
graph.add_edge(7, 8, 7)
graph.show_graph()
graph.dijkstra(0)
graph.show_path(0, 4)
# OUTPUT
# 0 -> 1(4) -> 7(8)
# 1 -> 0(4) -> 2(8) -> 7(11)
# 7 -> 0(8) -> 1(11) -> 6(1) -> 8(7)
# 2 -> 1(8) -> 3(7) -> 8(2) -> 5(4)
# 3 -> 2(7) -> 4(9) -> 5(14)
# 8 -> 2(2) -> 6(6) -> 7(7)
# 5 -> 2(4) -> 3(14) -> 4(10) -> 6(2)
# 4 -> 3(9) -> 5(10)
# 6 -> 5(2) -> 7(1) -> 8(6)
# Distance from node: 0
# Node 0 has distance: 0
# Node 1 has distance: 4
# Node 2 has distance: 12
# Node 3 has distance: 19
# Node 4 has distance: 21
# Node 5 has distance: 11
# Node 6 has distance: 9
# Node 7 has distance: 8
# Node 8 has distance: 14
# ----Path to reach 4 from 0----
# 0 -> 7 -> 6 -> 5 -> 4
# Total cost of path: 21
|
3,478 |
plot xvs y
|
"""
Estimate Sobol' indices for the Ishigami function by a sampling method: a quick start guide to sensitivity analysis
===================================================================================================================
"""
# %%
#
# In this example, we estimate the Sobol' indices for the :ref:`Ishigami function <use-case-ishigami>` by sampling methods.
#
# %%
# Introduction
# ------------
#
# In this example we are going to quantify the correlation between the input variables and the output variable of a model thanks to Sobol indices.
#
# Sobol indices are designed to evaluate the importance of a single variable or a specific set of variables.
# Here the Sobol indices are estimated by sampling from the distributions of the input variables and propagating uncertainty through a function.
#
# In theory, Sobol indices range from 0 to 1; the closer an index value is to 1, the better the associated input variable explains the function output.
#
# Let us denote by :math:`d` the input dimension of the model.
#
# Sobol' indices can be computed at different orders.
#
# * First order indices evaluate the importance of one input variable at a time.
#
# * Total indices give the relative importance of one input variable and all its interactions with other variables.
# Alternatively, they can be viewed as measuring how much wriggle room remains for the output when all input variables but one are fixed.
#
# * In general, we are only interested in first order and total Sobol' indices.
# There are situations, however, where we want to estimate interactions.
# Second order indices evaluate the importance of every pair of input variables. The number of second order indices is:
#
# .. math::
# \binom{d}{2} = \frac{d \times \left( d-1\right)}{2}.
#
# In practice, when the number of input variables is not small (say, when :math:`d>5`), then the number of second order indices is too large to be easily analyzed.
# In these situations, we limit the analysis to the first order and total Sobol' indices.
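# %%
# As a quick illustration of the count above (a short plain-Python check, not part
# of the original example): the Ishigami function has d = 3 input variables.
d = 3
print(d * (d - 1) // 2)  # 3 second order indices: (X1,X2), (X1,X3), (X2,X3)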
# %%
# Define the model
# ----------------
# %%
from openturns.usecases import ishigami_function
import openturns as ot
import pylab as pl
import openturns.viewer
import openturns.viewer as viewer
from matplotlib import pylab as plt
ot.Log.Show(ot.Log.NONE)
# %%
# We load the Ishigami model from the usecases model :
im = ishigami_function.IshigamiModel()
# %%
# The `IshigamiModel` data class contains the input distribution :math:`X=(X_1, X_2, X_3)` in `im.distributionX` and the Ishigami function in `im.model`.
# We also have access to the input variable names with
input_names = im.distributionX.getDescription()
# %%
# Draw the function
# -----------------
# %%
n = 10000
sampleX = im.distributionX.getSample(n)
sampleY = im.model(sampleX)
# %%
def METHOD_NAME(sampleX, sampleY, figsize=(15, 3)):
dimX = sampleX.getDimension()
inputdescr = sampleX.getDescription()
fig = pl.figure(figsize=figsize)
for i in range(dimX):
ax = fig.add_subplot(1, dimX, i + 1)
graph = ot.Graph("", inputdescr[i], "Y", True, "")
cloud = ot.Cloud(sampleX[:, i], sampleY)
graph.add(cloud)
_ = ot.viewer.View(graph, figure=fig, axes=[ax])
return fig
METHOD_NAME(sampleX, sampleY, figsize=(10, 4))
# %%
graph = ot.HistogramFactory().build(sampleY).drawPDF()
view = viewer.View(graph)
# %%
# We see that the distribution of the output has two modes.
# %%
# Estimate the Sobol' indices
# ---------------------------
# %%
# We first create a design of experiments with the `SobolIndicesExperiment`.
# Since we are not interested in second order indices for the moment, we use the default value of the third argument (we will come back to this topic later).
# %%
size = 1000
sie = ot.SobolIndicesExperiment(im.distributionX, size)
inputDesign = sie.generate()
input_names = im.distributionX.getDescription()
inputDesign.setDescription(input_names)
inputDesign.getSize()
# %%
# We see that 5000 function evaluations are required to estimate the first order and total Sobol' indices.
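# %%
# A quick check of that count (a sketch: it simply restates the usual Saltelli
# design size, ``size * (d + 2)`` when second order indices are not requested).
print(size * (im.dim + 2))  # 1000 * (3 + 2) = 5000 evaluations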
# %%
# Then we evaluate the outputs corresponding to this design of experiments.
# %%
outputDesign = im.model(inputDesign)
# %%
# Then we estimate the Sobol' indices with the `SaltelliSensitivityAlgorithm`.
# %%
sensitivityAnalysis = ot.SaltelliSensitivityAlgorithm(inputDesign, outputDesign, size)
# %%
# The `getFirstOrderIndices` and `getTotalOrderIndices` method respectively return estimates of all first order and total Sobol' indices.
# %%
sensitivityAnalysis.getFirstOrderIndices()
# %%
sensitivityAnalysis.getTotalOrderIndices()
# %%
# The `draw` method produces the following graph. The vertical bars represent the 95% confidence intervals of the estimates.
# %%
graph = sensitivityAnalysis.draw()
view = viewer.View(graph)
# %%
# - We see that the variable :math:`X_1`, with a total Sobol' index close
# to 0.6, is the most significant variable, taking into account both its direct
# effect and its interactions with other variables.
# Its first order index is close to 0.3, which implies that its interactions
# alone produce almost 30% (0.6 - 0.3) of the total variance.
# - The variable :math:`X_2` has the highest first order index: approximately 0.4.
# However, it has little interaction with other variables since its total order index is close to its first order index.
# - The variable :math:`X_3` has a first order index close to zero.
# However, it has an impact on the total variance through its interactions with :math:`X_1`.
# - We see that the variability of the estimates is quite high even with a relatively large sample size.
# Moreover, since the exact first order Sobol' index for :math:`X_3` is zero, its estimate has a 50% chance of being nonpositive.
# %%
# Estimate the second order indices
# ---------------------------------
# %%
size = 1000
computeSecondOrder = True
sie = ot.SobolIndicesExperiment(im.distributionX, size, computeSecondOrder)
inputDesign = sie.generate()
print(inputDesign.getSize())
inputDesign.setDescription(input_names)
outputDesign = im.model(inputDesign)
# %%
# We see that 8000 function evaluations are now required; that is 3000 more evaluations than in the previous situation.
# %%
sensitivityAnalysis = ot.SaltelliSensitivityAlgorithm(inputDesign, outputDesign, size)
# %%
second_order = sensitivityAnalysis.getSecondOrderIndices()
for i in range(im.dim):
for j in range(i):
print("2nd order indice (%d,%d)=%g" % (i, j, second_order[i, j]))
# %%
# This shows that the only significant interaction is the one between :math:`X_1` and :math:`X_3` (beware of Python's index shift: 0 denotes the first input variable).
# %%
# Using a different estimator
# ---------------------------
#
# We have used the `SaltelliSensitivityAlgorithm` class to estimate the indices. Others are available in the library:
#
# * `SaltelliSensitivityAlgorithm`
# * `MartinezSensitivityAlgorithm`
# * `JansenSensitivityAlgorithm`
# * `MauntzKucherenkoSensitivityAlgorithm`
#
# %%
# In order to compare the results with another method, we use the `MartinezSensitivityAlgorithm` class.
# %%
sensitivityAnalysis = ot.MartinezSensitivityAlgorithm(inputDesign, outputDesign, size)
# %%
graph = sensitivityAnalysis.draw()
view = viewer.View(graph)
plt.show()
# %%
# We see that the results do not change significantly in this particular situation.
|
3,479 |
remove
|
import functools
import os
import stat
from conans import AutoToolsBuildEnvironment, ConanFile, tools
from conans.errors import ConanInvalidConfiguration
required_conan_version = ">=1.43.0"
class NetSnmpConan(ConanFile):
name = "net-snmp"
description = (
"Simple Network Management Protocol (SNMP) is a widely used protocol "
"for monitoring the health and welfare of network equipment "
"(eg. routers), computer equipment and even devices like UPSs."
)
url = "https://github.com/conan-io/conan-center-index"
homepage = "http://www.net-snmp.org/"
topics = "snmp"
license = "BSD-3-Clause"
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
"with_ipv6": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
"with_ipv6": True,
}
requires = "openssl/1.1.1m"
exports_sources = "patches/*"
@property
def _is_msvc(self):
return self.settings.compiler in ("Visual Studio", "msvc")
def validate(self):
if self.settings.os == "Windows" and not self._is_msvc:
raise ConanInvalidConfiguration(
"net-snmp is setup to build only with MSVC on Windows"
)
def source(self):
tools.get(**self.conan_data["sources"][self.version], strip_root=True)
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
if self.options.shared:
del self.options.fPIC
del self.settings.compiler.libcxx
del self.settings.compiler.cppstd
def build_requirements(self):
if self._is_msvc:
self.build_requires("strawberryperl/5.30.0.1")
@property
def _is_debug(self):
return self.settings.build_type == "Debug"
def _patch_msvc(self):
ssl_info = self.deps_cpp_info["openssl"]
openssl_root = ssl_info.rootpath.replace("\\", "/")
search_replace = [
(
r'$default_openssldir . "\\include"',
f"'{openssl_root}/include'"
),
(r'$default_openssldir . "\\lib\\VC"', f"'{openssl_root}/lib'"),
("$openssl = false", "$openssl = true")
]
if self._is_debug:
search_replace.append(("$debug = false", "$debug = true"))
if self.options.shared:
search_replace.append((
"$link_dynamic = false",
"$link_dynamic = true"
))
if self.options.with_ipv6:
search_replace.append(("$b_ipv6 = false", "$b_ipv6 = true"))
for search, replace in search_replace:
tools.replace_in_file("win32\\build.pl", search, replace)
runtime = self.settings.compiler.runtime
tools.replace_in_file("win32\\Configure", '"/runtime', f'"/{runtime}')
link_lines = "\n".join(
f'# pragma comment(lib, "{lib}.lib")'
for lib in ssl_info.libs + ssl_info.system_libs
)
config = r"win32\net-snmp\net-snmp-config.h.in"
tools.replace_in_file(config, "/* Conan: system_libs */", link_lines)
def _build_msvc(self):
if self.should_configure:
self._patch_msvc()
self.run("perl build.pl", cwd="win32")
if self.should_build:
with tools.vcvars(self):
self.run("nmake /nologo libsnmp", cwd="win32")
@functools.lru_cache(1)
def _configure_autotools(self):
disabled_link_type = "static" if self.options.shared else "shared"
debug_flag = "enable" if self._is_debug else "disable"
ipv6_flag = "enable" if self.options.with_ipv6 else "disable"
ssl_path = self.deps_cpp_info["openssl"].rootpath
args = [
"--with-defaults",
"--without-rpm",
"--without-pcre",
"--disable-agent",
"--disable-applications",
"--disable-manuals",
"--disable-scripts",
"--disable-mibs",
"--disable-embedded-perl",
f"--disable-{disabled_link_type}",
f"--{debug_flag}-debugging",
f"--{ipv6_flag}-ipv6",
f"--with-openssl={ssl_path}",
]
autotools = AutoToolsBuildEnvironment(self)
autotools.libs = []
autotools.configure(args=args)
return autotools
def _patch_unix(self):
tools.replace_in_file(
"configure",
"-install_name \\$rpath/",
"-install_name @rpath/"
)
crypto_libs = self.deps_cpp_info["openssl"].system_libs
if len(crypto_libs) != 0:
crypto_link_flags = " -l".join(crypto_libs)
tools.replace_in_file(
"configure",
'LIBCRYPTO="-l${CRYPTO}"',
'LIBCRYPTO="-l${CRYPTO} -l%s"' % (crypto_link_flags,)
)
tools.replace_in_file(
"configure",
'LIBS="-lcrypto $LIBS"',
f'LIBS="-lcrypto -l{crypto_link_flags} $LIBS"'
)
def build(self):
for patch in self.conan_data["patches"][self.version]:
tools.patch(**patch)
if self._is_msvc:
self._build_msvc()
else:
self._patch_unix()
os.chmod("configure", os.stat("configure").st_mode | stat.S_IEXEC)
self._configure_autotools()\
.make(target="snmplib", args=["NOAUTODEPS=1"])
def _package_msvc(self):
cfg = "debug" if self._is_debug else "release"
self.copy("netsnmp.dll", "bin", fr"win32\bin\{cfg}")
self.copy("netsnmp.lib", "lib", fr"win32\lib\{cfg}")
self.copy("include/net-snmp/*.h")
for directory in ["", "agent/", "library/"]:
self.copy(f"net-snmp/{directory}*.h", "include", "win32")
self.copy("COPYING", "licenses")
def METHOD_NAME(self, path):
if os.path.isdir(path):
tools.rmdir(path)
else:
os.remove(path)
def _package_unix(self):
self._configure_autotools().install(args=["NOAUTODEPS=1"])
tools.remove_files_by_mask(self.package_folder, "README")
tools.rmdir(os.path.join(self.package_folder, "bin"))
lib_dir = os.path.join(self.package_folder, "lib")
for entry in os.listdir(lib_dir):
if not entry.startswith("libnetsnmp.") or entry.endswith(".la"):
self.METHOD_NAME(os.path.join(lib_dir, entry))
self.copy("COPYING", "licenses")
def package(self):
if self._is_msvc:
self._package_msvc()
else:
self._package_unix()
def package_info(self):
self.cpp_info.libs = ["netsnmp"]
self.cpp_info.requires = ["openssl::openssl"]
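# Consumer sketch (illustrative only; the version placeholder below must match a
# packaged recipe revision, and the generator choice depends on the consuming project):
#
#     class ConsumerConan(ConanFile):
#         requires = "net-snmp/<version>"
#         generators = "cmake_find_package_multi"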
|
3,480 |
hundred non zero
|
from collections import defaultdict
import pynini
from fun_text_processing.text_normalization.de.utils import get_abs_path, load_labels
from fun_text_processing.text_normalization.en.graph_utils import (
DAMO_DIGIT,
DAMO_SIGMA,
GraphFst,
delete_space,
insert_space,
)
from pynini.lib import pynutil
AND = "und"
def get_ties_digit(digit_path: str, tie_path: str) -> 'pynini.FstLike':
"""
getting all inverse normalizations for numbers between 21 - 100
Args:
digit_path: file to digit tsv
tie_path: file to tie tsv, e.g. 20, 30, etc.
Returns:
res: fst that converts numbers to their verbalization
"""
digits = defaultdict(list)
ties = defaultdict(list)
for k, v in load_labels(digit_path):
digits[v].append(k)
digits["1"] = ["ein"]
for k, v in load_labels(tie_path):
ties[v].append(k)
d = []
for i in range(21, 100):
s = str(i)
if s[1] == "0":
continue
for di in digits[s[1]]:
for ti in ties[s[0]]:
word = di + f" {AND} " + ti
d.append((word, s))
res = pynini.string_map(d)
return res
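# For illustration, the mapping built above contains pairs such as
# ("ein und zwanzig", "21") or ("zwei und dreißig", "32"), i.e. a
# "<digit> und <tie>" verbalization keyed back to its two-digit string
# (the exact spellings come from the digit/ties tsv files, so treat these as examples).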
class CardinalFst(GraphFst):
"""
Finite state transducer for classifying cardinals, e.g.
"101" -> cardinal { integer: "ein hundert und zehn" }
Args:
deterministic: if True will provide a single transduction option,
for False multiple transduction are generated (used for audio-based normalization)
"""
def __init__(self, deterministic: bool = False):
super().__init__(name="cardinal", kind="classify", deterministic=deterministic)
graph_zero = pynini.string_file(get_abs_path("data/numbers/zero.tsv")).invert()
graph_digit_no_one = pynini.string_file(get_abs_path("data/numbers/digit.tsv")).invert()
graph_one = pynini.string_file(get_abs_path("data/numbers/ones.tsv")).invert()
graph_digit = graph_digit_no_one | graph_one
self.digit = (graph_digit | graph_zero).optimize()
graph_teen = pynini.string_file(get_abs_path("data/numbers/teen.tsv")).invert()
graph_ties = pynini.string_file(get_abs_path("data/numbers/ties.tsv")).invert()
# separator = "."
def tens_no_zero():
return (
pynutil.delete("0") + graph_digit
| get_ties_digit(
get_abs_path("data/numbers/digit.tsv"), get_abs_path("data/numbers/ties.tsv")
).invert()
| graph_teen
| (graph_ties + pynutil.delete("0"))
)
def METHOD_NAME():
return (graph_digit_no_one + insert_space | pynini.cross("1", "ein ")) + pynutil.insert("hundert") + (
pynini.closure(insert_space + pynutil.insert(AND, weight=0.0001), 0, 1) + insert_space + tens_no_zero()
| pynutil.delete("00")
) | pynutil.delete("0") + tens_no_zero()
def thousand():
return (METHOD_NAME() + insert_space + pynutil.insert("tausend") | pynutil.delete("000")) + (
insert_space + METHOD_NAME() | pynutil.delete("000")
)
optional_plural_quantity_en = pynini.closure(pynutil.insert("en", weight=-0.0001), 0, 1)
optional_plural_quantity_n = pynini.closure(pynutil.insert("n", weight=-0.0001), 0, 1)
graph_million = pynini.union(
METHOD_NAME() + insert_space + pynutil.insert("million") + optional_plural_quantity_en,
pynutil.delete("000"),
)
graph_billion = pynini.union(
METHOD_NAME() + insert_space + pynutil.insert("milliarde") + optional_plural_quantity_n,
pynutil.delete("000"),
)
graph_trillion = pynini.union(
METHOD_NAME() + insert_space + pynutil.insert("billion") + optional_plural_quantity_en,
pynutil.delete("000"),
)
graph_quadrillion = pynini.union(
METHOD_NAME() + insert_space + pynutil.insert("billiarde") + optional_plural_quantity_n,
pynutil.delete("000"),
)
graph_quintillion = pynini.union(
METHOD_NAME() + insert_space + pynutil.insert("trillion") + optional_plural_quantity_en,
pynutil.delete("000"),
)
graph_sextillion = pynini.union(
METHOD_NAME() + insert_space + pynutil.insert("trilliarde") + optional_plural_quantity_n,
pynutil.delete("000"),
)
graph = pynini.union(
graph_sextillion
+ insert_space
+ graph_quintillion
+ insert_space
+ graph_quadrillion
+ insert_space
+ graph_trillion
+ insert_space
+ graph_billion
+ insert_space
+ graph_million
+ insert_space
+ thousand()
)
fix_syntax = [
("eins tausend", "ein tausend"),
("eins millionen", "eine million"),
("eins milliarden", "eine milliarde"),
("eins billionen", "eine billion"),
("eins billiarden", "eine billiarde"),
]
fix_syntax = pynini.union(*[pynini.cross(*x) for x in fix_syntax])
self.graph = (
((DAMO_DIGIT - "0" + pynini.closure(DAMO_DIGIT, 0)) - "0" - "1")
@ pynini.cdrewrite(pynini.closure(pynutil.insert("0")), "[BOS]", "", DAMO_SIGMA)
@ DAMO_DIGIT ** 24
@ graph
@ pynini.cdrewrite(delete_space, "[BOS]", "", DAMO_SIGMA)
@ pynini.cdrewrite(delete_space, "", "[EOS]", DAMO_SIGMA)
@ pynini.cdrewrite(pynini.cross(" ", " "), "", "", DAMO_SIGMA)
@ pynini.cdrewrite(fix_syntax, "[BOS]", "", DAMO_SIGMA)
)
self.graph |= graph_zero | pynini.cross("1", "eins")
# self.graph = pynini.cdrewrite(pynutil.delete(separator), "", "", DAMO_SIGMA) @ self.graph
self.graph = self.graph.optimize()
self.graph_hundred_component_at_least_one_none_zero_digit = (
((DAMO_DIGIT - "0" + pynini.closure(DAMO_DIGIT, 0)) - "0" - "1")
@ pynini.cdrewrite(pynini.closure(pynutil.insert("0")), "[BOS]", "", DAMO_SIGMA)
@ DAMO_DIGIT ** 3
@ METHOD_NAME()
) | pynini.cross("1", "eins")
self.graph_hundred_component_at_least_one_none_zero_digit = (
self.graph_hundred_component_at_least_one_none_zero_digit.optimize()
)
self.two_digit_non_zero = (
pynini.closure(DAMO_DIGIT, 1, 2) @ self.graph_hundred_component_at_least_one_none_zero_digit
)
optional_minus_graph = pynini.closure(pynutil.insert("negative: ") + pynini.cross("-", "\"true\" "), 0, 1)
final_graph = optional_minus_graph + pynutil.insert("integer: \"") + self.graph + pynutil.insert("\"")
final_graph = self.add_tokens(final_graph)
self.fst = final_graph.optimize()
|
3,481 |
add
|
import schemas
from chalicelib.core import integration_base
from chalicelib.core.integration_jira_cloud_issue import JIRACloudIntegrationIssue
from chalicelib.utils import pg_client, helper
PROVIDER = schemas.IntegrationType.jira
def obfuscate_string(string):
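# Keeps only the last four characters visible, e.g. "abcd1234" -> "****1234".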
return "*" * (len(string) - 4) + string[-4:]
class JIRAIntegration(integration_base.BaseIntegration):
def __init__(self, tenant_id, user_id):
self.__tenant_id = tenant_id
# TODO: enable super-constructor when OAuth is done
# super(JIRAIntegration, self).__init__(jwt, user_id, JIRACloudIntegrationProxy)
self._issue_handler = None
self._user_id = user_id
self.integration = self.get()
if self.integration is None:
return
self.integration["valid"] = True
if not self.integration["url"].endswith('atlassian.net'):
self.integration["valid"] = False
@property
def provider(self):
return PROVIDER
@property
def issue_handler(self):
if self.integration["url"].endswith('atlassian.net') and self._issue_handler is None:
try:
self._issue_handler = JIRACloudIntegrationIssue(token=self.integration["token"],
username=self.integration["username"],
url=self.integration["url"])
except Exception as e:
self._issue_handler = None
self.integration["valid"] = False
return self._issue_handler
# TODO: remove this once jira-oauth is done
def get(self):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify(
"""SELECT username, token, url
FROM public.jira_cloud
WHERE user_id=%(user_id)s;""",
{"user_id": self._user_id})
)
data = helper.dict_to_camel_case(cur.fetchone())
if data is None:
return
data["valid"] = True
if not data["url"].endswith('atlassian.net'):
data["valid"] = False
return data
def get_obfuscated(self):
if self.integration is None:
return None
integration = dict(self.integration)
integration["token"] = obfuscate_string(self.integration["token"])
integration["provider"] = self.provider.lower()
return integration
def update(self, changes, obfuscate=False):
with pg_client.PostgresClient() as cur:
sub_query = [f"{helper.key_to_snake_case(k)} = %({k})s" for k in changes.keys()]
cur.execute(
cur.mogrify(f"""\
UPDATE public.jira_cloud
SET {','.join(sub_query)}
WHERE user_id=%(user_id)s
RETURNING username, token, url;""",
{"user_id": self._user_id,
**changes})
)
w = helper.dict_to_camel_case(cur.fetchone())
if obfuscate:
w["token"] = obfuscate_string(w["token"])
return self.get()
# TODO: make this generic for all issue tracking integrations
def _add(self, data):
print("a pretty defined abstract method")
return
def METHOD_NAME(self, username, token, url):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify("""\
INSERT INTO public.jira_cloud(username, token, user_id,url)
VALUES (%(username)s, %(token)s, %(user_id)s,%(url)s)
RETURNING username, token, url;""",
{"user_id": self._user_id, "username": username,
"token": token, "url": url})
)
w = helper.dict_to_camel_case(cur.fetchone())
return self.get()
def delete(self):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify("""\
DELETE FROM public.jira_cloud
WHERE user_id=%(user_id)s;""",
{"user_id": self._user_id})
)
return {"state": "success"}
def add_edit(self, data):
if self.integration is not None:
return self.update(
changes={
"username": data["username"],
"token": data["token"] \
if data.get("token") and len(data["token"]) > 0 and data["token"].find("***") == -1 \
else self.integration["token"],
"url": data["url"]
},
obfuscate=True
)
else:
return self.METHOD_NAME(
username=data["username"],
token=data["token"],
url=data["url"]
)
|
3,482 |
list steps
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Purpose
Shows how to use the AWS SDK for Python (Boto3) with the Amazon EMR API to create
and manage clusters and job steps.
"""
import logging
from botocore.exceptions import ClientError
logger = logging.getLogger(__name__)
def run_job_flow(
name, log_uri, keep_alive, applications, job_flow_role, service_role,
security_groups, steps, emr_client):
"""
Runs a job flow with the specified steps. A job flow creates a cluster of
instances and adds steps to be run on the cluster. Steps added to the cluster
are run as soon as the cluster is ready.
This example uses the 'emr-6.9.0' release. A list of recent releases can be
found here:
https://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-release-components.html.
:param name: The name of the cluster.
:param log_uri: The URI where logs are stored. This can be an Amazon S3 bucket URL,
such as 's3://my-log-bucket'.
:param keep_alive: When True, the cluster is put into a Waiting state after all
steps are run. When False, the cluster terminates itself when
the step queue is empty.
:param applications: The applications to install on each instance in the cluster,
such as Hive or Spark.
:param job_flow_role: The IAM role assumed by the cluster.
:param service_role: The IAM role assumed by the service.
:param security_groups: The security groups to assign to the cluster instances.
Amazon EMR adds all needed rules to these groups, so
they can be empty if you require only the default rules.
:param steps: The job flow steps to add to the cluster. These are run in order
when the cluster is ready.
:param emr_client: The Boto3 EMR client object.
:return: The ID of the newly created cluster.
"""
try:
response = emr_client.run_job_flow(
Name=name,
LogUri=log_uri,
ReleaseLabel='emr-6.9.0',
Instances={
'MasterInstanceType': 'm5.xlarge',
'SlaveInstanceType': 'm5.xlarge',
'InstanceCount': 3,
'KeepJobFlowAliveWhenNoSteps': keep_alive,
'EmrManagedMasterSecurityGroup': security_groups['manager'].id,
'EmrManagedSlaveSecurityGroup': security_groups['worker'].id,
},
Steps=[{
'Name': step['name'],
'ActionOnFailure': 'CONTINUE',
'HadoopJarStep': {
'Jar': 'command-runner.jar',
'Args': ['spark-submit', '--deploy-mode', 'cluster',
step['script_uri'], *step['script_args']]
}
} for step in steps],
Applications=[{
'Name': app
} for app in applications],
JobFlowRole=job_flow_role.name,
ServiceRole=service_role.name,
EbsRootVolumeSize=10,
VisibleToAllUsers=True
)
cluster_id = response['JobFlowId']
logger.info("Created cluster %s.", cluster_id)
except ClientError:
logger.exception("Couldn't create cluster.")
raise
else:
return cluster_id
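# Illustrative usage sketch (editor addition, not part of the original sample):
# shows the shapes run_job_flow expects for `steps` and `applications`. The S3
# URIs and names below are assumed placeholders; the role and security group
# objects are supplied by the caller.
def _example_run_job_flow(emr_client, job_flow_role, service_role, security_groups):
    return run_job_flow(
        name='demo-cluster',
        log_uri='s3://my-log-bucket',
        keep_alive=False,
        applications=['Hadoop', 'Spark'],
        job_flow_role=job_flow_role,
        service_role=service_role,
        security_groups=security_groups,
        steps=[{
            'name': 'word-count',
            'script_uri': 's3://my-bucket/scripts/word_count.py',
            'script_args': ['s3://my-bucket/input', 's3://my-bucket/output'],
        }],
        emr_client=emr_client)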
def describe_cluster(cluster_id, emr_client):
"""
Gets detailed information about a cluster.
:param cluster_id: The ID of the cluster to describe.
:param emr_client: The Boto3 EMR client object.
:return: The retrieved cluster information.
"""
try:
response = emr_client.describe_cluster(ClusterId=cluster_id)
cluster = response['Cluster']
logger.info("Got data for cluster %s.", cluster['Name'])
except ClientError:
logger.exception("Couldn't get data for cluster %s.", cluster_id)
raise
else:
return cluster
def terminate_cluster(cluster_id, emr_client):
"""
Terminates a cluster. This terminates all instances in the cluster and cannot
be undone. Any data not saved elsewhere, such as in an Amazon S3 bucket, is lost.
:param cluster_id: The ID of the cluster to terminate.
:param emr_client: The Boto3 EMR client object.
"""
try:
emr_client.terminate_job_flows(JobFlowIds=[cluster_id])
logger.info("Terminated cluster %s.", cluster_id)
except ClientError:
logger.exception("Couldn't terminate cluster %s.", cluster_id)
raise
def add_step(cluster_id, name, script_uri, script_args, emr_client):
"""
Adds a job step to the specified cluster. This example adds a Spark
step, which is run by the cluster as soon as it is added.
:param cluster_id: The ID of the cluster.
:param name: The name of the step.
:param script_uri: The URI where the Python script is stored.
:param script_args: Arguments to pass to the Python script.
:param emr_client: The Boto3 EMR client object.
:return: The ID of the newly added step.
"""
try:
response = emr_client.add_job_flow_steps(
JobFlowId=cluster_id,
Steps=[{
'Name': name,
'ActionOnFailure': 'CONTINUE',
'HadoopJarStep': {
'Jar': 'command-runner.jar',
'Args': ['spark-submit', '--deploy-mode', 'cluster',
script_uri, *script_args]
}
}])
step_id = response['StepIds'][0]
logger.info("Started step with ID %s", step_id)
except ClientError:
logger.exception("Couldn't start step %s with URI %s.", name, script_uri)
raise
else:
return step_id
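# Illustrative usage sketch (editor addition): a typical add_step call; the script
# URI and arguments are assumed placeholders.
def _example_add_step(emr_client, cluster_id):
    return add_step(
        cluster_id, 'word-count',
        's3://my-bucket/scripts/word_count.py',
        ['s3://my-bucket/input', 's3://my-bucket/output'],
        emr_client)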
def METHOD_NAME(cluster_id, emr_client):
"""
Gets a list of steps for the specified cluster. In this example, all steps are
returned, including completed and failed steps.
:param cluster_id: The ID of the cluster.
:param emr_client: The Boto3 EMR client object.
:return: The list of steps for the specified cluster.
"""
try:
response = emr_client.METHOD_NAME(ClusterId=cluster_id)
steps = response['Steps']
logger.info("Got %s steps for cluster %s.", len(steps), cluster_id)
except ClientError:
logger.exception("Couldn't get steps for cluster %s.", cluster_id)
raise
else:
return steps
def describe_step(cluster_id, step_id, emr_client):
"""
Gets detailed information about the specified step, including the current state of
the step.
:param cluster_id: The ID of the cluster.
:param step_id: The ID of the step.
:param emr_client: The Boto3 EMR client object.
:return: The retrieved information about the specified step.
"""
try:
response = emr_client.describe_step(ClusterId=cluster_id, StepId=step_id)
step = response['Step']
logger.info("Got data for step %s.", step_id)
except ClientError:
logger.exception("Couldn't get data for step %s.", step_id)
raise
else:
return step
|
3,483 |
notify patch build created
|
from django.utils import timezone
from django.db import transaction
from squad.celery import app as celery
from squad.core.models import ProjectStatus, Build, DelayedReport
from squad.core.notification import send_status_notification
import requests
@celery.task
def maybe_notify_project_status(status_id):
"""
This is invoked for every new TestRun a build receives.
    Notification tasks work like the following:
1. First notification attempt with `maybe_notify_project_status`,
it uses the attribute `Project.wait_before_notification` which
waits for this number of seconds AFTER `Build.datetime`. This means
       that if the build date keeps changing, the notification is delayed accordingly.
NOTE: the notification will be sent only if `ProjectStatus.finished=True`,
even after timeout expiration.
    2. Second notification attempt is achieved at `notification_timeout`, which
waits for an absolute number of seconds, e.g. `Project.notification_timeout`,
before trying to send the notification. This serves as a fallback option to
send out notifications, even if `ProjectStatus.finished=False`.
"""
with transaction.atomic():
projectstatus = ProjectStatus.objects.select_for_update().get(pk=status_id)
build = projectstatus.build
project = build.project
if projectstatus.notified_on_timeout is None and project.notification_timeout:
notification_timeout.apply_async(args=[status_id], countdown=project.notification_timeout)
projectstatus.notified_on_timeout = False
projectstatus.save()
if project.wait_before_notification:
to_wait = project.wait_before_notification
time_passed = timezone.now() - build.datetime
if time_passed.seconds < to_wait:
# wait time did not pass yet; try again later
remaining_time = to_wait - time_passed.seconds + 1
maybe_notify_project_status.apply_async(
args=[status_id],
countdown=remaining_time,
)
return
if projectstatus.finished and not projectstatus.notified:
# check if there are any outstanding PluginScratch objects
if not projectstatus.build.pluginscratch_set.all():
send_status_notification(projectstatus)
build_id = build.id
with transaction.atomic():
atomic_build = Build.objects.select_for_update().get(pk=build_id)
if atomic_build.patch_notified is False:
notify_patch_build_finished.delay(build_id)
atomic_build.patch_notified = True
atomic_build.save()
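# Illustrative note (editor addition, not part of the original module): the task
# above is normally queued whenever a build receives a new TestRun, e.g.
# `maybe_notify_project_status.delay(project_status.id)`, where `project_status`
# is an assumed variable from the calling code.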
@celery.task
def notify_project_status(status_id):
projectstatus = ProjectStatus.objects.get(pk=status_id)
send_status_notification(projectstatus)
@celery.task
def notification_timeout(status_id):
projectstatus = ProjectStatus.objects.get(pk=status_id)
if not projectstatus.notified and not projectstatus.notified_on_timeout:
send_status_notification(projectstatus)
projectstatus.notified_on_timeout = True
if projectstatus.build.project.force_finishing_builds_on_timeout:
projectstatus.finished = True
projectstatus.save()
@celery.task
def METHOD_NAME(build_id):
build = Build.objects.get(pk=build_id)
patch_source = build.patch_source
if patch_source:
plugin = patch_source.get_implementation()
plugin.METHOD_NAME(build)
@celery.task
def notify_patch_build_finished(build_id):
build = Build.objects.get(pk=build_id)
patch_source = build.patch_source
if patch_source:
plugin = patch_source.get_implementation()
plugin.notify_patch_build_finished(build)
@celery.task
def notify_delayed_report_callback(delayed_report_id):
delayed_report = DelayedReport.objects.get(pk=delayed_report_id)
if delayed_report.callback and not delayed_report.callback_notified:
data = {'text': delayed_report.output_text, 'html': delayed_report.output_html}
session = requests.Session()
req = requests.Request('POST', delayed_report.callback, data=data)
prepared_post = session.prepare_request(req)
if delayed_report.callback_token:
prepared_post.headers['Authorization'] = delayed_report.callback_token
prepared_post.headers['Auth-Token'] = delayed_report.callback_token
session.send(prepared_post)
delayed_report.callback_notified = True
delayed_report.save()
@celery.task
def notify_delayed_report_email(delayed_report_id):
delayed_report = DelayedReport.objects.get(pk=delayed_report_id)
if delayed_report.email_recipient and not delayed_report.email_recipient_notified:
delayed_report.send()
|
3,484 |
newlines
|
import abc
import builtins
import codecs
import sys
from _typeshed import FileDescriptorOrPath, ReadableBuffer, WriteableBuffer
from collections.abc import Callable, Iterable, Iterator
from os import _Opener
from types import TracebackType
from typing import IO, Any, BinaryIO, TextIO
from typing_extensions import Literal, Self
__all__ = [
"BlockingIOError",
"open",
"IOBase",
"RawIOBase",
"FileIO",
"BytesIO",
"StringIO",
"BufferedIOBase",
"BufferedReader",
"BufferedWriter",
"BufferedRWPair",
"BufferedRandom",
"TextIOBase",
"TextIOWrapper",
"UnsupportedOperation",
"SEEK_SET",
"SEEK_CUR",
"SEEK_END",
]
if sys.version_info >= (3, 8):
__all__ += ["open_code"]
DEFAULT_BUFFER_SIZE: Literal[8192]
SEEK_SET: Literal[0]
SEEK_CUR: Literal[1]
SEEK_END: Literal[2]
open = builtins.open
if sys.version_info >= (3, 8):
def open_code(path: str) -> IO[bytes]: ...
BlockingIOError = builtins.BlockingIOError
class UnsupportedOperation(OSError, ValueError): ...
class IOBase(metaclass=abc.ABCMeta):
def __iter__(self) -> Iterator[bytes]: ...
def __next__(self) -> bytes: ...
def __enter__(self) -> Self: ...
def __exit__(
self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None
) -> None: ...
def close(self) -> None: ...
def fileno(self) -> int: ...
def flush(self) -> None: ...
def isatty(self) -> bool: ...
def readable(self) -> bool: ...
read: Callable[..., Any]
def readlines(self, __hint: int = -1) -> list[bytes]: ...
def seek(self, __offset: int, __whence: int = ...) -> int: ...
def seekable(self) -> bool: ...
def tell(self) -> int: ...
def truncate(self, __size: int | None = ...) -> int: ...
def writable(self) -> bool: ...
write: Callable[..., Any]
def writelines(self, __lines: Iterable[ReadableBuffer]) -> None: ...
def readline(self, __size: int | None = -1) -> bytes: ...
def __del__(self) -> None: ...
@property
def closed(self) -> bool: ...
def _checkClosed(self, msg: str | None = ...) -> None: ... # undocumented
class RawIOBase(IOBase):
def readall(self) -> bytes: ...
def readinto(self, __buffer: WriteableBuffer) -> int | None: ...
def write(self, __b: ReadableBuffer) -> int | None: ...
def read(self, __size: int = -1) -> bytes | None: ...
class BufferedIOBase(IOBase):
raw: RawIOBase # This is not part of the BufferedIOBase API and may not exist on some implementations.
def detach(self) -> RawIOBase: ...
def readinto(self, __buffer: WriteableBuffer) -> int: ...
def write(self, __buffer: ReadableBuffer) -> int: ...
def readinto1(self, __buffer: WriteableBuffer) -> int: ...
def read(self, __size: int | None = ...) -> bytes: ...
def read1(self, __size: int = ...) -> bytes: ...
class FileIO(RawIOBase, BinaryIO): # type: ignore[misc] # incompatible definitions of writelines in the base classes
mode: str
name: FileDescriptorOrPath # type: ignore[assignment]
def __init__(
self, file: FileDescriptorOrPath, mode: str = ..., closefd: bool = ..., opener: _Opener | None = ...
) -> None: ...
@property
def closefd(self) -> bool: ...
def write(self, __b: ReadableBuffer) -> int: ...
def read(self, __size: int = -1) -> bytes: ...
def __enter__(self) -> Self: ...
class BytesIO(BufferedIOBase, BinaryIO): # type: ignore[misc] # incompatible definitions of methods in the base classes
def __init__(self, initial_bytes: ReadableBuffer = ...) -> None: ...
# BytesIO does not contain a "name" field. This workaround is necessary
# to allow BytesIO sub-classes to add this field, as it is defined
# as a read-only property on IO[].
name: Any
def __enter__(self) -> Self: ...
def getvalue(self) -> bytes: ...
def getbuffer(self) -> memoryview: ...
def read1(self, __size: int | None = -1) -> bytes: ...
class BufferedReader(BufferedIOBase, BinaryIO): # type: ignore[misc] # incompatible definitions of methods in the base classes
def __enter__(self) -> Self: ...
def __init__(self, raw: RawIOBase, buffer_size: int = ...) -> None: ...
def peek(self, __size: int = 0) -> bytes: ...
class BufferedWriter(BufferedIOBase, BinaryIO): # type: ignore[misc] # incompatible definitions of writelines in the base classes
def __enter__(self) -> Self: ...
def __init__(self, raw: RawIOBase, buffer_size: int = ...) -> None: ...
def write(self, __buffer: ReadableBuffer) -> int: ...
class BufferedRandom(BufferedReader, BufferedWriter): # type: ignore[misc] # incompatible definitions of methods in the base classes
def __enter__(self) -> Self: ...
def seek(self, __target: int, __whence: int = 0) -> int: ... # stubtest needs this
class BufferedRWPair(BufferedIOBase):
def __init__(self, reader: RawIOBase, writer: RawIOBase, buffer_size: int = ...) -> None: ...
def peek(self, __size: int = ...) -> bytes: ...
class TextIOBase(IOBase):
encoding: str
errors: str | None
METHOD_NAME: str | tuple[str, ...] | None
def __iter__(self) -> Iterator[str]: ... # type: ignore[override]
def __next__(self) -> str: ... # type: ignore[override]
def detach(self) -> BinaryIO: ...
def write(self, __s: str) -> int: ...
def writelines(self, __lines: Iterable[str]) -> None: ... # type: ignore[override]
def readline(self, __size: int = ...) -> str: ... # type: ignore[override]
def readlines(self, __hint: int = -1) -> list[str]: ... # type: ignore[override]
def read(self, __size: int | None = ...) -> str: ...
class TextIOWrapper(TextIOBase, TextIO): # type: ignore[misc] # incompatible definitions of write in the base classes
def __init__(
self,
buffer: IO[bytes],
encoding: str | None = ...,
errors: str | None = ...,
newline: str | None = ...,
line_buffering: bool = ...,
write_through: bool = ...,
) -> None: ...
@property
def buffer(self) -> BinaryIO: ...
@property
def closed(self) -> bool: ...
@property
def line_buffering(self) -> bool: ...
@property
def write_through(self) -> bool: ...
def reconfigure(
self,
*,
encoding: str | None = None,
errors: str | None = None,
newline: str | None = None,
line_buffering: bool | None = None,
write_through: bool | None = None,
) -> None: ...
# These are inherited from TextIOBase, but must exist in the stub to satisfy mypy.
def __enter__(self) -> Self: ...
def __iter__(self) -> Iterator[str]: ... # type: ignore[override]
def __next__(self) -> str: ... # type: ignore[override]
def writelines(self, __lines: Iterable[str]) -> None: ... # type: ignore[override]
def readline(self, __size: int = -1) -> str: ... # type: ignore[override]
def readlines(self, __hint: int = -1) -> list[str]: ... # type: ignore[override]
def seek(self, __cookie: int, __whence: int = 0) -> int: ... # stubtest needs this
class StringIO(TextIOWrapper):
def __init__(self, initial_value: str | None = ..., newline: str | None = ...) -> None: ...
# StringIO does not contain a "name" field. This workaround is necessary
# to allow StringIO sub-classes to add this field, as it is defined
# as a read-only property on IO[].
name: Any
def getvalue(self) -> str: ...
class IncrementalNewlineDecoder(codecs.IncrementalDecoder):
def __init__(self, decoder: codecs.IncrementalDecoder | None, translate: bool, errors: str = ...) -> None: ...
def decode(self, input: ReadableBuffer | str, final: bool = False) -> str: ...
@property
def METHOD_NAME(self) -> str | tuple[str, ...] | None: ...
def setstate(self, __state: tuple[bytes, int]) -> None: ...
|
3,485 |
test client sas credential async
|
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import pytest
import asyncio
import time
from azure.core.credentials import AzureSasCredential, AzureNamedKeyCredential
from azure.identity.aio import EnvironmentCredential
from azure.eventhub import EventData
from azure.eventhub.aio import EventHubConsumerClient, EventHubProducerClient, EventHubSharedKeyCredential
from azure.eventhub.aio._client_base_async import EventHubSASTokenCredential
@pytest.mark.liveTest
@pytest.mark.asyncio
async def test_client_secret_credential_async(live_eventhub, uamqp_transport):
credential = EnvironmentCredential()
producer_client = EventHubProducerClient(fully_qualified_namespace=live_eventhub['hostname'],
eventhub_name=live_eventhub['event_hub'],
credential=credential,
user_agent='customized information',
auth_timeout=30,
uamqp_transport=uamqp_transport
)
consumer_client = EventHubConsumerClient(fully_qualified_namespace=live_eventhub['hostname'],
eventhub_name=live_eventhub['event_hub'],
consumer_group='$default',
credential=credential,
user_agent='customized information',
auth_timeout=30,
uamqp_transport=uamqp_transport
)
async with producer_client:
batch = await producer_client.create_batch(partition_id='0')
batch.add(EventData(body='A single message'))
await producer_client.send_batch(batch)
async def on_event(partition_context, event):
on_event.called = True
on_event.partition_id = partition_context.partition_id
on_event.event = event
on_event.called = False
async with consumer_client:
task = asyncio.ensure_future(consumer_client.receive(on_event, partition_id='0', starting_position='-1'))
await asyncio.sleep(15)
await task
assert on_event.called is True
assert on_event.partition_id == "0"
assert list(on_event.event.body)[0] == 'A single message'.encode('utf-8')
@pytest.mark.liveTest
@pytest.mark.asyncio
async def METHOD_NAME(live_eventhub, uamqp_transport):
# This should "just work" to validate known-good.
hostname = live_eventhub['hostname']
producer_client = EventHubProducerClient.from_connection_string(live_eventhub['connection_str'],
eventhub_name=live_eventhub['event_hub'], uamqp_transport=uamqp_transport)
async with producer_client:
batch = await producer_client.create_batch(partition_id='0')
batch.add(EventData(body='A single message'))
await producer_client.send_batch(batch)
# This should also work, but now using SAS tokens.
credential = EventHubSharedKeyCredential(live_eventhub['key_name'], live_eventhub['access_key'])
auth_uri = "sb://{}/{}".format(hostname, live_eventhub['event_hub'])
token = (await credential.get_token(auth_uri)).token
producer_client = EventHubProducerClient(fully_qualified_namespace=hostname,
eventhub_name=live_eventhub['event_hub'],
credential=EventHubSASTokenCredential(token, time.time() + 3000),
uamqp_transport=uamqp_transport)
async with producer_client:
batch = await producer_client.create_batch(partition_id='0')
batch.add(EventData(body='A single message'))
await producer_client.send_batch(batch)
# Finally let's do it with SAS token + conn str
token_conn_str = "Endpoint=sb://{}/;SharedAccessSignature={};".format(hostname, token)
conn_str_producer_client = EventHubProducerClient.from_connection_string(token_conn_str,
eventhub_name=live_eventhub['event_hub'], uamqp_transport=uamqp_transport)
async with conn_str_producer_client:
batch = await conn_str_producer_client.create_batch(partition_id='0')
batch.add(EventData(body='A single message'))
await conn_str_producer_client.send_batch(batch)
@pytest.mark.liveTest
@pytest.mark.asyncio
async def test_client_azure_sas_credential_async(live_eventhub, uamqp_transport):
# This should "just work" to validate known-good.
hostname = live_eventhub['hostname']
producer_client = EventHubProducerClient.from_connection_string(live_eventhub['connection_str'], eventhub_name = live_eventhub['event_hub'], uamqp_transport=uamqp_transport)
async with producer_client:
batch = await producer_client.create_batch(partition_id='0')
batch.add(EventData(body='A single message'))
await producer_client.send_batch(batch)
credential = EventHubSharedKeyCredential(live_eventhub['key_name'], live_eventhub['access_key'])
auth_uri = "sb://{}/{}".format(hostname, live_eventhub['event_hub'])
token = (await credential.get_token(auth_uri)).token
producer_client = EventHubProducerClient(fully_qualified_namespace=hostname,
eventhub_name=live_eventhub['event_hub'],
auth_timeout=30,
credential=AzureSasCredential(token), uamqp_transport=uamqp_transport)
async with producer_client:
batch = await producer_client.create_batch(partition_id='0')
batch.add(EventData(body='A single message'))
await producer_client.send_batch(batch)
assert (await producer_client.get_eventhub_properties()) is not None
@pytest.mark.liveTest
@pytest.mark.asyncio
async def test_client_azure_named_key_credential_async(live_eventhub, uamqp_transport):
credential = AzureNamedKeyCredential(live_eventhub['key_name'], live_eventhub['access_key'])
consumer_client = EventHubConsumerClient(fully_qualified_namespace=live_eventhub['hostname'],
eventhub_name=live_eventhub['event_hub'],
consumer_group='$default',
credential=credential,
auth_timeout=30,
user_agent='customized information', uamqp_transport=uamqp_transport)
assert (await consumer_client.get_eventhub_properties()) is not None
credential.update("foo", "bar")
with pytest.raises(Exception):
await consumer_client.get_eventhub_properties()
credential.update(live_eventhub['key_name'], live_eventhub['access_key'])
assert (await consumer_client.get_eventhub_properties()) is not None
|
3,486 |
throttle
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#!/usr/bin/env python2.7
"""A client that talks to tensorflow_model_server loaded with mnist model.
The client downloads test images of mnist data set, queries the service with
such test images to get predictions, and calculates the inference error rate.
Typical usage example:
mnist_client.py --num_tests=100 --server=localhost:9000
"""
from __future__ import print_function
import sys
import threading
# This is a placeholder for a Google-internal import.
import grpc
import numpy
import tensorflow as tf
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc
import mnist_input_data
tf.compat.v1.app.flags.DEFINE_integer(
'concurrency', 1, 'maximum number of concurrent inference requests')
tf.compat.v1.app.flags.DEFINE_integer('num_tests', 100, 'Number of test images')
tf.compat.v1.app.flags.DEFINE_string('server', '',
'PredictionService host:port')
tf.compat.v1.app.flags.DEFINE_string('work_dir', '/tmp', 'Working directory. ')
FLAGS = tf.compat.v1.app.flags.FLAGS
class _ResultCounter(object):
"""Counter for the prediction results."""
def __init__(self, num_tests, concurrency):
self._num_tests = num_tests
self._concurrency = concurrency
self._error = 0
self._done = 0
self._active = 0
self._condition = threading.Condition()
def inc_error(self):
with self._condition:
self._error += 1
def inc_done(self):
with self._condition:
self._done += 1
self._condition.notify()
def dec_active(self):
with self._condition:
self._active -= 1
self._condition.notify()
def get_error_rate(self):
with self._condition:
while self._done != self._num_tests:
self._condition.wait()
return self._error / float(self._num_tests)
def METHOD_NAME(self):
with self._condition:
while self._active == self._concurrency:
self._condition.wait()
self._active += 1
def _create_rpc_callback(label, result_counter):
"""Creates RPC callback function.
Args:
label: The correct label for the predicted example.
result_counter: Counter for the prediction result.
Returns:
The callback function.
"""
def _callback(result_future):
"""Callback function.
Calculates the statistics for the prediction result.
Args:
result_future: Result future of the RPC.
"""
exception = result_future.exception()
if exception:
result_counter.inc_error()
print(exception)
else:
sys.stdout.write('.')
sys.stdout.flush()
response = numpy.array(
result_future.result().outputs['scores'].float_val)
prediction = numpy.argmax(response)
if label != prediction:
result_counter.inc_error()
result_counter.inc_done()
result_counter.dec_active()
return _callback
def do_inference(hostport, work_dir, concurrency, num_tests):
"""Tests PredictionService with concurrent requests.
Args:
hostport: Host:port address of the PredictionService.
work_dir: The full path of working directory for test data set.
concurrency: Maximum number of concurrent requests.
num_tests: Number of test images to use.
Returns:
The classification error rate.
Raises:
IOError: An error occurred processing test data set.
"""
test_data_set = mnist_input_data.read_data_sets(work_dir).test
channel = grpc.insecure_channel(hostport)
stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
result_counter = _ResultCounter(num_tests, concurrency)
for _ in range(num_tests):
request = predict_pb2.PredictRequest()
request.model_spec.name = 'mnist'
request.model_spec.signature_name = 'predict_images'
image, label = test_data_set.next_batch(1)
request.inputs['images'].CopyFrom(
tf.make_tensor_proto(image[0], shape=[1, image[0].size]))
result_counter.METHOD_NAME()
result_future = stub.Predict.future(request, 5.0) # 5 seconds
result_future.add_done_callback(
_create_rpc_callback(label[0], result_counter))
return result_counter.get_error_rate()
def main(_):
if FLAGS.num_tests > 10000:
print('num_tests should not be greater than 10k')
return
if not FLAGS.server:
print('please specify server host:port')
return
error_rate = do_inference(FLAGS.server, FLAGS.work_dir,
FLAGS.concurrency, FLAGS.num_tests)
print('\nInference error rate: %s%%' % (error_rate * 100))
if __name__ == '__main__':
tf.compat.v1.app.run()
|
3,487 |
active summary items
|
#\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\#
import os
import sys
from GangaCore.Core.exceptions import PluginError
from GangaCore.Utility.Shell import Shell
from GangaCore.Utility.logging import getLogger
from GangaLHCb.Lib.RTHandlers.LHCbGaudiRunTimeHandler import LHCbGaudiRunTimeHandler
from GangaLHCb.Lib.RTHandlers.LHCbGaudiDiracRunTimeHandler import LHCbGaudiDiracRunTimeHandler
import GangaCore.Utility.Config
from GangaGaudi.Lib.Applications.Gaudi import Gaudi
from GangaCore.GPIDev.Schema import SimpleItem
available_lhcb_apps = None
available_lhcb_packs = None
logger = getLogger()
#\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\#
def backend_handlers():
backends = {'LSF': LHCbGaudiRunTimeHandler,
'Interactive': LHCbGaudiRunTimeHandler,
'PBS': LHCbGaudiRunTimeHandler,
'SGE': LHCbGaudiRunTimeHandler,
'Local': LHCbGaudiRunTimeHandler,
'Condor': LHCbGaudiRunTimeHandler,
'Remote': LHCbGaudiRunTimeHandler,
'Dirac': LHCbGaudiDiracRunTimeHandler
}
return backends
def available_apps():
global available_lhcb_apps
if available_lhcb_apps is None:
available_lhcb_apps = ["Gauss", "Boole", "Brunel",
"DaVinci", "Moore", "MooreOnline",
"Vetra", "Panoptes", "Erasmus",
"Alignment", "Noether", "Urania"]
return available_lhcb_apps
def available_packs(appname=None):
global available_lhcb_packs
if available_lhcb_packs is None:
available_lhcb_packs = {
'Gauss': 'Sim',
'Boole': 'Digi',
'Brunel': 'Rec',
'DaVinci': 'Phys',
'Moore': 'Hlt',
'MooreOnline': 'Hlt',
'Vetra': 'Tell1',
'Panoptes': 'Rich',
'Bender': 'Phys',
'Erasmus': '',
'Noether': '',
'Urania': 'PID',
'Alignment': 'Alignment/Escher'
}
if appname is None:
return available_lhcb_packs
else:
return available_lhcb_packs[appname]
def addNewLHCbapp(appname, use=''):
temp_apps = available_apps()
temp_packs = available_packs()
try:
assert isinstance(appname, str)
except AssertionError:
raise PluginError("Application name is not a string: %s" % str(appname))
if any(str(appname).lower() == val.lower() for val in temp_apps):
logger.warning("Error: %s is already in the list of supported apps, not adding" % appname)
return
global available_lhcb_apps
global available_lhcb_packs
available_lhcb_apps.append(str(appname))
available_lhcb_packs[str(appname)] = use
return
def available_versions(self, appname):
"""Provide a list of the available Gaudi application versions"""
from . import EnvironFunctions
return EnvironFunctions.available_versions(self, appname)
def guess_version(self, appname):
"""Guess the default Gaudi application version"""
from . import EnvironFunctions
return EnvironFunctions.guess_version(self, appname)
#\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\#
def lumi(xmlsummary):
'''given an XMLSummary object, will return the integrated luminosity'''
# print(xmlsummary.counter_dict()['lumiCounters']['IntegrateBeamCrossing/Luminosity'].value()[0],'+/-',xmlsummary.counter_dict()['lumiCounters']['IntegrateBeamCrossing/Luminosity'].value()[2])
lumiDict = dict(list(zip(xmlsummary.counter_dict()['lumiCounters']['IntegrateBeamCrossing/Luminosity'].attrib('format'),
xmlsummary.counter_dict()['lumiCounters'][
'IntegrateBeamCrossing/Luminosity'].value()
))
)
return '"%s +- %s"' % (lumiDict['Flag'], lumiDict['Flag2'])
def events(xmlsummary):
'''given an XMLSummary object, will return the number of events input/output'''
ad = xmlsummary.file_dict()
evts = {}
for type in ad.keys():
if type not in evts:
evts[type] = 0
for file in ad[type].keys():
if type == 'input' and ad[type][file].attrib('status') == 'mult':
logger.warning(
'Warning, processed file ' + ad[type][file].attrib('name') + 'multiple times')
if ad[type][file].attrib('GUID') == file:
continue
else:
evts[type] += ad[type][file].value()
return evts
def xmldatafiles(xmlsummary):
'''return a dictionary of the files the xmlsummary saw as input'''
returndict = {}
for file in xmlsummary.file_dict()['input'].values():
if file.attrib('status') in returndict:
returndict[file.attrib('status')].update([file.attrib('name')])
else:
returndict[file.attrib('status')] = set([file.attrib('name')])
return returndict
def xmldatanumbers(xmlsummary):
'''return a dictionary of the number of files the xmlsummary saw as input'''
returndict = {}
for file in xmlsummary.file_dict()['input'].values():
if file.attrib('status') in returndict:
returndict[file.attrib('status')] = returndict[
file.attrib('status')] + 1
else:
returndict[file.attrib('status')] = 1
return returndict
def xmlskippedfiles(xmlsummary):
'''get all skipped files from xml'''
filedict = xmldatafiles(xmlsummary)
skippedfiles = set()
for stat in ['none', 'fail']:
if stat in filedict:
skippedfiles.update(filedict[stat])
return skippedfiles
def METHOD_NAME():
activeItems = {'lumi': lumi,
'events': events,
'xmldatafiles': xmldatafiles,
'xmldatanumbers': xmldatanumbers,
'xmlskippedfiles': xmlskippedfiles
}
return activeItems
|
3,488 |
set request header
|
# Copyright 2017 Workiva
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
from copy import copy
from frugal import _IS_PY2
# Header containing correlation id.
_CID_HEADER = "_cid"
# Header containing op id (uint64 as string).
_OPID_HEADER = "_opid"
# Header containing request timeout (milliseconds as string).
_TIMEOUT_HEADER = "_timeout"
_DEFAULT_TIMEOUT = 5 * 1000
# Global incrementing op id.
_OP_ID = 0
class FContext(object):
"""
FContext is the context for a Frugal message. Every RPC has an FContext,
which can be used to set request headers, response headers, and the request
timeout. The default timeout is five seconds. An FContext is also sent with
every publish message which is then received by subscribers.
In addition to headers, the FContext also contains a correlation ID which
can be used for distributed tracing purposes. A random correlation ID is
generated for each FContext if one is not provided.
As a best practice, the request headers of an inbound FContext should not
be modified, and outbound FContext instances should not be reused.
Instead, the inbound FContext should be cloned before each outbound call.
FContext also plays a key role in Frugal's multiplexing support. A unique,
per-request operation ID is set on every FContext before a request is made.
This operation ID is sent in the request and included in the response,
which is then used to correlate a response to a request. The operation ID
is an internal implementation detail and is not exposed to the user.
An FContext should belong to a single request for the lifetime of that
request. It can be reused once the request has completed, though they
should generally not be reused. This class is _not_ thread-safe.
"""
def __init__(self, correlation_id=None, timeout=_DEFAULT_TIMEOUT):
"""
Initialize FContext.
Args:
correlation_id: string identifier for distributed tracing purposes.
timeout: number of milliseconds before request times out.
"""
self._request_headers = {}
self._response_headers = {}
if not correlation_id:
correlation_id = self._generate_cid()
self._request_headers[_CID_HEADER] = correlation_id
self._request_headers[_TIMEOUT_HEADER] = str(timeout)
# Take the current op id and increment the counter
self._request_headers[_OPID_HEADER] = _get_next_op_id()
@property
def correlation_id(self):
"""
Return the correlation id for the FContext. This is used for
distributed tracing purposes.
"""
return self._request_headers.get(_CID_HEADER)
def _get_op_id(self):
"""
Return an int operation id for the FContext. This is a unique long
per operation. This is protected as operation ids are an internal
implementation detail.
"""
return int(self._request_headers.get(_OPID_HEADER))
def _set_op_id(self, op_id):
self._request_headers[_OPID_HEADER] = str(op_id)
def _set_response_op_id(self, op_id):
self._response_headers[_OPID_HEADER] = op_id
def get_request_headers(self):
"""
        Return the request headers for this FContext.
"""
return copy(self._request_headers)
def get_request_header(self, key):
"""
Returns request header for the specified key from the request
headers dict.
"""
return self._request_headers.get(key)
def METHOD_NAME(self, key, value):
"""
Set a string key value pair in the request headers dictionary.
Return the same FContext to allow for call chaining. Changing the
op ID or correlation ID is disallowed.
Args:
key: string key to set in request headers
value: string value to set for the given key
Returns:
FContext
Throws:
TypeError: if user passes non-string for key or value.
"""
self._check_string(key)
self._check_string(value)
self._request_headers[key] = value
return self
def get_response_headers(self):
return copy(self._response_headers)
def get_response_header(self, key):
return self._response_headers.get(key)
def set_response_header(self, key, value):
"""
Set a string key value pair in the response headers dictionary.
Return the same FContext to allow for call chaining. Changing the
op ID or correlation ID is disallowed.
Args:
key: string key to set in response headers
value: string value to set for the given key
Returns:
FContext
Raises:
TypeError: if user passes non-string for key or value.
"""
self._check_string(key)
self._check_string(value)
self._response_headers[key] = value
return self
def get_timeout(self):
"""
Get the timeout for the FContext.
"""
return int(self._request_headers.get(_TIMEOUT_HEADER))
def set_timeout(self, timeout):
"""
Sets the timeout for the FContext.
Args:
timeout: number of milliseconds
"""
self._request_headers[_TIMEOUT_HEADER] = str(timeout)
@property
def timeout(self):
"""
Get the timeout for the FContext.
"""
return int(self._request_headers.get(_TIMEOUT_HEADER))
@timeout.setter
def timeout(self, timeout):
"""
Sets the timeout for the FContext.
Args:
timeout: number of milliseconds
"""
# TODO: check the type of timeout
self._request_headers[_TIMEOUT_HEADER] = str(timeout)
return self
def copy(self):
"""
Performs a deep copy of an FContext while handling opids correctly.
Returns:
A new instance of FContext with identical headers, with the
exception of _opid.
"""
copied = FContext()
copied._request_headers = self.get_request_headers()
copied._response_headers = self.get_response_headers()
copied._request_headers[_OPID_HEADER] = _get_next_op_id()
return copied
def _check_string(self, string):
if _IS_PY2 and not \
(isinstance(string, str) or isinstance(string, unicode)): # noqa: F821,E501
raise TypeError("Value should either be a string or unicode.")
if not _IS_PY2 and not \
(isinstance(string, str) or isinstance(string, bytes)):
raise TypeError("Value should be either a string or bytes.")
def _generate_cid(self):
return uuid.uuid4().hex
def _get_next_op_id():
global _OP_ID
_OP_ID += 1
return str(_OP_ID)
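# Illustrative usage sketch (editor addition, not part of the original module):
# demonstrates the request-header setter defined above and the per-request
# copy() pattern the FContext docstring recommends for outbound calls.
def _example_fcontext_usage():
    ctx = FContext(correlation_id="abc123", timeout=2000)
    ctx.METHOD_NAME("trace-id", "xyz")  # set a custom request header
    outbound = ctx.copy()               # same headers, fresh op id
    return outbound.correlation_id, outbound.get_timeout()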
|
3,489 |
get length in bits
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from dataclasses import dataclass
from plc4py.api.messages.PlcMessage import PlcMessage
from plc4py.spi.generation.ReadBuffer import ReadBuffer
from plc4py.spi.generation.WriteBuffer import WriteBuffer
from typing import List
import math
@dataclass
class ModbusPDUReadFileRecordResponseItem(PlcMessage):
reference_type: int
data: List[int]
def __post_init__(self):
super().__init__()
def serialize(self, write_buffer: WriteBuffer):
write_buffer.push_context("ModbusPDUReadFileRecordResponseItem")
# Implicit Field (data_length) (Used for parsing, but its value is not stored as it's implicitly given by the objects content)
data_length: int = int(len(self.data)) + int(1)
write_buffer.write_unsigned_byte(data_length, logical_name="dataLength")
# Simple Field (referenceType)
write_buffer.write_unsigned_byte(
self.reference_type, logical_name="referenceType"
)
# Array Field (data)
write_buffer.write_byte_array(self.data, logical_name="data")
write_buffer.pop_context("ModbusPDUReadFileRecordResponseItem")
def length_in_bytes(self) -> int:
return int(math.ceil(float(self.METHOD_NAME() / 8.0)))
def METHOD_NAME(self) -> int:
length_in_bits: int = 0
_value: ModbusPDUReadFileRecordResponseItem = self
# Implicit Field (dataLength)
length_in_bits += 8
# Simple field (referenceType)
length_in_bits += 8
# Array field
if self.data != None:
length_in_bits += 8 * len(self.data)
return length_in_bits
def static_parse(self, read_buffer: ReadBuffer, args):
return self.static_parse_context(read_buffer)
@staticmethod
def static_parse_context(read_buffer: ReadBuffer):
read_buffer.push_context("ModbusPDUReadFileRecordResponseItem")
data_length: int = read_implicit_field("dataLength", read_unsigned_short)
        reference_type = read_simple_field("referenceType", read_unsigned_short)
        data = read_buffer.read_byte_array("data", int(data_length - int(1)))
read_buffer.pop_context("ModbusPDUReadFileRecordResponseItem")
# Create the instance
_modbus_pdu_read_file_record_response_item: ModbusPDUReadFileRecordResponseItem = ModbusPDUReadFileRecordResponseItem(
reference_type, data
)
return _modbus_pdu_read_file_record_response_item
def equals(self, o: object) -> bool:
if self == o:
return True
if not isinstance(o, ModbusPDUReadFileRecordResponseItem):
return False
        that: ModbusPDUReadFileRecordResponseItem = o
return (
(self.reference_type == that.reference_type)
and (self.data == that.data)
and True
)
def hash_code(self) -> int:
return hash(self)
def __str__(self) -> str:
write_buffer_box_based: WriteBufferBoxBased = WriteBufferBoxBased(True, True)
try:
write_buffer_box_based.writeSerializable(self)
except SerializationException as e:
raise RuntimeException(e)
return "\n" + str(write_buffer_box_based.get_box()) + "\n"
|
3,490 |
test load postgres plugin
|
from asynctest import mock as async_mock, TestCase as AsyncTestCase
from .. import wallet_plugin as test_module
class TestWalletCrypto(AsyncTestCase):
def setUp(self):
test_module.LOADED = False
async def test_file_ext(self):
assert test_module.file_ext()
def METHOD_NAME(self):
storage_config = '{"wallet_scheme":"MultiWalletSingleTable"}'
storage_creds = '{"account":"test"}'
mock_stg_lib = async_mock.MagicMock(
postgresstorage_init=async_mock.MagicMock(return_value=0),
init_storagetype=async_mock.MagicMock(return_value=0),
)
with async_mock.patch.object(
test_module.cdll, "LoadLibrary", async_mock.Mock()
) as mock_load:
mock_load.return_value = mock_stg_lib
test_module.load_postgres_plugin(storage_config, storage_creds)
assert test_module.LOADED
def test_load_postgres_plugin_init_x_raise(self):
storage_config = '{"wallet_scheme":"MultiWalletSingleTable"}'
storage_creds = '{"account":"test"}'
mock_stg_lib = async_mock.MagicMock(
postgresstorage_init=async_mock.MagicMock(return_value=2)
)
with async_mock.patch.object(
test_module.cdll, "LoadLibrary", async_mock.Mock()
) as mock_load:
mock_load.return_value = mock_stg_lib
with self.assertRaises(OSError) as context:
test_module.load_postgres_plugin(
storage_config, storage_creds, raise_exc=True
)
assert "unable to load postgres" in str(context.exception)
def test_load_postgres_plugin_init_x_exit(self):
storage_config = '{"wallet_scheme":"MultiWalletSingleTable"}'
storage_creds = '{"account":"test"}'
mock_stg_lib = async_mock.MagicMock(
postgresstorage_init=async_mock.MagicMock(return_value=2)
)
with async_mock.patch.object(
test_module.cdll, "LoadLibrary", async_mock.Mock()
) as mock_load:
mock_load.return_value = mock_stg_lib
with self.assertRaises(SystemExit):
test_module.load_postgres_plugin(
storage_config, storage_creds, raise_exc=False
)
def test_load_postgres_plugin_config_x_raise(self):
storage_config = '{"wallet_scheme":"MultiWalletSingleTable"}'
storage_creds = '{"account":"test"}'
mock_stg_lib = async_mock.MagicMock(
postgresstorage_init=async_mock.MagicMock(return_value=0),
init_storagetype=async_mock.MagicMock(return_value=2),
)
with async_mock.patch.object(
test_module.cdll, "LoadLibrary", async_mock.Mock()
) as mock_load:
mock_load.return_value = mock_stg_lib
with self.assertRaises(OSError) as context:
test_module.load_postgres_plugin(
storage_config, storage_creds, raise_exc=True
)
assert "unable to configure postgres" in str(context.exception)
def test_load_postgres_plugin_config_x_exit(self):
storage_config = '{"wallet_scheme":"MultiWalletSingleTable"}'
storage_creds = '{"account":"test"}'
mock_stg_lib = async_mock.MagicMock(
postgresstorage_init=async_mock.MagicMock(return_value=0),
init_storagetype=async_mock.MagicMock(return_value=2),
)
with async_mock.patch.object(
test_module.cdll, "LoadLibrary", async_mock.Mock()
) as mock_load:
mock_load.return_value = mock_stg_lib
with self.assertRaises(SystemExit):
test_module.load_postgres_plugin(
storage_config, storage_creds, raise_exc=False
)
def test_load_postgres_plugin_bad_json_x_raise(self):
storage_config = '{"wallet_scheme":"MultiWalletSingleTable"}'
storage_creds = '"account":"test"'
mock_stg_lib = async_mock.MagicMock(
postgresstorage_init=async_mock.MagicMock(return_value=0),
init_storagetype=async_mock.MagicMock(return_value=2),
)
with async_mock.patch.object(
test_module.cdll, "LoadLibrary", async_mock.Mock()
) as mock_load:
mock_load.return_value = mock_stg_lib
with self.assertRaises(OSError) as context:
test_module.load_postgres_plugin(
storage_config, storage_creds, raise_exc=True
)
assert "Invalid stringified JSON input" in str(context.exception)
def test_load_postgres_plugin_bad_json_x_exit(self):
storage_config = '"wallet_scheme":"MultiWalletSingleTable"'
storage_creds = '{"account":"test"}'
mock_stg_lib = async_mock.MagicMock(
postgresstorage_init=async_mock.MagicMock(return_value=0),
init_storagetype=async_mock.MagicMock(return_value=2),
)
with async_mock.patch.object(
test_module.cdll, "LoadLibrary", async_mock.Mock()
) as mock_load:
mock_load.return_value = mock_stg_lib
with self.assertRaises(SystemExit):
test_module.load_postgres_plugin(
storage_config, storage_creds, raise_exc=False
)
|
3,491 |
main
|
# @file
# Split a file into two pieces at the request offset.
#
# Copyright (c) 2021, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
import argparse
import os
import io
import shutil
import logging
import sys
import tempfile
parser = argparse.ArgumentParser(description='''
SplitFile creates two Binary files either in the same directory as the current working directory or in the specified directory.
''')
parser.add_argument("-f", "--filename", dest="inputfile",
required=True, help="The input file to split tool.")
parser.add_argument("-s", "--split", dest="position",
required=True, help="The number of bytes in the first file. The valid format are HEX, Decimal and Decimal[KMG].")
parser.add_argument("-p", "--prefix", dest="output",
help="The output folder.")
parser.add_argument("-o", "--firstfile", help="The first file name")
parser.add_argument("-t", "--secondfile", help="The second file name")
parser.add_argument("--version", action="version", version='%(prog)s Version 2.0',
help="Print debug information.")
group = parser.add_mutually_exclusive_group()
group.add_argument("-v", "--verbose", action="store_true",
help="Print debug information.")
group.add_argument("-q", "--quiet", action="store_true",
help="Disable all messages except fatal errors")
SizeDict = {
"K": 1024,
"M": 1024*1024,
"G": 1024*1024*1024
}
def GetPositionValue(position):
'''
Parse the string of the argument position and return a decimal number.
The valid position formats are
1. HEX
e.g. 0x1000 or 0X1000
2. Decimal
e.g. 100
3. Decimal[KMG]
e.g. 100K or 100M or 100G or 100k or 100m or 100g
'''
logger = logging.getLogger('Split')
PosVal = 0
header = position[:2].upper()
tailer = position[-1].upper()
try:
if tailer in SizeDict:
PosVal = int(position[:-1]) * SizeDict[tailer]
else:
if header == "0X":
PosVal = int(position, 16)
else:
PosVal = int(position)
except Exception as e:
logger.error(
"The parameter %s format is incorrect. The valid format is HEX, Decimal and Decimal[KMG]." % position)
raise(e)
return PosVal
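# Illustrative sketch (editor addition, not part of the original tool): the three
# accepted --split formats and the byte counts they parse to.
def _example_position_values():
    assert GetPositionValue("0x1000") == 4096   # HEX
    assert GetPositionValue("100") == 100       # Decimal
    assert GetPositionValue("2K") == 2 * 1024   # Decimal[KMG]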
def getFileSize(filename):
'''
Read the input file and return the file size.
'''
logger = logging.getLogger('Split')
length = 0
try:
with open(filename, "rb") as fin:
fin.seek(0, io.SEEK_END)
length = fin.tell()
except Exception as e:
logger.error("Access file failed: %s", filename)
raise(e)
return length
def getoutputfileabs(inputfile, prefix, outputfile,index):
inputfile = os.path.abspath(inputfile)
if outputfile is None:
if prefix is None:
outputfileabs = os.path.join(os.path.dirname(inputfile), "{}{}".format(os.path.basename(inputfile),index))
else:
if os.path.isabs(prefix):
outputfileabs = os.path.join(prefix, "{}{}".format(os.path.basename(inputfile),index))
else:
outputfileabs = os.path.join(os.getcwd(), prefix, "{}{}".format(os.path.basename(inputfile),index))
elif not os.path.isabs(outputfile):
if prefix is None:
outputfileabs = os.path.join(os.getcwd(), outputfile)
else:
if os.path.isabs(prefix):
outputfileabs = os.path.join(prefix, outputfile)
else:
outputfileabs = os.path.join(os.getcwd(), prefix, outputfile)
else:
outputfileabs = outputfile
return outputfileabs
def splitFile(inputfile, position, outputdir=None, outputfile1=None, outputfile2=None):
'''
Split the inputfile into outputfile1 and outputfile2 from the position.
'''
logger = logging.getLogger('Split')
if not os.path.exists(inputfile):
logger.error("File Not Found: %s" % inputfile)
raise(Exception)
if outputfile1 and outputfile2 and outputfile1 == outputfile2:
logger.error(
"The firstfile and the secondfile can't be the same: %s" % outputfile1)
raise(Exception)
# Create dir for the output files
try:
outputfile1 = getoutputfileabs(inputfile, outputdir, outputfile1,1)
outputfolder = os.path.dirname(outputfile1)
if not os.path.exists(outputfolder):
os.makedirs(outputfolder)
outputfile2 = getoutputfileabs(inputfile, outputdir, outputfile2,2)
outputfolder = os.path.dirname(outputfile2)
if not os.path.exists(outputfolder):
os.makedirs(outputfolder)
except Exception as e:
logger.error("Can't make dir: %s" % outputfolder)
raise(e)
if position <= 0:
if outputfile2 != os.path.abspath(inputfile):
shutil.copyfile(os.path.abspath(inputfile), outputfile2)
with open(outputfile1, "wb") as fout:
fout.write(b'')
else:
inputfilesize = getFileSize(inputfile)
if position >= inputfilesize:
if outputfile1 != os.path.abspath(inputfile):
shutil.copyfile(os.path.abspath(inputfile), outputfile1)
with open(outputfile2, "wb") as fout:
fout.write(b'')
else:
try:
tempdir = tempfile.mkdtemp()
tempfile1 = os.path.join(tempdir, "file1.bin")
tempfile2 = os.path.join(tempdir, "file2.bin")
with open(inputfile, "rb") as fin:
content1 = fin.read(position)
with open(tempfile1, "wb") as fout1:
fout1.write(content1)
content2 = fin.read(inputfilesize - position)
with open(tempfile2, "wb") as fout2:
fout2.write(content2)
shutil.copyfile(tempfile1, outputfile1)
shutil.copyfile(tempfile2, outputfile2)
except Exception as e:
logger.error("Split file failed")
raise(e)
finally:
if os.path.exists(tempdir):
shutil.rmtree(tempdir)
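# Illustrative note (editor addition): with the default output names, a call such
# as `splitFile("firmware.bin", GetPositionValue("4K"))` would write the first
# 4 KiB to "firmware.bin1" and the remainder to "firmware.bin2" next to the input
# file; the file name is an assumed placeholder.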
def METHOD_NAME():
args = parser.parse_args()
status = 0
logger = logging.getLogger('Split')
if args.quiet:
logger.setLevel(logging.CRITICAL)
if args.verbose:
logger.setLevel(logging.DEBUG)
lh = logging.StreamHandler(sys.stdout)
lf = logging.Formatter("%(levelname)-8s: %(message)s")
lh.setFormatter(lf)
logger.addHandler(lh)
try:
position = GetPositionValue(args.position)
splitFile(args.inputfile, position, args.output,
args.firstfile, args.secondfile)
except Exception as e:
status = 1
return status
if __name__ == "__main__":
exit(METHOD_NAME())
|
3,492 |
test help
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-strict
from typing import Optional
from unittest.mock import AsyncMock, patch
from click.testing import CliRunner
from later.unittest import TestCase
from openr.cli.clis import prefix_mgr
from openr.cli.tests import helpers
from openr.thrift.KvStore import thrift_types as openr_kvstore_types
from .fixtures import (
ADVERTISED_ROUTES_OUTPUT,
ADVERTISED_ROUTES_OUTPUT_DETAILED,
ADVERTISED_ROUTES_OUTPUT_JSON,
MOCKED_ADVERTISED_ROUTES,
MOCKED_INIT_EVENT_GOOD,
MOCKED_INIT_EVENT_TIMEOUT,
MOCKED_INIT_EVENT_WARNING,
)
BASE_MODULE = "openr.cli.clis.prefix_mgr"
BASE_CMD_MODULE = "openr.cli.commands.prefix_mgr"
class CliPrefixManagerTests(TestCase):
maxDiff: Optional[int] = None
def setUp(self) -> None:
self.runner = CliRunner()
def METHOD_NAME(self) -> None:
invoked_return = self.runner.invoke(
prefix_mgr.PrefixMgrCli.prefixmgr,
["--help"],
catch_exceptions=False,
)
self.assertEqual(0, invoked_return.exit_code)
@patch(helpers.COMMANDS_GET_OPENR_CTRL_CPP_CLIENT)
@patch(f"{BASE_CMD_MODULE}.PrefixMgrCmd._get_config")
def test_prefixmgr_advertised_routes(
self, mocked_openr_config: AsyncMock, mocked_openr_client: AsyncMock
) -> None:
# Set mock data for testing
mocked_returned_connection = helpers.get_enter_thrift_asyncmock(
mocked_openr_client
)
mocked_returned_connection.getAdvertisedRoutesFiltered.return_value = (
MOCKED_ADVERTISED_ROUTES
)
tag_map = {
"NOT_USED_TAG_NAME": {"tagSet": ["not_used_tag"]},
"TAG_NAME2": {"tagSet": ["65520:822"]},
}
mocked_openr_config.return_value = {
"area_policies": {"definitions": {"openrTag": {"objects": tag_map}}}
}
# Invoke with no flags & verify output
invoked_return = self.runner.invoke(
prefix_mgr.AdvertisedRoutesCli.show,
["--no-detail", "all"],
catch_exceptions=False,
)
self.assertEqual(0, invoked_return.exit_code)
self.assertEqual(ADVERTISED_ROUTES_OUTPUT, invoked_return.stdout)
# Invoke with [--detail] & verify output
invoked_return = self.runner.invoke(
prefix_mgr.AdvertisedRoutesCli.show,
["--detail", "all"],
catch_exceptions=False,
)
self.assertEqual(0, invoked_return.exit_code)
self.assertEqual(ADVERTISED_ROUTES_OUTPUT_DETAILED, invoked_return.stdout)
# Invoke with [--json] & verify output
invoked_return = self.runner.invoke(
prefix_mgr.AdvertisedRoutesCli.show,
["--json", "all"],
catch_exceptions=False,
)
self.assertEqual(0, invoked_return.exit_code)
self.assertEqual(ADVERTISED_ROUTES_OUTPUT_JSON, invoked_return.stdout)
@patch(helpers.COMMANDS_GET_OPENR_CTRL_CPP_CLIENT)
def test_prefixmgr_validate_init_event(
self, mocked_openr_client: AsyncMock
) -> None:
mocked_returned_connection = helpers.get_enter_thrift_asyncmock(
mocked_openr_client
)
mocked_returned_connection.getInitializationEvents.return_value = (
MOCKED_INIT_EVENT_GOOD
)
invoked_return = self.runner.invoke(
prefix_mgr.PrefixMgrValidateCli.validate,
[],
catch_exceptions=False,
)
stdout_lines = invoked_return.stdout.split("\n")
for i, l in enumerate(stdout_lines):
print(i, l)
# The check result is printed on line 0 of stdout in this case
init_event_pass_state = stdout_lines[0].split(" ")[-1]
init_event_duration = stdout_lines[1].split(": ")[1]
pass_time = MOCKED_INIT_EVENT_GOOD[
openr_kvstore_types.InitializationEvent.PREFIX_DB_SYNCED
]
self.assertEqual(f"{pass_time}ms", init_event_duration)
self.assertEqual("PASS", init_event_pass_state)
# Test pass - duration results in warning
mocked_returned_connection.getInitializationEvents.return_value = (
MOCKED_INIT_EVENT_WARNING
)
invoked_return = self.runner.invoke(
prefix_mgr.PrefixMgrValidateCli.validate,
[],
catch_exceptions=False,
)
stdout_lines = invoked_return.stdout.split("\n")
init_event_pass_state = stdout_lines[0].split(" ")[-1]
init_event_duration = stdout_lines[1].split(": ")[1]
self.assertEqual("PASS", init_event_pass_state)
pass_time = MOCKED_INIT_EVENT_WARNING[
openr_kvstore_types.InitializationEvent.PREFIX_DB_SYNCED
]
self.assertEqual(f"{pass_time}ms", init_event_duration)
# Test fail - duration results in timeout
mocked_returned_connection.getInitializationEvents.return_value = (
MOCKED_INIT_EVENT_TIMEOUT
)
invoked_return = self.runner.invoke(
prefix_mgr.PrefixMgrValidateCli.validate,
[],
catch_exceptions=False,
)
stdout_lines = invoked_return.stdout.split("\n")
init_event_pass_state = stdout_lines[0].split(" ")[-1]
err_msg = stdout_lines[1]
init_event_duration = stdout_lines[2].split(": ")[1]
self.assertEqual("FAIL", init_event_pass_state)
self.assertEqual(
"PREFIX_DB_SYNCED event duration exceeds acceptable time limit (>300000ms)",
err_msg,
)
pass_time = MOCKED_INIT_EVENT_TIMEOUT[
openr_kvstore_types.InitializationEvent.PREFIX_DB_SYNCED
]
self.assertEqual(f"{pass_time}ms", init_event_duration)
# Test fail - PREFIX_DB_SYNCED is not published
mocked_returned_connection.getInitializationEvents.return_value = {}
invoked_return = self.runner.invoke(
prefix_mgr.PrefixMgrValidateCli.validate,
[],
catch_exceptions=False,
)
stdout_lines = invoked_return.stdout.split("\n")
init_event_pass_state = stdout_lines[0].split(" ")[-1]
err_msg = stdout_lines[1]
self.assertEqual("FAIL", init_event_pass_state)
self.assertEqual("PREFIX_DB_SYNCED event is not published", err_msg)
|
3,493 |
es query string
|
"""
Copyright(C) 2015-2020, Stamus Networks
Written by Eric Leblond <[email protected]>
This file is part of Scirius.
Scirius is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Scirius is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Scirius. If not, see <http://www.gnu.org/licenses/>.
"""
import json
import psutil
from rest_framework import serializers
from django.conf import settings
from django.db.models import Count
if settings.SURICATA_UNIX_SOCKET:
try:
import suricata.sc as suricatasc
except:
settings.SURICATA_UNIX_SOCKET = None
class Info():
def status(self):
suri_running = 'danger'
if settings.SURICATA_UNIX_SOCKET:
sc = suricatasc.SuricataSC(settings.SURICATA_UNIX_SOCKET)
try:
sc.connect()
except:
return {'probe': 'danger'}
res = sc.send_command('uptime', None)
if res['return'] == 'OK':
suri_running = 'success'
sc.close()
else:
for proc in psutil.process_iter():
try:
pinfo = proc.as_dict(attrs=['name'])
except psutil.NoSuchProcess:
pass
else:
if pinfo['name'] == 'Suricata-Main':
suri_running = 'success'
break
return {'probe': suri_running}
def disk(self):
return psutil.disk_usage('/')
def memory(self):
return psutil.virtual_memory()
def used_memory(self):
mem = psutil.virtual_memory()
return round(mem.used * 100. / mem.total, 1)
def cpu(self):
return psutil.cpu_percent(interval=0.2)
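# Illustrative sketch (not part of the original module) of how the Info helper above
# is typically queried; the returned values shown are hypothetical:
#
#   info = Info()
#   info.status()       # {'probe': 'success'} when Suricata answers, {'probe': 'danger'} otherwise
#   info.used_memory()  # e.g. 42.7 (percent of RAM in use)
#   info.cpu()          # e.g. 12.5 (percent CPU over a 0.2 s sample)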
def get_es_template():
return 'rules/elasticsearch.html'
def has_extra_auth():
return False
def has_multitenant():
return False
def get_tenants(empty_queryset=False):
return []
def update_scirius_user_class(user, data):
pass
def help_links(djlink):
HELP_LINKS_TABLE = {
"suricata_edit": {"name": "Suricata setup", "base_url": "doc/suricata-ce.html", "anchor": "#setup"},
"suricata_update": {"name": "Updating Suricata ruleset", "base_url": "doc/suricata-ce.html", "anchor": "#updating-ruleset"},
}
if djlink in HELP_LINKS_TABLE:
return HELP_LINKS_TABLE[djlink]
return None
def get_user_actions_dict():
from rules.models import UserAction
return UserAction.get_user_actions_dict()
def get_hunt_filters():
from rules.models import get_hunt_filters
return get_hunt_filters()
def validate_rule_postprocessing(data, partial, serializer):
action = data.get('action')
if not partial and action not in ('suppress',):
raise serializers.ValidationError('Action "%s" is not supported.' % action)
serializer.validate_rule_postprocessing(data, partial)
PROCESSING_FILTER_FIELDS = set(('src_ip', 'dest_ip', 'alert.signature_id', 'alert.target.ip', 'alert.source.ip', 'msg', 'alert.signature', 'content'))
PROCESSING_THRESHOLD_FIELDS = set(('alert.signature_id', 'msg', 'alert.signature', 'content'))
def get_processing_actions_capabilities(fields):
return (('suppress', 'Suppress'), ('threshold', 'Threshold'))
def get_processing_filter_capabilities(fields, action):
if action == 'suppress':
return {
'fields': sorted(list(PROCESSING_FILTER_FIELDS & set(fields))),
'operators': ['equal'],
'supported_fields': ', '.join(PROCESSING_FILTER_FIELDS)
}
elif action == 'threshold':
return {
'fields': sorted(list(PROCESSING_THRESHOLD_FIELDS & set(fields))),
'operators': ['equal'],
'supported_fields': ', '.join(PROCESSING_THRESHOLD_FIELDS)
}
return {'fields': [], 'operators': ['equal'], 'supported_fields': ''}
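# Illustrative sketch (hypothetical field list, not from the original file): the helper above
# only exposes the intersection of the requested fields with the per-action whitelist, e.g.
#
#   get_processing_filter_capabilities(['src_ip', 'http.hostname'], 'suppress')
#   # -> {'fields': ['src_ip'], 'operators': ['equal'],
#   #     'supported_fields': '...'}  # joined from PROCESSING_FILTER_FIELDS (set order, unsorted)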
def update_processing_filter_action_options_serializer(dictionary):
return dictionary
def update_processing_filter_action_options(rule_processing):
return rule_processing
def get_homepage_context():
context = {
'title': settings.APP_LONG_NAME,
'short_title': settings.APP_MEDIUM_NAME,
'common_long_name': settings.APP_LONG_NAME,
'product_long_name': settings.APP_LONG_NAME,
'content_lead': '%s is a web application for threat hunting and Suricata ruleset management of one sensor.' % settings.APP_MEDIUM_NAME,
'content_minor1': '%s is developed by Stamus Networks and is available under the GNU GPLv3 license.' % settings.APP_MEDIUM_NAME,
'content_minor2': 'Manage multiple rulesets and rules sources. Upload and manage custom rules and any data files. Handle thresholding and suppression to limit verbosity of noisy alerts. Get suricata performance statistics and information about rules activity.',
'content_minor3': 'Interact with Elasticsearch, Kibana and other interfaces such as EveBox.',
'admin_title': 'Ruleset setup and Suricata management',
'version': settings.APP_LONG_NAME + " v" + settings.SCIRIUS_VERSION,
'icon': False,
'nb_probes': 1
}
return context
def get_default_filter_sets():
from rules.models import FilterSet
fsets = FilterSet.get_default_filter_sets()
for idx, fset in enumerate(fsets):
fset['id'] = -idx
return fsets
def es_bool_clauses(request):
return {}
def METHOD_NAME(request):
return ''
def check_es_version(request, es_url):
from rules.es_graphs import ESVersion, ESError
try:
es_version = ESVersion(None, es_url).get()
except ESError as e:
return {'error': e.args[0]}
return {'es_is_good_version': True, 'es_version': es_version}
def update_context(request):
return {}
def custom_source_datatype(check_conf=False):
return tuple()
def update_source_content_type(source=None):
return []
def update_custom_source(source_path):
pass
def extract_custom_source(f, source_path):
pass
def get_sources():
from rules.models import Source
return Source.objects.annotate(
cats_count=Count('category', distinct=True),
rules_count=Count('category__rule')
)
def get_sources_with_extra_info():
return get_sources()
def update_settings(data):
pass
def extra_ruleset_form(request):
return None
def data_export():
pass
def update_policies(proc_filter):
pass
def delete_policies():
pass
def extract_policies(item):
return {}
def import_policies(filter_, method_dict=None, threat_dict=None):
pass
def changelog_ruleset(request, ruleset):
from rules.views import build_source_diff
from scirius.utils import scirius_render
url = 'rules/ruleset.html'
diff = ruleset.diff()
for key in diff:
cdiff = diff[key]
build_source_diff(request, cdiff)
diff[key] = cdiff
context = {'ruleset': ruleset, 'diff': diff, 'mode': 'changelog'}
return scirius_render(request, url, context)
def es_version_changed():
pass
def sn_loggers():
return {}
def use_stamuslogger():
return False
def login_redirection_url(request):
if request.user.has_perm('rules.events_view'):
return '/stamus/hunting/dashboards'
return '/rules'
def current_user_js(request):
return 'var current_user = %s;\n' % json.dumps(request.user.sciriususer.to_dict(json_compatible=True))
|
3,494 |
test ai examples example clean from http
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import json
import unittest
from unittest import mock
import requests
from azure.cli.testsdk.scenario_tests import AllowLargeResponse
from azure.cli.testsdk import (ScenarioTest, ResourceGroupPreparer)
from azext_ai_examples.custom import (call_aladdin_service, ping_aladdin_service,
clean_from_http_answer, get_generated_examples)
def create_valid_http_response():
mock_response = requests.Response()
mock_response.status_code = 200
data = [{
'title': 'RunTestAutomation',
'snippet': 'az find',
'source': 'crawler-example'
}, {
'title': 'az test',
'snippet': 'The title',
'source': 'crawler-crafted'
}]
mock_response._content = json.dumps(data)
return mock_response
def create_empty_http_response():
mock_response = requests.Response()
mock_response.status_code = 200
data = []
mock_response._content = json.dumps(data)
return mock_response
def create_failed_http_response():
mock_response = requests.Response()
mock_response.status_code = 500
data = []
mock_response._content = json.dumps(data)
return mock_response
class AiExamplesCustomCommandTest(unittest.TestCase):
# Test the Aladdin check connection command
def test_ai_examples_ping_aladdin_service_success(self):
mock_response = create_empty_http_response()
with mock.patch('requests.get', return_value=(mock_response)):
response = ping_aladdin_service()
self.assertEqual(200, response.status_code)
def test_ai_examples_ping_aladdin_service_failed(self):
mock_response = create_failed_http_response()
with mock.patch('requests.get', return_value=(mock_response)):
response = ping_aladdin_service()
self.assertEqual(500, response.status_code)
# Test the Aladdin examples
def test_ai_examples_call_aladdin_service(self):
mock_response = create_valid_http_response()
with mock.patch('requests.get', return_value=(mock_response)):
response = call_aladdin_service('RunTestAutomation')
self.assertEqual(200, response.status_code)
self.assertEqual(2, len(json.loads(response.content)))
def METHOD_NAME(self):
cleaned_responses = []
mock_response = create_valid_http_response()
for response in json.loads(mock_response.content):
cleaned_responses.append(clean_from_http_answer(response))
self.assertEqual('RunTestAutomation', cleaned_responses[0].short_summary)
self.assertEqual('az find\n', cleaned_responses[0].command)
self.assertEqual('The title', cleaned_responses[1].short_summary)
self.assertEqual('az test\n', cleaned_responses[1].command)
def test_ai_examples_get_generated_examples_full(self):
examples = []
mock_response = create_valid_http_response()
with mock.patch('requests.get', return_value=(mock_response)):
examples = get_generated_examples('RunTestAutomation')
self.assertEqual('RunTestAutomation', examples[0].short_summary)
self.assertEqual('az find\n', examples[0].command)
self.assertEqual('The title', examples[1].short_summary)
self.assertEqual('az test\n', examples[1].command)
def test_ai_examples_get_generated_examples_empty(self):
examples = []
mock_response = create_empty_http_response()
with mock.patch('requests.get', return_value=(mock_response)):
examples = get_generated_examples('RunTestAutomation')
self.assertEqual(0, len(examples))
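# Not in the original file: the suite can be run directly with the standard unittest
# entry point, assuming the azext_ai_examples extension is importable:
#
#   if __name__ == '__main__':
#       unittest.main()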
|
3,495 |
activate
|
from __future__ import annotations
from typing import TYPE_CHECKING
from typing import Any
from typing import Callable
import pytest
from poetry.core.packages.dependency import Dependency
from poetry.core.packages.package import Package
from poetry.plugins.application_plugin import ApplicationPlugin
from poetry.plugins.plugin import Plugin
from poetry.utils._compat import metadata
if TYPE_CHECKING:
from os import PathLike
from pathlib import Path
from cleo.io.io import IO
from cleo.testers.command_tester import CommandTester
from pytest_mock import MockerFixture
from poetry.plugins.base_plugin import BasePlugin
from poetry.poetry import Poetry
from poetry.repositories import Repository
from poetry.utils.env import Env
from tests.helpers import PoetryTestApplication
from tests.types import CommandTesterFactory
class DoNothingPlugin(Plugin):
def METHOD_NAME(self, poetry: Poetry, io: IO) -> None:
pass
class EntryPoint(metadata.EntryPoint):
def load(self) -> type[BasePlugin]:
if self.group == ApplicationPlugin.group:
return ApplicationPlugin
return DoNothingPlugin
@pytest.fixture()
def tester(command_tester_factory: CommandTesterFactory) -> CommandTester:
return command_tester_factory("self show plugins")
@pytest.fixture()
def plugin_package_requires_dist() -> list[str]:
return []
@pytest.fixture()
def plugin_package(plugin_package_requires_dist: list[str]) -> Package:
package = Package("poetry-plugin", "1.2.3")
for requirement in plugin_package_requires_dist:
package.add_dependency(Dependency.create_from_pep_508(requirement))
return package
@pytest.fixture()
def plugin_distro(plugin_package: Package, tmp_path: Path) -> metadata.Distribution:
class MockDistribution(metadata.Distribution):
def read_text(self, filename: str) -> str | None:
if filename == "METADATA":
return "\n".join(
[
f"Name: {plugin_package.name}",
f"Version: {plugin_package.version}",
*[
f"Requires-Dist: {dep.to_pep_508()}"
for dep in plugin_package.requires
],
]
)
return None
def locate_file(self, path: str | PathLike[str]) -> Path:
return tmp_path / path
return MockDistribution() # type: ignore[no-untyped-call]
@pytest.fixture
def entry_point_name() -> str:
return "poetry-plugin"
@pytest.fixture
def entry_point_values_by_group() -> dict[str, list[str]]:
return {}
@pytest.fixture
def entry_points(
entry_point_name: str,
entry_point_values_by_group: dict[str, list[str]],
plugin_distro: metadata.Distribution,
) -> Callable[..., list[metadata.EntryPoint]]:
by_group = {
key: [
EntryPoint( # type: ignore[no-untyped-call]
name=entry_point_name,
group=key,
value=value,
)._for( # type: ignore[attr-defined]
plugin_distro
)
for value in values
]
for key, values in entry_point_values_by_group.items()
}
def _entry_points(**params: Any) -> list[metadata.EntryPoint]:
group = params.get("group")
if group not in by_group:
return []
eps: list[metadata.EntryPoint] = by_group[group]
return eps
return _entry_points
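# The fixture above returns a stand-in for metadata.entry_points(): a callable that, for a
# requested group, yields only the stub EntryPoint objects configured through the
# entry_point_values_by_group parametrization, each bound to the mocked plugin distribution.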
@pytest.fixture(autouse=True)
def mock_metadata_entry_points(
plugin_package: Package,
plugin_distro: metadata.Distribution,
installed: Repository,
mocker: MockerFixture,
tmp_venv: Env,
entry_points: Callable[..., list[metadata.EntryPoint]],
) -> None:
installed.add_package(plugin_package)
mocker.patch.object(
tmp_venv.site_packages, "find_distribution", return_value=plugin_distro
)
mocker.patch.object(metadata, "entry_points", entry_points)
@pytest.mark.parametrize("entry_point_name", ["poetry-plugin", "not-package-name"])
@pytest.mark.parametrize(
"entry_point_values_by_group",
[
{
ApplicationPlugin.group: ["FirstApplicationPlugin"],
Plugin.group: ["FirstPlugin"],
}
],
)
def test_show_displays_installed_plugins(
app: PoetryTestApplication,
tester: CommandTester,
) -> None:
tester.execute("")
expected = """
• poetry-plugin (1.2.3)
1 plugin and 1 application plugin
"""
assert tester.io.fetch_output() == expected
@pytest.mark.parametrize(
"entry_point_values_by_group",
[
{
ApplicationPlugin.group: [
"FirstApplicationPlugin",
"SecondApplicationPlugin",
],
Plugin.group: ["FirstPlugin", "SecondPlugin"],
}
],
)
def test_show_displays_installed_plugins_with_multiple_plugins(
app: PoetryTestApplication,
tester: CommandTester,
) -> None:
tester.execute("")
expected = """
• poetry-plugin (1.2.3)
2 plugins and 2 application plugins
"""
assert tester.io.fetch_output() == expected
@pytest.mark.parametrize(
"plugin_package_requires_dist", [["foo (>=1.2.3)", "bar (<4.5.6)"]]
)
@pytest.mark.parametrize(
"entry_point_values_by_group",
[
{
ApplicationPlugin.group: ["FirstApplicationPlugin"],
Plugin.group: ["FirstPlugin"],
}
],
)
def test_show_displays_installed_plugins_with_dependencies(
app: PoetryTestApplication,
tester: CommandTester,
) -> None:
tester.execute("")
expected = """
• poetry-plugin (1.2.3)
1 plugin and 1 application plugin
Dependencies
- foo (>=1.2.3)
- bar (<4.5.6)
"""
assert tester.io.fetch_output() == expected
|
3,496 |
port end
|
# sfputil.py
#
# Platform-specific SFP transceiver interface for SONiC
#
try:
import time
from sonic_sfp.sfputilbase import SfpUtilBase
except ImportError as e:
raise ImportError("%s - required module not found" % str(e))
class SfpUtil(SfpUtilBase):
"""Platform-specific SfpUtil class"""
PORT_START = 0
PORT_END = 31
PORTS_IN_BLOCK = 32
QSFP_PORT_START = 0
QSFP_PORT_END = 31
_port_to_eeprom_mapping = {}
port_to_i2c_mapping = {
0: 22,
1: 23,
2: 24,
3: 25,
4: 26,
5: 27,
6: 28,
7: 29,
8: 30,
9: 31,
10: 32,
11: 33,
12: 34,
13: 35,
14: 36,
15: 37,
16: 6,
17: 7,
18: 8,
19: 9,
20: 10,
21: 11,
22: 12,
23: 13,
24: 14,
25: 15,
26: 16,
27: 17,
28: 18,
29: 19,
30: 20,
31: 21
}
@property
def port_start(self):
return self.PORT_START
@property
def METHOD_NAME(self):
return self.PORT_END
@property
def qsfp_port_start(self):
return self.QSFP_PORT_START
@property
def qsfp_port_end(self):
return self.QSFP_PORT_END
@property
def qsfp_ports(self):
return list(range(0, self.PORTS_IN_BLOCK + 1))
@property
def port_to_eeprom_mapping(self):
return self._port_to_eeprom_mapping
def __init__(self):
eeprom_path = "/sys/class/i2c-adapter/i2c-{0}/{0}-0050/eeprom"
for x in range(0, self.METHOD_NAME + 1):
port_eeprom_path = eeprom_path.format(self.port_to_i2c_mapping[x])
self.port_to_eeprom_mapping[x] = port_eeprom_path
SfpUtilBase.__init__(self)
def get_presence(self, port_num):
# Check for invalid port_num
if port_num < self.port_start or port_num > self.METHOD_NAME:
return False
try:
reg_file = open("/sys/class/swps/port"+str(port_num)+"/present")
except IOError as e:
print("Error: unable to open file: %s" % str(e))
return False
reg_value = int(reg_file.readline().rstrip())
if reg_value == 0:
return True
return False
def get_low_power_mode(self, port_num):
# Check for invalid port_num
if port_num < self.port_start or port_num > self.METHOD_NAME:
return False
try:
reg_file = open("/sys/class/swps/port"+str(port_num)+"/lpmod")
except IOError as e:
print("Error: unable to open file: %s" % str(e))
reg_value = int(reg_file.readline().rstrip())
if reg_value == 0:
return False
return True
def set_low_power_mode(self, port_num, lpmode):
# Check for invalid port_num
if port_num < self.port_start or port_num > self.METHOD_NAME:
return False
try:
reg_file = open("/sys/class/swps/port"+str(port_num)+"/lpmod", "r+")
except IOError as e:
print("Error: unable to open file: %s" % str(e))
return False
reg_value = int(reg_file.readline().rstrip())
# LPMode is active high; set or clear the bit accordingly
if lpmode is True:
reg_value = 1
else:
reg_value = 0
reg_file.write(hex(reg_value))
reg_file.close()
return True
def reset(self, port_num):
QSFP_RESET_REGISTER_DEVICE_FILE = "/sys/class/swps/port"+str(port_num)+"/reset"
# Check for invalid port_num
if port_num < self.port_start or port_num > self.METHOD_NAME:
return False
try:
reg_file = open(QSFP_RESET_REGISTER_DEVICE_FILE, "r+")
except IOError as e:
print("Error: unable to open file: %s" % str(e))
return False
reg_value = 0
reg_file.write(hex(reg_value))
reg_file.close()
# Sleep 2 seconds to allow it to settle
time.sleep(2)
# Flip the value back and write it to the register to take the port out of reset
try:
reg_file = open(QSFP_RESET_REGISTER_DEVICE_FILE, "r+")
except IOError as e:
print("Error: unable to open file: %s" % str(e))
return False
reg_value = 1
reg_file.write(hex(reg_value))
reg_file.close()
return True
def get_transceiver_change_event(self):
"""
TODO: This function needs to be implemented
when we decide to support monitoring SFP (Xcvrd)
on this platform.
"""
raise NotImplementedError
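# Illustrative usage sketch (not part of the original driver); assumes the platform's
# /sys/class/swps port nodes are present:
#
#   util = SfpUtil()
#   util.get_presence(0)              # True when a transceiver is seated in port 0
#   util.set_low_power_mode(0, True)  # assert LPMode on port 0
#   util.reset(0)                     # pulse the reset line, sleeping 2 s in between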
|
3,497 |
get train dicts
|
# Copyright 2020-2023 OpenDR European Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from detectron2.structures import BoxMode
def METHOD_NAME(img_dir, bbx_train, kps_train, num_train):
dataset_dicts = []
for idx in range(0, num_train):
record = {}
filename = img_dir + "/" + str(idx) + ".jpg"
height = 480
width = 640
record["file_name"] = filename
record["height"] = height
record["width"] = width
kps = []
for i in range(len(kps_train[idx][0])):
kps.append(kps_train[idx][0][i])
kps.append(kps_train[idx][1][i])
kps.append(2) # visibility
objs = []
obj = {
"bbox": [bbx_train[idx][0], bbx_train[idx][1], bbx_train[idx][2], bbx_train[idx][3]],
"bbox_mode": BoxMode.XYXY_ABS,
"keypoints": kps, # x-y-visibility
"category_id": 0,
"iscrowd": 0
}
objs.append(obj)
record["annotations"] = objs
dataset_dicts.append(record)
return dataset_dicts
def get_val_dicts(img_dir, bbx_val, kps_val, num_val):
dataset_dicts = []
for idx in range(0, num_val):
record = {}
filename = img_dir + "/" + str(idx) + ".jpg"
height = 480
width = 640
kps = []
for i in range(len(kps_val[idx][0])):
kps.append(kps_val[idx][0][i])
kps.append(kps_val[idx][1][i])
kps.append(2) # visibility
record["file_name"] = filename
record["height"] = height
record["width"] = width
objs = []
obj = {
"bbox": [bbx_val[idx][0], bbx_val[idx][1], bbx_val[idx][2], bbx_val[idx][3]],
"bbox_mode": BoxMode.XYXY_ABS,
"keypoints": kps, # x-y-visibility
"category_id": 0,
"iscrowd": 0
}
objs.append(obj)
record["annotations"] = objs
dataset_dicts.append(record)
return dataset_dicts
def register_datasets(DatasetCatalog, MetadataCatalog, image_dir, object_name,
bbx_train, kps_train, bbx_val, kps_val):
num_train = len(bbx_train)
num_val = len(bbx_val)
num_kps = len(kps_train[0][0])
kps_names = []
for i in range(num_kps):
kps_names.append("p" + str(i + 1))
for d in ["train"]:
DatasetCatalog.register(object_name + "_" + d, lambda d=d: METHOD_NAME(
image_dir + "/" + object_name + "/images/" + d, bbx_train, kps_train, num_train))
MetadataCatalog.get(object_name + "_" + d).set(thing_classes=[object_name])
MetadataCatalog.get(object_name + "_" + d).set(keypoint_names=kps_names)
MetadataCatalog.get(object_name + "_" + d).set(keypoint_flip_map=[])
train_set = METHOD_NAME(image_dir + "/" + object_name + "/images/" + d, bbx_train, kps_train, num_train)
for d in ["val"]:
DatasetCatalog.register(object_name + "_" + d, lambda d=d: get_val_dicts(
image_dir + "/" + object_name + "/images/" + d, bbx_val, kps_val, num_val))
MetadataCatalog.get(object_name + "_" + d).set(thing_classes=[object_name])
MetadataCatalog.get(object_name + "_" + d).set(keypoint_names=kps_names)
MetadataCatalog.get(object_name + "_" + d).set(keypoint_flip_map=[])
val_set = get_val_dicts(image_dir + "/" + object_name + "/images/" + d, bbx_val, kps_val, num_val)
return MetadataCatalog.get(object_name + "_" + "train"), train_set, val_set
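# Illustrative usage sketch (hypothetical paths and label arrays, not from the original file):
#
#   from detectron2.data import DatasetCatalog, MetadataCatalog
#   metadata, train_set, val_set = register_datasets(
#       DatasetCatalog, MetadataCatalog, "/data", "cup",
#       bbx_train, kps_train, bbx_val, kps_val)
#   # registers "cup_train" and "cup_val", with keypoint names p1..pN and no flip map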
|
3,498 |
save index
|
import json
import random
import subprocess
from pathlib import Path
from typing import Set, Union
from librelingo_types.data_types import Course, PhraseIdentity
from librelingo_utils import audio_id
from librelingo_audios.functions import list_required_audios
def update_audios_for_course(
output_path: str, course_name: str, course: Course, settings
):
if not course.settings.audio_settings.enabled:
return
index_file_path = Path(output_path) / f"{course_name}.json"
phrases_with_existing_audios = _load_index_file(index_file_path)
# We want to go from the old state (the existing audios) to the new state
# (the phrases now in the course). So we determine which phrases are the
# same in both, which are new, and which are no longer needed.
new_phrase_set = {
_phrase_identity_info_from_text(p[1]) for p in list_required_audios(course)
}
old_phrase_set = {
_phrase_identity_info_from_index(p) for p in phrases_with_existing_audios
}
if settings.destructive:
phrases_to_keep = set()
else:
phrases_to_keep = new_phrase_set & old_phrase_set
phrases_to_fetch = new_phrase_set - phrases_to_keep
phrases_to_delete = old_phrase_set - phrases_to_keep
_delete_phrases(
phrases_to_delete, output_path, phrases_with_existing_audios, settings
)
kept_phrases_index_entries = _keep_phrases(
phrases_to_keep, phrases_with_existing_audios
)
fetched_phrases_index_entries = _fetch_phrases(
phrases_to_fetch, output_path, course, settings
)
result_index = kept_phrases_index_entries + fetched_phrases_index_entries
if not settings.dry_run:
METHOD_NAME(result_index, index_file_path)
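# Illustrative sketch of the set arithmetic above (hypothetical phrase identities):
#   old = {A, B}                # audios already indexed on disk
#   new = {B, C}                # phrases currently in the course
#   keep   = new & old  -> {B}  # reuse existing audio
#   fetch  = new - keep -> {C}  # generate new audio
#   delete = old - keep -> {A}  # remove stale audio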
def _load_index_file(file_path: Path):
if not file_path.is_file():
return []
with open(file_path, "r") as f:
return json.loads(f.read())
def _keep_phrases(phrases_to_keep: Union[Set, Set[PhraseIdentity]], existing_index):
return [
phrase
for phrase in existing_index
if _phrase_identity_info_from_index(phrase) in phrases_to_keep
]
def _fetch_phrases(
phrases: Set[PhraseIdentity], output_path: str, course: Course, settings
):
return [
_fetch_audio_for_phrase(phrase_identity, output_path, course, settings)
for phrase_identity in phrases
]
def _fetch_audio_for_phrase(
phrase_identity: PhraseIdentity, output_path: str, course: Course, settings
):
file_name = audio_id(course.target_language, phrase_identity.text)
destination_path = Path(output_path) / f"{file_name}.mp3"
# This is where more audio sources would be added with an if statement. For
# now there is only TTS.
return _generate_audio_with_tts(
phrase_identity, file_name, destination_path, course, settings
)
def _generate_audio_with_tts(
phrase_identity: PhraseIdentity,
file_name: str,
destination_path: Path,
course: Course,
settings,
):
tts_settings_list = course.settings.audio_settings.text_to_speech_settings_list
if tts_settings_list == []:
raise RuntimeError(
f"Cannot generate {destination_path} because there are no TTS settings configured"
)
chosen_tts_settings = random.choice(tts_settings_list)
if settings.dry_run:
print(
f"Would generate {destination_path} "
f"using {chosen_tts_settings.voice} {chosen_tts_settings.engine}"
)
else:
print(
f"Generating {destination_path} "
f"using {chosen_tts_settings.voice} {chosen_tts_settings.engine}"
)
# This is where more TTS providers would be added with an if statement.
# For now there is only Polly.
tts_provider = "polly"
subprocess.run(
[
"aws",
tts_provider,
"synthesize-speech",
"--output-format",
"mp3",
"--voice-id",
chosen_tts_settings.voice,
"--engine",
chosen_tts_settings.engine,
"--text",
phrase_identity.text,
destination_path,
],
stdout=subprocess.DEVNULL,
)
return {
"id": file_name,
"text": phrase_identity.text,
"source": "TTS",
"license": course.license.full_name,
"ttsProvider": "Polly",
"ttsVoice": chosen_tts_settings.voice,
"ttsEngine": chosen_tts_settings.engine,
}
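# For reference, the subprocess call above corresponds roughly to this AWS CLI invocation
# (voice, engine, text, and output name shown are illustrative):
#   aws polly synthesize-speech --output-format mp3 \
#       --voice-id Lupe --engine neural --text "la manzana" <audio_id>.mp3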
def _delete_phrases(
phrases: Set[PhraseIdentity], output_path: str, existing_index, settings
):
for phrase_index in existing_index:
if _phrase_identity_info_from_index(phrase_index) in phrases:
_delete_audio_for_phrase(phrase_index, output_path, settings)
def _delete_audio_for_phrase(index_entry, output_path: str, settings):
target_path = Path(output_path) / f"{index_entry['id']}.mp3"
if not target_path.is_file():
# It's already not there, for whatever reason
return
if settings.dry_run:
print("Would delete {target_path}")
else:
print(f"Deleting {target_path}")
target_path.unlink()
def METHOD_NAME(result_index: list, index_file_path: Path):
with open(index_file_path, "w", encoding="utf-8") as f:
json.dump(
sorted(result_index, key=lambda i: i["id"]), f, ensure_ascii=False, indent=4
)
def _phrase_identity_info_from_text(text):
return PhraseIdentity(text, "TTS")
def _phrase_identity_info_from_index(phrase_index_entry):
return PhraseIdentity(phrase_index_entry["text"], phrase_index_entry["source"])
|
3,499 |
tophub context
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return, unidiomatic-typecheck, undefined-variable, invalid-name, redefined-builtin
"""
The Relay Virtual Machine.
Implements a Python interface to compiling and executing on the Relay VM.
"""
import numpy as np
import tvm.runtime.ndarray as _nd
import tvm.runtime.vm as vm_rt
from tvm import autotvm
from tvm.relay import expr as _expr
from tvm.relay.backend.interpreter import Executor
from tvm.target import Target
from . import _vm
def compile(mod, target=None, target_host=None, params=None):
"""Compile the module to VM executable. A helper function for VMCompiler.
Parameters
----------
mod : tvm.IRModule
The Relay module to build.
target : any multi-target like object, see Target.canon_multi_target
For homogeneous compilation, the unique build target.
For heterogeneous compilation, a dictionary or list of possible build targets.
target_host : None, or any target-like object, see Target.canon_target
Host compilation target, if target is device.
When TVM compiles a device-specific program such as CUDA,
we also need host (CPU) side code to interact with the driver
to set up the dimensions and parameters correctly.
target_host is used to specify the host-side codegen target.
By default, llvm is used if it is enabled,
otherwise a stackvm interpreter is used.
params : dict of str to NDArray
Input parameters to the graph that do not change
during inference time. Used for constant folding.
Returns
-------
exec : tvm.runtime.vm.Executable
The VM executable that contains both library code and bytecode.
"""
compiler = VMCompiler()
if params:
compiler.set_params(params)
compiler.lower(mod, target, target_host)
compiler.codegen()
return compiler.get_exec()
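# Illustrative usage sketch (assumes an existing Relay IRModule `mod` and an LLVM-enabled
# TVM build; names are hypothetical):
#
#   exe = compile(mod, target="llvm", params=params)
#   vm = vm_rt.VirtualMachine(exe, _nd.cpu())
#   result = vm.run(input_data)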
class VMCompiler(object):
"""Compiler that compiles Relay module to VM executable."""
def __init__(self):
self.mod = _vm._VMCompiler()
self._lower = self.mod["lower"]
self._codegen = self.mod["codegen"]
self._get_exec = self.mod["get_executable"]
self._set_params_func = self.mod["set_params"]
self._get_params_func = self.mod["get_params"]
self._optimize = self.mod["optimize"]
def set_params(self, params):
"""Set constant parameters for the model.
Parameters
----------
params : dict of str to NDArray
Input parameters to the graph that do not change
during inference time. Used for constant folding.
"""
inputs = {}
for name, param in params.items():
if isinstance(param, np.ndarray):
param = _nd.array(param)
inputs[name] = _expr.const(param)
self._set_params_func(inputs)
def get_params(self):
"""Return the updated weights."""
params = self._get_params_func()
ret = {}
for key, value in params.items():
ret[key] = value.data
return ret
def lower(self, mod, target=None, target_host=None):
"""Lower the module to VM bytecode.
Parameters
----------
mod : tvm.IRModule
The Relay module to build.
target : any multi-target like object, see Target.canon_multi_target
For homogeneous compilation, the unique build target.
For heterogeneous compilation, a dictionary or list of possible build targets.
target_host : any target-like object, see Target.canon_target
Host compilation target, if target is device.
"""
raw_targets = Target.canon_multi_target_and_host(target, target_host)
tophub_context = self.METHOD_NAME(raw_targets)
with tophub_context:
self._lower(mod, raw_targets)
def codegen(self):
"""Generate the kernel library."""
self._codegen()
def optimize(self, mod, target=None, target_host=None, params=None):
"""Helper method that optimizes a Relay module via VM.
Parameters
----------
mod : tvm.IRModule
target : any multi-target like object, see Target.canon_multi_target
For homogeneous compilation, the unique build target.
For heterogeneous compilation, a dictionary or list of possible build targets.
target_host : any target-like object, see Target.canon_target
Host compilation target, if target is device.
params : dict of str to NDArray
Input parameters to the graph that do not change
during inference time. Used for constant folding.
Returns
-------
mod : tvm.IRModule
The optimized relay module.
params : dict
The parameters of the final module.
"""
raw_targets = Target.canon_multi_target_and_host(target, target_host)
if params:
self.set_params(params)
return self._optimize(mod, raw_targets), self.get_params()
def get_exec(self):
"""Get the VM executable.
Returns
-------
exec : tvm.runtime.vm.Executable
The VM executable that contains both library code and bytecode.
"""
return vm_rt.Executable(self._get_exec())
def METHOD_NAME(self, raw_targets):
"""Get the autotvm context."""
# If current dispatch context is fallback context (the default root context),
# then load pre-tuned parameters from TopHub
if isinstance(autotvm.DispatchContext.current, autotvm.FallbackContext):
tophub_context = autotvm.tophub.context(raw_targets)
else:
tophub_context = autotvm.utils.EmptyContext()
return tophub_context
class VMExecutor(Executor):
"""
An implementation of the executor interface for
the Relay VM.
Useful interface for experimentation and debugging;
the VM can also be used directly via the API
supported by `tvm.runtime.vm`.
Parameters
----------
mod : :py:class:`~tvm.IRModule`
The module to support the execution.
device : :py:class:`~tvm.runtime.Device`
The runtime device to run the code on.
target : any multi-target like object, see Target.canon_multi_target
For homogeneous compilation, the unique build target.
For heterogeneous compilation, a dictionary or list of possible build targets.
"""
def __init__(self, mod, device, target):
if mod is None:
raise RuntimeError("Must provide module to get VM executor.")
self.mod = mod
self.device = device
self.target = target
self.executable = None
self.vm = None
def _make_executor(self, expr=None):
if expr:
self.mod["main"] = expr
self.executable = compile(self.mod, self.target)
self.vm = vm_rt.VirtualMachine(self.executable, self.device)
def _vm_wrapper(*args, **kwargs):
args = self._convert_args(self.mod["main"], args, kwargs)
return self.vm.run(*args)
return _vm_wrapper
|