id
int64 0
300k
| label
stringlengths 1
74
⌀ | text
stringlengths 4k
8k
|
---|---|---|
1,200 |
zip frames
|
from os import remove
from os.path import join
from shutil import copyfile, rmtree
from tempfile import mkdtemp
from threading import Event
from zipfile import ZipFile
try:
from urllib import urlretrieve
except ImportError:
from urllib.request import urlretrieve
from kivy.tests.common import GraphicUnitTest, ensure_web_server
class AsyncImageTestCase(GraphicUnitTest):
    """Graphic tests for AsyncImage: animated ZIP sequences loaded from
    a local path and from an HTTP URL, plus reloading after the source
    file changes on disk."""

    @classmethod
    def setUpClass(cls):
        # Serve the Kivy examples folder over HTTP so the "remote"
        # ZIP-sequence test has a localhost URL to fetch.
        from kivy import kivy_examples_dir
        ensure_web_server(kivy_examples_dir)

    def setUp(self):
        # Frames-per-second from the config; used below to translate
        # wall-clock waits into frame counts.
        from kivy.config import Config
        self.maxfps = Config.getint('graphics', 'maxfps')
        assert self.maxfps > 0
        super(AsyncImageTestCase, self).setUp()

    def METHOD_NAME(self, path):
        # Count the entries (image frames) stored in a ZIP archive.
        with ZipFile(path) as zipf:
            return len(zipf.namelist())

    def wait_for_event_or_timeout(self, event):
        # Pump the app one frame at a time for at most ~30 seconds worth
        # of frames, or until the event is set, whichever comes first.
        timeout = 30 * self.maxfps
        while timeout and not event.is_set():
            self.advance_frames(1)
            timeout -= 1

    def load_zipimage(self, source, frames):
        # load ZIP with images named: 000.png, 001.png, ...
        from kivy.uix.image import AsyncImage
        event = Event()
        image = AsyncImage(anim_delay=0.0333333333333333)
        # bind to 'on_load' because there are various
        # steps where the image is (re)loaded, but
        # the event is triggered only at the end
        image.bind(on_load=lambda *args, **kwargs: event.set())
        image.source = source
        self.wait_for_event_or_timeout(event)
        self.render(image)
        proxyimg = image._coreimage
        self.assertTrue(proxyimg.anim_available)
        # one texture per frame extracted from the ZIP
        self.assertEqual(len(proxyimg.image.textures), frames)
        return image

    def test_remote_zipsequence(self):
        # cube ZIP has 63 PNGs used for animation
        zip_cube = (
            'http://localhost:8000/widgets/'
            'sequenced_images/data/images/cube.zip'
        )
        # ref Loader._load_urllib
        tempf, headers = urlretrieve(zip_cube)
        zip_pngs = self.METHOD_NAME(tempf)
        remove(tempf)
        image = self.load_zipimage(zip_cube, zip_pngs)
        # pure delay * fps isn't enough and
        # just +1 isn't either (index collisions)
        self.assertTrue(self.check_sequence_frames(
            image._coreimage,
            int(image._coreimage.anim_delay * self.maxfps + 3)
        ))

    def test_local_zipsequence(self):
        # cube ZIP has 63 PNGs used for animation
        from kivy import kivy_examples_dir
        zip_cube = join(
            kivy_examples_dir, 'widgets', 'sequenced_images',
            'data', 'images', 'cube.zip'
        )
        zip_pngs = self.METHOD_NAME(zip_cube)
        image = self.load_zipimage(zip_cube, zip_pngs)
        # pure delay * fps isn't enough and
        # just +1 isn't either (index collisions)
        self.assertTrue(self.check_sequence_frames(
            image._coreimage,
            int(image._coreimage.anim_delay * self.maxfps + 3)
        ))

    def check_sequence_frames(self, img, frames, slides=5):
        # check whether it really changes the images
        # in the anim_delay interval
        old = None
        while slides:
            # different frames, sequence is changing
            self.assertNotEqual(img.anim_index, old)
            old = img.anim_index
            self.advance_frames(frames)
            slides -= 1
        return True

    def test_reload_asyncimage(self):
        from kivy.resources import resource_find
        from kivy.uix.image import AsyncImage
        temp_dir = mkdtemp()
        event = Event()
        image = AsyncImage()
        image.bind(on_load=lambda *args, **kwargs: event.set())
        # First load a 16x16 icon from a temporary copy.
        fn = resource_find('data/logo/kivy-icon-16.png')
        source = join(temp_dir, 'source.png')
        copyfile(fn, source)
        event.clear()
        image.source = source
        self.wait_for_event_or_timeout(event)
        self.render(image, framecount=2)
        self.assertEqual(image.texture_size, [16, 16])
        remove(source)
        # Then swap the file for a 32x32 icon: reload() must pick it up.
        fn = resource_find('data/logo/kivy-icon-32.png')
        copyfile(fn, source)
        event.clear()
        image.reload()
        self.wait_for_event_or_timeout(event)
        self.render(image, framecount=2)
        self.assertEqual(image.texture_size, [32, 32])
        remove(source)
        rmtree(temp_dir)
if __name__ == '__main__':
    # Allow running this test module directly.
    import unittest
    unittest.main()
|
1,201 |
get empty
|
# Copyright (C) 2023 CVAT.ai Corporation
#
# SPDX-License-Identifier: MIT
from dateutil import parser
import cvat.apps.dataset_manager as dm
from cvat.apps.analytics_report.models import (
BinaryOperatorType,
GranularityChoice,
TransformOperationType,
ViewChoice,
)
from cvat.apps.analytics_report.report.primary_metrics.base import PrimaryMetricBase
from cvat.apps.engine.models import SourceType
class JobAnnotationSpeed(PrimaryMetricBase):
    """Per-job primary metric: objects annotated per hour of working time.

    Object counts come from the job's annotations (dataset manager);
    working time comes from ClickHouse audit events. Day-granular data
    series are kept in the job's analytics report and updated in place.
    """

    _title = "Annotation speed (objects per hour)"
    _description = "Metric shows the annotation speed in objects per hour."
    _default_view = ViewChoice.HISTOGRAM
    _key = "annotation_speed"
    # Raw SQL queries are used to execute ClickHouse queries, as there is no ORM available here
    _query = "SELECT sum(JSONExtractUInt(payload, 'working_time')) / 1000 / 3600 as wt FROM events WHERE job_id={job_id:UInt64} AND timestamp >= {start_datetime:DateTime64} AND timestamp < {end_datetime:DateTime64}"
    _granularity = GranularityChoice.DAY
    _is_filterable_by_date = False
    # Derived series: annotation_speed = object_count / working_time
    _transformations = [
        {
            "name": "annotation_speed",
            TransformOperationType.BINARY: {
                "left": "object_count",
                "operator": BinaryOperatorType.DIVISION,
                "right": "working_time",
            },
        },
    ]

    def calculate(self):
        """Return updated {"object_count": [...], "working_time": [...]} series."""
        def get_tags_count(annotations):
            # Annotations imported from a file are not counted as user work.
            return sum(1 for t in annotations["tags"] if t["source"] != SourceType.FILE)

        def get_shapes_count(annotations):
            return sum(1 for s in annotations["shapes"] if s["source"] != SourceType.FILE)

        def get_track_count(annotations):
            count = 0
            for track in annotations["tracks"]:
                if track["source"] == SourceType.FILE:
                    continue
                if len(track["shapes"]) == 1:
                    # A single keyframe covers every frame up to the segment end.
                    count += self._db_obj.segment.stop_frame - track["shapes"][0]["frame"] + 1
                for prev_shape, cur_shape in zip(track["shapes"], track["shapes"][1:]):
                    # Frames between two keyframes count unless the earlier
                    # keyframe marked the object as outside the frame.
                    if prev_shape["outside"] is not True:
                        count += cur_shape["frame"] - prev_shape["frame"]
            return count

        def get_default():
            return {
                "data_series": {
                    "object_count": [],
                    "working_time": [],
                }
            }

        # Calculate object count
        annotations = dm.task.get_job_data(self._db_obj.id)
        object_count = 0
        object_count += get_tags_count(annotations)
        object_count += get_shapes_count(annotations)
        object_count += get_track_count(annotations)
        timestamp = self._get_utc_now()
        timestamp_str = timestamp.strftime("%Y-%m-%dT%H:%M:%SZ")
        report = self._db_obj.analytics_report
        if report is None:
            statistics = get_default()
        else:
            # Reuse the previously stored series for this metric, if any.
            statistics = next(
                filter(lambda s: s["name"] == "annotation_speed", report.statistics), get_default()
            )
        data_series = statistics["data_series"]
        last_entry_count = 0
        start_datetime = self._db_obj.created_date
        if data_series["object_count"]:
            last_entry = data_series["object_count"][-1]
            last_entry_timestamp = parser.parse(last_entry["datetime"])
            if last_entry_timestamp.date() == timestamp.date():
                # Same calendar day as the last entry: replace it rather than
                # appending, and measure the delta from the entry before it.
                data_series["object_count"] = data_series["object_count"][:-1]
                data_series["working_time"] = data_series["working_time"][:-1]
                if len(data_series["object_count"]):
                    last_last_entry = data_series["object_count"][-1]
                    start_datetime = parser.parse(last_last_entry["datetime"])
                    last_entry_count = last_last_entry["value"]
            else:
                # New day: append a fresh entry measured from the last one.
                last_entry_count = last_entry["value"]
                start_datetime = parser.parse(last_entry["datetime"])
        data_series["object_count"].append(
            {
                "value": object_count - last_entry_count,
                "datetime": timestamp_str,
            }
        )
        # Calculate working time
        parameters = {
            "job_id": self._db_obj.id,
            "start_datetime": start_datetime,
            "end_datetime": self._get_utc_now(),
        }
        result = self._make_clickhouse_query(parameters)
        value = 0
        # The query returns a single row with a single (possibly NULL) sum.
        if (wt := next(iter(result.result_rows))[0]) is not None:
            value = wt
        data_series["working_time"].append(
            {
                "value": value,
                "datetime": timestamp_str,
            }
        )
        return data_series

    def METHOD_NAME(self):
        # Empty data series used when no report exists yet.
        return {
            "object_count": [],
            "working_time": [],
        }
|
1,202 |
input
|
"""Preprocess a C source file using gcc and convert the result into
a token stream
Reference is C99:
* http://www.open-std.org/JTC1/SC22/WG14/www/docs/n1124.pdf
"""
__docformat__ = "restructuredtext"
import os
import re
import sys
import subprocess
from ctypesgen.parser import pplexer, lex
from ctypesgen.parser.lex import LexError
IS_WINDOWS = sys.platform.startswith("win")
IS_MAC = sys.platform.startswith("darwin")
# --------------------------------------------------------------------------
# Lexers
# --------------------------------------------------------------------------
class PreprocessorLexer(lex.Lexer):
    """Lexer over preprocessor output; stamps every token with the name
    of the file it came from."""

    def __init__(self):
        lex.Lexer.__init__(self)
        # Default filename used until real input is supplied.
        self.filename = "<input>"
        self.in_define = False

    def METHOD_NAME(self, data, filename=None):
        # Feed new data to the lexer, optionally recording its filename.
        if filename:
            self.filename = filename
        self.lasttoken = None
        lex.Lexer.METHOD_NAME(self, data)

    def token(self):
        # Fetch the next token; remember its type and tag it with the
        # current filename. Returns None at end of input.
        result = lex.Lexer.token(self)
        if result:
            self.lasttoken = result.type
            result.filename = self.filename
        else:
            self.lasttoken = None
        return result
# --------------------------------------------------------------------------
# Grammars
# --------------------------------------------------------------------------
class PreprocessorParser(object):
    """Runs the C preprocessor (gcc) over a header file and lexes the
    result into a token stream stored in ``self.output``."""

    def __init__(self, options, cparser):
        # Baseline defines that neutralize GCC-specific syntax that the
        # ctypesgen grammar cannot handle.
        self.defines = [
            "__extension__=",
            "__const=const",
            "__asm__(x)=",
            "__asm(x)=",
            "CTYPESGEN=1",
        ]
        # On macOS, explicitly add these defines to keep from getting syntax
        # errors in the macOS standard headers.
        if IS_MAC:
            self.defines += [
                "_Nullable=",
                "_Nonnull=",
            ]
        self.matches = []
        self.output = []
        optimize = options.optimize_lexer if hasattr(options, "optimize_lexer") else False
        self.lexer = lex.lex(
            cls=PreprocessorLexer,
            optimize=optimize,
            lextab="lextab",
            outputdir=os.path.dirname(__file__),
            module=pplexer,
        )
        self.options = options
        self.cparser = cparser  # An instance of CParser

    def parse(self, filename):
        """Parse a file and save its output"""
        cmd = self.options.cpp
        # Legacy behaviour is to implicitly undefine '__GNUC__'
        # Continue doing this, unless user explicitly requested to allow it.
        if self.options.allow_gnu_c:
            # New behaviour. No implicit override.
            # (currently NOT enabled by default yet)
            pass
        else:
            # Legacy behaviour. Add an implicit override.
            # (currently the default)
            cmd += " -U __GNUC__"
        # -dD keeps #define directives in the preprocessor output.
        cmd += " -dD"
        for undefine in self.options.cpp_undefines:
            cmd += " -U%s" % undefine
        # This fixes Issue #6 where OS X 10.6+ adds a C extension that breaks
        # the parser. Blocks shouldn't be needed for ctypesgen support anyway.
        if IS_MAC:
            cmd += " -U __BLOCKS__"
        for path in self.options.include_search_paths:
            cmd += ' -I"%s"' % path
        for define in self.defines + self.options.cpp_defines:
            cmd += ' "-D%s"' % define
        cmd += ' "' + filename + '"'
        self.cparser.handle_status(cmd)
        if IS_WINDOWS:
            # Run through a POSIX shell so the quoting built above works.
            cmd = ["sh.exe", "-c", cmd]
        pp = subprocess.Popen(
            cmd,
            shell=True,
            universal_newlines=False,  # binary
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        ppout_data, pperr_data = pp.communicate()
        try:
            ppout = ppout_data.decode("utf-8")
        except UnicodeError:
            # NOTE(review): macOS headers may contain bytes that are not valid
            # UTF-8; degrade gracefully there only, fail loudly elsewhere.
            if IS_MAC:
                ppout = ppout_data.decode("utf-8", errors="replace")
            else:
                raise UnicodeError
        pperr = pperr_data.decode("utf-8")
        if IS_WINDOWS:
            ppout = ppout.replace("\r\n", "\n")
            pperr = pperr.replace("\r\n", "\n")
        # Forward every non-empty preprocessor stderr line as an error.
        for line in pperr.split("\n"):
            if line:
                self.cparser.handle_pp_error(line)
        # We separate lines to two groups: directives and c-source. Note that
        # #pragma directives actually belong to the source category for this.
        # This is necessary because some source files intermix preprocessor
        # directives with source--this is not tolerated by ctypesgen's single
        # grammar.
        # We put all the source lines first, then all the #define lines.
        source_lines = []
        define_lines = []
        first_token_reg = re.compile(r"^#\s*([^ ]+)($|\s)")
        for line in ppout.split("\n"):
            line += "\n"
            search = first_token_reg.match(line)
            hash_token = search.group(1) if search else None
            if (not hash_token) or hash_token == "pragma":
                source_lines.append(line)
                define_lines.append("\n")
            elif hash_token.isdigit():
                # Line number information has to go with both groups
                source_lines.append(line)
                define_lines.append(line)
            else:  # hash_token in ("define", "undef"):
                source_lines.append("\n")
                define_lines.append(line)
        text = "".join(source_lines + define_lines)
        if self.options.save_preprocessed_headers:
            self.cparser.handle_status(
                "Saving preprocessed headers to %s." % self.options.save_preprocessed_headers
            )
            try:
                with open(self.options.save_preprocessed_headers, "w") as f:
                    f.write(text)
            except IOError:
                self.cparser.handle_error("Couldn't save headers.")
        # Lex the combined text and collect every token until exhaustion.
        self.lexer.METHOD_NAME(text)
        self.output = []
        try:
            while True:
                token = self.lexer.token()
                if token is not None:
                    self.output.append(token)
                else:
                    break
        except LexError as e:
            self.cparser.handle_error("{}; {}".format(e, e.text.partition("\n")[0]), filename, 0)
|
1,203 |
test debian
|
# This file is part of the sos project: https://github.com/sosreport/sos
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# version 2 of the GNU General Public License.
#
# See the LICENSE file in the source distribution for further information.
import unittest
from avocado.utils import distro
from sos.policies import Policy, import_policy
from sos.policies.distros import LinuxPolicy
from sos.policies.package_managers import PackageManager, MultiPackageManager
from sos.policies.package_managers.rpm import RpmPackageManager
from sos.policies.package_managers.dpkg import DpkgPackageManager
from sos.report.plugins import (Plugin, IndependentPlugin,
RedHatPlugin, DebianPlugin)
class FauxPolicy(Policy):
    # Minimal concrete Policy used as a test double.
    distro = "Faux"
class FauxLinuxPolicy(LinuxPolicy):
    # LinuxPolicy test double contributing one extra forbidden path.
    distro = "FauxLinux"

    @classmethod
    def set_forbidden_paths(cls):
        return ['/etc/secret']
class FauxPlugin(Plugin, IndependentPlugin):
    # Plugin valid under any policy.
    pass
class FauxRedHatPlugin(Plugin, RedHatPlugin):
    # Plugin valid only for Red Hat-family policies.
    pass
class FauxDebianPlugin(Plugin, DebianPlugin):
    # Plugin valid only for Debian-family policies.
    pass
class PolicyTests(unittest.TestCase):
    """Tests for Policy plugin validation and the policy import helper."""

    def test_independent_only(self):
        # Independent plugins validate even with no accepted subclasses.
        p = FauxPolicy()
        p.valid_subclasses = []
        self.assertTrue(p.validate_plugin(FauxPlugin))

    def test_forbidden_paths_building(self):
        # Policy-provided paths are merged with the built-in defaults.
        p = FauxLinuxPolicy(probe_runtime=False)
        self.assertTrue('*.pyc' in p.forbidden_paths)
        self.assertTrue('/etc/passwd' in p.forbidden_paths)
        self.assertTrue('/etc/secret' in p.forbidden_paths)

    def test_redhat(self):
        p = FauxPolicy()
        p.valid_subclasses = [RedHatPlugin]
        self.assertTrue(p.validate_plugin(FauxRedHatPlugin))

    def METHOD_NAME(self):
        p = FauxPolicy()
        p.valid_subclasses = [DebianPlugin]
        self.assertTrue(p.validate_plugin(FauxDebianPlugin))

    def test_fails(self):
        # A distro-tagged plugin must not validate when its tag is absent.
        p = FauxPolicy()
        p.valid_subclasses = []
        self.assertFalse(p.validate_plugin(FauxDebianPlugin))

    def test_can_import(self):
        self.assertTrue(import_policy('redhat') is not None)

    def test_cant_import(self):
        self.assertTrue(import_policy('notreal') is None)
class PackageManagerTests(unittest.TestCase):
    """Tests for the base PackageManager, which knows no packages.

    Uses assertEqual/assertIsNone: the assertEquals aliases were
    deprecated since Python 3.2 and removed in Python 3.12.
    """

    def setUp(self):
        self.pm = PackageManager()

    def test_default_all_pkgs(self):
        # The base manager starts with an empty package map.
        self.assertEqual(self.pm.packages, {})

    def test_default_all_pkgs_by_name(self):
        self.assertEqual(self.pm.all_pkgs_by_name('doesntmatter'), [])

    def test_default_all_pkgs_by_name_regex(self):
        self.assertEqual(
            self.pm.all_pkgs_by_name_regex('.*doesntmatter$'), [])

    def test_default_pkg_by_name(self):
        # Unknown package lookups return None rather than raising.
        self.assertIsNone(self.pm.pkg_by_name('foo'))
class RpmPackageManagerTests(unittest.TestCase):
    """Tests for RpmPackageManager; skipped on non-RPM hosts.

    Uses assertEqual/assertNotEqual: the assertEquals/assertNotEquals
    aliases were deprecated since Python 3.2 and removed in Python 3.12.
    """

    def setUp(self):
        if distro.detect().name not in ['fedora', 'centos', 'rhel']:
            self.skipTest('Not running on an RPM distribution')
        self.pm = RpmPackageManager()

    def test_load_all_packages(self):
        # A real RPM host must report at least one installed package.
        self.assertNotEqual(self.pm.packages, {})

    def test_pkg_is_formatted(self):
        # coreutils is installed everywhere we run; check the entry shape.
        kpkg = self.pm.pkg_by_name('coreutils')
        self.assertIsInstance(kpkg, dict)
        self.assertIsInstance(kpkg['version'], list)
        self.assertEqual(kpkg['pkg_manager'], 'rpm')
class DpkgPackageManagerTests(unittest.TestCase):
    """Tests for DpkgPackageManager; skipped on non-dpkg hosts.

    Uses assertEqual/assertNotEqual: the assertEquals/assertNotEquals
    aliases were deprecated since Python 3.2 and removed in Python 3.12.
    """

    def setUp(self):
        if distro.detect().name not in ['Ubuntu', 'debian']:
            self.skipTest('Not running on a dpkg distribution')
        self.pm = DpkgPackageManager()

    def test_load_all_packages(self):
        # A real dpkg host must report at least one installed package.
        self.assertNotEqual(self.pm.packages, {})

    def test_pkg_is_formatted(self):
        # coreutils is installed everywhere we run; check the entry shape.
        kpkg = self.pm.pkg_by_name('coreutils')
        self.assertIsInstance(kpkg, dict)
        self.assertIsInstance(kpkg['version'], list)
        self.assertEqual(kpkg['pkg_manager'], 'dpkg')
class MultiPackageManagerTests(unittest.TestCase):
    """Tests for MultiPackageManager with an rpm primary and dpkg fallback.

    Uses assertEqual/assertNotEqual: the assertEquals/assertNotEquals
    aliases were deprecated since Python 3.2 and removed in Python 3.12.
    """

    def setUp(self):
        self.pm = MultiPackageManager(primary=RpmPackageManager,
                                      fallbacks=[DpkgPackageManager])

    def test_load_all_packages(self):
        self.assertNotEqual(self.pm.packages, {})

    def test_pkg_is_formatted(self):
        kpkg = self.pm.pkg_by_name('coreutils')
        self.assertIsInstance(kpkg, dict)
        self.assertIsInstance(kpkg['version'], list)
        # The reported backend must match the host's native packaging.
        _local = distro.detect().name
        if _local in ['Ubuntu', 'debian']:
            self.assertEqual(kpkg['pkg_manager'], 'dpkg')
        else:
            self.assertEqual(kpkg['pkg_manager'], 'rpm')
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()

# vim: set et ts=4 sw=4 :
|
1,204 |
name
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetVariableResult',
'AwaitableGetVariableResult',
'get_variable',
'get_variable_output',
]
@pulumi.output_type
class GetVariableResult:
    """
    A collection of values returned by getVariable.
    """
    # NOTE: tfgen-generated output type; each constructor argument is
    # validated as a string (validation is skipped for unset/falsy values)
    # and stored via pulumi.set for retrieval through the getters below.
    def __init__(__self__, id=None, METHOD_NAME=None, parent=None, project=None, text=None, update_time=None, value=None):
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if METHOD_NAME and not isinstance(METHOD_NAME, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", METHOD_NAME)
        if parent and not isinstance(parent, str):
            raise TypeError("Expected argument 'parent' to be a str")
        pulumi.set(__self__, "parent", parent)
        if project and not isinstance(project, str):
            raise TypeError("Expected argument 'project' to be a str")
        pulumi.set(__self__, "project", project)
        if text and not isinstance(text, str):
            raise TypeError("Expected argument 'text' to be a str")
        pulumi.set(__self__, "text", text)
        if update_time and not isinstance(update_time, str):
            raise TypeError("Expected argument 'update_time' to be a str")
        pulumi.set(__self__, "update_time", update_time)
        if value and not isinstance(value, str):
            raise TypeError("Expected argument 'value' to be a str")
        pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The provider-assigned unique ID for this managed resource.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def METHOD_NAME(self) -> str:
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def parent(self) -> str:
        return pulumi.get(self, "parent")

    @property
    @pulumi.getter
    def project(self) -> Optional[str]:
        return pulumi.get(self, "project")

    @property
    @pulumi.getter
    def text(self) -> str:
        return pulumi.get(self, "text")

    @property
    @pulumi.getter(METHOD_NAME="updateTime")
    def update_time(self) -> str:
        return pulumi.get(self, "update_time")

    @property
    @pulumi.getter
    def value(self) -> str:
        return pulumi.get(self, "value")
class AwaitableGetVariableResult(GetVariableResult):
    # Awaitable wrapper: `await`-ing it yields nothing and immediately
    # resolves to a plain GetVariableResult copy of this object.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetVariableResult(
            id=self.id,
            METHOD_NAME=self.METHOD_NAME,
            parent=self.parent,
            project=self.project,
            text=self.text,
            update_time=self.update_time,
            value=self.value)
def get_variable(METHOD_NAME: Optional[str] = None,
                 parent: Optional[str] = None,
                 project: Optional[str] = None,
                 opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVariableResult:
    """
    ## Example Usage

    ```python
    import pulumi
    import pulumi_gcp as gcp

    run_service = gcp.runtimeconfig.get_variable(name="prod-variables/hostname",
        parent="my-service")
    ```

    :param str name: The name of the Runtime Configurator configuration.
    :param str parent: The name of the RuntimeConfig resource containing this variable.

    - - -

    :param str project: The project in which the resource belongs. If it
        is not provided, the provider project is used.
    """
    # Marshal the arguments and invoke the provider's getVariable function.
    __args__ = dict()
    __args__['name'] = METHOD_NAME
    __args__['parent'] = parent
    __args__['project'] = project
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('gcp:runtimeconfig/getVariable:getVariable', __args__, opts=opts, typ=GetVariableResult).value
    return AwaitableGetVariableResult(
        id=pulumi.get(__ret__, 'id'),
        METHOD_NAME=pulumi.get(__ret__, 'name'),
        parent=pulumi.get(__ret__, 'parent'),
        project=pulumi.get(__ret__, 'project'),
        text=pulumi.get(__ret__, 'text'),
        update_time=pulumi.get(__ret__, 'update_time'),
        value=pulumi.get(__ret__, 'value'))
@_utilities.lift_output_func(get_variable)
def get_variable_output(METHOD_NAME: Optional[pulumi.Input[str]] = None,
                        parent: Optional[pulumi.Input[str]] = None,
                        project: Optional[pulumi.Input[Optional[str]]] = None,
                        opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetVariableResult]:
    """
    ## Example Usage

    ```python
    import pulumi
    import pulumi_gcp as gcp

    run_service = gcp.runtimeconfig.get_variable(name="prod-variables/hostname",
        parent="my-service")
    ```

    :param str name: The name of the Runtime Configurator configuration.
    :param str parent: The name of the RuntimeConfig resource containing this variable.

    - - -

    :param str project: The project in which the resource belongs. If it
        is not provided, the provider project is used.
    """
    # Body intentionally empty: lift_output_func delegates to get_variable.
    ...
|
1,205 |
method not found
|
# -*- test-case-name: twisted.web.test.test_soap -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
SOAP support for twisted.web.
Requires SOAPpy 0.10.1 or later.
Maintainer: Itamar Shtull-Trauring
Future plans:
SOAPContext support of some kind.
Pluggable method lookup policies.
"""
# SOAPpy
import SOAPpy # type: ignore[import]
from twisted.internet import defer
# twisted imports
from twisted.web import client, resource, server
class SOAPPublisher(resource.Resource):
    """Publish SOAP methods.

    By default, publish methods beginning with 'soap_'. If the method
    has an attribute 'useKeywords', it well get the arguments passed
    as keyword args.
    """

    isLeaf = 1

    # override to change the encoding used for responses
    encoding = "UTF-8"

    def lookupFunction(self, functionName):
        """Lookup published SOAP function.

        Override in subclasses. Default behaviour - publish methods
        starting with soap_.

        @return: callable or None if not found.
        """
        return getattr(self, "soap_%s" % functionName, None)

    def render(self, request):
        """Handle a SOAP command."""
        data = request.content.read()
        p, header, body, attrs = SOAPpy.parseSOAPRPC(data, 1, 1, 1)
        methodName, args, kwargs = p._name, p._aslist, p._asdict
        # deal with changes in SOAPpy 0.11
        if callable(args):
            args = args()
        if callable(kwargs):
            kwargs = kwargs()
        function = self.lookupFunction(methodName)
        if not function:
            self.METHOD_NAME(request, methodName)
            return server.NOT_DONE_YET
        else:
            if hasattr(function, "useKeywords"):
                # Keyword-style call: SOAP struct keys become keyword args.
                keywords = {}
                for k, v in kwargs.items():
                    keywords[str(k)] = v
                d = defer.maybeDeferred(function, **keywords)
            else:
                d = defer.maybeDeferred(function, *args)
        # Deferred result/failure is serialized and written asynchronously.
        d.addCallback(self._gotResult, request, methodName)
        d.addErrback(self._gotError, request, methodName)
        return server.NOT_DONE_YET

    def METHOD_NAME(self, request, methodName):
        # Send a client-side SOAP fault: the requested method is unknown.
        response = SOAPpy.buildSOAP(
            SOAPpy.faultType(
                "%s:Client" % SOAPpy.NS.ENV_T, "Method %s not found" % methodName
            ),
            encoding=self.encoding,
        )
        self._sendResponse(request, response, status=500)

    def _gotResult(self, result, request, methodName):
        # Wrap non-void results in the conventional <method>Response/Result
        # structure before serializing.
        if not isinstance(result, SOAPpy.voidType):
            result = {"Result": result}
        response = SOAPpy.buildSOAP(
            kw={"%sResponse" % methodName: result}, encoding=self.encoding
        )
        self._sendResponse(request, response)

    def _gotError(self, failure, request, methodName):
        # Pass through SOAP faults raised by the method; wrap any other
        # exception in a generic server-side fault.
        e = failure.value
        if isinstance(e, SOAPpy.faultType):
            fault = e
        else:
            fault = SOAPpy.faultType(
                "%s:Server" % SOAPpy.NS.ENV_T, "Method %s failed." % methodName
            )
        response = SOAPpy.buildSOAP(fault, encoding=self.encoding)
        self._sendResponse(request, response, status=500)

    def _sendResponse(self, request, response, status=200):
        # Write the serialized SOAP envelope with appropriate headers.
        request.setResponseCode(status)
        if self.encoding is not None:
            mimeType = 'text/xml; charset="%s"' % self.encoding
        else:
            mimeType = "text/xml"
        request.setHeader("Content-type", mimeType)
        request.setHeader("Content-length", str(len(response)))
        request.write(response)
        request.finish()
class Proxy:
    """A Proxy for making remote SOAP calls.

    Pass the URL of the remote SOAP server to the constructor.

    Use proxy.callRemote('foobar', 1, 2) to call remote method
    'foobar' with args 1 and 2, proxy.callRemote('foobar', x=1)
    will call foobar with named argument 'x'.
    """

    # at some point this should have encoding etc. kwargs
    def __init__(self, url, namespace=None, header=None):
        self.url = url
        self.namespace = namespace
        self.header = header

    def _cbGotResult(self, result):
        # Parse the SOAP response and unwrap the payload where possible.
        result = SOAPpy.parseSOAPRPC(result)
        if hasattr(result, "Result"):
            return result.Result
        elif len(result) == 1:
            ## SOAPpy 0.11.6 wraps the return results in a containing structure.
            ## This check added to make Proxy behaviour emulate SOAPProxy, which
            ## flattens the structure by default.
            ## This behaviour is OK because even singleton lists are wrapped in
            ## another singleton structType, which is almost always useless.
            return result[0]
        else:
            return result

    def callRemote(self, method, *args, **kwargs):
        # Serialize the call, POST it, and unwrap the response payload.
        payload = SOAPpy.buildSOAP(
            args=args,
            kw=kwargs,
            method=method,
            header=self.header,
            namespace=self.namespace,
        )
        return client.getPage(
            self.url,
            postdata=payload,
            method="POST",
            headers={"content-type": "text/xml", "SOAPAction": method},
        ).addCallback(self._cbGotResult)
|
1,206 |
test function import quantize fx
|
# Owner(s): ["oncall: quantization"]
from .common import AOMigrationTestCase
class TestAOMigrationQuantizationFx(AOMigrationTestCase):
    """Checks that FX-graph-mode quantization names remain importable
    from their legacy ``torch.quantization`` locations after the AO
    namespace migration."""

    def METHOD_NAME(self):
        function_list = [
            '_check_is_graph_module',
            '_swap_ff_with_fxff',
            '_fuse_fx',
            'QuantizationTracer',
            '_prepare_fx',
            '_prepare_standalone_module_fx',
            'fuse_fx',
            'Scope',
            'ScopeContextManager',
            'prepare_fx',
            'prepare_qat_fx',
            '_convert_fx',
            'convert_fx',
            '_convert_standalone_module_fx',
        ]
        self._test_function_import('quantize_fx', function_list)

    def test_function_import_fx(self):
        function_list = [
            'prepare',
            'convert',
            'fuse',
        ]
        self._test_function_import('fx', function_list)

    def test_function_import_fx_graph_module(self):
        function_list = [
            'FusedGraphModule',
            'ObservedGraphModule',
            '_is_observed_module',
            'ObservedStandaloneGraphModule',
            '_is_observed_standalone_module',
            'QuantizedGraphModule'
        ]
        self._test_function_import('fx.graph_module', function_list)

    def test_function_import_fx_pattern_utils(self):
        function_list = [
            'QuantizeHandler',
            '_register_fusion_pattern',
            'get_default_fusion_patterns',
            '_register_quant_pattern',
            'get_default_quant_patterns',
            'get_default_output_activation_post_process_map'
        ]
        self._test_function_import('fx.pattern_utils', function_list)

    def test_function_import_fx_equalize(self):
        function_list = [
            'reshape_scale',
            '_InputEqualizationObserver',
            '_WeightEqualizationObserver',
            'calculate_equalization_scale',
            'EqualizationQConfig',
            'input_equalization_observer',
            'weight_equalization_observer',
            'default_equalization_qconfig',
            'fused_module_supports_equalization',
            'nn_module_supports_equalization',
            'node_supports_equalization',
            'is_equalization_observer',
            'get_op_node_and_weight_eq_obs',
            'maybe_get_weight_eq_obs_node',
            'maybe_get_next_input_eq_obs',
            'maybe_get_next_equalization_scale',
            'scale_input_observer',
            'scale_weight_node',
            'scale_weight_functional',
            'clear_weight_quant_obs_node',
            'remove_node',
            'update_obs_for_equalization',
            'convert_eq_obs',
            '_convert_equalization_ref',
            'get_layer_sqnr_dict',
            'get_equalization_qconfig_dict'
        ]
        self._test_function_import('fx._equalize', function_list)

    def test_function_import_fx_quantization_patterns(self):
        function_list = [
            'QuantizeHandler',
            'BinaryOpQuantizeHandler',
            'CatQuantizeHandler',
            'ConvReluQuantizeHandler',
            'LinearReLUQuantizeHandler',
            'BatchNormQuantizeHandler',
            'EmbeddingQuantizeHandler',
            'RNNDynamicQuantizeHandler',
            'DefaultNodeQuantizeHandler',
            'FixedQParamsOpQuantizeHandler',
            'CopyNodeQuantizeHandler',
            'CustomModuleQuantizeHandler',
            'GeneralTensorShapeOpQuantizeHandler',
            'StandaloneModuleQuantizeHandler'
        ]
        # This module was renamed during the migration, so the new package
        # name is passed explicitly.
        self._test_function_import(
            'fx.quantization_patterns',
            function_list,
            new_package_name='fx.quantize_handler',
        )

    def test_function_import_fx_match_utils(self):
        function_list = [
            '_MatchResult',
            'MatchAllNode',
            '_is_match',
            '_find_matches'
        ]
        self._test_function_import('fx.match_utils', function_list)

    def test_function_import_fx_prepare(self):
        function_list = [
            'prepare'
        ]
        self._test_function_import('fx.prepare', function_list)

    def test_function_import_fx_convert(self):
        function_list = [
            'convert'
        ]
        self._test_function_import('fx.convert', function_list)

    def test_function_import_fx_fuse(self):
        function_list = ['fuse']
        self._test_function_import('fx.fuse', function_list)

    def test_function_import_fx_fusion_patterns(self):
        function_list = [
            'FuseHandler',
            'DefaultFuseHandler'
        ]
        self._test_function_import(
            'fx.fusion_patterns',
            function_list,
            new_package_name='fx.fuse_handler',
        )

    # we removed matching test for torch.quantization.fx.quantization_types
    # old: torch.quantization.fx.quantization_types
    # new: torch.ao.quantization.utils
    # both are valid, but we'll deprecate the old path in the future
    def test_function_import_fx_utils(self):
        function_list = [
            'get_custom_module_class_keys',
            'get_linear_prepack_op_for_dtype',
            'get_qconv_prepack_op',
            'get_new_attr_name_with_prefix',
            'graph_module_from_producer_nodes',
            'assert_and_get_unique_device',
            'create_getattr_from_value',
            'all_node_args_have_no_tensors',
            'get_non_observable_arg_indexes_and_types',
            'maybe_get_next_module'
        ]
        self._test_function_import('fx.utils', function_list)
|
1,207 |
convert spiketrains
|
# Copyright (c) 2017 The University of Manchester
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import quantities
import numpy as np
# needed as dealing with quantities
# pylint: disable=c-extension-no-member
def convert_analog_signal(signal_array, time_unit=quantities.ms):
    """
    Converts part of a NEO object into told spynnaker7 format.

    :param ~neo.core.AnalogSignal signal_array: Extended Quantities object
    :param quantities.unitquantity.UnitTime time_unit:
        Data time unit for time index
    :rtype: ~numpy.ndarray
    """
    channel_ids = signal_array.annotations["channel_names"]
    n_channels = len(channel_ids)
    # Skip the rescale when the data is already in the requested unit
    if time_unit == quantities.ms:
        time_points = signal_array.times.magnitude
    else:
        time_points = signal_array.times.rescale(time_unit).magnitude
    n_times = len(time_points)
    # One (id, time, value) row per channel per time point
    neuron_column = np.repeat(channel_ids, n_times)
    time_column = np.tile(time_points, n_channels)
    value_column = np.concatenate(
        [signal_array.magnitude[:, col] for col in range(n_channels)])
    return np.column_stack((neuron_column, time_column, value_column))
def convert_data(data, name, run=0):
    """
    Converts the data into a numpy array in the format ID, time, value.

    :param ~neo.core.Block data: Data as returned by a getData() call
    :param str name: Name of the data to be extracted.
        Same values as used in getData()
    :param int run: Zero based index of the run to extract data for
    :rtype: ~numpy.ndarray
    """
    n_segments = len(data.segments)
    if n_segments <= run:
        raise ValueError(
            f"Data only contains {n_segments} so unable to run {run}. "
            "Note run is the zero based index.")
    if name == "all":
        raise ValueError("Unable to convert all data in one go "
                         "as result would be comparing apples and oranges.")
    # Spikes have their own converter; anything else is an analog signal
    if name == "spikes":
        return convert_spikes(data, run)
    signal = data.segments[run].filter(name=name)[0]
    return convert_analog_signal(signal)
def convert_data_list(data, name, runs=None):
    """
    Converts the data into a list of numpy arrays in the format ID, time,
    value.

    :param ~neo.core.Block data: Data as returned by a getData() call
    :param str name: Name of the data to be extracted.
        Same values as used in getData()
    :param runs: List of Zero based index of the run to extract data for.
        Or `None` to extract all runs
    :type runs: list(int) or None
    :rtype: list(~numpy.ndarray)
    """
    # None selects every segment (run) in the block
    selected_runs = range(len(data.segments)) if runs is None else runs
    return [convert_data(data, name, run=one_run) for one_run in selected_runs]
def convert_v_list(data, runs=None):
    """
    Converts the voltage into a list of numpy arrays, one per segment (all
    runs), in the format ID, time, value.

    :param ~neo.core.Block data: The data to convert; it must have V data in it
    :param runs: List of Zero based index of the run to extract data for.
        Or `None` to extract all runs
    :type runs: list(int) or None
    :rtype: list(~numpy.ndarray)
    """
    return convert_data_list(data, "v", runs=runs)
def convert_gsyn_exc_list(data, runs=None):
    """
    Converts the gsyn_exc into a list of numpy arrays, one per segment (all
    runs), in the format ID, time, value.

    :param ~neo.core.Block data:
        The data to convert; it must have Gsyn_exc data in it
    :param runs: List of Zero based index of the run to extract data for.
        Or `None` to extract all runs
    :type runs: list(int) or None
    :rtype: list(~numpy.ndarray)
    """
    return convert_data_list(data, "gsyn_exc", runs=runs)
def convert_gsyn_inh_list(data, runs=None):
    """
    Converts the gsyn_inh into a list of numpy arrays, one per segment (all
    runs), in the format ID, time, value.

    :param ~neo.core.Block data:
        The data to convert; it must have Gsyn_inh data in it
    :param runs: List of Zero based index of the run to extract data for.
        Or `None` to extract all runs
    :type runs: list(int) or None
    :rtype: list(~numpy.ndarray)
    """
    return convert_data_list(data, "gsyn_inh", runs=runs)
def convert_gsyn(gsyn_exc, gsyn_inh):
    """
    Converts two neo objects into the spynnaker7 format.

    .. note::
        It is acceptable for both neo parameters to be the same object

    :param ~neo.core.Block gsyn_exc: neo with gsyn_exc data
    :param ~neo.core.Block gsyn_inh: neo with gsyn_inh data
    :rtype: ~numpy.ndarray
    :raises ValueError: If the two blocks disagree on neuron IDs or times
    """
    exc = gsyn_exc.segments[0].filter(name='gsyn_exc')[0]
    inh = gsyn_inh.segments[0].filter(name='gsyn_inh')[0]
    # Both signals must describe the same neurons at the same timestamps
    ids = exc.annotations["channel_names"]
    ids2 = inh.annotations["channel_names"]
    if len(ids) != len(ids2):
        raise ValueError(
            f"Found {len(ids)} neuron IDs in gsyn_exc "
            f"but {len(ids2)} in gsyn_inh")
    if not np.allclose(ids, ids2):
        raise ValueError("IDs in gsyn_exc and gsyn_inh do not match")
    times = exc.times.rescale(quantities.ms)
    times2 = inh.times.rescale(quantities.ms)
    if len(times) != len(times2):
        raise ValueError(
            f"Found {len(times)} times in gsyn_exc "
            f"but {len(times2)} in gsyn_inh")
    if not np.allclose(times, times2):
        raise ValueError("times in gsyn_exc and gsyn_inh do not match")
    # One (neuron, time, exc, inh) row per sample per neuron
    all_times = np.tile(times, len(ids))
    neurons = np.repeat(ids, len(times))
    idlist = list(range(len(ids)))
    exc_np = np.concatenate([exc[:, x] for x in idlist])
    inh_np = np.concatenate([inh[:, x] for x in idlist])
    return np.column_stack((neurons, all_times, exc_np, inh_np))
def METHOD_NAME(spiketrains):
    """
    Converts a list of spiketrains into spynnaker7 format.

    :param list(~neo.core.SpikeTrain) spiketrains: List of SpikeTrains
    :rtype: ~numpy.ndarray
    """
    if not spiketrains:
        # Keep the two-column shape even when there are no spikes
        return np.empty(shape=(0, 2))
    # One row per spike: [neuron id, spike time]
    neuron_ids = np.concatenate([
        np.repeat(train.annotations['source_index'], len(train))
        for train in spiketrains])
    spike_times = np.concatenate([train.magnitude for train in spiketrains])
    return np.column_stack((neuron_ids, spike_times))
def convert_spikes(neo, run=0):
    """
    Extracts the spikes for one run from a Neo Object.

    :param ~neo.core.Block neo: neo Object including Spike Data
    :param int run: Zero based index of the run to extract data for
    :rtype: ~numpy.ndarray
    :raises ValueError: If the requested run does not exist
    """
    if len(neo.segments) <= run:
        raise ValueError(
            f"Data only contains {len(neo.segments)} so unable to run {run}. "
            "Note run is the zero based index.")
    return METHOD_NAME(neo.segments[run].spiketrains)
def count_spiketrains(spiketrains):
    """
    Helper to count the total number of spikes over a list of spiketrains.

    :param list(~neo.core.SpikeTrain) spiketrains: List of SpikeTrains
    :return: Total number of spikes in all the spiketrains
    :rtype: int
    """
    return sum(len(train) for train in spiketrains)
def count_spikes(neo):
    """
    Help function to count the number of spikes in a list of spiketrains.

    Only counts run 0

    :param ~neo.core.Block neo: Neo Object which has spikes in it
    :return: The number of spikes in the first segment
    :rtype: int
    """
    return count_spiketrains(neo.segments[0].spiketrains)
|
1,208 |
default costing method
|
#################################################################################
# WaterTAP Copyright (c) 2020-2023, The Regents of the University of California,
# through Lawrence Berkeley National Laboratory, Oak Ridge National Laboratory,
# National Renewable Energy Laboratory, and National Energy Technology
# Laboratory (subject to receipt of any required approvals from the U.S. Dept.
# of Energy). All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and license
# information, respectively. These files are also available online at the URL
# "https://github.com/watertap-org/watertap/"
#################################################################################
"""
This module contains a zero-order representation of a UV-AOP unit
operation.
"""
import pyomo.environ as pyo
from pyomo.environ import units as pyunits, Var
from idaes.core import declare_process_block_class
from watertap.unit_models.zero_order.uv_zo import UVZOData
from watertap.unit_models.zero_order.aop_addition_zo import AOPAdditionMixin
# Some more information about this module
__author__ = "Adam Atia"
@declare_process_block_class("UVAOPZO")
class UVAOPZOData(UVZOData, AOPAdditionMixin):
    """
    Zero-Order model for a UV-AOP unit operation.

    Extends the zero-order UV model with oxidant (e.g. hydrogen peroxide)
    dosing and a combined UV + AOP capital costing method.
    """
    CONFIG = UVZOData.CONFIG()
    def build(self):
        """Build the UV base model, then add oxidant dosing variables."""
        super().build()
        self._tech_type = "uv_aop"
        # Oxidant dose applied to the treated stream; fixed from the database
        # via _fixed_perf_vars below
        self.oxidant_dose = Var(
            self.flowsheet().time, units=pyunits.mg / pyunits.L, doc="Oxidant dosage"
        )
        # Oxidant mass flow, derived from dose and volumetric inlet flow
        self.chemical_flow_mass = Var(
            self.flowsheet().time,
            units=pyunits.kg / pyunits.s,
            bounds=(0, None),
            doc="Mass flow rate of oxidant solution",
        )
        self._fixed_perf_vars.append(self.oxidant_dose)
        @self.Constraint(self.flowsheet().time, doc="Chemical mass flow constraint")
        def chemical_flow_mass_constraint(b, t):
            # chemical_flow_mass[t] = oxidant_dose[t] * inlet volumetric flow,
            # converted to kg/s
            return b.chemical_flow_mass[t] == pyunits.convert(
                b.oxidant_dose[t] * b.properties_in[t].flow_vol,
                to_units=pyunits.kg / pyunits.s,
            )
        self._perf_var_dict["Oxidant Dosage (mg/L)"] = self.oxidant_dose
        self._perf_var_dict["Oxidant Flow (kg/s)"] = self.chemical_flow_mass
    @property
    def METHOD_NAME(self):
        # Costing method picked up by the WaterTAP costing framework
        return self.cost_uv_aop
    @staticmethod
    def cost_uv_aop(blk):
        """
        Cost a UV-AOP unit: UV capital cost plus AOP (oxidant addition)
        capital cost, with electricity and oxidant registered as flows.

        :param blk: costing block attached to a UVAOPZO unit model
        """
        t0 = blk.flowsheet().time.first()
        # Add cost variable and constraint
        blk.capital_cost = pyo.Var(
            initialize=1,
            units=blk.config.flowsheet_costing_block.base_currency,
            bounds=(0, None),
            doc="Capital cost of unit operation",
        )
        # Get parameter dict from database
        parameter_dict = blk.unit_model.config.database.get_unit_operation_parameters(
            blk.unit_model._tech_type, subtype=blk.unit_model.config.process_subtype
        )
        # Get costing parameter sub-block for this technology
        A, B, C, D = blk.unit_model._get_tech_parameters(
            blk,
            parameter_dict,
            blk.unit_model.config.process_subtype,
            [
                "reactor_cost",
                "lamp_cost",
                "aop_capital_a_parameter",
                "aop_capital_b_parameter",
            ],
        )
        # Total capital = UV portion (A, B) + AOP portion (C, D)
        expr = blk.unit_model._get_uv_capital_cost(blk, A, B)
        expr += blk.unit_model._get_aop_capital_cost(blk, C, D)
        # Determine if a costing factor is required
        blk.unit_model._add_cost_factor(
            blk, parameter_dict["capital_cost"]["cost_factor"]
        )
        blk.capital_cost_constraint = pyo.Constraint(
            expr=blk.capital_cost == blk.cost_factor * expr
        )
        # Register flows
        blk.config.flowsheet_costing_block.cost_flow(
            blk.unit_model.electricity[t0], "electricity"
        )
        # TODO: Check whether chemical flow cost was accounted for originally
        # and if should be in case study verification
        blk.config.flowsheet_costing_block.cost_flow(
            blk.unit_model.chemical_flow_mass[t0], "hydrogen_peroxide"
        )
|
1,209 |
get n params
|
# ***************************************************************************
# * Copyright (c) 2020 three_d *
# * Copyright (c) 2020 Eliud Cabrera Castillo <[email protected]> *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
"""Provides various functions to work with arrays.
One of the functions is used to create a `twisted array` object.
See `draftobjects/twistedarray.py`.
This array was developed in order to build a `twisted bridge` object.
See https://forum.freecad.org/viewtopic.php?f=23&t=49617
"""
## @package geo_arrays
# \ingroup draftgeoutils
# \brief Provides various functions to work with arrays.
import lazy_loader.lazy_loader as lz
import FreeCAD as App
from draftutils.messages import _msg
# Delay import of module until first use because it is heavy
Part = lz.LazyLoader("Part", globals(), "Part")
## \addtogroup draftgeoutils
# @{
def print_places(places, title="Places"):
    """Print a separator, a title, then one line per placement."""
    _msg("-" * 12)
    _msg(title)
    for place in places:
        _msg("{}".format(place))
def get_init_values(path, count=6):
    """Set values needed to create the array.

    Returns the reference plane normal, the first edge of the path,
    the spacing between consecutive elements along the edge, and the
    rotation increment (degrees) between elements.
    """
    norm = App.Vector(0, 0, 1)
    # Currently this works with a sketch that has a single edge.
    # Here we need a more general function to extract all edges from a shape,
    # so that the array uses all of them.
    edge = path.Shape.Edges[0]
    edge_length = edge.Length
    # count placements means count - 1 intervals along the edge
    step = edge_length / (count - 1)
    inc = 360 / (count - 1)
    return norm, edge, step, inc
def METHOD_NAME(edge, number, step, norm):
    """Get the parameters needed in each iteration.

    Returns the point on the edge at arc length ``number * step``, the
    unit tangent at that point, and a rotation built from the local
    (binormal, tangent, normal) frame.
    """
    parameter = edge.getParameterByLength(number * step)
    v0 = edge.valueAt(parameter)
    tan = edge.tangentAt(parameter).normalize()
    # Binormal completes the local frame from the tangent and plane normal
    binorm = tan.cross(norm).normalize()
    rot = App.Rotation(binorm, tan, norm)
    return v0, tan, rot
def get_twisted_placements(path, count=15, rot_factor=0.25):
    """Get the placements of the twisted array elements.

    Returns the list of placements plus the raw (point, tangent, angle,
    rotation) tuples used to build each one.
    """
    norm, edge, step, inc = get_init_values(path, count)
    places = []
    params = []
    accumulated = 0
    for index in range(count):
        v0, tan, rot = METHOD_NAME(edge, index, step, norm)
        # The twist angle grows along the path, scaled by rot_factor
        angle = accumulated * rot_factor
        placement = App.Placement(v0, tan, angle)
        placement.Rotation = placement.Rotation * rot
        places.append(placement)
        params.append((v0, tan, angle, rot))
        accumulated += inc
    return places, params
def get_twisted_array_shape(base, path, count=15, rot_factor=0.25):
    """Get the twisted array shape as a compound.

    Copies ``base`` at each twisted placement along ``path`` and returns
    the copies bundled into a single compound shape.
    """
    places, _ = get_twisted_placements(path,
                                       count=count,
                                       rot_factor=rot_factor)
    shapes, _ = create_frames(base, places)
    shape = Part.makeCompound(shapes)
    return shape
def get_twisted_bridge_shape(base, path, count=15, rot_factor=0.25,
                             width=100,
                             thickness=10):
    """Get the twisted bridge array shape as a compound.

    Combines the twisted frames, a swept tunnel through the frame
    profiles, and a flat walkway into one compound shape.
    """
    compound = list()
    places, _ = get_twisted_placements(path,
                                       count=count,
                                       rot_factor=rot_factor)
    # print_places(places)
    shapes, profiles = create_frames(base, places)
    compound.extend(shapes)
    # Sweep a surface through the last wire of every frame
    tunnel = make_tunnel(path, profiles)
    compound.append(tunnel)
    # size = base.Shape.Wires[-1].BoundBox.XLength * 0.9
    # size = int(size)
    # thickness = size/12.0
    walkway = make_walkway(path, width, thickness)
    compound.append(walkway)
    shape = Part.makeCompound(compound)
    return shape
def create_frames(obj, places):
    """Copy the object's shape to every placement.

    Returns the copied shapes and, for each copy, its last wire, which
    is later used as a profile for sweeping the tunnel.
    """
    wire_count = len(obj.Shape.Wires)
    frames = []
    profiles = []
    for placement in places:
        shape_copy = obj.Shape.copy()
        shape_copy.Placement = placement
        frames.append(shape_copy)
        profiles.append(shape_copy.Wires[wire_count - 1])
    return frames, profiles
def make_tunnel(path, profiles):
    """Create the tunnel shape.

    Sweeps a pipe shell through the given profiles along the first edge
    of the path.
    """
    edge = path.Shape.Edges[0]
    wire = Part.Wire(edge)
    sweep = wire.makePipeShell(profiles)
    return sweep
def make_walkway(path, width=100, thickness=10):
    """Construct the walkway of the twisted bridge array.

    Builds a rectangular face oriented to the path near its start and
    sweeps it along the path's first edge.
    """
    spine = path.Shape.Edges[0]
    half_size = width/2
    offset_height = thickness
    norm1 = App.Vector(0, 0, 1)
    # NOTE(review): parameter 1 is a point near the start of the edge's
    # parameter range, not necessarily its end — confirm intended anchor.
    v1 = spine.valueAt(1)
    tan1 = spine.tangentAt(1).normalize()
    binorm1 = tan1.cross(norm1)
    place = App.Placement()
    place.Rotation = App.Rotation(binorm1, norm1, tan1)
    # Drop the walkway below the path by three offsets
    place.move(v1 - App.Vector(0, 0, 3 * offset_height))
    plane = Part.makePlane(width, thickness,
                           App.Vector(-half_size, -2 * offset_height, 0))
    face = Part.Face(plane)
    face.Placement = place.multiply(face.Placement)
    wire = Part.Wire(spine)
    sweep = wire.makePipe(face)
    return sweep
## @}
|
1,210 |
test gather purloviatest pgd
|
import pytest
from ark.gathering import gather_dcsc_properties
from ark.types import PrimalDinoCharacter, PrimalDinoStatusComponent, PrimalGameData
from ue.gathering import gather_properties
from ue.hierarchy import inherits_from
from ue.proxy import UEProxyStructure
from .common import * # noqa: F401,F403 # needed to pick up all fixtures
from .common import DEINO_CHR, DODO_AB_CHR, DODO_CHR, PTM_DCSC_CONFLICT_CHR, TEST_PGD_CLS, TROODON_CHR, X_DRAGON_CHR, ScanLoadFn
# pylint: disable=singleton-comparison
@pytest.mark.requires_game
def METHOD_NAME(scan_and_load):
    """Gathering the test PGD asset yields a PrimalGameData proxy."""
    export = scan_and_load(TEST_PGD_CLS)
    pgd: PrimalGameData = gather_properties(export)
    assert isinstance(pgd, UEProxyStructure)
    assert isinstance(pgd, PrimalGameData)
    assert str(pgd.ModName[0]) == 'PurloviaTEST'
    assert str(pgd.ModDescription[0]) == 'Test mod used for Purlovia'
@pytest.mark.requires_game
def test_gather_dodo(scan_and_load):
    """Gathering the Dodo character yields a PrimalDinoCharacter proxy."""
    dodo = scan_and_load(DODO_CHR)
    dodo_chr: PrimalDinoCharacter = gather_properties(dodo)
    assert isinstance(dodo_chr, UEProxyStructure)
    assert isinstance(dodo_chr, PrimalDinoCharacter)
    assert str(dodo_chr.DescriptiveName[0]) == 'Dodo'
@pytest.mark.requires_game
def test_gather_ab_dodo(scan_and_load):
    """The Aberrant Dodo inherits from Dodo and overrides its name."""
    dodo_ab = scan_and_load(DODO_AB_CHR)
    assert inherits_from(dodo_ab, DODO_CHR)
    dodo_ab_chr: PrimalDinoCharacter = gather_properties(dodo_ab)
    assert isinstance(dodo_ab_chr, UEProxyStructure)
    assert isinstance(dodo_ab_chr, PrimalDinoCharacter)
    assert str(dodo_ab_chr.DescriptiveName[0]) == 'Aberrant Dodo'
@pytest.mark.requires_game
def test_gather_dodo_dcsc(scan_and_load):
    """DCSC gathering merges character-level and DCSC-level stat values."""
    dodo = scan_and_load(DODO_CHR)
    dodo_dcsc = gather_dcsc_properties(dodo)
    assert isinstance(dodo_dcsc, UEProxyStructure)
    assert isinstance(dodo_dcsc, PrimalDinoStatusComponent)
    assert dodo_dcsc.MaxStatusValues[0] == 40  # only in Dodo chr
    assert dodo_dcsc.MaxStatusValues[3] == 150  # only in DCSC asset
    assert dodo_dcsc.MaxStatusValues[7] == 50  # in DCSC, then overridden by Dodo
@pytest.mark.requires_game
def test_gather_troodon_dcsc(scan_and_load):
    """Species-specific DCSC overrides the default DCSC for Troodon."""
    chr_export = scan_and_load(TROODON_CHR)
    props = gather_dcsc_properties(chr_export)
    assert isinstance(props, UEProxyStructure)
    assert isinstance(props, PrimalDinoStatusComponent)
    assert props.MaxStatusValues[0] == 200  # only in Troodon DCSC asset
    assert props.MaxStatusValues[4] == 200  # in Troodon chr asset
    assert props.MaxStatusValues[7] == 140  # in DCSC, overridden in Troodon DCSC
@pytest.mark.requires_game
def test_gather_troodon_dcsc_alt(scan_and_load: ScanLoadFn):
    """With alt=True, character-asset overrides are skipped."""
    chr_export = scan_and_load(TROODON_CHR)
    props = gather_dcsc_properties(chr_export, alt=True)
    assert isinstance(props, UEProxyStructure)
    assert isinstance(props, PrimalDinoStatusComponent)
    assert props.MaxStatusValues[0] == 200  # only in Troodon DCSC asset
    assert props.MaxStatusValues[4] == 100  # was 200 in Troodon chr asset, skipped due to alt=True
    assert props.MaxStatusValues[7] == 140  # in DCSC, overridden in Troodon DCSC
@pytest.mark.requires_game
def test_gather_deino(scan_and_load: ScanLoadFn):
    """Lower-priority species DCSCs lose to inherited higher-priority ones."""
    # Deino has a species-specific DCSC with a lower priority than the one it inherits
    chr_export = scan_and_load(DEINO_CHR)
    props = gather_dcsc_properties(chr_export)
    assert isinstance(props, UEProxyStructure)
    assert isinstance(props, PrimalDinoStatusComponent)
    assert props.MaxStatusValues[0] == 200  # from Raptor DCSC because Deino DCSC priority is -1
    assert props.MaxStatusValues[1] == 150  # from Raptor DCSC because Deino DCSC priority is -1
    assert props.MaxStatusValues[3] == 150  # from default DCSC
@pytest.mark.requires_game
def test_gather_dragon_boss(scan_and_load: ScanLoadFn):
    """The higher-priority of DragonBoss's two DCSCs wins."""
    # DragonBoss has two DCSCs, one with a higher priority
    # NOTE(review): this loads X_DRAGON_CHR although the comment describes
    # DragonBoss — confirm the intended asset constant.
    chr_export = scan_and_load(X_DRAGON_CHR)
    props = gather_dcsc_properties(chr_export)
    assert isinstance(props, UEProxyStructure)
    assert isinstance(props, PrimalDinoStatusComponent)
    assert props.bCanSuffocate[0] == False
@pytest.mark.requires_game
def test_gather_x_dragon(scan_and_load: ScanLoadFn):
    """X-Dragon inherits DragonBoss's DCSC pair with the same result."""
    # X-Dragon inherits the same two DCSCs from DragonBoss
    chr_export = scan_and_load(X_DRAGON_CHR)
    props = gather_dcsc_properties(chr_export)
    assert isinstance(props, UEProxyStructure)
    assert isinstance(props, PrimalDinoStatusComponent)
    assert props.bCanSuffocate[0] == False
@pytest.mark.requires_game
def test_gather_dcsc_conflict(scan_and_load: ScanLoadFn):
    """DCSCs must not be combined when a species doesn't use an inherited one."""
    # Species inherits from Quetz but doesn't use that DCSC
    # Used to verify that DCSCs shouldn't be combined if no override exists for a property
    chr_export = scan_and_load(PTM_DCSC_CONFLICT_CHR)
    props = gather_dcsc_properties(chr_export)
    assert isinstance(props, UEProxyStructure)
    assert isinstance(props, PrimalDinoStatusComponent)
    assert props.MaxStatusValues[0] == 100  # from PTM_DCSC and not DCSC_Quetz (1200)
    assert props.MaxStatusValues[1] == 100  # from PTM_DCSC and not DCSC_Quetz (800)
    assert props.MaxStatusValues[2] == 100  # from PTM_DCSC and not DCSC_Quetz (1850)
    assert props.MaxStatusValues[4] == 100  # from PTM_DCSC and not DCSC_Quetz (1200)
    assert props.TamedBaseHealthMultiplier[0] == 1  # from PTM_DCSC and not DCSC_Quetz (0.85)
|
1,211 |
test new module success
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2023, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations # isort:skip
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import os
import sys
from os.path import abspath, dirname
from types import ModuleType
# Module under test
import bokeh.application.handlers.code_runner as bahc # isort:skip
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
class TestCodeRunner:
    """Tests for bokeh.application.handlers.code_runner.CodeRunner."""
    # Public methods ----------------------------------------------------------
    def test_init(self) -> None:
        # A fresh runner holds the source and path, has no errors, and has not run
        cr = bahc.CodeRunner("# test", "path", [])
        assert cr.failed is False
        assert cr.doc is None
        assert cr.error is None
        assert cr.error_detail is None
        assert cr.ran is False
        assert cr.source == "# test"
        assert cr.path == "path"
    def test_syntax_error_init(self) -> None:
        # Unparseable source marks the runner failed at construction time
        cr = bahc.CodeRunner("This is a syntax error", "path", [])
        assert cr.failed is True
        assert cr.error is not None
        assert cr.error_detail is not None
        assert "Invalid syntax in" in cr.error
    def METHOD_NAME(self) -> None:
        # new_module creates a uniquely-named module for a plain script path
        cr = bahc.CodeRunner("# test", "path", [])
        m = cr.new_module()
        assert isinstance(m, ModuleType)
        assert m.__dict__['__name__'].startswith('bokeh_app_')
        assert m.__dict__['__file__'] == abspath("path")
        assert m.__dict__['__package__'] is None
    def test_new_module_initpy(self) -> None:
        # An __init__.py path makes the module its own package
        cr = bahc.CodeRunner("# test", "/foo/__init__.py", [])
        m = cr.new_module()
        assert isinstance(m, ModuleType)
        assert m.__dict__['__name__'].startswith('bokeh_app_')
        assert m.__dict__['__file__'].endswith("__init__.py")
        assert m.__dict__['__package__'] == m.__dict__['__name__']
    def test_new_module_package(self) -> None:
        # A supplied package sets the new module's __package__
        cr = bahc.CodeRunner("# test", "/foo/__init__.py", [])
        package = cr.new_module()
        cr = bahc.CodeRunner("# test", "path", [], package=package)
        m = cr.new_module()
        assert isinstance(m, ModuleType)
        assert m.__dict__['__name__'].startswith('bokeh_app_')
        assert m.__dict__['__file__'] == abspath("path")
        assert m.__dict__['__package__'] == package.__dict__["__name__"]
    def test_new_module_resets_run_errors(self) -> None:
        # A transient (run-time) failure does not block creating a new module
        cr = bahc.CodeRunner("# test", "path", [])
        cr._failed = True
        m = cr.new_module()
        assert isinstance(m, ModuleType)
        assert m.__dict__['__name__'].startswith('bokeh_app_')
        assert m.__dict__['__file__'] == abspath("path")
    def test_new_module_returns_None_for_permanent_errors(self) -> None:
        # Syntax errors are permanent, so no module can be produced
        cr = bahc.CodeRunner("This is a syntax error", "path", [])
        assert cr.failed is True
        m = cr.new_module()
        assert m is None
    def test_reset_run_errors(self) -> None:
        cr = bahc.CodeRunner("# test", "path", [])
        cr._failed = True
        cr._error = "error"
        cr._error_detail = "detail"
        cr.reset_run_errors()
        assert cr.failed is False
        assert cr.error is None
        assert cr.error_detail is None
    def test_reset_run_errors_leaves_permanent_errors(self) -> None:
        # reset_run_errors must not clear a construction-time syntax error
        cr = bahc.CodeRunner("This is a syntax error", "path", [])
        cr._failed = True
        cr.reset_run_errors()
        assert cr.failed is True
        assert cr.error is not None
        assert cr.error_detail is not None
    def test_run_sets_ran(self) -> None:
        cr = bahc.CodeRunner("# test", "path", [])
        m = cr.new_module()
        assert not cr.ran
        cr.run(m, lambda: None)
        assert cr.ran
    def test_run_runs_post_check(self) -> None:
        # The post-check callback is invoked after a successful run
        cr = bahc.CodeRunner("# test", "path", [])
        m = cr.new_module()
        assert not cr.ran
        result = {}
        def post_check():
            result['ran'] = True
        cr.run(m, post_check)
        assert cr.ran
        assert result == dict(ran=True)
    def test_run_fixups_argv(self) -> None:
        # During run, sys.argv is [path, *argv] as seen from the executed code
        cr = bahc.CodeRunner("import sys; argv = list(sys.argv)", "path", ["foo", "10"])
        assert not cr.ran
        m = cr.new_module()
        cr.run(m, lambda: None)
        assert m.__dict__['argv'] == ["path", "foo", "10"]
    def test_run_fixups_path(self) -> None:
        # During run, the script's directory is prepended to sys.path
        cr = bahc.CodeRunner("import sys; path = list(sys.path)", "/dir/to/path", ["foo", "10"])
        assert not cr.ran
        m = cr.new_module()
        cr.run(m, lambda: None)
        assert m.__dict__['path'][0] == dirname("/dir/to/path")
        assert m.__dict__['path'][1:] == sys.path
    def test_run_restores_cwd(self) -> None:
        # Directory changes made by the script do not leak out of run()
        old_cwd = os.getcwd()
        cr = bahc.CodeRunner("import os; os.chdir('/')", "path", ["foo", "10"])
        assert not cr.ran
        m = cr.new_module()
        cr.run(m, lambda: None)
        assert os.getcwd() == old_cwd
    def test_run_restores_argv(self) -> None:
        old_argv = list(sys.argv)
        cr = bahc.CodeRunner("# test", "path", ["foo", "10"])
        assert not cr.ran
        m = cr.new_module()
        cr.run(m, lambda: None)
        assert sys.argv == old_argv
    def test_run_restores_path(self) -> None:
        old_path = list(sys.path)
        cr = bahc.CodeRunner("# test", "path", ["foo", "10"])
        assert not cr.ran
        m = cr.new_module()
        cr.run(m, lambda: None)
        assert sys.path == old_path
    def test_doc(self) -> None:
        # The module docstring is exposed via the runner's doc property
        cr = bahc.CodeRunner("'''some docstring\n\nfoo bar'''", "path", [])
        assert cr.failed is False
        assert cr.doc == "some docstring\n\nfoo bar"
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
1,212 |
render
|
# -*- coding: utf-8 -*-
# This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# Standard Library
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import logging
#
# You should have received a copy of the GNU Affero General Public License
import uuid
from itertools import chain
# Django
from django.forms import fields
from django.forms.widgets import (
CheckboxInput,
CheckboxSelectMultiple,
DateInput,
Select,
SelectMultiple,
TextInput,
)
from django.utils.encoding import force_str
from django.utils.html import (
conditional_escape,
escape,
)
from django.utils.safestring import mark_safe
from django.utils.translation import gettext as _
logger = logging.getLogger(__name__)
#
# Date and time related fields
#
class Html5DateInput(DateInput):
    """
    Date input widget rendered with an HTML5 ``type="date"`` element.
    """
    template_name = 'forms/html5_date.html'
    input_type = 'date'

    def get_context(self, name, value, attrs):
        """Expose the raw value to the template as ``orig_value``.

        Django's default behaviour converts the value to a string, after
        which it can no longer be formatted as a date, so the original
        object is passed along unchanged.
        """
        context = super().get_context(name, value, attrs)
        context['widget']['orig_value'] = value
        return context
class Html5FormDateField(fields.DateField):
    """
    HTML5 form date field

    Regular DateField that renders through Html5DateInput.
    """
    widget = Html5DateInput
class Html5TimeInput(TextInput):
    """
    Custom Input class that is rendered with an HTML5 type="time"

    This is specially useful in mobile devices and not available
    with older versions of django.
    """
    # Only the input type changes; all other TextInput behaviour is inherited
    input_type = 'time'
class Html5FormTimeField(fields.TimeField):
    """
    HTML5 form time field

    Regular TimeField that renders through Html5TimeInput.
    """
    widget = Html5TimeInput
#
# Number related fields
#
class Html5NumberInput(TextInput):
    """
    Custom Input class that is rendered with an HTML5 type="number"

    This is specially useful in mobile devices and not available
    with older versions of django.
    """
    # Only the input type changes; all other TextInput behaviour is inherited
    input_type = 'number'
#
# Others
#
class ExerciseAjaxSelect(SelectMultiple):
    """
    Custom widget that allows to select exercises from an autocompleter

    This is basically a modified MultipleSelect widget
    """

    def METHOD_NAME(self, name, value, attrs=None, choices=(), renderer=None):
        """Render the search box plus the currently selected exercises."""
        selected = [] if value is None else value
        fragments = [
            '<div>',
            '<input type="text" id="exercise-search" class="form-control">',
            '</div>',
            '<div id="exercise-search-log">',
        ]
        rendered_options = self.render_options(choices, selected)
        if rendered_options:
            fragments.append(rendered_options)
        fragments.append('</div>')
        return mark_safe('\n'.join(fragments))

    def render_options(self, choices, selected_choices):
        """Render every option; unselected options contribute empty strings."""
        # Normalize to strings.
        selected = set(force_str(v) for v in selected_choices)
        rendered = [
            self.render_option(selected, option_value, option_label)
            for option_value, option_label in chain(self.choices, choices)
        ]
        return '\n'.join(rendered)

    def render_option(self, selected_choices, option_value, option_label):
        """Render one selected exercise entry with its delete icon."""
        option_value = force_str(option_value)
        if option_value not in selected_choices:
            return ''
        return """
            <div id="a%(div_id)s" class="ajax-exercise-select">
                <a href="#">
                <img src="/static/images/icons/status-off.svg"
                width="14"
                height="14"
                alt="Delete">
                </a> %(value)s
                <input type="hidden" name="exercises" value="%(id)s">
            </div>
            """ % {
            'value': conditional_escape(force_str(option_label)),
            'id': escape(option_value),
            'div_id': uuid.uuid4()
        }
class CheckboxChoiceInputTranslated(CheckboxInput):
    """
    Overwritten CheckboxInput

    This only translated the text for the select widgets
    """
    input_type = 'checkbox'
    def __init__(self, name, value, attrs, choice, index):
        # Translate only the human-readable half of the (value, label) tuple
        choice = (choice[0], _(choice[1]))
        # NOTE(review): this __init__ signature matches Django's removed
        # ChoiceInput API, not django.forms.widgets.CheckboxInput —
        # confirm against the Django version in use.
        super(CheckboxChoiceInputTranslated, self).__init__(name, value, attrs, choice, index)
class CheckboxChoiceInputTranslatedOriginal(CheckboxInput):
    """
    Overwritten CheckboxInput

    This only translated the text for the select widgets, showing the original
    string as well.
    """
    input_type = 'checkbox'
    def __init__(self, name, value, attrs, choice, index):
        # Append the translation to the original label only when they differ
        if _(choice[1]) != choice[1]:
            choice = (choice[0], "{0} ({1})".format(choice[1], _(choice[1])))
        else:
            choice = (choice[0], _(choice[1]))
        # NOTE(review): this __init__ signature matches Django's removed
        # ChoiceInput API — confirm against the Django version in use.
        super(CheckboxChoiceInputTranslatedOriginal,
              self).__init__(name, value, attrs, choice, index)
class CheckboxFieldRendererTranslated(CheckboxSelectMultiple):
    # Checkbox group whose labels are rendered translated
    choice_input_class = CheckboxChoiceInputTranslated
class CheckboxFieldRendererTranslatedOriginal(CheckboxSelectMultiple):
    # Checkbox group whose labels show the original string plus translation
    choice_input_class = CheckboxChoiceInputTranslatedOriginal
class BootstrapSelectMultiple(CheckboxSelectMultiple):
    # Placeholder subclass; custom renderer is currently disabled (see below)
    pass
    # renderer = CheckboxBootstrapRenderer
class BootstrapSelectMultipleTranslatedOriginal(CheckboxSelectMultiple):
    # Placeholder subclass; custom renderer is currently disabled (see below)
    pass
    # renderer = CheckboxBootstrapRendererTranslatedOriginal
class TranslatedSelectMultiple(BootstrapSelectMultiple):
    """
    A SelectMultiple widget that translates the options
    """
    pass
class TranslatedOriginalSelectMultiple(BootstrapSelectMultipleTranslatedOriginal):
    """
    A SelectMultiple widget that translates the options, showing the original
    string as well. This is currently only used in the muscle list, where the
    translated muscles as well as the latin names are shown.
    """
    pass
class TranslatedSelect(Select):
    """
    A Select widget that translates the options
    """
    def render_option(self, selected_choices, option_value, option_label):
        # NOTE(review): Select.render_option was removed in newer Django
        # versions (template-based rendering) — confirm this hook is still
        # invoked by the Django version in use.
        return super(TranslatedSelect,
                     self).render_option(selected_choices, option_value, _(option_label))
|
1,213 |
update settings for use of model based
|
# SPDX-FileCopyrightText: 2021 Division of Intelligent Medical Systems, DKFZ
# SPDX-FileCopyrightText: 2021 Janek Groehl
# SPDX-License-Identifier: MIT
import numpy as np
from simpa.core.device_digital_twins import DetectionGeometryBase
from simpa.utils import Settings, Tags
class PlanarArrayDetectionGeometry(DetectionGeometryBase):
    """
    This class represents a digital twin of an ultrasound detection device
    with a planar detection geometry. The origin for this device is the center of the planar array.
    """
    def __init__(self, pitch_mm=0.5,
                 number_detector_elements_x=100,
                 number_detector_elements_y=100,
                 detector_element_width_mm=0.24,
                 detector_element_length_mm=0.5,
                 center_frequency_hz=3.96e6,
                 bandwidth_percent=55,
                 sampling_frequency_mhz=40,
                 device_position_mm: np.ndarray = None,
                 field_of_view_extent_mm: np.ndarray = None):
        """
        :param pitch_mm: Distance between the centers of neighbouring detector elements in mm.
        :param number_detector_elements_x: Number of elements along the x axis.
        :param number_detector_elements_y: Number of elements along the y axis.
        :param detector_element_width_mm: Width of a single detector element in mm.
        :param detector_element_length_mm: Length of a single detector element in mm.
        :param center_frequency_hz: Center frequency of the detector elements in Hz.
        :param bandwidth_percent: Bandwidth of the detector elements in percent.
        :param sampling_frequency_mhz: Sampling frequency in MHz.
        :param device_position_mm: Center of the planar array.
        :param field_of_view_extent_mm: Extent of the field of view; defaults to the
            array footprint in x/y and 0-100 mm in z.
        """
        if field_of_view_extent_mm is None:
            # Default FOV spans the full array aperture laterally and 100 mm in depth.
            field_of_view_extent_mm = np.asarray([-number_detector_elements_x * pitch_mm / 2,
                                                  number_detector_elements_x * pitch_mm / 2,
                                                  -number_detector_elements_y * pitch_mm / 2,
                                                  number_detector_elements_y * pitch_mm / 2,
                                                  0, 100])
        super(PlanarArrayDetectionGeometry, self).__init__(
            number_detector_elements=number_detector_elements_x * number_detector_elements_y,
            detector_element_width_mm=detector_element_width_mm,
            detector_element_length_mm=detector_element_length_mm,
            center_frequency_hz=center_frequency_hz,
            bandwidth_percent=bandwidth_percent,
            sampling_frequency_mhz=sampling_frequency_mhz,
            device_position_mm=device_position_mm,
            field_of_view_extent_mm=field_of_view_extent_mm)
        self.pitch_mm = pitch_mm
        self.number_detector_elements_x = number_detector_elements_x
        self.number_detector_elements_y = number_detector_elements_y
        self.probe_depth_mm = number_detector_elements_y * pitch_mm
        self.probe_width_mm = number_detector_elements_x * pitch_mm
    def get_field_of_view_extent_mm(self) -> np.ndarray:
        """Return the default field of view: the array footprint in x/y and 0-100 mm in z."""
        return np.asarray([-self.number_detector_elements_x*self.pitch_mm/2,
                           self.number_detector_elements_x*self.pitch_mm/2,
                           -self.number_detector_elements_y * self.pitch_mm / 2,
                           self.number_detector_elements_y * self.pitch_mm / 2,
                           0, 100])
    def check_settings_prerequisites(self, global_settings: Settings) -> bool:
        """Return True if the simulation volume is large enough to contain the probe."""
        if global_settings[Tags.DIM_VOLUME_X_MM] < self.probe_width_mm + global_settings[Tags.SPACING_MM]:
            self.logger.error(f"Volume x dimension is too small to encompass the planar array in simulation!"
                              f"Must be at least {self.probe_width_mm + global_settings[Tags.SPACING_MM]} mm but "
                              f"was {global_settings[Tags.DIM_VOLUME_X_MM]} mm")
            return False
        if global_settings[Tags.DIM_VOLUME_Y_MM] < self.probe_depth_mm + global_settings[Tags.SPACING_MM]:
            # Bug fix: this message previously reported the X dimension's value for a Y-dimension failure.
            self.logger.error(f"Volume y dimension is too small to encompass the planar array in simulation!"
                              f"Must be at least {self.probe_depth_mm + global_settings[Tags.SPACING_MM]} mm but "
                              f"was {global_settings[Tags.DIM_VOLUME_Y_MM]} mm")
            return False
        return True
    def METHOD_NAME(self, global_settings):
        """No settings need to be adjusted for this device."""
        pass
    def get_detector_element_positions_base_mm(self) -> np.ndarray:
        """Return the (n, 3) element-center positions in mm relative to the array center.

        Elements lie on a regular grid in the z=0 plane, stored row-major (x fastest).
        """
        detector_element_positions_mm = np.zeros((self.number_detector_elements, 3))
        for x in range(self.number_detector_elements_x):
            for y in range(self.number_detector_elements_y):
                # The +0.5 centers each element within its pitch cell.
                detector_element_positions_mm[x + y*self.number_detector_elements_x] = \
                    [(x - self.number_detector_elements_x/2 + 0.5) * self.pitch_mm,
                     (y - self.number_detector_elements_y/2 + 0.5) * self.pitch_mm,
                     0]
        return detector_element_positions_mm
    def get_detector_element_orientations(self) -> np.ndarray:
        """Return the (n, 3) element orientation vectors; all elements face +z."""
        detector_element_orientations = np.zeros((self.number_detector_elements, 3))
        detector_element_orientations[:, 2] = 1
        return detector_element_orientations
    def serialize(self) -> dict:
        """Serialize the device state into a single-key dictionary."""
        # Copy, so that mutating the returned dict cannot alter the device instance.
        serialized_device = dict(self.__dict__)
        return {"PlanarArrayDetectionGeometry": serialized_device}
    @staticmethod
    def deserialize(dictionary_to_deserialize):
        """Rebuild a device instance from the dictionary produced by serialize()."""
        deserialized_device = PlanarArrayDetectionGeometry()
        for key, value in dictionary_to_deserialize.items():
            deserialized_device.__dict__[key] = value
        return deserialized_device
|
1,214 |
run compiler
|
import os
import sys
import shutil
import subprocess
# Clear all incremental stuff
def clear_stuff():
    """Remove incremental-build artifacts so each suite starts from scratch."""
    for directory in ('objc', 'object'):
        if os.path.exists(directory):
            shutil.rmtree(directory)
    for artifact in ('flowc.debug', 'flowc.bytecode'):
        if os.path.exists(artifact):
            os.remove(artifact)
def METHOD_NAME(useMd5):
    """Invoke flowc on test1.flow, optionally with md5-based change tracking."""
    command = "flowc verbose=1 use-md5=1 test1.flow" if useMd5 else "flowc verbose=1 test1.flow"
    return subprocess.check_output(command, shell=True)
# Check that incremental files are created and loaded
def test1(useMd5):
    """First build saves incrementals; a second, unchanged build loads them."""
    expectations = (
        ('Saving incremental for test1_1', 'Saving incremental for test1'),
        ('Loaded incremental for test1_1', 'Loaded incremental for test1'),
    )
    failure = 1
    for needles in expectations:
        output = METHOD_NAME(useMd5)
        lines = output.split('\n')
        for needle in needles:
            if needle not in lines:
                print('FAILED %d\n%s' % (failure, output))
                return False
            failure += 1
    print('PASSED: Check that incremental files are created and loaded')
    return True
# Change one included string
def test2(useMd5):
    """Touching test_content_1 must invalidate and rebuild both incremental modules."""
    # Rewrite the file with identical content: the timestamp changes, which is
    # what the incremental compiler keys on (expected output below asserts this
    # even under use-md5 -- confirm that is the intended flowc behavior).
    content_1 = open('test_content_1').read()
    open('test_content_1', 'w').write(content_1)
    result = METHOD_NAME(useMd5)
    lines = result.split('\n')
    if "Deleting outdated incremental for test1_1, file objc/test1_1.module" not in lines:
        print('FAILED 1\n' + result)
        return False
    if "Deleting outdated incremental for test1, file objc/test1.module" not in lines:
        print('FAILED 2\n' + result)
        return False
    # Bug fix: redundant Python-2 "print result" statements (which duplicated the
    # next line's output) were removed from the failure branches below.
    if 'Saving incremental for test1_1' not in lines:
        print('FAILED 3\n' + result)
        return False
    if 'Saving incremental for test1' not in lines:
        print('FAILED 4\n' + result)
        return False
    print('PASSED: Change one included string')
    return True
# Change the other included string
def test3(useMd5):
    """Touching test_content_2 must invalidate and rebuild both incremental modules."""
    # Rewrite the file with identical content to bump its timestamp.
    content_2 = open('test_content_2').read()
    open('test_content_2', 'w').write(content_2)
    result = METHOD_NAME(useMd5)
    lines = result.split('\n')
    if "Deleting outdated incremental for test1_1, file objc/test1_1.module" not in lines:
        print('FAILED 1\n' + result)
        return False
    if "Deleting outdated incremental for test1, file objc/test1.module" not in lines:
        print('FAILED 2\n' + result)
        return False
    # Bug fix: redundant Python-2 "print result" statements removed below.
    if 'Saving incremental for test1_1' not in lines:
        print('FAILED 3\n' + result)
        return False
    if 'Saving incremental for test1' not in lines:
        print('FAILED 4\n' + result)
        return False
    print('PASSED: Change the other included string')
    return True
# Change both included strings
def test4(useMd5):
    """Touching both content files must invalidate and rebuild both incremental modules."""
    # Rewrite both files with identical content to bump their timestamps.
    content_1 = open('test_content_1').read()
    content_2 = open('test_content_2').read()
    open('test_content_1', 'w').write(content_1)
    open('test_content_2', 'w').write(content_2)
    result = METHOD_NAME(useMd5)
    lines = result.split('\n')
    if "Deleting outdated incremental for test1_1, file objc/test1_1.module" not in lines:
        print('FAILED 1\n' + result)
        return False
    if "Deleting outdated incremental for test1, file objc/test1.module" not in lines:
        print('FAILED 2\n' + result)
        return False
    # Bug fix: redundant Python-2 "print result" statements removed below.
    if 'Saving incremental for test1_1' not in lines:
        print('FAILED 3\n' + result)
        return False
    if 'Saving incremental for test1' not in lines:
        print('FAILED 4\n' + result)
        return False
    print('PASSED: Change both included strings')
    return True
# Incremental file is loaded, no changes
def test5(useMd5):
    """With no source changes, both incrementals must be loaded, not rebuilt."""
    output = METHOD_NAME(useMd5)
    lines = output.split('\n')
    for index, needle in enumerate(
            ('Loaded incremental for test1_1', 'Loaded incremental for test1'), start=1):
        if needle not in lines:
            print('FAILED %d\n%s' % (index, output))
            return False
    print('PASSED: Incremental file is loaded, no changes')
    return True
def runtests():
    """Run the whole suite twice: once without and once with the use-md5=1 option."""
    tests = [test1, test2, test3, test4, test5]
    def run_suite(use_md5, first_number, label):
        # Run every test, numbering from first_number; stop on the first failure.
        print(label)
        number = first_number
        for test in tests:
            sys.stdout.write('TEST ' + str(number) + ' ')
            sys.stdout.flush()
            if not test(use_md5):
                return False
            number += 1
        return True
    clear_stuff()
    # Fix: the two previously duplicated loops (one of which ended with a stray
    # semicolon) are folded into the helper above.
    if not run_suite(False, 1, 'Testing with no use-md5 option'):
        return
    clear_stuff()
    if not run_suite(True, len(tests) + 1, 'Testing with use-md5=1 option'):
        return
    clear_stuff()
def main():
    """Entry point: run the incremental-compilation test suite."""
    runtests()
if __name__ == "__main__":
    main()
|
1,215 |
write
|
"""
pygments.formatters.other
~~~~~~~~~~~~~~~~~~~~~~~~~
Other formatters: NullFormatter, RawTokenFormatter.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.formatter import Formatter
from pygments.util import get_choice_opt
from pygments.token import Token
from pygments.console import colorize
__all__ = ['NullFormatter', 'RawTokenFormatter', 'TestcaseFormatter']
class NullFormatter(Formatter):
    """
    Output the text unchanged without any formatting.
    """
    name = 'Text only'
    aliases = ['text', 'null']
    filenames = ['*.txt']
    def format(self, tokensource, outfile):
        # The encoding is fixed for the whole stream, so branch once outside the loop.
        encoding = self.encoding
        if encoding:
            for _ttype, value in tokensource:
                outfile.METHOD_NAME(value.encode(encoding))
        else:
            for _ttype, value in tokensource:
                outfile.METHOD_NAME(value)
class RawTokenFormatter(Formatter):
    r"""
    Format tokens as a raw representation for storing token streams.
    The format is ``tokentype<TAB>repr(tokenstring)\n``. The output can later
    be converted to a token stream with the `RawTokenLexer`, described in the
    :doc:`lexer list <lexers>`.
    Only two options are accepted:
    `compress`
        If set to ``'gz'`` or ``'bz2'``, compress the output with the given
        compression algorithm after encoding (default: ``''``).
    `error_color`
        If set to a color name, highlight error tokens using that color. If
        set but with no value, defaults to ``'red'``.
    .. versionadded:: 0.11
    """
    name = 'Raw tokens'
    aliases = ['raw', 'tokens']
    filenames = ['*.raw']
    unicodeoutput = False
    def __init__(self, **options):
        Formatter.__init__(self, **options)
        # We ignore self.encoding if it is set, since it gets set for lexer
        # and formatter if given with -Oencoding on the command line.
        # The RawTokenFormatter outputs only ASCII. Override here.
        self.encoding = 'ascii'  # let pygments.format() do the right thing
        self.compress = get_choice_opt(options, 'compress',
                                       ['', 'none', 'gz', 'bz2'], '')
        self.error_color = options.get('error_color', None)
        if self.error_color is True:
            self.error_color = 'red'
        if self.error_color is not None:
            # Fail fast on an unknown color name rather than mid-stream.
            try:
                colorize(self.error_color, '')
            except KeyError:
                raise ValueError("Invalid color %r specified" %
                                 self.error_color)
    def format(self, tokensource, outfile):
        # This formatter emits bytes; probe the target with an empty write.
        try:
            outfile.METHOD_NAME(b'')
        except TypeError:
            raise TypeError('The raw tokens formatter needs a binary '
                            'output file')
        # Rebind the write/flush pair depending on the chosen compression, so
        # the token loop below is identical for all three modes.
        if self.compress == 'gz':
            import gzip
            outfile = gzip.GzipFile('', 'wb', 9, outfile)
            METHOD_NAME = outfile.METHOD_NAME
            flush = outfile.close
        elif self.compress == 'bz2':
            import bz2
            compressor = bz2.BZ2Compressor(9)
            def METHOD_NAME(text):
                outfile.METHOD_NAME(compressor.compress(text))
            def flush():
                outfile.METHOD_NAME(compressor.flush())
                outfile.flush()
        else:
            METHOD_NAME = outfile.METHOD_NAME
            flush = outfile.flush
        if self.error_color:
            for ttype, value in tokensource:
                line = b"%r\t%r\n" % (ttype, value)
                if ttype is Token.Error:
                    METHOD_NAME(colorize(self.error_color, line))
                else:
                    METHOD_NAME(line)
        else:
            for ttype, value in tokensource:
                METHOD_NAME(b"%r\t%r\n" % (ttype, value))
        flush()
# Templates wrapped around the token list emitted by TestcaseFormatter:
# header with the %r-interpolated input fragment, and the closing assertion.
TESTCASE_BEFORE = '''\
def testNeedsName(lexer):
fragment = %r
tokens = [
'''
TESTCASE_AFTER = '''\
]
assert list(lexer.get_tokens(fragment)) == tokens
'''
class TestcaseFormatter(Formatter):
    """
    Format tokens as appropriate for a new testcase.
    .. versionadded:: 2.0
    """
    name = 'Testcase'
    aliases = ['testcase']
    def __init__(self, **options):
        Formatter.__init__(self, **options)
        if self.encoding not in (None, 'utf-8'):
            raise ValueError("Only None and utf-8 are allowed encodings.")
    def format(self, tokensource, outfile):
        indent = ' ' * 12
        raw_parts = []
        token_lines = []
        for ttype, value in tokensource:
            raw_parts.append(value)
            token_lines.append('%s(%s, %r),\n' % (indent, ttype, value))
        # Assemble header (with the raw fragment), token list, and footer.
        pieces = (
            TESTCASE_BEFORE % (''.join(raw_parts),),
            ''.join(token_lines),
            TESTCASE_AFTER,
        )
        if self.encoding is None:
            outfile.METHOD_NAME(''.join(pieces))
        else:
            for piece in pieces:
                outfile.METHOD_NAME(piece.encode('utf-8'))
        outfile.flush()
|
1,216 |
accept
|
from __future__ import annotations
from typing import TYPE_CHECKING, Any, AsyncGenerator, Generic, cast
from litestar._multipart import parse_content_header, parse_multipart_form
from litestar._parsers import parse_url_encoded_form_data
from litestar.connection.base import (
ASGIConnection,
AuthT,
StateT,
UserT,
empty_receive,
empty_send,
)
from litestar.datastructures.headers import Accept
from litestar.datastructures.multi_dicts import FormMultiDict
from litestar.enums import RequestEncodingType
from litestar.exceptions import InternalServerException
from litestar.serialization import decode_json, decode_msgpack
from litestar.types import Empty
__all__ = ("Request",)
if TYPE_CHECKING:
from litestar.handlers.http_handlers import HTTPRouteHandler # noqa: F401
from litestar.types.asgi_types import HTTPScope, Method, Receive, Scope, Send
# Request headers that are forwarded on HTTP/2 server-push promises
# (see Request.send_push_promise).
SERVER_PUSH_HEADERS = {
    "accept",
    "accept-encoding",
    "accept-language",
    "cache-control",
    "user-agent",
}
class Request(Generic[UserT, AuthT, StateT], ASGIConnection["HTTPRouteHandler", UserT, AuthT, StateT]):
    """The Litestar Request class."""
    # Parsed request data is cached both on the instance and in the ASGI scope,
    # so a second Request built over the same scope reuses the parsed values.
    __slots__ = ("_json", "_form", "_body", "_msgpack", "_content_type", "_accept", "is_connected")
    scope: HTTPScope
    """The ASGI scope attached to the connection."""
    receive: Receive
    """The ASGI receive function."""
    send: Send
    """The ASGI send function."""
    def __init__(self, scope: Scope, receive: Receive = empty_receive, send: Send = empty_send) -> None:
        """Initialize ``Request``.
        Args:
            scope: The ASGI connection scope.
            receive: The ASGI receive function.
            send: The ASGI send function.
        """
        super().__init__(scope, receive, send)
        self.is_connected: bool = True
        # ``Empty`` is a sentinel meaning "not parsed yet"; values may already
        # be cached in the scope by an earlier Request over the same scope.
        self._body: Any = scope.get("_body", Empty)
        self._form: Any = scope.get("_form", Empty)
        self._json: Any = scope.get("_json", Empty)
        self._msgpack: Any = scope.get("_msgpack", Empty)
        self._content_type: Any = scope.get("_content_type", Empty)
        self._accept: Any = scope.get("_accept", Empty)
    @property
    def method(self) -> Method:
        """Return the request method.
        Returns:
            The request :class:`Method <litestar.types.Method>`
        """
        return self.scope["method"]
    @property
    def content_type(self) -> tuple[str, dict[str, str]]:
        """Parse the request's 'Content-Type' header, returning the header value and any options as a dictionary.
        Returns:
            A tuple with the parsed value and a dictionary containing any options send in it.
        """
        if self._content_type is Empty:
            # Parse once, then memoize on the scope for reuse.
            self._content_type = self.scope["_content_type"] = parse_content_header(self.headers.get("Content-Type", "")) # type: ignore[typeddict-unknown-key]
        return cast("tuple[str, dict[str, str]]", self._content_type)
    @property
    def METHOD_NAME(self) -> Accept:
        """Parse the request's 'Accept' header, returning an :class:`Accept <litestar.datastructures.headers.Accept>` instance.
        Returns:
            An :class:`Accept <litestar.datastructures.headers.Accept>` instance, representing the list of acceptable media types.
        """
        if self._accept is Empty:
            self._accept = self.scope["_accept"] = Accept(self.headers.get("Accept", "*/*")) # type: ignore[typeddict-unknown-key]
        return cast("Accept", self._accept)
    async def json(self) -> Any:
        """Retrieve the json request body from the request.
        Returns:
            An arbitrary value
        """
        if self._json is Empty:
            body = await self.body()
            # b"null" makes an empty body decode to None instead of raising.
            self._json = self.scope["_json"] = decode_json(body or b"null", type_decoders=self.route_handler.resolve_type_decoders()) # type: ignore[typeddict-unknown-key]
        return self._json
    async def msgpack(self) -> Any:
        """Retrieve the MessagePack request body from the request.
        Returns:
            An arbitrary value
        """
        if self._msgpack is Empty:
            body = await self.body()
            # b"\xc0" is the MessagePack encoding of nil, mirroring the JSON case.
            self._msgpack = self.scope["_msgpack"] = decode_msgpack(body or b"\xc0", type_decoders=self.route_handler.resolve_type_decoders()) # type: ignore[typeddict-unknown-key]
        return self._msgpack
    async def stream(self) -> AsyncGenerator[bytes, None]:
        """Return an async generator that streams chunks of bytes.
        Returns:
            An async generator.
        Raises:
            RuntimeError: if the stream is already consumed
        """
        if self._body is Empty:
            if not self.is_connected:
                raise InternalServerException("stream consumed")
            while event := await self.receive():
                if event["type"] == "http.request":
                    if event["body"]:
                        yield event["body"]
                    if not event.get("more_body", False):
                        break
                if event["type"] == "http.disconnect":
                    raise InternalServerException("client disconnected prematurely")
            self.is_connected = False
            yield b""
        else:
            # The body was already read; replay it as a single chunk.
            yield self._body
            yield b""
            return
    async def body(self) -> bytes:
        """Return the body of the request.
        Returns:
            A byte-string representing the body of the request.
        """
        if self._body is Empty:
            self._body = self.scope["_body"] = b"".join([c async for c in self.stream()]) # type: ignore[typeddict-unknown-key]
        return cast("bytes", self._body)
    async def form(self) -> FormMultiDict:
        """Retrieve form data from the request. If the request is either a 'multipart/form-data' or an
        'application/x-www-form- urlencoded', return a FormMultiDict instance populated with the values sent in the
        request, otherwise, an empty instance.
        Returns:
            A FormMultiDict instance
        """
        if self._form is not Empty:
            return FormMultiDict(self._form)
        content_type, options = self.content_type
        if content_type == RequestEncodingType.MULTI_PART:
            self._form = self.scope["_form"] = form_values = parse_multipart_form( # type: ignore[typeddict-unknown-key]
                body=await self.body(),
                boundary=options.get("boundary", "").encode(),
                multipart_form_part_limit=self.app.multipart_form_part_limit,
            )
            return FormMultiDict(form_values)
        if content_type == RequestEncodingType.URL_ENCODED:
            self._form = self.scope["_form"] = form_values = parse_url_encoded_form_data( # type: ignore[typeddict-unknown-key]
                await self.body(),
            )
            return FormMultiDict(form_values)
        return FormMultiDict()
    async def send_push_promise(self, path: str) -> None:
        """Send a push promise.
        This method requires the `http.response.push` extension to be sent from the ASGI server.
        Args:
            path: Path to send the promise to.
        Returns:
            None
        """
        extensions: dict[str, dict[Any, Any]] = self.scope.get("extensions") or {}
        if "http.response.push" in extensions:
            # Forward only the safe, cache-relevant request headers (SERVER_PUSH_HEADERS).
            raw_headers: list[tuple[bytes, bytes]] = []
            for name in SERVER_PUSH_HEADERS:
                raw_headers.extend(
                    (name.encode("latin-1"), value.encode("latin-1")) for value in self.headers.getall(name, [])
                )
            await self.send({"type": "http.response.push", "path": path, "headers": raw_headers})
|
1,217 |
test qvm run pqer
|
import numpy as np
import pytest
from pyquil import Program
from pyquil.api import QVM
from pyquil.api._errors import QVMError
from pyquil.api._qvm import validate_noise_probabilities, validate_qubit_list, prepare_register_list
from pyquil.api import QCSClientConfiguration
from pyquil.gates import MEASURE, X
from pyquil.quilbase import Declare, MemoryReference
def test_qvm__default_client(client_configuration: QCSClientConfiguration):
    """A default (noise-free) QVM run returns one readout bit per shot."""
    simulator = QVM(client_configuration=client_configuration)
    program = Program(Declare("ro", "BIT"), X(0), MEASURE(0, MemoryReference("ro")))
    readout = simulator.run(program.wrap_in_numshots_loop(1000)).readout_data.get("ro")
    assert readout.shape == (1000, 1)
def METHOD_NAME(client_configuration: QCSClientConfiguration):
    """A noisy QVM run still yields one bit per shot, mostly 1s from the X gate."""
    noisy_qvm = QVM(client_configuration=client_configuration, gate_noise=(0.01, 0.01, 0.01))
    program = Program(Declare("ro", "BIT"), X(0), MEASURE(0, MemoryReference("ro")))
    readout = noisy_qvm.run(program.wrap_in_numshots_loop(1000)).readout_data.get("ro")
    assert readout.shape == (1000, 1)
    assert np.mean(readout) > 0.8
def test_qvm_run_just_program(client_configuration: QCSClientConfiguration):
    """Running a plain Program (no extras) on a noisy QVM produces the expected shape."""
    noisy_qvm = QVM(client_configuration=client_configuration, gate_noise=(0.01, 0.01, 0.01))
    program = Program(Declare("ro", "BIT"), X(0), MEASURE(0, MemoryReference("ro")))
    readout = noisy_qvm.run(program.wrap_in_numshots_loop(1000)).readout_data.get("ro")
    assert readout.shape == (1000, 1)
    assert np.mean(readout) > 0.8
def test_qvm_run_only_pqer(client_configuration: QCSClientConfiguration):
    """Same check as above via the PyQuilExecutableResponse path."""
    noisy_qvm = QVM(client_configuration=client_configuration, gate_noise=(0.01, 0.01, 0.01))
    program = Program(Declare("ro", "BIT"), X(0), MEASURE(0, MemoryReference("ro")))
    readout = noisy_qvm.run(program.wrap_in_numshots_loop(1000)).readout_data.get("ro")
    assert readout.shape == (1000, 1)
    assert np.mean(readout) > 0.8
def test_qvm_run_region_declared_and_measured(client_configuration: QCSClientConfiguration):
    """Measuring into a declared region returns one readout column."""
    qvm = QVM(client_configuration=client_configuration)
    program = Program(Declare("reg", "BIT"), X(0), MEASURE(0, MemoryReference("reg")))
    readout = qvm.run(program.wrap_in_numshots_loop(100)).readout_data.get("reg")
    assert readout.shape == (100, 1)
def test_qvm_run_region_declared_not_measured(client_configuration: QCSClientConfiguration):
    """A declared but never-measured region yields zero readout columns."""
    qvm = QVM(client_configuration=client_configuration)
    program = Program(Declare("reg", "BIT"), X(0))
    readout = qvm.run(program.wrap_in_numshots_loop(100)).readout_data.get("reg")
    assert readout.shape == (100, 0)
def test_qvm_run_region_not_declared_is_measured(client_configuration: QCSClientConfiguration):
    """Measuring into an undeclared region is rejected by the QVM."""
    qvm = QVM(client_configuration=client_configuration)
    program = Program(X(0), MEASURE(0, MemoryReference("ro")))
    with pytest.raises(QVMError, match='Bad memory region name "ro" in MEASURE'):
        qvm.run(program)
def test_qvm_run_region_not_declared_not_measured(client_configuration: QCSClientConfiguration):
    """With no declaration and no measurement, no 'ro' readout data exists."""
    qvm = QVM(client_configuration=client_configuration)
    program = Program(X(0))
    execution = qvm.run(program.wrap_in_numshots_loop(100))
    assert execution.readout_data.get("ro") is None
def test_qvm_version(client_configuration: QCSClientConfiguration):
    """The QVM must report a dotted-integer version string."""
    qvm = QVM(client_configuration=client_configuration)
    version = qvm.get_version_info()
    def is_a_version_string(version_string: str):
        parts = version_string.split(".")
        try:
            # Bug fix: map() is lazy in Python 3, so a bare map(int, parts)
            # never raised and this check always passed. Materializing the
            # result forces every int() conversion to run.
            list(map(int, parts))
        except ValueError:
            return False
        return True
    assert is_a_version_string(version)
def test_validate_noise_probabilities():
    # Non-tuple input is rejected.
    with pytest.raises(TypeError, match="noise_parameter must be a tuple"):
        validate_noise_probabilities(1)
    # Entries must all be floats.
    with pytest.raises(TypeError, match="noise_parameter values should all be floats"):
        validate_noise_probabilities(("a", "b", "c"))
    # Exactly three probabilities are required.
    with pytest.raises(ValueError, match="noise_parameter tuple must be of length 3"):
        validate_noise_probabilities((0.0, 0.0, 0.0, 0.0))
    # The probabilities must sum to at most 1.
    with pytest.raises(
        ValueError,
        match="sum of entries in noise_parameter must be between 0 and 1 \\(inclusive\\)",
    ):
        validate_noise_probabilities((0.5, 0.5, 0.5))
    # Negative probabilities are rejected.
    with pytest.raises(ValueError, match="noise_parameter values should all be non-negative"):
        validate_noise_probabilities((-0.5, -0.5, 1.0))
def test_validate_qubit_list():
    # Negative qubit indices are rejected.
    with pytest.raises(TypeError):
        validate_qubit_list([-1, 1])
    # Non-integer entries are rejected.
    # NOTE(review): a second positional argument (1) is passed here -- confirm
    # validate_qubit_list's signature actually accepts it.
    with pytest.raises(TypeError):
        validate_qubit_list(["a", 0], 1)
def test_prepare_register_list():
    # A register whose qubit list contains a negative index is rejected.
    with pytest.raises(TypeError):
        prepare_register_list({"ro": [-1, 1]})
|
1,218 |
nested
|
# util/compat.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Handle Python version/platform incompatibilities."""
import sys
# dummy_threading provides a threading-compatible API on builds without threads.
try:
    import threading
except ImportError:
    import dummy_threading as threading
# Interpreter version / platform flags used throughout the module.
py36 = sys.version_info >= (3, 6)
py33 = sys.version_info >= (3, 3)
py32 = sys.version_info >= (3, 2)
py3k = sys.version_info >= (3, 0)
py2k = sys.version_info < (3, 0)
py265 = sys.version_info >= (2, 6, 5)
jython = sys.platform.startswith('java')
pypy = hasattr(sys, 'pypy_version_info')
win32 = sys.platform.startswith('win')
cpython = not pypy and not jython  # TODO: something better for this ?
import collections
next = next
# Prefer the C pickle implementation where it exists (Python 2 only).
if py3k:
    import pickle
else:
    try:
        import cPickle as pickle
    except ImportError:
        import pickle
# work around http://bugs.python.org/issue2646
if py265:
    safe_kwarg = lambda arg: arg
else:
    safe_kwarg = str
# Stand-in for the removed inspect.ArgSpec structure.
ArgSpec = collections.namedtuple("ArgSpec",
                                 ["args", "varargs", "keywords", "defaults"])
if py3k:
    # Python 3: native names / locations for everything the library needs.
    import builtins
    from inspect import getfullargspec as inspect_getfullargspec
    from urllib.parse import (quote_plus, unquote_plus,
                              parse_qsl, quote, unquote)
    import configparser
    from io import StringIO
    from io import BytesIO as byte_buffer
    def inspect_getargspec(func):
        # Emulate the removed inspect.getargspec() on top of getfullargspec().
        return ArgSpec(
            *inspect_getfullargspec(func)[0:4]
        )
    string_types = str,
    binary_types = bytes,
    binary_type = bytes
    text_type = str
    int_types = int,
    iterbytes = iter
    def u(s):
        return s
    def ue(s):
        return s
    def b(s):
        return s.encode("latin-1")
    if py32:
        callable = callable
    else:
        def callable(fn):
            return hasattr(fn, '__call__')
    def cmp(a, b):
        # Python 3 removed cmp(); reproduce its -1/0/1 contract.
        return (a > b) - (a < b)
    from functools import reduce
    print_ = getattr(builtins, "print")
    import_ = getattr(builtins, '__import__')
    import itertools
    itertools_filterfalse = itertools.filterfalse
    itertools_filter = filter
    itertools_imap = map
    from itertools import zip_longest
    import base64
    def b64encode(x):
        return base64.b64encode(x).decode('ascii')
    def b64decode(x):
        return base64.b64decode(x.encode('ascii'))
else:
    # Python 2: map the Python-3-style names onto their Python 2 equivalents.
    from inspect import getargspec as inspect_getfullargspec
    inspect_getargspec = inspect_getfullargspec
    from urllib import quote_plus, unquote_plus, quote, unquote
    from urlparse import parse_qsl
    import ConfigParser as configparser
    from StringIO import StringIO
    from cStringIO import StringIO as byte_buffer
    string_types = basestring,
    binary_types = bytes,
    binary_type = str
    text_type = unicode
    int_types = int, long
    def iterbytes(buf):
        return (ord(byte) for byte in buf)
    def u(s):
        # this differs from what six does, which doesn't support non-ASCII
        # strings - we only use u() with
        # literal source strings, and all our source files with non-ascii
        # in them (all are tests) are utf-8 encoded.
        return unicode(s, "utf-8")
    def ue(s):
        return unicode(s, "unicode_escape")
    def b(s):
        return s
    def import_(*args):
        if len(args) == 4:
            args = args[0:3] + ([str(arg) for arg in args[3]],)
        return __import__(*args)
    callable = callable
    cmp = cmp
    reduce = reduce
    import base64
    b64encode = base64.b64encode
    b64decode = base64.b64decode
    def print_(*args, **kwargs):
        fp = kwargs.pop("file", sys.stdout)
        if fp is None:
            return
        # Bug fix: this previously iterated enumerate(args), so every `arg`
        # was an (index, value) tuple -- the isinstance check always failed
        # and the tuple's repr was written instead of the value.
        for arg in args:
            if not isinstance(arg, basestring):
                arg = str(arg)
            fp.write(arg)
    import itertools
    itertools_filterfalse = itertools.ifilterfalse
    itertools_filter = itertools.ifilter
    itertools_imap = itertools.imap
    from itertools import izip_longest as zip_longest
import time
# time.clock is the higher-resolution timer on Windows/Jython; elsewhere use wall time.
if win32 or jython:
    time_func = time.clock
else:
    time_func = time.time
from collections import namedtuple
from operator import attrgetter as dottedgetter
if py3k:
    # Python 3: re-raise with an explicit traceback and cause, preserving chaining.
    def reraise(tp, value, tb=None, cause=None):
        if cause is not None:
            assert cause is not value, "Same cause emitted"
            value.__cause__ = cause
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value
else:
    # not as nice as that of Py3K, but at least preserves
    # the code line where the issue occurred
    # (the three-argument raise form is a syntax error on Py3, hence exec).
    exec("def reraise(tp, value, tb=None, cause=None):\n"
         " if cause is not None:\n"
         " assert cause is not value, 'Same cause emitted'\n"
         " raise tp, value, tb\n")
def raise_from_cause(exception, exc_info=None):
    # Re-raise *exception* chained to the currently handled exception, if any.
    if exc_info is None:
        exc_info = sys.exc_info()
    exc_type, exc_value, exc_tb = exc_info
    cause = exc_value if exc_value is not exception else None
    reraise(type(exception), exception, tb=exc_tb, cause=cause)
if py3k:
    exec_ = getattr(builtins, 'exec')
else:
    # The "exec ... in ..." statement form is invalid Py3 syntax, so wrap it.
    def exec_(func_text, globals_, lcl=None):
        if lcl is None:
            exec('exec func_text in globals_')
        else:
            exec('exec func_text in globals_, lcl')
def with_metaclass(meta, *bases):
    """Create a base class with a metaclass.
    Drops the middle class upon creation.
    Source: http://lucumr.pocoo.org/2013/5/21/porting-to-python-3-redux/
    """
    class metaclass(meta):
        __call__ = type.__call__
        __init__ = type.__init__
        def __new__(cls, name, this_bases, d):
            # this_bases is None only for the temporary class created below;
            # real subclasses are rebuilt directly with the intended metaclass.
            if this_bases is None:
                return type.__new__(cls, name, (), d)
            return meta(name, bases, d)
    return metaclass('temporary_class', None, {})
from contextlib import contextmanager
try:
    from contextlib import METHOD_NAME
except ImportError:
    # removed in py3k, credit to mitsuhiko for
    # workaround
    @contextmanager
    def METHOD_NAME(*managers):
        # Enter each manager in order, yielding the list of __enter__ results;
        # on the way out, call the exits in reverse, preserving the first
        # unhandled exception across later exits.
        exits = []
        vars = []
        exc = (None, None, None)
        try:
            for mgr in managers:
                exit = mgr.__exit__
                enter = mgr.__enter__
                vars.append(enter())
                exits.append(exit)
            yield vars
        except:
            exc = sys.exc_info()
        finally:
            while exits:
                exit = exits.pop()
                try:
                    # A truthy return from __exit__ suppresses the exception.
                    if exit(*exc):
                        exc = (None, None, None)
                except:
                    exc = sys.exc_info()
            if exc != (None, None, None):
                reraise(exc[0], exc[1], exc[2])
|
1,219 |
load schema
|
import logging
import sys
from dataclasses import dataclass
from typing import Union
import click
from linkml_runtime.linkml_model import SchemaDefinition
from linkml_runtime.utils.schemaview import SchemaView
from rdflib import Graph
from SPARQLWrapper import JSON, SPARQLWrapper
from linkml._version import __version__
from linkml.generators.sparqlgen import SparqlGenerator
from linkml.reporting import CheckResult, Report
from linkml.utils.datautils import _get_format, dumpers_loaders, get_dumper
from linkml.utils.datavalidator import DataValidator
def sparqljson2dict(row: dict):
    """Flatten a SPARQL JSON result row to a plain {variable: value} dict."""
    flattened = {}
    for variable, binding in row.items():
        flattened[variable] = binding["value"]
    return flattened
def _make_result(row):
    """Build a CheckResult from a flattened SPARQL result row."""
    fields = {
        "type": row.get("check"),
        "source": row.get("graph"),
        "subject": row.get("subject"),
        "predicate": row.get("predicate"),
    }
    return CheckResult(**fields)
@dataclass
class SparqlDataValidator(DataValidator):
    """Validate RDF data using SPARQL check queries generated from a LinkML schema."""
    schema: SchemaDefinition = None  # schema the checks are derived from
    queries: dict = None  # lazily built mapping of check name -> SPARQL query
    def validate_file(self, input: str, format: str = "turtle", **kwargs):
        """Parse the RDF file at *input* into a graph and validate it.

        :param input: path to the RDF file
        :param format: RDF serialization of the file (default: turtle)
        :return: flat list of offending bindings
        """
        g = Graph()
        g.parse(input, format=format)
        return self.validate_graph(g, **kwargs)
    def validate_graph(self, g: Graph, **kwargs):
        """Run every generated check query against *g*.

        :param g: graph to validate
        :return: flat list of bindings from rows that violated a check
        """
        if self.queries is None:
            self.queries = SparqlGenerator(self.schema, **kwargs).queries
        invalid = []
        for qn, q in self.queries.items():
            print(f"QUERY: {qn}")
            q: str
            print(q)
            # Bug fix: the query was previously executed twice -- once outside
            # the try block (so a malformed query crashed the whole run) and
            # once inside it. Execute it only once, inside the handler.
            try:
                qres = g.query(q)
                for row in qres:
                    invalid += row
            except Exception:
                logging.error(f"FAILED: {qn}")
        return invalid
    def validate_endpoint(self, url: str, **kwargs):
        """Run every generated check query against a remote SPARQL endpoint.

        :param url: URL of the SPARQL endpoint
        :return: a Report collecting one CheckResult per offending row
        """
        if self.queries is None:
            self.queries = SparqlGenerator(self.schema, **kwargs).queries
        invalid = []
        report = Report()
        for qn, q in self.queries.items():
            # NOTE(review): the row limit is hard-coded; the CLI's --limit
            # option never reaches this point -- confirm intended behavior.
            q += " LIMIT 20"
            logging.debug(f"QUERY: {qn}")
            logging.debug(f"{q}")
            sw = SPARQLWrapper(url)
            sw.setQuery(q)
            sw.setReturnFormat(JSON)
            sw_q = sw.query()
            results = sw_q.convert()
            for row in results["results"]["bindings"]:
                row = sparqljson2dict(row)
                report.results.append(_make_result(row))
                invalid += row
        return report
    def METHOD_NAME(self, schema: Union[str, SchemaDefinition]):
        """Load and store the schema (path or SchemaDefinition); return it."""
        self.schemaview = SchemaView(schema)
        self.schema = self.schemaview.schema
        return self.schema
@click.command()
@click.option("--named-graph", "-G", multiple=True, help="Constrain query to a named graph")
@click.option("--input", "-i", help="Input file to validate")
@click.option("--endpoint-url", "-U", help="URL of sparql endpoint")
@click.option("--limit", "-L", help="Max results per query")
@click.option("--output", "-o", help="Path to report file")
@click.option(
    "--input-format",
    "-f",
    type=click.Choice(list(dumpers_loaders.keys())),
    help="Input format. Inferred from input suffix if not specified",
)
@click.option(
    "--output-format",
    "-t",
    type=click.Choice(list(dumpers_loaders.keys())),
    help="Output format. Inferred from output suffix if not specified",
)
@click.option("--schema", "-s", help="Path to schema specified as LinkML yaml")
@click.version_option(__version__, "-V", "--version")
def cli(
    input,
    output=None,
    input_format=None,
    output_format=None,
    endpoint_url=None,
    limit=None,
    named_graph=None,
    schema=None,
) -> None:
    """
    Validates sparql
    Example:
        linkml-sparql-validate -U http://sparql.hegroup.org/sparql -s tests/test_validation/input/omo.yaml
    """
    # NOTE(review): the validator is constructed with the schema path directly;
    # load_schema()/SchemaView is never invoked here -- confirm SparqlGenerator
    # accepts a path as well as a SchemaDefinition.
    validator = SparqlDataValidator(schema)
    if endpoint_url is not None:
        results = validator.validate_endpoint(endpoint_url, limit=limit, named_graphs=named_graph)
    else:
        if input is None:
            raise Exception("Must pass one of --endpoint-url OR --input")
        input_format = _get_format(input, input_format)
        results = validator.validate_file(input, format=input_format)
    # Serialize the report (default JSON) to the output path, or to stdout.
    output_format = _get_format(output, output_format, default="json")
    dumper = get_dumper(output_format)
    if output is not None:
        dumper.dump(results, output)
    else:
        print(dumper.dumps(results))
if __name__ == "__main__":
    cli(sys.argv[1:])
|
1,220 |
voucher and user client
|
# pylint: disable=redefined-outer-name
"""
Fixtures for voucher tests
"""
from datetime import datetime
from types import SimpleNamespace
import pytest
import pytz
import factory
from django.http import HttpRequest
from faker import Faker
from courses.factories import CourseRunFactory
from ecommerce.factories import (
CouponEligibilityFactory,
ProductFactory,
CouponFactory,
CouponRedemptionFactory,
CouponVersionFactory,
CouponPaymentVersionFactory,
CompanyFactory,
)
from voucher.factories import VoucherFactory
from voucher.forms import VOUCHER_PARSE_ERROR
from voucher.views import UploadVoucherFormView
fake = Faker()
@pytest.fixture
def upload_voucher_form():
    """
    Mock form carrying a complete set of fake parsed voucher values
    """
    voucher_data = {
        "employee_id": fake.password(special_chars=False),
        "voucher_id": fake.password(special_chars=False),
        "course_start_date_input": fake.date_object(),
        "course_id_input": fake.password(),
        "course_title_input": factory.fuzzy.FuzzyText(prefix="Course ").fuzz(),
        "employee_name": fake.name(),
        "pdf": fake.file_name(),
    }
    return SimpleNamespace(cleaned_data={"voucher": voucher_data})
@pytest.fixture
def upload_voucher_form_with_file_field():
    """
    Mock form whose voucher field holds a fake file name
    """
    fake_file = fake.file_name()
    return SimpleNamespace(cleaned_data={"voucher": fake_file})
@pytest.fixture
def upload_voucher_form_with_parse_error():
    """Fake form that failed validation with a voucher parse error."""
    form_errors = {"voucher": [VOUCHER_PARSE_ERROR]}
    return SimpleNamespace(errors=form_errors)
@pytest.fixture
def upload_voucher_form_view(user):
    """UploadVoucherFormView whose request was made by ``user``."""
    http_request = HttpRequest()
    http_request.user = user
    return UploadVoucherFormView(request=http_request)
@pytest.fixture
def voucher_and_user(user):
    """Voucher owned by ``user``, bundled together with that user."""
    return SimpleNamespace(voucher=VoucherFactory(user=user), user=user)
@pytest.fixture
def authenticated_client(client, user):
    """Django test client that is already logged in as ``user``."""
    logged_in_client = client
    logged_in_client.force_login(user)
    return logged_in_client
@pytest.fixture
def METHOD_NAME(voucher_and_user, client):
    """Bundle of voucher, user, and a client logged in as that user."""
    client.force_login(voucher_and_user.user)
    return SimpleNamespace(**vars(voucher_and_user), client=client)
@pytest.fixture
def redeemed_voucher_and_user_client(voucher_and_user, client):
    """Voucher/user bundle plus a logged-in client, where the voucher is
    attached to a coupon that has already been redeemed."""
    bundle = voucher_and_user
    client.force_login(bundle.user)
    bundle.voucher.coupon = CouponFactory()
    bundle.voucher.save()
    CouponRedemptionFactory(coupon_version__coupon=bundle.voucher.coupon)
    return SimpleNamespace(**vars(bundle), client=client)
@pytest.fixture
def voucher_and_partial_matches(METHOD_NAME):
    """
    Returns a voucher with partial matching CourseRuns

    Each of the first three runs matches the voucher on exactly one attribute
    (start date, readable id, or title); the last two are "-noise" near-misses
    so matching logic can be exercised against lookalikes.
    """
    voucher = METHOD_NAME.voucher
    company = CompanyFactory()
    # Matches only the voucher's course start date.
    course_run_1 = CourseRunFactory(
        start_date=datetime.combine(
            voucher.course_start_date_input, datetime.min.time(), tzinfo=pytz.UTC
        ),
        live=True,
    )
    # Matches only the readable course id.
    course_run_2 = CourseRunFactory(
        course__readable_id=voucher.course_id_input, live=True
    )
    # Matches only the course title.
    course_run_3 = CourseRunFactory(course__title=voucher.course_title_input, live=True)
    # Near-misses: id/title differ only by a "-noise" suffix.
    course_run_4 = CourseRunFactory(
        course__readable_id=f"{voucher.course_id_input}-noise", live=True
    )
    course_run_5 = CourseRunFactory(
        course__title=f"{voucher.course_title_input}-noise", live=True
    )
    return SimpleNamespace(
        **vars(METHOD_NAME),
        company=company,
        partial_matches=[
            course_run_1,
            course_run_2,
            course_run_3,
            course_run_4,
            course_run_5,
        ],
    )
@pytest.fixture
def voucher_and_exact_match(METHOD_NAME):
    """Extend the voucher bundle with a CourseRun that matches the voucher on
    start date, readable id and title simultaneously, plus a company."""
    voucher = METHOD_NAME.voucher
    matching_start = datetime.combine(
        voucher.course_start_date_input, datetime.min.time(), tzinfo=pytz.UTC
    )
    matching_run = CourseRunFactory(
        start_date=matching_start,
        course__readable_id=voucher.course_id_input,
        course__title=voucher.course_title_input,
        live=True,
    )
    return SimpleNamespace(
        **vars(METHOD_NAME),
        company=CompanyFactory(),
        exact_match=matching_run,
    )
@pytest.fixture
def voucher_and_partial_matches_with_coupons(voucher_and_partial_matches):
    """
    Returns a voucher with partial matching CourseRuns and valid coupons

    For every partially-matching course run this creates a product, a coupon
    eligibility for it, a one-dollar payment version for the fixture's
    company, and a coupon version linking the coupon to that payment version.
    """
    context = voucher_and_partial_matches
    products = [
        ProductFactory(content_object=course_run)
        for course_run in context.partial_matches
    ]
    coupon_eligibility_list = [
        CouponEligibilityFactory(product=product) for product in products
    ]
    payment_versions = [
        CouponPaymentVersionFactory(amount=1, company=context.company)
        for _ in coupon_eligibility_list
    ]
    # Pair each eligibility with its payment version directly rather than
    # indexing via range(len(...)); the lists are built with equal lengths.
    coupon_versions = [
        CouponVersionFactory(coupon=eligibility.coupon, payment_version=payment)
        for eligibility, payment in zip(coupon_eligibility_list, payment_versions)
    ]
    return SimpleNamespace(
        **vars(voucher_and_partial_matches),
        products=products,
        coupon_eligibility_list=coupon_eligibility_list,
        coupon_versions=coupon_versions,
        payment_versions=payment_versions,
    )
@pytest.fixture
def voucher_and_exact_match_with_coupon(voucher_and_exact_match):
    """Extend the exact-match bundle with a product, coupon eligibility,
    payment version and coupon version for the matching course run."""
    bundle = voucher_and_exact_match
    product = ProductFactory(content_object=bundle.exact_match)
    eligibility = CouponEligibilityFactory(product=product)
    payment = CouponPaymentVersionFactory(amount=1, company=bundle.company)
    version = CouponVersionFactory(
        coupon=eligibility.coupon, payment_version=payment
    )
    return SimpleNamespace(
        **vars(voucher_and_exact_match),
        product=product,
        coupon_eligibility=eligibility,
        coupon_version=version,
        payment_version=payment,
    )
|
1,221 |
launch torpedo
|
#!/usr/bin/env python3
import numpy as np
import rospy
from gazebo_msgs.msg import ContactsState, ModelState
from gazebo_msgs.srv import (
ApplyJointEffort,
ApplyJointEffortRequest,
GetModelState,
JointRequest,
JointRequestRequest,
SetModelState,
)
from geometry_msgs.msg import Twist
from mil_ros_tools import geometry_helpers, msg_helpers
from std_msgs.msg import String
from subjugator_msgs.srv import SetValve
class ActuatorBoard:
    """Simulated actuator board: dispatches SetValve service requests to the
    torpedo launcher, gripper controller, or marker dropper."""

    def __init__(self):
        self.torpedo_launcher = TorpedoLauncher()
        self.gripper_controller = GripperController()
        self.marker_dropper = MarkerDropper()
        # Maps actuator names from SetValve requests to handler callables.
        self.actuator_lookup = {
            "torpedo1": self.torpedo_launcher.METHOD_NAME,
            "torpedo2": self.torpedo_launcher.METHOD_NAME,
            "gripper": self.gripper_controller.set_gripper,
            "dropper": self.marker_dropper.drop,
        }
        rospy.Service("/actuator_driver/actuate", SetValve, self.actuate)
        # BUG FIX: the raw service was previously bound to self.actuate,
        # leaving the raw handler (misspelled "acuate_raw") unreachable.
        rospy.Service("/actuator_driver/actuate_raw", SetValve, self.actuate_raw)

    def actuate(self, req):
        """Handle a SetValve request on the normal actuate service."""
        rospy.loginfo(
            "Setting simulated actuator {} to {}".format(
                req.actuator, "opened" if req.opened else "closed"
            )
        )
        return self._actuate(req)

    def actuate_raw(self, req):
        """Handle a SetValve request on the raw actuate service."""
        rospy.loginfo(
            "Setting simulated actuator {} to {} (raw)".format(
                req.actuator, "opened" if req.opened else "closed"
            )
        )
        return self._actuate(req)

    # Keep the old (misspelled) attribute for any external callers.
    acuate_raw = actuate_raw

    def _actuate(self, req):
        """Dispatch to the handler registered for req.actuator.

        Returns the handler's result, or False for an unknown actuator.
        """
        handler = self.actuator_lookup.get(req.actuator)
        if handler is None:
            return False
        return handler(req)
class MarkerDropper:
    """Stand-in marker dropper for the simulated actuator board.

    Only logs the drop for now; real marker simulation could be added later.
    """

    def __init__(self):
        pass

    def drop(self, req):
        """Log a marker drop when asked to open; always report success."""
        if req.opened:
            rospy.loginfo("Dropping marker")
        return True
class TorpedoLauncher:
    # Simulates the torpedo launcher: teleports/impulses the "torpedo" model
    # in Gazebo and publishes the name of whatever it hits on
    # /gazebo/torpedo_contact.
    def __init__(self):
        self.launched = False
        self.set_torpedo = rospy.ServiceProxy("/gazebo/set_model_state", SetModelState)
        self.get_model = rospy.ServiceProxy("/gazebo/get_model_state", GetModelState)
        rospy.Subscriber(
            "/contact_bumper", ContactsState, self.check_contact, queue_size=1
        )
        self.contact_pub = rospy.Publisher(
            "/gazebo/torpedo_contact", String, queue_size=1
        )
    def check_contact(self, msg):
        # Contact callback: after a launch, look for a collision involving the
        # torpedo body and publish the name of the object it struck.
        if not self.launched:
            return
        if len(msg.states) == 0:
            # If there is no impact don't worry about it.
            return
        torpedo_name = "torpedo::body::bodycol"
        real_state = None
        # Find the contact state involving the torpedo; the "other" collision
        # name is whatever the torpedo touched.
        for state in msg.states:
            if state.collision1_name == torpedo_name:
                real_state = state
                other_collison_name = state.collision2_name
                break
            if state.collision2_name == torpedo_name:
                real_state = state
                other_collison_name = state.collision1_name
                break
        if real_state is None:
            return
        # Generally, if the torpedo is laying on the ground the collision force will be very small.
        # So if the force is really small, we assume the impact collision has occurred before the torpedo was launched.
        print(
            np.abs(
                np.linalg.norm(
                    msg_helpers.rosmsg_to_numpy(real_state.total_wrench.force)
                )
            )
        )
        # NOTE(review): threshold of 10 (force magnitude) is empirical —
        # confirm it still discriminates "resting" from "impact" in Gazebo.
        if (
            np.abs(
                np.linalg.norm(
                    msg_helpers.rosmsg_to_numpy(real_state.total_wrench.force)
                )
            )
            < 10
        ):
            rospy.loginfo("Torpedo probably still on the ground, still waiting.")
            return
        # Now the torpedo has impacted something, publish what it hit.
        rospy.loginfo("Impact detected!")
        self.launched = False
        rospy.loginfo(other_collison_name)
        self.contact_pub.publish(other_collison_name)
    def METHOD_NAME(self, srv):
        """
        Find position of sub and launch the torpedo from there.

        Teleports the torpedo model to an offset near the sub's launcher and
        gives it a forward velocity, both rotated into the sub's frame.
        Always returns True; impact reporting happens in check_contact.

        TODO:
            - Test to make sure it always fires from the right spot in the right direction.
              (It seems to but I haven't tested from all rotations.)
        """
        rospy.loginfo("Launching torpedo")
        sub_state = self.get_model(model_name="sub8")
        sub_pose = msg_helpers.pose_to_numpy(sub_state.pose)
        # Translate torpedo init velocity so that it first out of the front of the sub.
        muzzle_vel = np.array([10, 0, 0])
        v = geometry_helpers.rotate_vect_by_quat(np.append(muzzle_vel, 0), sub_pose[1])
        launch_twist = Twist()
        launch_twist.linear.x = v[0]
        launch_twist.linear.y = v[1]
        launch_twist.linear.z = v[2]
        # This is offset so it fires approx at the torpedo launcher location.
        launch_pos = geometry_helpers.rotate_vect_by_quat(
            np.array([0.4, -0.15, -0.3, 0]), sub_pose[1]
        )
        model_state = ModelState()
        model_state.model_name = "torpedo"
        model_state.pose = msg_helpers.numpy_quat_pair_to_pose(
            sub_pose[0] + launch_pos, sub_pose[1]
        )
        model_state.twist = launch_twist
        self.set_torpedo(model_state)
        self.launched = True
        return True
class GripperController:
    """Drives the simulated gripper by applying joint efforts in Gazebo."""

    def __init__(self):
        self.apply_force = rospy.ServiceProxy(
            "/gazebo/apply_joint_effort", ApplyJointEffort
        )
        self.clear_force = rospy.ServiceProxy(
            "/gazebo/clear_joint_forces", JointRequest
        )
        # Gazebo interprets joint efforts as Newton meters.
        self.force_to_apply = 5
        self.joint_name = "grip"

    def set_gripper(self, srv):
        """Clear any existing effort on the grip joint, then push it open or
        closed depending on ``srv.opened``."""
        if srv.opened:
            rospy.loginfo("Opening Gripper")
        else:
            rospy.loginfo("Closing Gripper")
        self.clear_force(JointRequestRequest(joint_name=self.joint_name))
        # Positive effort opens the gripper; negative effort closes it.
        direction = 1 if srv.opened else -1
        effort_request = ApplyJointEffortRequest()
        effort_request.joint_name = self.joint_name
        effort_request.effort = self.force_to_apply * direction
        effort_request.duration = rospy.Duration(-1)
        self.apply_force(effort_request)
        return True
# Script entry point: start the simulated actuator board node and spin.
if __name__ == "__main__":
    rospy.init_node("actuator_board_simulator")
    a = ActuatorBoard()
    rospy.spin()
|
1,222 |
example test 2
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import unittest
from azure_devtools.scenario_tests.preparers import AbstractPreparer
traces = []
class _TestPreparer(AbstractPreparer):
    # Minimal preparer that records create/remove calls in the module-level
    # `traces` list so tests can assert on ordering and caching behavior.
    def __init__(self, name, use_cache=False):
        super(_TestPreparer, self).__init__("test", 20)
        self._name = name
        self.set_cache(use_cache, name)
    def create_resource(self, name, **kwargs):
        # Record the creation; no real resource is made.
        traces.append("create " + self._name)
        return {}
    def remove_resource(self, name, **kwargs):
        traces.append("remove " + self._name)
class _TestClassSample(unittest.TestCase):
    # Two non-cached preparers stacked; used to verify create/remove ordering.
    @_TestPreparer("A")
    @_TestPreparer("B")
    def example_test(self):
        pass
class _CachedTestClassSample(unittest.TestCase):
    # Exercises preparer caching: identical cached preparer stacks should be
    # created once and torn down only at session end.
    @_TestPreparer("A", True)
    @_TestPreparer("B", True)
    def example_test(self):
        pass
    @_TestPreparer("A", True)
    @_TestPreparer("C", True)
    def METHOD_NAME(self):
        pass
    @_TestPreparer("A", True)
    @_TestPreparer("C", False)
    def example_test_3(self):
        pass
    @_TestPreparer("A", True)
    @_TestPreparer("C", False)
    def fail_test(self):
        # Raises on purpose so tests can verify cached resources survive a
        # failing test body.
        raise Exception("Intentional failure to test cache.")
    @_TestPreparer("PARENT", True)
    @_TestPreparer("A", True)
    @_TestPreparer("C", True)
    def parent_cache_test(self):
        pass
def test_preparer_order():
    """Non-cached preparers create outside-in and remove inside-out."""
    # Mimic a real test runner, for better compat 2.7 / 3.x
    suite = unittest.TestSuite()
    suite.addTest(_TestClassSample("example_test"))
    unittest.TextTestRunner().run(suite)
    assert traces == ["create A", "create B", "remove B", "remove A"]
def test_cached_preparer_order():
    # Cached preparers (A, B, first C) are created once and kept; only the
    # non-cached C is created and removed per-test.
    # Mimic a real test runner, for better compat 2.7 / 3.x
    suite = unittest.TestSuite()
    suite.addTest(_CachedTestClassSample("example_test"))
    suite.addTest(_CachedTestClassSample("example_test_2"))
    suite.addTest(_CachedTestClassSample("example_test_3"))
    unittest.TextTestRunner().run(suite)
    assert len(traces) == 5
    assert traces[0] == "create A"
    assert traces[1] == "create B"
    assert traces[2] == "create C"
    assert traces[3] == "create C"
    assert traces[4] == "remove C"  # One of the C's is cached, one is not.
    # Note: unit test runner doesn't trigger the pytest session fixture that deletes resources when all tests are done.
    # let's run that manually now to test it.
    AbstractPreparer._perform_pending_deletes()
    assert len(traces) == 8
    # we're technically relying on an implementation detail (for earlier versions of python
    # dicts did not guarantee ordering by insertion order, later versions do)
    # to order removal by relying on dict ordering.
    assert traces[5] == "remove C"
    assert traces[6] == "remove B"
    assert traces[7] == "remove A"
def test_cached_preparer_failure():
    # A failing test body must not discard resources created by cached
    # preparers; they are still torn down by the pending-delete sweep.
    # Mimic a real test runner, for better compat 2.7 / 3.x
    suite = unittest.TestSuite()
    suite.addTest(_CachedTestClassSample("fail_test"))
    suite.addTest(_CachedTestClassSample("example_test"))
    suite.addTest(_CachedTestClassSample("example_test_2"))
    suite.addTest(_CachedTestClassSample("example_test_3"))
    unittest.TextTestRunner().run(suite)
    AbstractPreparer._perform_pending_deletes()
    # the key here is that the cached A and noncached C is used even though the test failed, and successfully removed later.
    assert traces == [
        "create A",
        "create C",
        "remove C",
        "create B",
        "create C",
        "create C",
        "remove C",
        "remove C",
        "remove B",
        "remove A",
    ]
def test_cached_preparer_parent_cache_keying():
    # Cache keys include the full parent chain: the same child preparer under
    # a different parent must get a fresh resource.
    # Mimic a real test runner, for better compat 2.7 / 3.x
    suite = unittest.TestSuite()
    suite.addTest(_CachedTestClassSample("example_test_2"))
    suite.addTest(_CachedTestClassSample("example_test_3"))
    suite.addTest(_CachedTestClassSample("parent_cache_test"))
    unittest.TextTestRunner().run(suite)
    AbstractPreparer._perform_pending_deletes()
    # The key here is to observe that changing a parent preparer means the child preparers can't utilize a cache from a cache-stack not including that parent.
    assert traces == [
        "create A",
        "create C",
        "create C",
        "remove C",
        "create PARENT",
        "create A",
        "create C",
        "remove C",
        "remove A",
        "remove PARENT",
        "remove C",
        "remove A",
    ]
|
1,223 |
test shows enhydris version
|
import datetime as dt
from django.contrib.auth.models import User
from django.test import TestCase, override_settings
from bs4 import BeautifulSoup
from model_mommy import mommy
from enhydris.models import Station
from enhydris.telemetry.models import Telemetry, TelemetryLogMessage
class TelemetryLogViewTestCase(TestCase):
    # Shared fixture: `station` owns one telemetry log; `station2` has none.
    @classmethod
    def setUpTestData(cls):
        cls.user = User.objects.create_user("alice", password="topsecret")
        cls.station = mommy.make(Station, name="great station", creator=cls.user)
        cls.station2 = mommy.make(Station, name="another station", creator=cls.user)
        cls.telemetry = mommy.make(Telemetry, station=cls.station)
        cls.telemetry_log = mommy.make(
            TelemetryLogMessage,
            telemetry=cls.telemetry,
            exception_name="HugeError",
            message="huge error",
            traceback="Detailed traceback:\nThe problem occurred somewhere",
            enhydris_version="14.15.16",
            enhydris_commit_id="1234567890abcdef",
        )
        # Pin the timestamp so detail-view date assertions are deterministic.
        utc = dt.timezone.utc
        cls.telemetry_log.timestamp = dt.datetime(2022, 8, 5, 18, 31, 44, tzinfo=utc)
        cls.telemetry_log.save()
@override_settings(ENHYDRIS_USERS_CAN_ADD_CONTENT=True)
class TelemetryLogListViewTestCase(TelemetryLogViewTestCase):
    # Log list page for the station that has one telemetry log.
    def setUp(self):
        self.client.login(username="alice", password="topsecret")
        self.url = f"/stations/{self.station.id}/telemetry/logs/"
        self.response = self.client.get(self.url)
    def test_shows_message(self):
        self.assertContains(self.response, "HugeError: huge error")
    def test_shows_station_name(self):
        self.assertContains(self.response, "great station")
    def test_links_to_detail_page(self):
        # Each list item links to its detail page.
        log_id = self.telemetry_log.id
        soup = BeautifulSoup(self.response.content, "html.parser")
        target = soup.find("div", class_="list-group-item").find("a").get("href")
        self.assertEqual(target, f"{self.url}{log_id}/")
    def test_no_message_about_no_logs(self):
        self.assertNotContains(self.response, "No telemetry errors have been logged")
@override_settings(ENHYDRIS_USERS_CAN_ADD_CONTENT=True)
class TelemetryLogListViewNoLogsTestCase(TelemetryLogViewTestCase):
    # Log list page for the second station, which has no telemetry logs.
    def setUp(self):
        self.client.login(username="alice", password="topsecret")
        self.url = f"/stations/{self.station2.id}/telemetry/logs/"
        self.response = self.client.get(self.url)
    def test_has_no_logs(self):
        self.assertNotContains(self.response, "HugeError")
    def test_message_no_logs(self):
        self.assertContains(
            self.response, "No telemetry errors have been logged for this station."
        )
@override_settings(ENHYDRIS_USERS_CAN_ADD_CONTENT=True)
class TelemetryLogDetailViewTestCase(TelemetryLogViewTestCase):
    # Detail page of the single telemetry log created in setUpTestData.
    def setUp(self):
        self.client.login(username="alice", password="topsecret")
        log_id = self.telemetry_log.id
        url = f"/stations/{self.telemetry.station_id}/telemetry/logs/{log_id}/"
        self.response = self.client.get(url)
    def test_shows_station_name(self):
        self.assertContains(self.response, "great station")
    def test_shows_message(self):
        self.assertContains(self.response, "huge error")
    def test_shows_exception_name(self):
        self.assertContains(self.response, "HugeError")
    def test_shows_error_date(self):
        self.assertContains(self.response, "2022-08-05 18:31:44")
    def test_shows_traceback(self):
        self.assertContains(
            self.response, "Detailed traceback:\nThe problem occurred somewhere"
        )
    def METHOD_NAME(self):
        # Version is rendered as "<version> (<short commit id>)".
        self.assertContains(self.response, "14.15.16 (1234567890)")
    def test_back_button(self):
        soup = BeautifulSoup(self.response.content, "html.parser")
        target = soup.find(id="back-button").get("href")
        station_id = self.telemetry.station_id
        self.assertEqual(target, f"/stations/{station_id}/telemetry/logs/")
@override_settings(ENHYDRIS_USERS_CAN_ADD_CONTENT=True)
class PermissionsTestCase(TestCase):
    # Telemetry log pages require login; the station creator may view them.
    @classmethod
    def setUpTestData(cls):
        cls.user = User.objects.create_user("alice", password="topsecret")
        cls.station = mommy.make(Station, creator=cls.user)
        cls.telemetry_log_message = mommy.make(
            TelemetryLogMessage,
            telemetry__station=cls.station,
        )
    def test_telemetry_logs_are_visible_when_correct_user_logged_on(self):
        self.client.login(username="alice", password="topsecret")
        response = self.client.get(f"/stations/{self.station.id}/telemetry/logs/")
        self.assertEqual(response.status_code, 200)
    def test_telemetry_logs_denied_when_not_logged_on(self):
        # Anonymous users are redirected to the login page.
        url = f"/stations/{self.station.id}/telemetry/logs/"
        response = self.client.get(url)
        self.assertRedirects(response, f"/accounts/login/?next={url}")
    def test_telemetry_log_detail_visible_when_correct_user_logged_on(self):
        self.client.login(username="alice", password="topsecret")
        station_id = self.station.id
        tlm_id = self.telemetry_log_message.id
        url = f"/stations/{station_id}/telemetry/logs/{tlm_id}/"
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
    def test_telemetry_log_detail_denied_when_not_logged_on(self):
        station_id = self.station.id
        tlm_id = self.telemetry_log_message.id
        url = f"/stations/{station_id}/telemetry/logs/{tlm_id}/"
        response = self.client.get(url)
        self.assertRedirects(response, f"/accounts/login/?next={url}")
|
1,224 |
get generator supported types
|
#!/usr/bin/env python3
#
# This is a tool to generate the bitonic sorter code that is used for small arrays.
#
# usage: bitonic_gen.py [-h] [--vector-isa VECTOR_ISA [VECTOR_ISA ...]]
# [--break-inline BREAK_INLINE] [--output-dir OUTPUT_DIR]
#
# the files in src/coreclr/gc/vxsort/smallsort that are currently checked in can be generated with:
# python bitonic_gen.py --output-dir c:\temp --vector-isa AVX2 AVX512 --break-inline 4
#
import argparse
import os
from enum import Enum
from bitonic_avx2 import AVX2BitonicISA
from bitonic_avx512 import AVX512BitonicISA
from bitonic_isa import BitonicISA
from configuration import Configuration
BitonicISA.register(AVX2BitonicISA)
#BitonicISA.register(AVX512BitonicISA)
def METHOD_NAME(vector_isa):
    """Return the element types the given vector ISA's generator supports.

    Accepts either a VectorISA member or its string name.
    """
    if isinstance(vector_isa, str):
        vector_isa = VectorISA[vector_isa]
    if vector_isa == VectorISA.AVX2:
        return AVX2BitonicISA.supported_types()
    if vector_isa == VectorISA.AVX512:
        return AVX512BitonicISA.supported_types()
    raise Exception(f"Non-supported vector machine-type: {vector_isa}")
def get_generator(vector_isa, type, configuration):
    """Instantiate the bitonic code generator for the given ISA and type."""
    if isinstance(vector_isa, str):
        vector_isa = VectorISA[vector_isa]
    if vector_isa == VectorISA.AVX2:
        return AVX2BitonicISA(type, configuration)
    elif vector_isa == VectorISA.AVX512:
        # NOTE(review): unlike the AVX2 branch, `configuration` is not
        # forwarded here — confirm AVX512BitonicISA's constructor signature
        # (AVX512 is currently commented out of registration and VectorISA).
        return AVX512BitonicISA(type)
    else:
        raise Exception(f"Non-supported vector machine-type: {vector_isa}")
def generate_per_type(f_header, type, vector_isa, break_inline, configuration):
    """Emit the full bitonic sorter for one (ISA, element type) pair.

    Writes prologue, the compounded sorter/merger variants for every vector
    width, the entry points, and the epilogue into ``f_header``.
    """
    g = get_generator(vector_isa, type, configuration)
    g.generate_prologue(f_header)
    # NOTE(review): this reads g.unroll_bitonic_sorters here but
    # configuration.unroll_bitonic_sorters below — confirm they are meant to
    # be the same value.
    if g.unroll_bitonic_sorters < 1:
        g.generate_1v_sorters(f_header, ascending=True)
        g.generate_1v_sorters(f_header, ascending=False)
    for width in range(2, g.max_bitonic_sort_vectors + 1):
        # Allow breaking the inline chain once in a while (configurable)
        if break_inline == 0 or width % break_inline != 0:
            inline = True
        else:
            inline = False
        if width >= configuration.unroll_bitonic_sorters:
            g.generate_compounded_sorter(f_header, width, ascending=True, inline=inline)
            g.generate_compounded_sorter(f_header, width, ascending=False, inline=inline)
        if width <= g.largest_merge_variant_needed():
            g.generate_compounded_merger(f_header, width, ascending=True, inline=inline)
            g.generate_compounded_merger(f_header, width, ascending=False, inline=inline)
    g.generate_entry_points(f_header)
    g.generate_master_entry_point(f_header)
    g.generate_epilogue(f_header)
def generate_main_type(f_header, type, vector_isa, break_inline, configuration):
    """Emit the prologue, master dispatch body and epilogue for one ISA.

    ``break_inline`` is accepted for signature parity with
    ``generate_per_type`` but is not used here.
    """
    generator = get_generator(vector_isa, type, configuration)
    generator.generate_prologue(f_header)
    generator.generate_main(f_header)
    generator.generate_epilogue(f_header)
class VectorISA(Enum):
    """Vector instruction sets the bitonic generator can target."""

    AVX2 = 'AVX2'
    # AVX512 = 'AVX512'
    # SVE = 'SVE'

    def __str__(self) -> str:
        # argparse renders choices via str(); show the bare ISA name.
        return str(self.value)
def generate_bitonicsort_all_types():
    """Parse CLI options and emit the generated bitonic sorter sources.

    For every requested vector ISA this writes one file per supported element
    type plus the per-ISA master dispatch file into the output directory.
    """
    parser = argparse.ArgumentParser()
    #parser.add_argument("--language", type=Language, choices=list(Language),
    #                    help="select output language: csharp/cpp/rust")
    parser.add_argument("--vector-isa",
                        nargs='+',
                        default='all',
                        help='list of vector ISA to generate',
                        # BUG FIX: list.append() returns None, so the previous
                        # `list(VectorISA).append("all")` set choices=None and
                        # disabled argparse choice validation entirely.
                        choices=[str(isa) for isa in VectorISA] + ["all"])
    parser.add_argument("--break-inline", type=int, default=0, help="break inlining every N levels")
    parser.add_argument("--output-dir", type=str, default='..',
                        help="output directory")
    opts = parser.parse_args()
    if 'all' in opts.vector_isa:
        opts.vector_isa = list(VectorISA)
    config = Configuration()
    for isa in opts.vector_isa:
        for t in METHOD_NAME(isa):
            filename = f"BitonicSort.{isa}.{t}.generated"
            # BUG FIX: the progress message previously printed a literal
            # placeholder instead of the file actually being generated.
            print(f"Generating {filename}.cs")
            h_filename = os.path.join(opts.output_dir, filename + ".cs")
            with open(h_filename, "w") as f_header:
                generate_per_type(f_header, t, isa, opts.break_inline, config)
        filename = f"BitonicSort.{isa}.generated"
        print(f"Generating {filename}.cs")
        h_filename = os.path.join(opts.output_dir, filename + ".cs")
        with open(h_filename, "w") as f_header:
            generate_main_type(f_header, t, isa, opts.break_inline, config)
# Script entry point.
if __name__ == '__main__':
    generate_bitonicsort_all_types()
|
1,225 |
collect list dbs
|
# This file is part of the sos project: https://github.com/sosreport/sos
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# version 2 of the GNU General Public License.
#
# See the LICENSE file in the source distribution for further information.
from sos.report.plugins import Plugin, RedHatPlugin
class sapnw(Plugin, RedHatPlugin):
    """Collects SAP NetWeaver diagnostics: installed instances (profiles,
    process lists, versions, adm user environments) and database configs."""
    short_desc = 'SAP NetWeaver'
    plugin_name = 'sapnw'
    profiles = ('sap',)
    files = ('/usr/sap',)
    def collect_list_instances(self):
        # list installed instances
        inst_out = self.collect_cmd_output(
            "/usr/sap/hostctrl/exe/saphostctrl -function ListInstances",
            suggest_filename="SAPInstances"
        )
        if inst_out['status'] != 0:
            return
        # set the common strings that will be formatted later in each a_c_s
        prof_cmd = "env -i %s %s/sappfpar all pf=/usr/sap/%s/SYS/profile/%s"
        inst_cmd = "env -i %s %s/sapcontrol -nr %s -function GetProcessList"
        vers_cmd = "env -i %s %s/sapcontrol -nr %s -function GetVersionInfo"
        user_cmd = 'su - %sadm -c "sapcontrol -nr %s -function GetEnvironment"'
        sidsunique = set()
        # Cycle through all the instances, get 'sid', 'instance_number'
        # and 'vhost' to determine the proper profile
        for inst_line in inst_out['output'].splitlines():
            # Skip diagnostic-agent (DAA) instances and the "none found" line.
            if ("DAA" not in inst_line and not
                    inst_line.startswith("No instances found")):
                fields = inst_line.strip().split()
                if len(fields) < 8:
                    continue
                sid = fields[3]
                inst = fields[5]
                vhost = fields[7]
                sidsunique.add(sid)
                path = "/usr/sap/%s/SYS/profile/" % sid
                if not self.path_exists(path):
                    continue
                # The instance profile file name contains sid, instance
                # number and virtual hostname.
                for line in self.listdir(path):
                    if all(f in line for f in [sid, inst, vhost]):
                        ldenv = 'LD_LIBRARY_PATH=/usr/sap/%s/SYS/exe/run' % sid
                        # TODO: I am assuming unicode here
                        # nuc should be accounted
                        pt = '/usr/sap/%s/SYS/exe/uc/linuxx86_64' % sid
                        profile = line.strip()
                        # collect profiles
                        self.add_cmd_output(
                            prof_cmd % (ldenv, pt, sid, profile),
                            suggest_filename="%s_parameters" % profile
                        )
                        # collect instance status
                        self.add_cmd_output(
                            inst_cmd % (ldenv, pt, inst),
                            suggest_filename="%s_%s_GetProcList" % (sid, inst)
                        )
                        # collect version info for the various components
                        self.add_cmd_output(
                            vers_cmd % (ldenv, pt, inst),
                            suggest_filename="%s_%s_GetVersInfo" % (sid, inst)
                        )
                        # collect <SID>adm user environment
                        lowsid = sid.lower()
                        fname = "%s_%sadm_%s_userenv" % (sid, lowsid, inst)
                        self.add_cmd_output(
                            user_cmd % (lowsid, inst),
                            suggest_filename=fname
                        )
        # traverse the sids list, collecting info about dbclient
        for sid in sidsunique:
            self.add_copy_spec("/usr/sap/%s/*DVEB*/work/dev_w0" % sid)
    def METHOD_NAME(self):
        # list installed sap dbs
        db_out = self.collect_cmd_output(
            "/usr/sap/hostctrl/exe/saphostctrl -function ListDatabases",
            suggest_filename="SAPDatabases"
        )
        if db_out['status'] != 0:
            return
        for line in db_out['output'].splitlines():
            if "Instance name" in line:
                fields = line.strip().split()
                # Field layout: trailing punctuation is stripped with [:-1];
                # the SID is derived from the db admin user name.
                dbadm = fields[2][:-1]
                dbtype = fields[8][:-1]
                sid = dbadm[3:].upper()
                if dbtype == 'db6':
                    # IBM DB2
                    self.add_cmd_output(
                        "su - %s -c \"db2 get dbm cfg\"" % dbadm,
                        suggest_filename="%s_%s_db2_info" % (sid, dbadm)
                    )
                elif dbtype == 'sap':
                    # SAP MAXDB
                    sid = fields[2][:-1]
                    self.add_copy_spec(
                        "/sapdb/%s/data/config/%s.pah" % (sid, sid)
                    )
                elif dbtype == 'ora':
                    # Oracle
                    sid = fields[2][:-1]
                    self.add_copy_spec("/oracle/%s/*/dbs/init.ora" % sid)
                elif dbtype == 'syb':
                    # Sybase
                    sid = fields[2][:-1]
                    self.add_copy_spec("/sybase/%s/ASE*/%s.cfg" % (sid, sid))
    def setup(self):
        # Entry point called by the sos framework.
        self.collect_list_instances()
        self.METHOD_NAME()
        # run sapconf in check mode
        #
        # since the command creates a limits.d file on its own,
        # we must predicate it by presence of the file
        if self.path_exists('/etc/security/limits.d/99-sap-limits.conf') \
                or self.get_option('allow_system_changes'):
            self.add_cmd_output("sapconf -n",
                                suggest_filename="sapconf_checkmode")
# vim: et ts=4 sw=4
|
1,226 |
account read
|
# -*- coding: utf-8 -*-
#
# This file is part of EUDAT B2Share.
# Copyright (C) 2017 University of Tuebingen, CERN, CSC, KTH.
#
# B2Share is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# B2Share is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with B2Share; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Test permissions of user account REST API."""
import json
from invenio_db import db
from flask import url_for
from invenio_accounts.models import User
from invenio_oauth2server.models import Token
from invenio_oauth2server import current_oauth2server
from b2share_unit_tests.helpers import create_user
def test_accounts_search_permission(app, test_users, test_community,
                                    login_user):
    """Test permission of listing user accounts.

    Only community admins and the site admin may list all accounts.
    """
    def account_search(user, expected_code):
        # GET the users list as `user` (or anonymously when user is None)
        # and assert the expected HTTP status.
        headers = [('Content-Type', 'application/json'),
                   ('Accept', 'application/json')]
        with app.app_context():
            url = url_for('invenio_accounts_rest.users_list')
            if user:
                scopes = current_oauth2server.scope_choices()
                allowed_token = Token.create_personal(
                    'allowed_token', user.id,
                    scopes=[s[0] for s in scopes]
                )
                # application authentication token header
                headers.append(('Authorization',
                                'Bearer {}'.format(allowed_token.access_token)))
        with app.test_client() as client:
            if user is not None:
                login_user(user, client)
            res = client.get(url, headers=headers)
            assert res.status_code == expected_code
    # anonymous users can't list accounts
    account_search(None, 401)
    # authenticated users can't list other users' account
    account_search(test_users['normal'], 403)
    # community members cannot list all users' accounts
    account_search(test_community.member, 403)
    # community admins can list all users
    account_search(test_community.admin, 200)
    # admin is allowed to list all accounts
    account_search(test_users['admin'], 200)
def test_account_read_permission(app, test_users, test_community,
                                 login_user):
    """Test permission of reading a single user account.

    A user may read their own account; community admins and the site admin
    may read anyone's.
    """
    with app.app_context():
        read_user = create_user('read_user')
        url = url_for('invenio_accounts_rest.user',
                      user_id=read_user.id)
        db.session.commit()
    headers = [('Content-Type', 'application/json'),
               ('Accept', 'application/json')]
    def METHOD_NAME(user, expected_code):
        # GET read_user's account as `user` and assert the HTTP status.
        with app.test_client() as client:
            if user is not None:
                login_user(user, client)
            res = client.get(url, headers=headers)
            assert res.status_code == expected_code
    # anonymous users can't read accounts
    METHOD_NAME(None, 401)
    # authenticated users can't read other users' account
    METHOD_NAME(test_users['normal'], 403)
    # community members cannot read other users' account
    METHOD_NAME(test_community.member, 403)
    # users can read their own account
    METHOD_NAME(read_user, 200)
    # community admins can list all users
    METHOD_NAME(test_community.admin, 200)
    # admin is allowed to read all accounts
    METHOD_NAME(test_users['admin'], 200)
def test_account_activation_permission(app, test_users, test_community,
                                       login_user):
    """Test deactivating a user account.

    Only the site admin may deactivate accounts; users cannot deactivate
    even their own.
    """
    # Mutable counter cell so the nested closures can generate unique
    # usernames across calls.
    counter = [0]
    def account_update(user, expected_code, modified_user=None):
        # Attempt to PATCH `modified_user` (a fresh user by default) as
        # `user`, with both plain-JSON and JSON-patch payloads.
        def account_update_sub(patch_content, content_type):
            with app.app_context():
                if modified_user is None:
                    test_user = create_user(
                        'test_user{}'.format(counter[0]))
                else:
                    test_user = modified_user
                counter[0] += 1
                url = url_for(
                    'invenio_accounts_rest.user',
                    user_id=test_user.id,
                )
                db.session.commit()
            headers = [('Content-Type', content_type),
                       ('Accept', 'application/json')]
            with app.test_client() as client:
                if user is not None:
                    login_user(user, client)
                res = client.patch(url, headers=headers,
                                   data=json.dumps(patch_content))
                assert res.status_code == expected_code
        # test with a simple JSON
        account_update_sub({'active': False}, 'application/json')
        # test with a JSON patch
        account_update_sub([{
            'op': 'replace', 'path': '/active','value': False
        }], 'application/json-patch+json')
    # anonymous users can't activate/deactivate accounts
    account_update(None, 401)
    # authenticated users can't activate/deactivate other users' account
    account_update(test_users['normal'], 403)
    # users can't deactivate their own accounts
    account_update(test_users['normal'], 403, test_users['normal'])
    # admin is allowed to activate/deactivate accounts
    account_update(test_users['admin'], 200)
def test_account_roles_search_permission(app, test_users, test_community,
                                         login_user):
    """Test permission of listing user accounts."""
    with app.app_context():
        read_user = create_user('read_user')
        url = url_for('invenio_accounts_rest.user_roles_list',
                      user_id=read_user.id)
        db.session.commit()
    headers = [('Content-Type', 'application/json'),
               ('Accept', 'application/json')]

    def roles_read(user, expected_code):
        # Perform the GET as *user* (or anonymously) and check the status code.
        with app.test_client() as client:
            if user is not None:
                login_user(user, client)
            response = client.get(url, headers=headers)
            assert response.status_code == expected_code

    # anonymous users can't read other users' roles
    roles_read(None, 401)
    # any authenticated user can read other users' roles
    roles_read(test_users['normal'], 200)
|
1,227 |
t
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import opengate as gate
import itk
from scipy.spatial.transform import Rotation
if __name__ == "__main__":
    # Integration test: a voxelized source placed inside a rotated/translated
    # CT image, scored with a DoseActor in the CT image coordinate system.
    paths = gate.get_default_test_paths(__file__, "")

    # create the simulation
    sim = gate.Simulation()

    # main options
    ui = sim.user_info
    ui.g4_verbose = False
    ui.visu = False
    ui.number_of_threads = 1
    ui.random_seed = 123456  # fixed seed for reproducible statistics
    print(ui)

    # add a material database
    sim.add_material_database(paths.data / "GateMaterials.db")

    # units
    m = gate.g4_units("m")
    mm = gate.g4_units("mm")
    cm = gate.g4_units("cm")
    keV = gate.g4_units("keV")
    MeV = gate.g4_units("MeV")
    Bq = gate.g4_units("Bq")
    kBq = 1000 * Bq

    # change world size
    world = sim.world
    world.size = [1.5 * m, 1 * m, 1 * m]

    # fake box #1: rotated container volume, so that the CT placed inside is
    # exercised with a non-trivial compound transformation
    fake = sim.add_volume("Box", "fake")
    fake.size = [36 * cm, 36 * cm, 36 * cm]
    fake.translation = [25 * cm, 0, 0]
    r = Rotation.from_euler("y", -25, degrees=True)
    r = r * Rotation.from_euler("x", -35, degrees=True)
    fake.rotation = r.as_matrix()

    # ---------------------------------------------------
    # CT image #1
    ct = sim.add_volume("Image", "ct")
    ct.image = str(paths.data / "10x10x10.mhd")
    ct.mother = fake.name
    ct.voxel_materials = [[0, 10, "G4_WATER"]]
    ct.translation = [-3 * cm, 0, 0]
    r = Rotation.from_euler("z", 45, degrees=True)
    ct.rotation = r.as_matrix()
    ct_info = gate.read_image_info(ct.image)
    print(f"CT image origin and size: ", ct_info.origin, ct_info.size, ct_info.spacing)

    # source from image for CT #1
    source = sim.add_source("VoxelsSource", "vox_source")
    source.mother = ct.name
    source.particle = "alpha"
    # activity is split across threads so the total stays constant
    source.activity = 10000 * Bq / ui.number_of_threads
    source.image = str(paths.data / "five_pixels_10.mhd")
    source.direction.type = "iso"
    # align the source image center with the CT image center
    source.position.translation = gate.get_translation_between_images_center(
        ct.image, source.image
    )
    print(f"Source wrt CT 10x10x10 translation", source.position.translation)
    source.energy.mono = 1 * MeV
    src_info = gate.read_image_info(source.image)
    print(
        f"Source image origin and size: ",
        src_info.origin,
        src_info.size,
        src_info.spacing,
    )

    # add dose actor: scored on the CT volume, in the CT image coordinate system
    dose = sim.add_actor("DoseActor", "dose")
    dose.output = paths.output / "test021-edep_1.mhd"
    dose.mother = ct.name
    img_info = gate.read_image_info(ct.image)
    dose.size = img_info.size
    dose.spacing = img_info.spacing
    dose.img_coord_system = True

    # cuts
    sim.physics_manager.physics_list_name = "QGSP_BERT_EMZ"
    sim.physics_manager.enable_decay = False
    sim.physics_manager.global_production_cuts.all = 1 * mm
    # sim.set_production_cut("world", "all", 1 * mm)

    # add stat actor
    stats = sim.add_actor("SimulationStatisticsActor", "Stats")
    stats.track_types_flag = True

    # verbose
    sim.apply_g4_command("/tracking/verbose 0")

    # start simulation
    sim.run()

    # print results at the end
    stat = sim.output.get_actor("Stats")
    # stat.write(paths.output_ref / "stat021_ref_1.txt")

    # test pixels in dose #1
    d_even = itk.imread(str(dose.output))
    s = itk.array_view_from_image(d_even).sum()
    v0 = d_even.GetPixel([5, 5, 5])
    v1 = d_even.GetPixel([1, 5, 5])
    v2 = d_even.GetPixel([1, 2, 5])
    v3 = d_even.GetPixel([5, 2, 5])
    v4 = d_even.GetPixel([6, 2, 5])
    tol = 0.15  # 15 % relative tolerance on every pixel comparison
    ss = v0 + v1 + v2 + v3 + v4

    def METHOD_NAME(s, v):
        # Relative comparison of v against reference s; prints PASS/FAIL via gate.
        diff = abs(s - v) / s
        b = diff < tol
        p = diff * 100.0
        gate.print_test(b, f"Image diff {s:.2f} vs {v:.2f} -> {p:.2f}%")
        return b

    # the five source voxels should contain (almost) all deposited energy
    is_ok = METHOD_NAME(s, ss)
    is_ok = METHOD_NAME(2000, v0) and is_ok
    is_ok = METHOD_NAME(2000, v1) and is_ok
    is_ok = METHOD_NAME(2000, v2) and is_ok
    is_ok = METHOD_NAME(2000, v3) and is_ok
    is_ok = METHOD_NAME(2000, v4) and is_ok

    # compare run statistics against the stored reference
    stats_ref = gate.read_stat_file(paths.output_ref / "stat021_ref_1.txt")
    stats_ref.counts.run_count = ui.number_of_threads
    is_ok = gate.assert_stats(stat, stats_ref, 0.1) and is_ok
    gate.test_ok(is_ok)
|
1,228 |
test sections absent
|
"""
Test cases for salt.states.ini_manage
"""
import copy
import os
import pytest
import salt.modules.ini_manage as mod_ini_manage
import salt.states.ini_manage as ini_manage
from salt.utils.odict import OrderedDict
from tests.support.mock import MagicMock, patch
@pytest.fixture
def configure_loader_modules():
    """Wire the state module's loader dunders to the real execution module."""
    salt_functions = {
        "ini.get_ini": mod_ini_manage.get_ini,
        "ini.set_option": mod_ini_manage.set_option,
    }
    return {
        ini_manage: {"__salt__": salt_functions, "__opts__": {"test": False}},
        mod_ini_manage: {"__opts__": {"test": False}},
    }
@pytest.fixture
def sections():
    """Canonical INI content: one ``[general]`` section with hostname and port
    (both stored as strings, matching what the ini module reads back)."""
    general = OrderedDict()
    general["hostname"] = "myserver.com"
    general["port"] = "1234"
    data = OrderedDict()
    data["general"] = general
    return data
def test_options_present(tmp_path, sections):
    """
    Test to verify options present when
    file does not initially exist
    """
    ini_file = str(tmp_path / "test.ini")
    expected = {
        "name": ini_file,
        "changes": {"general": {"before": None, "after": sections["general"]}},
        "result": True,
        "comment": "Changes take effect",
    }
    assert ini_manage.options_present(ini_file, sections) == expected
    # The state must have created the file with exactly the requested content.
    assert os.path.exists(ini_file)
    assert mod_ini_manage.get_ini(ini_file) == sections
def test_options_present_true_no_file(tmp_path, sections):
    """
    Test to verify options present when
    file does not initially exist and test=True
    """
    ini_file = str(tmp_path / "test_true_no_file.ini")
    expected = {
        "name": ini_file,
        "changes": {},
        "result": None,
        "comment": (
            "Changed key hostname in section general.\n"
            "Changed key port in section general.\n"
        ),
    }
    # In test mode the state only reports what it would do.
    with patch.dict(ini_manage.__opts__, {"test": True}):
        with patch.dict(mod_ini_manage.__opts__, {"test": True}):
            assert ini_manage.options_present(ini_file, sections) == expected
    # ... and nothing may be written to disk.
    assert not os.path.exists(ini_file)
def test_options_present_true_file(tmp_path, sections):
    """
    Test to verify options present when
    file does exist and test=True
    """
    ini_file = str(tmp_path / "test_true_file.ini")
    expected = {
        "name": ini_file,
        "changes": {},
        "result": None,
        "comment": (
            "Unchanged key hostname in section general.\n"
            "Unchanged key port in section general.\n"
            "Changed key user in section general.\n"
        ),
    }
    # Write the initial content for real, then re-run in test mode with one
    # extra option and verify the file on disk is left untouched.
    ini_manage.options_present(ini_file, sections)
    extended = copy.deepcopy(sections)
    extended["general"]["user"] = "saltuser"
    with patch.dict(ini_manage.__opts__, {"test": True}):
        with patch.dict(mod_ini_manage.__opts__, {"test": True}):
            assert ini_manage.options_present(ini_file, extended) == expected
    assert os.path.exists(ini_file)
    assert mod_ini_manage.get_ini(ini_file) == sections
def test_options_absent():
    """
    Test to verify options absent in file.
    """
    name = "salt"
    expected = {"name": name, "result": None, "comment": "", "changes": {}}

    # test=True with nothing to remove
    with patch.dict(ini_manage.__opts__, {"test": True}):
        expected.update({"comment": "No changes detected.", "result": True})
        assert ini_manage.options_absent(name) == expected

    # real run with nothing to remove
    with patch.dict(ini_manage.__opts__, {"test": False}):
        expected.update({"comment": "No anomaly detected", "result": True})
        assert ini_manage.options_absent(name) == expected

    # real run removing two keys; the mocked remover reports their old values
    sections = {"Tables": ["key2", "key3"]}
    changes = {"Tables": {"key2": "2", "key3": "3"}}
    remove_option = MagicMock(side_effect=["2", "3"])
    with patch.dict(ini_manage.__salt__, {"ini.remove_option": remove_option}):
        with patch.dict(ini_manage.__opts__, {"test": False}):
            expected.update(
                {"comment": "Changes take effect", "result": True, "changes": changes}
            )
            assert ini_manage.options_absent(name, sections) == expected
def test_sections_present():
    """
    Test to verify sections present in file.
    """
    name = "salt"
    expected = {"name": name, "result": None, "comment": "", "changes": {}}

    # test=True with no existing ini content -> nothing to change
    with patch.dict(ini_manage.__opts__, {"test": True}):
        with patch.dict(
            ini_manage.__salt__, {"ini.get_ini": MagicMock(return_value=None)}
        ):
            expected.update({"comment": "No changes detected.", "result": True})
            assert ini_manage.sections_present(name) == expected

    # real run; the mocked setter reports the sections it created
    changes = {
        "first": "who is on",
        "second": "what is on",
        "third": "I don't know",
    }
    set_option = MagicMock(return_value=changes)
    with patch.dict(ini_manage.__salt__, {"ini.set_option": set_option}):
        with patch.dict(ini_manage.__opts__, {"test": False}):
            expected.update(
                {"comment": "Changes take effect", "result": True, "changes": changes}
            )
            assert ini_manage.sections_present(name) == expected
def METHOD_NAME():
    """
    Test to verify sections absent in file.
    """
    name = "salt"
    expected = {"name": name, "result": None, "comment": "", "changes": {}}

    # test=True with no existing ini content -> nothing to remove
    with patch.dict(ini_manage.__opts__, {"test": True}):
        with patch.dict(
            ini_manage.__salt__, {"ini.get_ini": MagicMock(return_value=None)}
        ):
            expected.update({"comment": "No changes detected.", "result": True})
            assert ini_manage.sections_absent(name) == expected

    # real run with nothing to remove
    with patch.dict(ini_manage.__opts__, {"test": False}):
        expected.update({"comment": "No anomaly detected", "result": True})
        assert ini_manage.sections_absent(name) == expected
|
1,229 |
tear down
|
#!/usr/bin/env python3
import os
import random
import unittest
import warnings
from math import exp, pi
import torch
from torch import optim
import gpytorch
from gpytorch.distributions import MultivariateNormal
from gpytorch.kernels import GridInterpolationKernel, RBFKernel, ScaleKernel
from gpytorch.likelihoods import FixedNoiseGaussianLikelihood
from gpytorch.means import ConstantMean
from gpytorch.priors import SmoothedBoxPrior
from gpytorch.test.utils import least_used_cuda_device
from gpytorch.utils.warnings import GPInputWarning
# Simple training data: let's try to learn a sine function,
# but with KISS-GP let's use 100 training examples.
def make_data(cuda=False):
train_x = torch.linspace(0, 1, 100)
train_y = torch.sin(train_x * (2 * pi))
test_x = torch.linspace(0, 1, 51)
test_y = torch.sin(test_x * (2 * pi))
if cuda:
train_x = train_x.cuda()
train_y = train_y.cuda()
test_x = test_x.cuda()
test_y = test_y.cuda()
return train_x, train_y, test_x, test_y
class GPRegressionModel(gpytorch.models.ExactGP):
    """Exact GP with a grid-interpolated (KISS-GP) scaled RBF kernel."""

    def __init__(self, train_x, train_y, likelihood):
        super(GPRegressionModel, self).__init__(train_x, train_y, likelihood)
        # Constant mean, boxed tightly around zero by its prior.
        self.mean_module = ConstantMean(constant_prior=SmoothedBoxPrior(-1e-5, 1e-5))
        self.base_covar_module = ScaleKernel(RBFKernel(lengthscale_prior=SmoothedBoxPrior(exp(-5), exp(6), sigma=0.1)))
        # Interpolate the base kernel onto a 50-point 1D grid (KISS-GP).
        self.grid_covar_module = GridInterpolationKernel(self.base_covar_module, grid_size=50, num_dims=1)
        self.covar_module = self.grid_covar_module

    def forward(self, x):
        mean_x = self.mean_module(x)
        covar_x = self.covar_module(x)
        return MultivariateNormal(mean_x, covar_x)
class TestKISSGPWhiteNoiseRegression(unittest.TestCase):
    """KISS-GP regression tests using a fixed-noise Gaussian likelihood."""

    def setUp(self):
        # Freeze all RNGs (unless UNLOCK_SEED is set) so results are reproducible.
        if os.getenv("UNLOCK_SEED") is None or os.getenv("UNLOCK_SEED").lower() == "false":
            self.rng_state = torch.get_rng_state()
            torch.manual_seed(0)
            if torch.cuda.is_available():
                torch.cuda.manual_seed_all(0)
            random.seed(0)

    def METHOD_NAME(self):
        # tearDown: restore the RNG state captured in setUp, if any.
        if hasattr(self, "rng_state"):
            torch.set_rng_state(self.rng_state)

    def test_kissgp_gp_mean_abs_error(self):
        # This test throws a warning because the fixed noise likelihood gets the wrong input
        warnings.simplefilter("ignore", GPInputWarning)
        train_x, train_y, test_x, test_y = make_data()
        likelihood = FixedNoiseGaussianLikelihood(torch.ones(100) * 0.001)
        gp_model = GPRegressionModel(train_x, train_y, likelihood)
        mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model)

        # Optimize the model
        gp_model.train()
        likelihood.train()
        optimizer = optim.Adam(gp_model.parameters(), lr=0.1)
        optimizer.n_iter = 0
        with gpytorch.settings.debug(False):
            for _ in range(25):
                optimizer.zero_grad()
                output = gp_model(train_x)
                loss = -mll(output, train_y)
                loss.backward()
                optimizer.n_iter += 1
                optimizer.step()

            # every parameter must have received a non-trivial gradient
            for param in gp_model.parameters():
                self.assertTrue(param.grad is not None)
                self.assertGreater(param.grad.norm().item(), 0)

            # Test the model
            gp_model.eval()
            likelihood.eval()
            test_preds = likelihood(gp_model(test_x)).mean
            mean_abs_error = torch.mean(torch.abs(test_y - test_preds))

        self.assertLess(mean_abs_error.squeeze().item(), 0.05)

    def test_kissgp_gp_fast_pred_var(self):
        with gpytorch.settings.fast_pred_var(), gpytorch.settings.debug(False):
            train_x, train_y, test_x, test_y = make_data()
            likelihood = FixedNoiseGaussianLikelihood(torch.ones(100) * 0.001)
            gp_model = GPRegressionModel(train_x, train_y, likelihood)
            mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model)

            # Optimize the model
            gp_model.train()
            likelihood.train()
            optimizer = optim.Adam(gp_model.parameters(), lr=0.1)
            optimizer.n_iter = 0
            for _ in range(25):
                optimizer.zero_grad()
                output = gp_model(train_x)
                loss = -mll(output, train_y)
                loss.backward()
                optimizer.n_iter += 1
                optimizer.step()

            for param in gp_model.parameters():
                self.assertTrue(param.grad is not None)
                self.assertGreater(param.grad.norm().item(), 0)

            # Test the model
            gp_model.eval()
            likelihood.eval()
            # Set the cache
            test_function_predictions = likelihood(gp_model(train_x))
            # Now bump up the likelihood to something huge
            # This will make it easy to calculate the variance
            likelihood.noise = torch.ones(100) * 3.0
            test_function_predictions = likelihood(gp_model(train_x))
            noise = likelihood.noise
            var_diff = (test_function_predictions.variance - noise).abs()
            self.assertLess(torch.max(var_diff / noise), 0.05)

    def test_kissgp_gp_mean_abs_error_cuda(self):
        # Silently skip on machines without a GPU.
        if not torch.cuda.is_available():
            return
        with least_used_cuda_device():
            train_x, train_y, test_x, test_y = make_data(cuda=True)
            likelihood = FixedNoiseGaussianLikelihood(torch.ones(100) * 0.001).cuda()
            gp_model = GPRegressionModel(train_x, train_y, likelihood).cuda()
            mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model)

            # Optimize the model
            gp_model.train()
            likelihood.train()
            optimizer = optim.Adam(gp_model.parameters(), lr=0.1)
            optimizer.n_iter = 0
            with gpytorch.settings.debug(False):
                for _ in range(25):
                    optimizer.zero_grad()
                    output = gp_model(train_x)
                    loss = -mll(output, train_y)
                    loss.backward()
                    optimizer.n_iter += 1
                    optimizer.step()

                for param in gp_model.parameters():
                    self.assertTrue(param.grad is not None)
                    self.assertGreater(param.grad.norm().item(), 0)

                # Test the model
                gp_model.eval()
                likelihood.eval()
                test_preds = likelihood(gp_model(test_x)).mean
                mean_abs_error = torch.mean(torch.abs(test_y - test_preds))

            self.assertLess(mean_abs_error.squeeze().item(), 0.02)
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
|
1,230 |
data
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from math import pi, cos, sin
from compas.geometry import Point
from compas.geometry import Vector
from compas.geometry import Frame
from compas.geometry import Circle
from .surface import Surface
PI2 = 2 * pi
class CylindricalSurface(Surface):
    """A cylindrical surface is defined by a radius and a local coordinate system.

    Parameters
    ----------
    radius : float
        The radius of the cylinder.
    frame : :class:`Frame`
        The local coordinate system of the cylinder.

    Attributes
    ----------

    Examples
    --------

    """

    DATASCHEMA = {
        "type": "object",
        "properties": {
            "radius": {"type": "number", "minimum": 0},
            "frame": Frame.DATASCHEMA,
        },
        "required": ["radius", "frame"],
    }

    # overwriting the __new__ method is necessary
    # to avoid triggering the plugin mechanism of the base surface class
    def __new__(cls, *args, **kwargs):
        surface = object.__new__(cls)
        surface.__init__(*args, **kwargs)
        return surface

    def __init__(self, radius, frame=None, **kwargs):
        super(CylindricalSurface, self).__init__(frame=frame, **kwargs)
        self._radius = None
        self.radius = radius

    def __repr__(self):
        return "{0}(radius={1}, frame={2!r})".format(
            type(self).__name__,
            self.radius,
            self.frame,
        )

    def __eq__(self, other):
        # Equality requires matching radius and frame; any object lacking
        # those attributes simply compares unequal.
        try:
            other_frame = other.frame
            other_radius = other.radius
        except Exception:
            return False
        return self.radius == other_radius and self.frame == other_frame

    # =============================================================================
    # Data
    # =============================================================================

    @property
    def METHOD_NAME(self):
        # Serializable representation (radius + frame data).
        return {
            "radius": self.radius,
            "frame": self.frame.METHOD_NAME,
        }

    @classmethod
    def from_data(cls, METHOD_NAME):
        """Reconstruct a cylindrical surface from its data representation."""
        return cls(
            radius=METHOD_NAME["radius"],
            frame=Frame.from_data(METHOD_NAME["frame"]),
        )

    # =============================================================================
    # Properties
    # =============================================================================

    @property
    def center(self):
        # The center of the cylinder is the origin of its local frame.
        return self.frame.point

    @center.setter
    def center(self, point):
        self.frame.point = point

    @property
    def radius(self):
        if self._radius is None:
            raise ValueError("The radius of the surface has not been set yet.")
        return self._radius

    @radius.setter
    def radius(self, radius):
        if radius < 0:
            raise ValueError("The radius of a sphere should be larger than or equal to zero.")
        self._radius = float(radius)

    @property
    def area(self):
        raise NotImplementedError

    @property
    def volume(self):
        raise NotImplementedError

    # =============================================================================
    # Constructors
    # =============================================================================

    @classmethod
    def from_plane_and_radius(cls, plane, radius):
        """Construct a cylindrical surface from a plane and a radius.

        Parameters
        ----------
        plane : :class:`compas.geometry.Plane`
            The plane of the surface.
        radius : float
            The radius of the surface.

        Returns
        -------
        :class:`compas.geometry.CylindricalSurface`
            A cylindrical surface.

        """
        return cls(radius, frame=Frame.from_plane(plane))

    @classmethod
    def from_three_points(cls, a, b, c):
        """Construct a cylindrical from three points.

        Parameters
        ----------
        a : :class:`compas.geometry.Point`
            The first point.
        b : :class:`compas.geometry.Point`
            The second point.
        c : :class:`compas.geometry.Point`
            The third point.

        Returns
        -------
        :class:`compas.geometry.CylindricalSurface`
            A cylindrical surface.

        """
        # The circle through the three points fixes both radius and frame.
        circle = Circle.from_three_points(a, b, c)
        return cls(circle.radius, frame=circle.frame)

    # =============================================================================
    # Conversions
    # =============================================================================

    # =============================================================================
    # Transformations
    # =============================================================================

    # =============================================================================
    # Methods
    # =============================================================================

    def point_at(self, u, v, world=True):
        """Compute a point on the surface at the given parameters.

        Parameters
        ----------
        u : float
            The first parameter, mapped internally to the angle ``u * 2 * pi``.
        v : float
            The second parameter (position along the cylinder axis).
        world : bool, optional
            If ``True``, the point is transformed to world coordinates.

        Returns
        -------
        :class:`compas.geometry.Point`
            The point at the given parameters.

        """
        u = u * PI2
        x = self.radius * cos(u)
        y = self.radius * sin(u)
        z = v
        point = Point(x, y, z)
        if world:
            point.transform(self.transformation)
        return point

    def normal_at(self, u, world=True):
        """Compute the normal at a point on the surface at the given parameters.

        Parameters
        ----------
        u : float
            The first parameter, mapped internally to the angle ``u * 2 * pi``.
        world : bool, optional
            If ``True``, the normal is transformed to world coordinates.

        Returns
        -------
        :class:`compas.geometry.Vector`
            The normal at the given parameters.

        """
        u = u * PI2
        x = self.radius * cos(u)
        y = self.radius * sin(u)
        z = 0
        vector = Vector(x, y, z)
        vector.unitize()
        if world:
            vector.transform(self.transformation)
        return vector

    def frame_at(self, u, v, world=True):
        """Compute the frame at a point on the surface at the given parameters.

        Parameters
        ----------
        u : float
            The first parameter, mapped internally to the angle ``u * 2 * pi``.
        v : float
            The second parameter (position along the cylinder axis).
        world : bool, optional
            If ``True``, the frame is transformed to world coordinates.

        Returns
        -------
        :class:`compas.geometry.Frame`
            The frame at the given parameters.

        """
        # BUGFIX: do NOT pre-scale ``u`` here. ``point_at`` and ``normal_at``
        # already multiply by 2*pi internally, so scaling first applied the
        # factor twice and produced a wrong frame for any u not in {0, 1, ...}.
        point = self.point_at(u, v, world=False)
        zaxis = self.normal_at(u, world=False)
        yaxis = self.frame.zaxis
        xaxis = yaxis.cross(zaxis)
        frame = Frame(point, xaxis, yaxis)
        if world:
            frame.transform(self.transformation)
        return frame
|
1,231 |
async callable
|
from functools import partial
from typing import Any, AsyncGenerator, Generator
import pytest
from litestar.di import Provide
from litestar.exceptions import ImproperlyConfiguredException, LitestarWarning
from litestar.types import Empty
def generator_func() -> Generator[float, None, None]:
    """Synchronous generator dependency: yields a single float."""
    yield from (0.1,)
async def async_generator_func() -> AsyncGenerator[float, None]:
    """Asynchronous generator dependency: yields a single float."""
    for value in (0.1,):
        yield value
async def METHOD_NAME(val: str = "three-one") -> str:
    """Asynchronous dependency: echoes back its (defaulted) argument."""
    result = val
    return result
def sync_callable(val: str = "three-one") -> str:
    """Synchronous dependency: echoes back its (defaulted) argument."""
    result = val
    return result
# Pre-bound variants of the two dependencies above, used in the parametrized tests.
async_partial = partial(METHOD_NAME, "why-three-and-one")
sync_partial = partial(sync_callable, "why-three-and-one")
class C:
    """Fixture class covering every method flavour (class/static/instance,
    sync and async); the class attribute is shadowed per instance."""

    val = 31  # read by the classmethods

    def __init__(self) -> None:
        self.val = 13  # shadows the class attribute; read by the instance methods

    @classmethod
    def sync_class(cls) -> int:
        return cls.val

    @classmethod
    async def async_class(cls) -> int:
        return cls.val

    @staticmethod
    def sync_static() -> str:
        return "one-three"

    @staticmethod
    async def async_static() -> str:
        return "one-three"

    def sync_instance(self) -> int:
        return self.val

    async def async_instance(self) -> int:
        return self.val
async def test_provide_default(anyio_backend: str) -> None:
    """An async dependency resolves to its default return value."""
    provider = Provide(dependency=METHOD_NAME)
    resolved = await provider()
    assert resolved == "three-one"
async def test_provide_cached(anyio_backend: str) -> None:
    """With use_cache=True the first resolved value is memoized and reused."""
    provider = Provide(dependency=METHOD_NAME, use_cache=True)
    assert provider.value is Empty
    first = await provider()
    assert first == "three-one"
    assert provider.value == first
    # Every subsequent resolution must return the cached value.
    for _ in range(2):
        assert await provider() == first
async def test_run_in_thread(anyio_backend: str) -> None:
    """A sync dependency with sync_to_thread=True still resolves correctly."""
    provider = Provide(dependency=sync_callable, sync_to_thread=True)
    resolved = await provider()
    assert resolved == "three-one"
def test_provider_equality_check() -> None:
    """Providers compare equal iff configuration and cached value match."""
    first_provider = Provide(dependency=sync_callable, sync_to_thread=False)
    second_provider = Provide(dependency=sync_callable, sync_to_thread=False)
    assert first_provider == second_provider
    # A differing configuration breaks equality ...
    cached_provider = Provide(dependency=sync_callable, use_cache=True, sync_to_thread=False)
    assert first_provider != cached_provider
    # ... and so does a differing cached value.
    second_provider.value = True
    assert first_provider != second_provider
@pytest.mark.parametrize(
    "fn, exp",
    [
        (C.async_class, 31),
        (C.sync_class, 31),
        (C.async_static, "one-three"),
        (C.sync_static, "one-three"),
        (C().async_instance, 13),
        (C().sync_instance, 13),
        (METHOD_NAME, "three-one"),
        (sync_callable, "three-one"),
        (async_partial, "why-three-and-one"),
        (sync_partial, "why-three-and-one"),
    ],
)
@pytest.mark.usefixtures("disable_warn_sync_to_thread_with_async")
async def test_provide_for_callable(fn: Any, exp: Any, anyio_backend: str) -> None:
    # Every supported callable flavour must resolve to its expected value.
    assert await Provide(fn, sync_to_thread=False)() == exp
@pytest.mark.usefixtures("enable_warn_implicit_sync_to_thread")
def test_sync_callable_without_sync_to_thread_warns() -> None:
    """Omitting sync_to_thread for a sync dependency emits a warning."""

    def dependency() -> None:
        pass

    with pytest.warns(LitestarWarning, match="discouraged since synchronous callables"):
        Provide(dependency)
@pytest.mark.parametrize("sync_to_thread", [True, False])
def test_async_callable_with_sync_to_thread_warns(sync_to_thread: bool) -> None:
    """Setting sync_to_thread (either way) for an async dependency warns."""

    async def dependency() -> None:
        pass

    with pytest.warns(LitestarWarning, match="asynchronous callable"):
        Provide(dependency, sync_to_thread=sync_to_thread)
@pytest.mark.parametrize(
    ("dep", "exp"),
    [
        (sync_callable, True),
        (METHOD_NAME, False),
        (generator_func, True),
        (async_generator_func, True),
    ],
)
def test_dependency_has_async_callable(dep: Any, exp: bool) -> None:
    # Per the table above, only the plain coroutine function is reported as
    # non-sync; generator functions (sync and async alike) count as sync here.
    assert Provide(dep).has_sync_callable is exp
def test_raises_when_dependency_is_not_callable() -> None:
    """A non-callable dependency is rejected at construction time."""
    with pytest.raises(ImproperlyConfiguredException):
        Provide(123)  # type: ignore
|
1,232 |
init applet drawer ui
|
###############################################################################
# ilastik: interactive learning and segmentation toolkit
#
# Copyright (C) 2011-2014, the ilastik developers
# <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# In addition, as a special exception, the copyright holders of
# ilastik give you permission to combine ilastik with applets,
# workflows and plugins which are not covered under the GNU
# General Public License.
#
# See the LICENSE file for details. License information is also available
# on the ilastik web site at:
# http://ilastik.org/license.html
###############################################################################
import os
from PyQt5 import uic
from PyQt5.QtWidgets import QVBoxLayout, QSpacerItem, QSizePolicy
from volumina.widgets.thresholdingWidget import ThresholdingWidget
from ilastik.applets.layerViewer.layerViewerGui import LayerViewerGui
class ThresholdMaskingGui(LayerViewerGui):
    """
    Simple example of an applet GUI: a drawer with a thresholding widget that
    drives the operator's MinValue/MaxValue slots and displays the results.
    """

    ###########################################
    ### AppletGuiInterface Concrete Methods ###
    ###########################################

    def appletDrawer(self):
        # The drawer widget loaded from drawer.ui in METHOD_NAME.
        return self._drawer

    # (Other methods already provided by our base class)

    ###########################################
    ###########################################

    def __init__(self, parentApplet, topLevelOperatorView):
        """Store the operator view and initialize the base LayerViewerGui."""
        self.topLevelOperatorView = topLevelOperatorView
        super(ThresholdMaskingGui, self).__init__(parentApplet, self.topLevelOperatorView)

    def METHOD_NAME(self):
        # Load the ui file (find it in our own directory)
        localDir = os.path.split(__file__)[0]
        self._drawer = uic.loadUi(localDir + "/drawer.ui")

        # Init threshold widget
        self.thresholdWidget = ThresholdingWidget(self)
        self.thresholdWidget.valueChanged.connect(self.apply_gui_settings_to_operator)

        # Add widget to a layout
        layout = QVBoxLayout()
        layout.setSpacing(0)
        layout.addWidget(self.thresholdWidget)
        layout.addSpacerItem(QSpacerItem(0, 0, vPolicy=QSizePolicy.Expanding))

        # Apply layout to the drawer
        self._drawer.setLayout(layout)

        # Initialize the gui with the operator's current values
        self.apply_operator_settings_to_gui()

    def apply_operator_settings_to_gui(self):
        # Fall back to the full 8-bit range while the slots are not ready yet.
        minValue, maxValue = (0, 255)

        if self.topLevelOperatorView.MinValue.ready():
            minValue = self.topLevelOperatorView.MinValue.value
        if self.topLevelOperatorView.MaxValue.ready():
            maxValue = self.topLevelOperatorView.MaxValue.value

        self.thresholdWidget.setValue(minValue, maxValue)

    def apply_gui_settings_to_operator(self, minVal, maxVal):
        # Push the widget's threshold values into the operator's input slots.
        self.topLevelOperatorView.MinValue.setValue(minVal)
        self.topLevelOperatorView.MaxValue.setValue(maxVal)

    def setupLayers(self):
        """
        Overridden from LayerViewerGui.
        Create a list of all layer objects that should be displayed.
        """
        layers = []

        # Show the thresholded data
        outputImageSlot = self.topLevelOperatorView.Output
        if outputImageSlot.ready():
            outputLayer = self.createStandardLayerFromSlot(outputImageSlot)
            outputLayer.name = "min <= x <= max"
            outputLayer.visible = True
            outputLayer.opacity = 0.75
            layers.append(outputLayer)

        # Show the data
        invertedOutputSlot = self.topLevelOperatorView.InvertedOutput
        if invertedOutputSlot.ready():
            invertedLayer = self.createStandardLayerFromSlot(invertedOutputSlot)
            invertedLayer.name = "(x < min) U (x > max)"
            invertedLayer.visible = True
            invertedLayer.opacity = 0.25
            layers.append(invertedLayer)

        # Show the raw input data
        inputImageSlot = self.topLevelOperatorView.InputImage
        if inputImageSlot.ready():
            inputLayer = self.createStandardLayerFromSlot(inputImageSlot)
            inputLayer.name = "Raw Input"
            inputLayer.visible = True
            inputLayer.opacity = 1.0
            layers.append(inputLayer)

        return layers
|
1,233 |
mask path
|
""" Contains ADE dataset for semantic segmentation tasks """
import os
from io import BytesIO
from zipfile import ZipFile
import tempfile
from PIL import Image
import tqdm
import requests
from . import ImagesOpenset
class ADESegmentation(ImagesOpenset):
""" Contains 20210 images and masks for training and 2000 for testing.
Notes
-----
Class 0 corresponds to background.
"""
SOURCE_URL = 'http://data.csail.mit.edu/places/ADEchallenge/ADEChallengeData2016.zip'
SETS_PATH = 'ADEChallengeData2016'
def __init__(self, *args, unpack=False, preloaded=None, train_test=True, **kwargs):
self.localname = None
super().__init__(*args, preloaded=preloaded, train_test=train_test, **kwargs)
if unpack:
with ZipFile(self.localname) as archive:
archive.extractall(os.path.dirname(self.localname))
def download_archive(self, path=None):
""" Download archive"""
if path is None:
path = tempfile.gettempdir()
filename = os.path.basename(self.SOURCE_URL)
localname = os.path.join(path, filename)
self.localname = localname
if not os.path.isfile(localname):
r = requests.get(self.SOURCE_URL, stream=True, timeout=10)
file_size = int(r.headers['Content-Length'])
chunk = 1
chunk_size = 1024
num_bars = int(file_size / chunk_size)
with open(localname, 'wb') as file:
for chunk in tqdm.tqdm(
r.iter_content(chunk_size=chunk_size),
total=num_bars,
unit='KB',
desc=filename,
leave=True
):
file.write(chunk)
def _name(self, path):
"""
Return file name without format
Parameters
----------
path: str
path from which you want to extract filename
Returns
-------
str
file name
"""
return os.path.basename(path).split('.')[0]
def _zip_listdir(self, archive, target_dir):
"""
Analog of os.listdir() but for zipfile
Parameters
----------
archive: ZipFile object
.zip archive
target_dir: str
directory in which you want to run os.listdir
Returns
-------
List[str]
list of paths inside target_dir
"""
paths = archive.namelist()
target_dir = target_dir if target_dir.endswith("/") else target_dir + "/"
target_dir = "" if target_dir == '/' else target_dir
result = [
path for path in paths if path.startswith(target_dir) and len(path) != len(target_dir)
]
return result
def _extract_names(self, archive, mode):
"""
Train and test images names are located in specific for each task folder
Parameters
----------
archive: ZipFile object
.zip archive
mode: str
can be either "training" or "validation"(test set)
Returns
-------
List[str]
list of file names
"""
assert mode in ['training', 'validation']
target_dir = os.path.join(self.SETS_PATH, 'images', mode)
filepaths = self._zip_listdir(archive=archive, target_dir=target_dir)
filenames = [self._name(filepath) for filepath in filepaths]
return filenames
def _image_path(self, name, mode):
""" Return the path to the .jpg image in the archive by its name """
assert mode in ['training', 'validation']
return os.path.join(self.SETS_PATH, 'images', mode, name + '.jpg')
def METHOD_NAME(self, name, mode):
""" Return the path in the archive to the mask which is .png image by its name and mode"""
assert mode in ['training', 'validation']
return os.path.join(self.SETS_PATH, 'annotations', mode, name + '.png')
def _extract_sample(self, archive, name, mode):
"""
Return image and mask PIL.Image objects from archive based on its name and mode
Parameters
----------
archive: ZipFile object
.zip archive
name: str
file name
mode: str
can be either "training" or "validation"(test set)
Returns
-------
Tuple(PIL.Image, PIL.Image)
tuple of image and corresponding mask
Notes
-----
Images that are grayscale are casted to RGB
"""
image_filepath = self._image_path(name=name, mode=mode)
mask_filepath = self.METHOD_NAME(name=name, mode=mode)
image_data = archive.read(image_filepath)
mask_data = archive.read(mask_filepath)
image = Image.open(BytesIO(image_data))
image = image.convert('RGB') if (image.mode != 'RGB') else image
mask = Image.open(BytesIO(mask_data))
return (image, mask)
def download(self, path):
    """ Download a dataset from the source web-site.

    Fetches the archive, reads every train/validation sample directly from
    the zip, and builds the arrays and train/test indices for the dataset.
    """
    self.download_archive(path)
    with ZipFile(self.localname) as archive:
        train_names = self._extract_names(archive=archive, mode='training')
        test_names = self._extract_names(archive=archive, mode='validation')
        train_samples = [self._extract_sample(archive, name=name, mode='training')
                         for name in train_names]
        test_samples = [self._extract_sample(archive, name=name, mode='validation')
                        for name in test_names]
    # Each sample is an (image, mask) pair; transpose to parallel lists.
    train_images, train_masks = map(list, zip(*train_samples))
    test_images, test_masks = map(list, zip(*test_samples))
    # Train samples first, then test; the indices below preserve that split.
    images = self.create_array(train_images + test_images)
    masks = self.create_array(train_masks + test_masks)
    preloaded = images, masks
    index, train_index, test_index = self._infer_train_test_index(
        train_len=len(train_names),
        test_len=len(test_names)
    )
    return preloaded, index, train_index, test_index
|
1,234 |
test property decorator baseclass
|
# Test case for property
# more tests are in test_descr
import sys
import unittest
from test.test_support import run_unittest
class PropertyBase(Exception):
    # Root of the sentinel exceptions raised by the property fixtures below.
    pass
class PropertyGet(PropertyBase):
    # Raised by fixture getters to prove the getter ran.
    pass
class PropertySet(PropertyBase):
    # Raised by fixture setters to prove the setter ran.
    pass
class PropertyDel(PropertyBase):
    # Raised by fixture deleters to prove the deleter ran.
    pass
class BaseClass(object):
    """Fixture exposing ``spam`` as a full get/set/delete property.

    The property doc is taken from the getter docstring ("BaseClass.getter"),
    which the doc-propagation tests rely on.
    """

    def __init__(self):
        self._spam = 5

    def _get_spam(self):
        """BaseClass.getter"""
        return self._spam

    def _set_spam(self, value):
        self._spam = value

    def _del_spam(self):
        del self._spam

    spam = property(_get_spam, _set_spam, _del_spam)
class SubClass(BaseClass):
    """Overrides every accessor of ``spam`` with one that raises a sentinel.

    The property doc comes from the new getter ("SubClass.getter").
    """

    def _raise_get(self):
        """SubClass.getter"""
        raise PropertyGet(self._spam)

    def _raise_set(self, value):
        raise PropertySet(self._spam)

    def _raise_del(self):
        raise PropertyDel(self._spam)

    spam = property(_raise_get, _raise_set, _raise_del)
class PropertyDocBase(object):
    """Fixture whose property doc is given explicitly via ``doc=``.

    An explicit doc (unlike one derived from the getter docstring) must
    survive ``.getter`` replacement in subclasses.
    """

    _spam = 1

    def _read_spam(self):
        return self._spam

    spam = property(fget=_read_spam, doc="spam spam spam")
class PropertyDocSub(PropertyDocBase):
    # The base property was built with an explicit doc=, so replacing the
    # getter must NOT replace the docstring.
    @PropertyDocBase.spam.getter
    def spam(self):
        """The decorator does not use this doc string"""
        return self._spam
class PropertySubNewGetter(BaseClass):
    # The base property doc came from its getter, so a new getter also
    # installs its own docstring.
    @BaseClass.spam.getter
    def spam(self):
        """new docstring"""
        return 5
class PropertyNewGetter(object):
    """Fixture where ``.getter`` replaces both the getter and its docstring."""

    def _first(self):
        """original docstring"""
        return 1

    spam = property(_first)

    def _second(self):
        """new docstring"""
        return 8

    # Replacing the getter also replaces the (getter-derived) doc.
    spam = spam.getter(_second)
class PropertyTests(unittest.TestCase):
    """Behavioural tests for @property get/set/delete and doc propagation."""

    def METHOD_NAME(self):
        # see #1620
        base = BaseClass()
        self.assertEqual(base.spam, 5)
        self.assertEqual(base._spam, 5)
        base.spam = 10
        self.assertEqual(base.spam, 10)
        self.assertEqual(base._spam, 10)
        delattr(base, "spam")
        self.assertTrue(not hasattr(base, "spam"))
        self.assertTrue(not hasattr(base, "_spam"))
        # Setting again after deletion recreates the backing attribute.
        base.spam = 20
        self.assertEqual(base.spam, 20)
        self.assertEqual(base._spam, 20)

    def test_property_decorator_subclass(self):
        # see #1620
        # Every accessor in SubClass raises its sentinel exception.
        sub = SubClass()
        self.assertRaises(PropertyGet, getattr, sub, "spam")
        self.assertRaises(PropertySet, setattr, sub, "spam", None)
        self.assertRaises(PropertyDel, delattr, sub, "spam")

    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    def test_property_decorator_subclass_doc(self):
        sub = SubClass()
        self.assertEqual(sub.__class__.spam.__doc__, "SubClass.getter")

    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    def test_property_decorator_baseclass_doc(self):
        base = BaseClass()
        self.assertEqual(base.__class__.spam.__doc__, "BaseClass.getter")

    def test_property_decorator_doc(self):
        # Explicit doc= survives getter replacement in the subclass.
        base = PropertyDocBase()
        sub = PropertyDocSub()
        self.assertEqual(base.__class__.spam.__doc__, "spam spam spam")
        self.assertEqual(sub.__class__.spam.__doc__, "spam spam spam")

    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    def test_property_getter_doc_override(self):
        # Getter-derived docs are replaced along with the getter.
        newgettersub = PropertySubNewGetter()
        self.assertEqual(newgettersub.spam, 5)
        self.assertEqual(newgettersub.__class__.spam.__doc__, "new docstring")
        newgetter = PropertyNewGetter()
        self.assertEqual(newgetter.spam, 8)
        self.assertEqual(newgetter.__class__.spam.__doc__, "new docstring")
# Issue 5890: subclasses of property do not preserve method __doc__ strings
class PropertySub(property):
    """This is a subclass of property"""
class PropertySubSlots(property):
    """This is a subclass of property that defines __slots__"""
    # Empty __slots__ means instances have no __dict__, so __doc__ cannot
    # be copied onto them (exercised by the tests below).
    __slots__ = ()
class PropertySubclassTests(unittest.TestCase):
    """Issue 5890: subclasses of property must preserve method __doc__ strings."""

    def test_slots_docstring_copy_exception(self):
        # PropertySubSlots has __slots__ = (), so there is no writable
        # __doc__; copying the getter docstring must raise AttributeError.
        try:
            class Foo(object):
                @PropertySubSlots
                def spam(self):
                    """Trying to copy this docstring will raise an exception"""
                    return 1
        except AttributeError:
            pass
        else:
            raise Exception("AttributeError not raised")

    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    def test_docstring_copy(self):
        class Foo(object):
            @PropertySub
            def spam(self):
                """spam wrapped in property subclass"""
                return 1
        self.assertEqual(
            Foo.spam.__doc__,
            "spam wrapped in property subclass")

    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    def test_property_setter_copies_getter_docstring(self):
        class Foo(object):
            def __init__(self): self._spam = 1
            @PropertySub
            def spam(self):
                """spam wrapped in property subclass"""
                return self._spam
            @spam.setter
            def spam(self, value):
                """this docstring is ignored"""
                self._spam = value
        foo = Foo()
        self.assertEqual(foo.spam, 1)
        foo.spam = 2
        self.assertEqual(foo.spam, 2)
        self.assertEqual(
            Foo.spam.__doc__,
            "spam wrapped in property subclass")
        class FooSub(Foo):
            @Foo.spam.setter
            def spam(self, value):
                """another ignored docstring"""
                self._spam = 'eggs'
        foosub = FooSub()
        self.assertEqual(foosub.spam, 1)
        foosub.spam = 7
        self.assertEqual(foosub.spam, 'eggs')
        self.assertEqual(
            FooSub.spam.__doc__,
            "spam wrapped in property subclass")

    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    def test_property_new_getter_new_docstring(self):
        class Foo(object):
            @PropertySub
            def spam(self):
                """a docstring"""
                return 1
            @spam.getter
            def spam(self):
                """a new docstring"""
                return 2
        self.assertEqual(Foo.spam.__doc__, "a new docstring")
        class FooBase(object):
            @PropertySub
            def spam(self):
                """a docstring"""
                return 1
        class Foo2(FooBase):
            @FooBase.spam.getter
            def spam(self):
                """a new docstring"""
                return 2
        # BUG FIX: the original re-asserted Foo.spam.__doc__ (checked above),
        # leaving the Foo2 subclass-getter branch completely unverified.
        self.assertEqual(Foo2.spam.__doc__, "a new docstring")
def test_main():
    # Aggregate entry point used by the regression-test driver.
    run_unittest(PropertyTests, PropertySubclassTests)
if __name__ == '__main__':
test_main()
|
1,235 |
x open
|
from __future__ import annotations
import sys
from itertools import count
from unittest.mock import MagicMock, Mock, patch
import pytest
from kombu import Connection
from kombu.transport import pyamqp
def test_amqps_connection():
    # An amqps:// URL must switch the connection into SSL mode without
    # actually opening a network connection.
    conn = Connection('amqps://')
    assert conn.transport  # evaluate transport, don't connect
    assert conn.ssl
class MockConnection(dict):
    """Dict-backed stand-in for an AMQP connection.

    Attribute assignments are recorded as dict items so tests can inspect
    what the transport configured; ``connect`` is a no-op.
    """

    def __setattr__(self, key, value):
        dict.__setitem__(self, key, value)

    def connect(self):
        pass
class test_Channel:
    """Unit tests for pyamqp.Channel behaviour."""

    # NOTE(review): `setup` (not unittest's setUp) — invoked per-test by the
    # project's pytest conventions; confirm against the test runner config.
    def setup(self):
        class Channel(pyamqp.Channel):
            # Canned value returned by the stubbed wait() below.
            wait_returns = []

            def METHOD_NAME(self, *args, **kwargs):
                pass

            def wait(self, *args, **kwargs):
                return self.wait_returns

            def _send_method(self, *args, **kwargs):
                pass

        self.conn = Mock()
        # Hand out increasing channel ids: 0, 1, 2, ...
        self.conn._get_free_channel_id.side_effect = count(0).__next__
        self.conn.channels = {}
        self.channel = Channel(self.conn, 0)

    def test_init(self):
        assert not self.channel.no_ack_consumers

    def test_prepare_message(self):
        assert self.channel.prepare_message(
            'foobar', 10, 'application/data', 'utf-8',
            properties={},
        )

    def test_message_to_python(self):
        message = Mock()
        message.headers = {}
        message.properties = {}
        assert self.channel.message_to_python(message)

    def test_close_resolves_connection_cycle(self):
        # close() must break the channel<->connection reference cycle.
        assert self.channel.connection is not None
        self.channel.close()
        assert self.channel.connection is None

    def test_basic_consume_registers_ack_status(self):
        # no_ack consumers are tracked so acks can be skipped later.
        self.channel.wait_returns = ['my-consumer-tag']
        self.channel.basic_consume('foo', no_ack=True)
        assert 'my-consumer-tag' in self.channel.no_ack_consumers
        self.channel.wait_returns = ['other-consumer-tag']
        self.channel.basic_consume('bar', no_ack=False)
        assert 'other-consumer-tag' not in self.channel.no_ack_consumers
        # Cancelling removes the tag from the tracking set.
        self.channel.basic_cancel('my-consumer-tag')
        assert 'my-consumer-tag' not in self.channel.no_ack_consumers
class test_Transport:
    """Unit tests for pyamqp.Transport."""

    # NOTE(review): `setup` (not setUp) — see project's pytest conventions.
    def setup(self):
        self.connection = Connection('pyamqp://')
        self.transport = self.connection.transport

    def test_create_channel(self):
        connection = Mock()
        self.transport.create_channel(connection)
        connection.channel.assert_called_with()

    def test_ssl_cert_passed(self):
        # The ssl dict given to Connection must reach the client unmodified.
        ssl_dict = {
            'ca_certs': '/etc/pki/tls/certs/something.crt',
            'cert_reqs': "ssl.CERT_REQUIRED",
        }
        ssl_dict_copy = {k: ssl_dict[k] for k in ssl_dict}
        connection = Connection('amqps://', ssl=ssl_dict_copy)
        assert connection.transport.client.ssl == ssl_dict

    def test_driver_version(self):
        assert self.transport.driver_version()

    def test_drain_events(self):
        connection = Mock()
        self.transport.drain_events(connection, timeout=10.0)
        connection.drain_events.assert_called_with(timeout=10.0)

    def test_dnspython_localhost_resolve_bug(self):
        # 'localhost' must be rewritten to 127.0.0.1 to dodge a dnspython
        # resolution bug; other hostnames pass through untouched.
        class Conn:
            def __init__(self, **kwargs):
                vars(self).update(kwargs)

            def connect(self):
                pass

        self.transport.Connection = Conn
        self.transport.client.hostname = 'localhost'
        conn1 = self.transport.establish_connection()
        assert conn1.host == '127.0.0.1:5672'
        self.transport.client.hostname = 'example.com'
        conn2 = self.transport.establish_connection()
        assert conn2.host == 'example.com:5672'

    def test_close_connection(self):
        connection = Mock()
        connection.client = Mock()
        self.transport.close_connection(connection)
        assert connection.client is None
        connection.close.assert_called_with()

    @pytest.mark.masked_modules('ssl')
    def test_import_no_ssl(self, mask_modules):
        # amqp.connection must still import (with a stub SSLError) when the
        # ssl module is unavailable.
        pm = sys.modules.pop('amqp.connection')
        try:
            from amqp.connection import SSLError
            assert SSLError.__module__ == 'amqp.connection'
        finally:
            if pm is not None:
                sys.modules['amqp.connection'] = pm
class test_pyamqp:
    """Connection-string and TLS handling of the pyamqp transport."""

    def test_default_port(self):
        class Transport(pyamqp.Transport):
            Connection = MockConnection

        c = Connection(port=None, transport=Transport).connect()
        # Omitted port falls back to the transport default.
        assert c['host'] == f'127.0.0.1:{Transport.default_port}'

    def test_custom_port(self):
        class Transport(pyamqp.Transport):
            Connection = MockConnection

        c = Connection(port=1337, transport=Transport).connect()
        assert c['host'] == '127.0.0.1:1337'

    def test_ssl(self):
        # Test setting TLS by ssl=True.
        class Transport(pyamqp.Transport):
            Connection = MagicMock()

        Connection(transport=Transport, ssl=True).connect()
        Transport.Connection.assert_called_once()
        _, kwargs = Transport.Connection.call_args
        assert kwargs['ssl'] is True

    def test_ssl_dict(self):
        # Test setting TLS by setting ssl as dict.
        class Transport(pyamqp.Transport):
            Connection = MagicMock()

        Connection(transport=Transport, ssl={'a': 1, 'b': 2}).connect()
        Transport.Connection.assert_called_once()
        _, kwargs = Transport.Connection.call_args
        assert kwargs['ssl'] == {'a': 1, 'b': 2}

    @pytest.mark.parametrize(
        'hostname',
        [
            'broker.example.com',
            'amqp://broker.example.com/0',
            'amqps://broker.example.com/0',
            'amqp://guest:[email protected]/0',
            'amqp://broker.example.com;broker2.example.com'
        ])
    def test_ssl_server_hostname(self, hostname):
        # Test setting server_hostname from URI: a None placeholder is
        # filled in with the (first) broker hostname.
        class Transport(pyamqp.Transport):
            Connection = MagicMock()

        Connection(
            hostname, transport=Transport, ssl={'server_hostname': None}
        ).connect()
        Transport.Connection.assert_called_once()
        _, kwargs = Transport.Connection.call_args
        assert kwargs['ssl'] == {'server_hostname': 'broker.example.com'}

    def test_register_with_event_loop(self):
        t = pyamqp.Transport(Mock())
        conn = Mock(name='conn')
        loop = Mock(name='loop')
        t.register_with_event_loop(conn, loop)
        # The connection socket must be watched for readability.
        loop.add_reader.assert_called_with(
            conn.sock, t.on_readable, conn, loop,
        )

    def test_heartbeat_check(self):
        t = pyamqp.Transport(Mock())
        conn = Mock()
        t.heartbeat_check(conn, rate=4.331)
        conn.heartbeat_tick.assert_called_with(rate=4.331)

    def test_get_manager(self):
        with patch('kombu.transport.pyamqp.get_manager') as get_manager:
            t = pyamqp.Transport(Mock())
            t.get_manager(1, kw=2)
            get_manager.assert_called_with(t.client, 1, kw=2)
|
1,236 |
tolist
|
import sys
from _typeshed import ReadableBuffer, SupportsRead, SupportsWrite
from collections.abc import Iterable
# pytype crashes if array inherits from collections.abc.MutableSequence instead of typing.MutableSequence
from typing import Any, Generic, MutableSequence, TypeVar, overload # noqa: Y022
from typing_extensions import Literal, Self, SupportsIndex, TypeAlias
if sys.version_info >= (3, 12):
from types import GenericAlias
# Type-code groups accepted by array(); they select the element type.
_IntTypeCode: TypeAlias = Literal["b", "B", "h", "H", "i", "I", "l", "L", "q", "Q"]
_FloatTypeCode: TypeAlias = Literal["f", "d"]
_UnicodeTypeCode: TypeAlias = Literal["u"]
_TypeCode: TypeAlias = _IntTypeCode | _FloatTypeCode | _UnicodeTypeCode
# Element type is constrained to the three kinds an array can hold.
_T = TypeVar("_T", int, float, str)
typecodes: str  # string containing every supported type-code character
class array(MutableSequence[_T], Generic[_T]):
    # Read-only metadata about the array's element storage.
    @property
    def typecode(self) -> _TypeCode: ...
    @property
    def itemsize(self) -> int: ...
    # Overloads tie each type-code group to the matching element type.
    @overload
    def __init__(self: array[int], __typecode: _IntTypeCode, __initializer: bytes | bytearray | Iterable[int] = ...) -> None: ...
    @overload
    def __init__(
        self: array[float], __typecode: _FloatTypeCode, __initializer: bytes | bytearray | Iterable[float] = ...
    ) -> None: ...
    @overload
    def __init__(
        self: array[str], __typecode: _UnicodeTypeCode, __initializer: bytes | bytearray | Iterable[str] = ...
    ) -> None: ...
    @overload
    def __init__(self, __typecode: str, __initializer: Iterable[_T]) -> None: ...
    @overload
    def __init__(self, __typecode: str, __initializer: bytes | bytearray = ...) -> None: ...
    def append(self, __v: _T) -> None: ...
    def buffer_info(self) -> tuple[int, int]: ...
    def byteswap(self) -> None: ...
    def count(self, __v: _T) -> int: ...
    def extend(self, __bb: Iterable[_T]) -> None: ...
    def frombytes(self, __buffer: ReadableBuffer) -> None: ...
    def fromfile(self, __f: SupportsRead[bytes], __n: int) -> None: ...
    def fromlist(self, __list: list[_T]) -> None: ...
    def fromunicode(self, __ustr: str) -> None: ...
    if sys.version_info >= (3, 10):
        def index(self, __v: _T, __start: int = 0, __stop: int = sys.maxsize) -> int: ...
    else:
        def index(self, __v: _T) -> int: ...  # type: ignore[override]

    def insert(self, __i: int, __v: _T) -> None: ...
    def pop(self, __i: int = -1) -> _T: ...
    def remove(self, __v: _T) -> None: ...
    def tobytes(self) -> bytes: ...
    def tofile(self, __f: SupportsWrite[bytes]) -> None: ...
    def METHOD_NAME(self) -> list[_T]: ...
    def tounicode(self) -> str: ...
    # Deprecated bytes aliases, removed in 3.9.
    if sys.version_info < (3, 9):
        def fromstring(self, __buffer: str | ReadableBuffer) -> None: ...
        def tostring(self) -> bytes: ...

    def __len__(self) -> int: ...
    @overload
    def __getitem__(self, __key: SupportsIndex) -> _T: ...
    @overload
    def __getitem__(self, __key: slice) -> array[_T]: ...
    @overload  # type: ignore[override]
    def __setitem__(self, __key: SupportsIndex, __value: _T) -> None: ...
    @overload
    def __setitem__(self, __key: slice, __value: array[_T]) -> None: ...
    def __delitem__(self, __key: SupportsIndex | slice) -> None: ...
    def __add__(self, __value: array[_T]) -> array[_T]: ...
    def __eq__(self, __value: object) -> bool: ...
    def __ge__(self, __value: array[_T]) -> bool: ...
    def __gt__(self, __value: array[_T]) -> bool: ...
    def __iadd__(self, __value: array[_T]) -> Self: ...  # type: ignore[override]
    def __imul__(self, __value: int) -> Self: ...
    def __le__(self, __value: array[_T]) -> bool: ...
    def __lt__(self, __value: array[_T]) -> bool: ...
    def __mul__(self, __value: int) -> array[_T]: ...
    def __rmul__(self, __value: int) -> array[_T]: ...
    def __copy__(self) -> array[_T]: ...
    def __deepcopy__(self, __unused: Any) -> array[_T]: ...
    # Buffer protocol support (PEP 688).
    def __buffer__(self, __flags: int) -> memoryview: ...
    def __release_buffer__(self, __buffer: memoryview) -> None: ...
    if sys.version_info >= (3, 12):
        def __class_getitem__(cls, item: Any) -> GenericAlias: ...
# Legacy alias kept for backward compatibility.
ArrayType = array
|
1,237 |
gross
|
from os import listdir, remove, rmdir
from os.path import join
from indigo import IndigoObject
from indigo.bingo import Bingo, BingoException
from ..constants import DB_BINGO
from ..helpers import indigo_iterator
from ..logger import logger
from .base import NoSQLAdapter, catch_indigo_exception
class BingoNoSQL(NoSQLAdapter):
    """Bingo NoSQL adapter: imports molecule/reaction files and runs
    exact/substructure/similarity/SMARTS searches against a Bingo DB.
    """

    dbms = DB_BINGO

    def __init__(self, indigo):
        NoSQLAdapter.__init__(self)
        self.indigo = indigo

    def connect(self):
        """Open the Bingo database file at ``self.db_path``."""
        logger.info(f"Connecting to {self.dbms} DB")
        self.bingo = Bingo.loadDatabaseFile(self.indigo, self.db_path)

    def close_connect(self):
        """Close the Bingo database handle."""
        logger.info(f"Closing connecting to {self.dbms} DB")
        self.bingo.close()

    def import_data(self, data_path: str, database_type: str):
        """Create a fresh database and fill it from *data_path*.

        Records rejected by Bingo are logged and skipped; ids keep
        increasing so they stay aligned with the input record order.
        """
        logger.info(f"Creating {self.dbms} database")
        self.bingo = Bingo.createDatabaseFile(
            self.indigo, self.db_path, database_type
        )
        logger.info(f"Importing data to {self.dbms} from {data_path}")
        # enumerate replaces the manual index-in-finally counter.
        for index, mol in enumerate(
            indigo_iterator(self.indigo, data_path), start=1
        ):
            try:
                self.bingo.insert(mol, index)
            except BingoException as e:
                logger.error(
                    f"Error during import {database_type} from "
                    f"{data_path} (id = {index}) "
                    f"'{mol.rawData()[:20]}...': {e}"
                )
        self.close_connect()

    def delete_base(self):
        """Remove every database file and the containing directory."""
        logger.info(f"Dropping {self.dbms} database")
        db_root = join(self.db_dir, self.db_name)
        for db_file in listdir(db_root):
            remove(join(db_root, db_file))
        rmdir(db_root)

    @catch_indigo_exception()
    def mass(self, molecule: IndigoObject, weight_type: str):
        """Return the requested mass; None for an unknown *weight_type*."""
        if weight_type == "molecular-weight":
            return molecule.molecularWeight()
        if weight_type == "most-abundant-mass":
            return molecule.mostAbundantMass()
        if weight_type == "monoisotopic-mass":
            return molecule.monoisotopicMass()

    @catch_indigo_exception()
    def METHOD_NAME(self, molecule: IndigoObject):
        """Return the gross formula of *molecule*."""
        return molecule.grossFormula()

    @staticmethod
    def _collect_ids(matcher):
        """Drain a Bingo search matcher into a list of record ids.

        Shared by all search methods below (previously duplicated loops).
        """
        ids = []
        while matcher.next():
            ids.append(matcher.getCurrentId())
        matcher.close()
        return ids

    @catch_indigo_exception(catch_error=True)
    def exact(self, molecule, target_function=None, options=""):
        """Exact-structure search; returns matching record ids."""
        return self._collect_ids(self.bingo.searchExact(molecule, options))

    @catch_indigo_exception(catch_error=True)
    def substructure(self, molecule, target_function=None, options=""):
        """Substructure search; *molecule* is reloaded as a query molecule."""
        query = self.indigo.loadQueryMolecule(molecule.rawData())
        return self._collect_ids(self.bingo.searchSub(query, options))

    @catch_indigo_exception(catch_error=True)
    def similarity(self, molecule, target_function=None, options=""):
        """Similarity search; *options* is "<metric>, <min>, <max>"."""
        sim_type, min_sim, max_sim = options.split(", ")
        matcher = self.bingo.searchSim(
            molecule, float(min_sim), float(max_sim), sim_type
        )
        return self._collect_ids(matcher)

    @catch_indigo_exception(catch_error=True)
    def smarts(self, molecule, target_function=None, options=""):
        """SMARTS search delegates to substructure search."""
        return self.substructure(molecule, target_function, options)

    @catch_indigo_exception(catch_error=True)
    def rsmarts(self, reaction, target_function=None, options=""):
        """Reaction SMARTS search delegates to substructure search."""
        return self.substructure(reaction, target_function, options)

    @catch_indigo_exception(catch_error=True)
    def rexact(self, reaction, target_function=None, options=""):
        """Exact reaction search; returns matching record ids."""
        return self._collect_ids(self.bingo.searchExact(reaction, options))

    @catch_indigo_exception(catch_error=True)
    def rsubstructure(self, reaction, target_function=None, options=""):
        """Reaction substructure search via a query reaction."""
        query = self.indigo.loadQueryReaction(reaction.rawData())
        return self._collect_ids(self.bingo.searchSub(query, options))
|
1,238 |
ocio configs switcher enum
|
from pydantic import Field, validator
from ayon_server.settings import (
BaseSettingsModel,
ensure_unique_names,
)
def METHOD_NAME():
    """Return the selectable OCIO config presets as enum options.

    Each option uses the preset name for both value and label; "custom"
    lets the user supply their own config path.
    """
    preset_names = (
        "nuke-default",
        "spi-vfx",
        "spi-anim",
        "aces_0.1.1",
        "aces_0.7.1",
        "aces_1.0.1",
        "aces_1.0.3",
        "aces_1.1",
        "aces_1.2",
        "aces_1.3",
        "custom",
    )
    return [{"value": name, "label": name} for name in preset_names]
class WorkfileColorspaceSettings(BaseSettingsModel):
    """Hiero workfile colorspace preset. """
    """# TODO: enhance settings with host api:
    we need to add mapping to resolve properly keys.
    Hiero is excpecting camel case key names,
    but for better code consistency we are using snake_case:
    ocio_config = ocioConfigName
    working_space_name = workingSpace
    int_16_name = sixteenBitLut
    int_8_name = eightBitLut
    float_name = floatLut
    log_name = logLut
    viewer_name = viewerLut
    thumbnail_name = thumbnailLut
    """

    # Selector driving which colorspace names are valid for the LUTs below.
    ocioConfigName: str = Field(
        title="OpenColorIO Config",
        description="Switch between OCIO configs",
        enum_resolver=METHOD_NAME,
        conditionalEnum=True
    )
    workingSpace: str = Field(
        title="Working Space"
    )
    viewerLut: str = Field(
        title="Viewer"
    )
    eightBitLut: str = Field(
        title="8-bit files"
    )
    sixteenBitLut: str = Field(
        title="16-bit files"
    )
    logLut: str = Field(
        title="Log files"
    )
    floatLut: str = Field(
        title="Float files"
    )
    thumbnailLut: str = Field(
        title="Thumnails"
    )
    monitorOutLut: str = Field(
        title="Monitor"
    )
class ClipColorspaceRulesItems(BaseSettingsModel):
    # One rule: clips whose name matches *regex* get *colorspace* assigned.
    _layout = "expanded"
    regex: str = Field("", title="Regex expression")
    colorspace: str = Field("", title="Colorspace")
class RegexInputsModel(BaseSettingsModel):
    # Ordered list of clip-colorspace assignment rules.
    inputs: list[ClipColorspaceRulesItems] = Field(
        default_factory=list,
        title="Inputs"
    )
class ImageIOConfigModel(BaseSettingsModel):
    # When enabled, *filepath* entries replace the studio-wide OCIO config.
    override_global_config: bool = Field(
        False,
        title="Override global OCIO config"
    )
    filepath: list[str] = Field(
        default_factory=list,
        title="Config path"
    )
class ImageIOFileRuleModel(BaseSettingsModel):
    # A single OCIO file rule: files matching *pattern* + *ext* get *colorspace*.
    name: str = Field("", title="Rule name")
    pattern: str = Field("", title="Regex pattern")
    colorspace: str = Field("", title="Colorspace name")
    ext: str = Field("", title="File extension")
class ImageIOFileRulesModel(BaseSettingsModel):
    activate_host_rules: bool = Field(False)
    rules: list[ImageIOFileRuleModel] = Field(
        default_factory=list,
        title="Rules"
    )

    # Rule names must be unique so they can be addressed individually.
    @validator("rules")
    def validate_unique_outputs(cls, value):
        ensure_unique_names(value)
        return value
class ImageIOSettings(BaseSettingsModel):
    """Hiero color management project settings. """
    _isGroup: bool = True
    activate_host_color_management: bool = Field(
        True, title="Enable Color Management"
    )
    ocio_config: ImageIOConfigModel = Field(
        default_factory=ImageIOConfigModel,
        title="OCIO config"
    )
    file_rules: ImageIOFileRulesModel = Field(
        default_factory=ImageIOFileRulesModel,
        title="File Rules"
    )
    # Per-workfile LUT/colorspace assignments (camelCase for Hiero's API).
    workfile: WorkfileColorspaceSettings = Field(
        default_factory=WorkfileColorspaceSettings,
        title="Workfile"
    )
    """# TODO: enhance settings with host api:
    - old settings are using `regexInputs` key but we
      need to rename to `regex_inputs`
    - no need for `inputs` middle part. It can stay
      directly on `regex_inputs`
    """
    regexInputs: RegexInputsModel = Field(
        default_factory=RegexInputsModel,
        title="Assign colorspace to clips via rules"
    )
# Factory defaults for ImageIOSettings, mirroring Hiero's stock
# nuke-default colorspace configuration.
DEFAULT_IMAGEIO_SETTINGS = {
    "workfile": {
        "ocioConfigName": "nuke-default",
        "workingSpace": "linear",
        "viewerLut": "sRGB",
        "eightBitLut": "sRGB",
        "sixteenBitLut": "sRGB",
        "logLut": "Cineon",
        "floatLut": "linear",
        "thumbnailLut": "sRGB",
        "monitorOutLut": "sRGB"
    },
    "regexInputs": {
        "inputs": [
            {
                "regex": "[^-a-zA-Z0-9](plateRef).*(?=mp4)",
                "colorspace": "sRGB"
            }
        ]
    }
}
|
1,239 |
check spec time
|
"""
Lets you set restrictions on spectators.
Original documentation:
This script will hopefully give server owners some control over what
spectators do on there server. As of now since the release of v0.75,
Goon Haven has had issues with spectators idling and using global chat
to send information to a team so that they may know enemy positions
or what the enemy is doing, etc. This script can block spectator chat
as well as kick spectators after so much time as passed.
Additionally, server owners who also give out "guard" or "mini-mod"
positions can add the right "specpower" to the group rights in commands.py
to have the guards/minimods be immune to the spectator kick and chat
restrictions.
Oh, and admins are also automatically immune to spectator kick and chat
restrictions.
Hope you enjoy!
Tocksman
Options
^^^^^^^
.. code-block:: toml
[spectator_control]
no_chat = false # determines whether spectators can chat or not in your server
kick = false # determines whether spectators will be kicked after remaining for so long
kick_time = "5min" # how long a spectator may remain before they are kicked
.. codeauthor:: Tocksman (made for Goon Haven)
"""
from math import ceil, floor
from twisted.internet import reactor
from piqueserver.config import config, cast_duration
# Options read from the [spectator_control] section (see module docstring).
spectator_ctrl_config = config.section("spectator_control")
no_chat = spectator_ctrl_config.option("no_chat", False)
kick = spectator_ctrl_config.option("kick", False)
# Duration strings such as "5min" are converted to seconds by cast_duration.
kick_time = spectator_ctrl_config.option("kick_time", default="5min", cast=cast_duration)
def apply_script(protocol, connection, config):
    """Piqueserver entry point: wrap *connection* with spectator restrictions."""

    class SpectatorControlConnection(connection):
        # Pending reactor.callLater handle for the staged warn/kick timer.
        spec_check = None

        def on_chat(self, value, global_message):
            # if no chat is set and they're a spectator and not an admin
            # also, check for the right "specpower" for owners who add additional
            # rights such as guards, mini-mods, etc.
            if self.team.spectator and no_chat.get():
                if not self.admin and not self.rights.specpower:  # not an admin
                    self.send_chat('Spectators cannot speak on this server.')
                    return False  # deny
            return connection.on_chat(self, value, global_message)

        def on_team_join(self, team):
            if team.spectator and kick.get() and kick_time.get() > 0:
                if self.rights is None or (not self.admin and not self.rights.specpower):  # not an admin
                    # this check is necessary as you can join spectator from
                    # being a spectator
                    if self.spec_check is None or not self.spec_check.active():
                        self.send_chat(
                            'Warning! Spectators are kicked after %s seconds!' %
                            (kick_time.get()))
                        # First warning fires after 3/4 of the allowed time.
                        time = ceil((kick_time.get() / 4) * 3)
                        self.spec_check = reactor.callLater(
                            time, self.METHOD_NAME, 1)
            elif not team.spectator:
                # Joining a playing team cancels any pending kick timer.
                if self.spec_check is not None and self.spec_check.active():
                    self.spec_check.cancel()
                    self.spec_check = None
            return connection.on_team_join(self, team)

        def on_disconnect(self):
            if self.spec_check is not None and self.spec_check.active():
                self.spec_check.cancel()
                self.spec_check = None
            return connection.on_disconnect(self)

        def METHOD_NAME(self, id):
            # Staged timer callback: id == 1 issues the final warning,
            # id == 2 performs the kick.
            if not self.team.spectator:
                print(
                    'WARNING 1. Safety check kept an non-spectator from being spectator-kicked. Report this please!')
                return
            if self.admin or self.rights.specpower:
                print(
                    'WARNING 2. Safety check kept an admin from being spectator-kicked.')
                return
            if id == 1:
                # Final warning covers the remaining quarter of kick_time.
                seconds = floor(kick_time.get() / 4)
                self.send_chat(
                    'Warning! If you do not leave spectator, you will be kicked in %s seconds!' %
                    (seconds))
                self.spec_check = reactor.callLater(
                    seconds, self.METHOD_NAME, 2)
            elif id == 2:
                self.kick(
                    'You have been kicked for remaining in spectator for too long.')

    return protocol, SpectatorControlConnection
|
1,240 |
guess exception source
|
# Copyright (c) 2015, Maxwell Morais and contributors
# License: MIT. See LICENSE
import functools
import inspect
import re
from collections import Counter
from contextlib import suppress
import frappe
# Expected/operational exceptions that should never be written to the
# Error Log (see log_error_snapshot).
EXCLUDE_EXCEPTIONS = (
    frappe.AuthenticationError,
    frappe.CSRFTokenError,  # CSRF covers OAuth too
    frappe.SecurityException,
    frappe.InReadOnlyMode,
)
LDAP_BASE_EXCEPTION = "LDAPException"
def _is_ldap_exception(e):
"""Check if exception is from LDAP library.
This is a hack but ensures that LDAP is not imported unless it's required. This is tested in
unittests in case the exception changes in future.
"""
for t in type(e).__mro__:
if t.__name__ == LDAP_BASE_EXCEPTION:
return True
return False
def log_error(
	title=None, message=None, reference_doctype=None, reference_name=None, *, defer_insert=False
):
	"""Log error to Error Log.

	Creates an Error Log document with the traceback, the request trace id
	and optional document references. With *defer_insert* (or in read-only
	mode) the insert is queued instead of performed immediately.
	"""
	from frappe.monitor import get_trace_id

	# Parameter ALERT:
	# the title and message may be swapped
	# the better API for this is log_error(title, message), and used in many cases this way
	# this hack tries to be smart about whats a title (single line ;-)) and fixes it
	traceback = None
	if message:
		if "\n" in title:  # traceback sent as title
			traceback, title = title, message
		else:
			traceback = message

	title = title or "Error"
	# Fall back to the current exception's traceback when none was given.
	traceback = frappe.as_unicode(traceback or frappe.get_traceback(with_context=True))

	if not frappe.db:
		# No database connection: the best we can do is print and bail out.
		print(f"Failed to log error in db: {title}")
		return

	error_log = frappe.get_doc(
		doctype="Error Log",
		error=traceback,
		method=title,
		reference_doctype=reference_doctype,
		reference_name=reference_name,
		trace_id=get_trace_id(),
	)

	if frappe.flags.read_only or defer_insert:
		error_log.deferred_insert()
	else:
		return error_log.insert(ignore_permissions=True)
def log_error_snapshot(exception: Exception):
	"""Persist *exception* to the Error Log unless it is an expected type."""
	if isinstance(exception, EXCLUDE_EXCEPTIONS) or _is_ldap_exception(exception):
		return

	logger = frappe.logger(with_more_info=True)
	try:
		log_error(title=str(exception), defer_insert=True)
		logger.error("New Exception collected in error log")
	except Exception as e:
		# Error logging must never raise back into the caller's error path.
		logger.error(f"Could not take error snapshot: {e}", exc_info=True)
def get_default_args(func):
    """Return a mapping of parameter name -> default value for every
    parameter of *func* that declares a default in its signature."""
    defaults = {}
    for name, param in inspect.signature(func).parameters.items():
        if param.default is not inspect.Parameter.empty:
            defaults[name] = param.default
    return defaults
def raise_error_on_no_output(error_message, error_type=None, keep_quiet=None):
	"""Decorate any function to throw error incase of missing output.

	TODO: Remove keep_quiet flag after testing and fixing sendmail flow.

	:param error_message: error message to raise
	:param error_type: type of error to raise
	:param keep_quiet: control error raising with external factor.
	:type error_message: str
	:type error_type: Exception Class
	:type keep_quiet: function

	>>> @raise_error_on_no_output("Ingredients missing")
	... def get_ingredients(_raise_error=1): return
	...
	>>> get_ingredients()
	`Exception Name`: Ingredients missing
	"""

	def decorator_raise_error_on_no_output(func):
		@functools.wraps(func)
		def wrapper_raise_error_on_no_output(*args, **kwargs):
			response = func(*args, **kwargs)
			# External kill-switch: skip the check entirely when it says so.
			if callable(keep_quiet) and keep_quiet():
				return response

			# A `_raise_error` kwarg (or its declared default) can disable
			# raising per-call.
			default_kwargs = get_default_args(func)
			default_raise_error = default_kwargs.get("_raise_error")
			raise_error = kwargs.get("_raise_error") if "_raise_error" in kwargs else default_raise_error

			if (not response) and raise_error:
				frappe.throw(error_message, error_type or Exception)

			return response

		return wrapper_raise_error_on_no_output

	return decorator_raise_error_on_no_output
def METHOD_NAME(exception: str) -> str | None:
	"""Attempts to guess source of error based on traceback.

	E.g.
	- For unhandled exception last python file from apps folder is responsible.
	- For frappe.throws the exception source is possibly present after skipping frappe.throw frames
	- For server script the file name is `<serverscript>`
	"""
	# Best-effort heuristic: never let a failure here mask the original error.
	with suppress(Exception):
		installed_apps = frappe.get_installed_apps()
		# Later apps in the install order get higher weight below.
		app_priority = {app: installed_apps.index(app) for app in installed_apps}
		APP_NAME_REGEX = re.compile(r".*File.*apps/(?P<app_name>\w+)/\1/")
		SERVER_SCRIPT_FRAME = re.compile(r".*<serverscript>")

		apps = Counter()
		# Walk frames bottom-up: the deepest frame is the likeliest culprit.
		for line in reversed(exception.splitlines()):
			if SERVER_SCRIPT_FRAME.match(line):
				return "Server Script"

			if matches := APP_NAME_REGEX.match(line):
				app_name = matches.group("app_name")
				apps[app_name] += app_priority.get(app_name, 0)

		# Never blame the framework itself; return the top-ranked app.
		if (probably_source := apps.most_common(1)) and probably_source[0][0] != "frappe":
			return f"{probably_source[0][0]} (app)"
|
1,241 |
test get home dir
|
import unittest
import EXOSIMS.util.get_dirs as gd
import os
from unittest.mock import *
import numpy as np
import sys
class TestGetDirs(unittest.TestCase):
    """
    Tests the get_dir tool.

    Sonny Rappaport, Cornell, July 2021
    """

    def METHOD_NAME(self):
        """
        Tests that get_home_dir works in multiple OS environments.

        Test method: Uses unittest's mock library to create fake OS environment
        and paths to see if get_dirs returns the correct home directory. Because
        get_dirs raises assertion errors when the homedir isn't real, use the
        assertion message itself to check that the homedir is correct.

        This assumes that the os library does its job correctly as the mocking
        library will overwrite whatever os has stored for testing purposes.
        This method also assumes that winreg works as expected.
        """
        # collect assertion errors and verify at the end that we only get the
        # expected assertion errors.
        # this tests the assertion error as well - it should be raised for all
        # of these cases as the pathnames are imaginary (do not exist on disk)
        assertErrors = []
        # mock environment dictionaries, one scenario per entry
        directories = [
            {"HOME": "posixhome"},
            {},
            {"HOME": "myshome", "MSYSTEM": "test"},
            {"HOMESHARE": "sharehome"},
            {"USERPROFILE": "userhome"},
            {"HOME": "otherOShome"},
            {},
        ]
        # mock os names (parallel to `directories` above)
        os_name = ["posix", "posix", "nt", "nt", "nt", "door", "door"]
        # expected home directory per scenario - 'none' shouldn't show up
        home_names = [
            "posixhome",
            "none",
            "myshome",
            "sharehome",
            "userhome",
            "otherOShome",
            "none",
        ]
        # test all paths except for winreg
        for i, dic in enumerate(directories):
            with patch.dict(os.environ, dic, clear=True), patch.object(
                os, "name", os_name[i]
            ):
                # i==1 and i==6 correspond to where homedir isn't in environ
                if i == 1 or i == 6:
                    with self.assertRaises(OSError):
                        gd.get_home_dir()
                else:
                    try:
                        gd.get_home_dir()
                    except AssertionError as e:
                        assertErrors.append(str(e))
        # add all assertion errors so far to the expected list of assertion
        # errors
        exp_asrt = []
        for s in home_names:
            if s == "none":
                continue
            exp_asrt.append(
                "Identified "
                + s
                + " as home directory, but it does"
                + " not exist or is not accessible/writeable"
            )
        # test winreg branch
        # first, test that if winreg doesn't except, homedir is set
        # (mock a key: make key functions do nothing.
        # mock queryvalueex: return test homedir)
        with patch.dict(os.environ, {}, clear=True), patch.object(
            os, "name", "nt"
        ), patch.dict(sys.modules, {"winreg": MagicMock()}), patch(
            "winreg.OpenKey"
        ), patch(
            "winreg.QueryValueEx"
        ) as mockquery:
            mockquery.return_value = ["winregHome"]
            try:
                gd.get_home_dir()
            except AssertionError as e:
                assertErrors.append(str(e))
        # second, test that home is tried if an exception is raised and attempt
        # at homedir setting is made
        with patch.dict(os.environ, {"HOME": "winreghome2"}, clear=True), patch.object(
            os, "name", "nt"
        ), patch.dict(sys.modules, {"winreg": MagicMock()}), patch(
            "winreg.OpenKey"
        ), patch(
            "winreg.QueryValueEx"
        ) as mockquery:
            mockquery.side_effect = Exception
            try:
                gd.get_home_dir()
            except AssertionError as e:
                assertErrors.append(str(e))
        # third, winreg fails AND no HOME fallback exists -> OSError expected
        with patch.dict(os.environ, {}, clear=True), patch.object(
            os, "name", "nt"
        ), patch.dict(sys.modules, {"winreg": MagicMock()}), patch(
            "winreg.OpenKey"
        ), patch(
            "winreg.QueryValueEx"
        ) as mockquery:
            mockquery.side_effect = Exception
            with self.assertRaises(OSError):
                gd.get_home_dir()
        exp_asrt.append(
            "Identified "
            + "winregHome"
            + " as home directory, but it does"
            + " not exist or is not accessible/writeable"
        )
        exp_asrt.append(
            "Identified "
            + "winreghome2"
            + " as home directory, but it does"
            + " not exist or is not accessible/writeable"
        )
        # collected errors must match the expected list exactly, in order
        np.testing.assert_array_equal(assertErrors, exp_asrt)

    def test_get_paths(self):
        """
        Tests that get_paths returns the proper (relative) paths.

        Test method: Calls the method and tests to see if the path dictionary
        matches expectations for various trivial inputs. For some cases, use the
        python mock library to simplify testing.

        For the JSON, queue file, and runqueue branches, just use a simple
        dictionary (*although this should probably be changed to the respective
        datatype.)
        """
        # test no parameter output, testing branch #1.
        # mock current working directory
        dict_paths = gd.get_paths()
        outputs = dict_paths.values()
        outputs_rel = []
        for x in outputs:
            outputs_rel.append(os.path.relpath(x))
        # test environment output, testing branch #2. mock environment dictionary
        with patch.dict(
            os.environ, {"EXOSIMS1": "exosims_path", "EXOSIMS2": "exosims_path2"}
        ):
            # only keep the key/values i seek to test for each branch
            test_dict = dict()
            dict_paths = gd.get_paths()
            for key in dict_paths:
                if key == "EXOSIMS1" or key == "EXOSIMS2":
                    test_dict[key] = dict_paths[key]
            self.assertDictEqual(
                test_dict, {"EXOSIMS1": "exosims_path", "EXOSIMS2": "exosims_path2"}
            )
        # test JSON script output, branch #3. mock
        paths = {
            "EXOSIMS_SCRIPTS_PATH": "scriptspath",
            "EXOSIMS_OBSERVING_BLOCK_CSV_PATH": "csvpath",
            "EXOSIMS_FIT_FILES_FOLDER_PATH": "folderpath",
            "EXOSIMS_PLOT_OUTPUT_PATH": "outputpath",
            "EXOSIMS_RUN_SAVE_PATH": "savepath",
            "EXOSIMS_RUN_LOG_PATH": "logpath",
            "EXOSIMS_QUEUE_FILE_PATH": "filepath",
        }
        paths_test = {"paths": paths}
        self.assertDictEqual(paths, gd.get_paths(specs=paths_test))
        # test qFile script specified path, branch #4
        self.assertDictEqual(paths, gd.get_paths(qFile=paths_test))
        # test runQueue specified path, branch #5
        self.assertDictEqual(paths, gd.get_paths(qFargs=paths))
|
1,242 |
test append
|
import pytest
pytestmark = [
pytest.mark.windows_whitelisted,
]
FIRST_IF_CONTENTS = """\
# set variable identifying the chroot you work in (used in the prompt below)
if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then
debian_chroot=$(cat /etc/debian_chroot)
fi
"""
SECOND_IF_CONTENTS = """\
# enable bash completion in interactive shells
if [ -f /etc/bash_completion ] && ! shopt -oq posix; then
. /etc/bash_completion
fi
"""
@pytest.mark.parametrize("test", (True, False))
def METHOD_NAME(file, tmp_path, test):
    """
    file.append: appends the text on a real run; a test (dry) run reports
    None and leaves the file untouched.
    """
    target = tmp_path / "testfile"
    target.write_text("#salty!")
    ret = file.append(name=str(target), text="cheese", test=test)
    if not test:
        # Real run: the line is appended and the state reports success.
        assert ret.result is True
        assert target.read_text() == "#salty!\ncheese\n"
    else:
        # Dry run: result is None (changes pending) and content is unchanged.
        assert ret.result is None
        assert target.read_text() == "#salty!"
def test_append_issue_1864_makedirs(file, tmp_path):
    """
    file.append but create directories if needed as an option, and create
    the file if it doesn't exist
    """
    fname = "append_issue_1864_makedirs"
    name = tmp_path / fname
    # Non-existing file gets touched
    ret = file.append(name=str(name), text="cheese", makedirs=True)
    assert ret.result is True
    assert name.is_file()
    # Nested directory and file get touched
    name = tmp_path / "issue_1864" / fname
    ret = file.append(name=str(name), text="cheese", makedirs=True)
    assert ret.result is True
    assert name.is_file()
    assert name.parent.is_dir()
    # Parent directory exists but file does not and makedirs is False:
    # the file should still be created (makedirs only affects directories)
    name = name.with_name(name.name + "2")
    ret = file.append(name=str(name), text="cheese", makedirs=False)
    assert ret.result is True
    assert name.is_file()
def test_issue_2227_file_append(file, tmp_path):
    """
    Text to append includes a percent symbol; appending the same line twice
    must not duplicate it (the state is idempotent).
    """
    # let's make use of existing fixture content to create a file with
    # contents to test against
    tmp_file_append = tmp_path / "test.append"
    tmp_file_append.write_text(FIRST_IF_CONTENTS + SECOND_IF_CONTENTS)
    ret = file.append(name=str(tmp_file_append), text="HISTTIMEFORMAT='%F %T '")
    assert ret.result is True
    contents_pre = tmp_file_append.read_text()
    # It should not append text again
    ret = file.append(name=str(tmp_file_append), text="HISTTIMEFORMAT='%F %T '")
    assert ret.result is True
    contents_post = tmp_file_append.read_text()
    assert contents_pre == contents_post
def test_issue_2379_file_append(modules, tmp_path):
    """
    Appending a line that already exists only in commented form (with and
    without a space after '#') must still append the uncommented line once.
    Applied via an inline SLS template rather than the state module directly.
    """
    # Get a path to the temporary file
    tmp_file = tmp_path / "issue-2379-file-append.txt"
    # Write some data to it
    tmp_file.write_text(
        "hello\nworld\n"  # Some junk
        "#PermitRootLogin yes\n"  # Commented text
        "# PermitRootLogin yes\n"  # Commented text with space
    )
    # create the sls template
    template_lines = [
        "{}:".format(tmp_file),
        "  file.append:",
        "    - text: PermitRootLogin yes",
    ]
    template = "\n".join(template_lines)
    ret = modules.state.template_str(template)
    for state_run in ret:
        assert state_run.result is True
        # exactly one line appended despite the commented near-matches
        assert "Appended 1 lines" in state_run.comment
@pytest.mark.slow_test
def test_issue_1896_file_append_source(file, tmp_path, state_tree):
    """
    Verify that we can append a file's contents from a salt:// source, and
    that re-appending the same sources is idempotent.
    """
    testfile = tmp_path / "test.append"
    testfile.touch()
    # Stage the two source files on the fileserver (state tree).
    firstif_file = pytest.helpers.temp_file(
        "firstif", directory=state_tree / "testappend", contents=FIRST_IF_CONTENTS
    )
    secondif_file = pytest.helpers.temp_file(
        "secondif", directory=state_tree / "testappend", contents=SECOND_IF_CONTENTS
    )
    with firstif_file, secondif_file:
        ret = file.append(name=str(testfile), source="salt://testappend/firstif")
        assert ret.result is True
        ret = file.append(name=str(testfile), source="salt://testappend/secondif")
        assert ret.result is True
        testfile_contents = testfile.read_text()
        assert testfile_contents == FIRST_IF_CONTENTS + SECOND_IF_CONTENTS
        # Run it again - contents must not be duplicated
        ret = file.append(name=str(testfile), source="salt://testappend/firstif")
        assert ret.result is True
        ret = file.append(name=str(testfile), source="salt://testappend/secondif")
        assert ret.result is True
        testfile_contents = testfile.read_text()
        assert testfile_contents == FIRST_IF_CONTENTS + SECOND_IF_CONTENTS
def test_file_append_check_cmd(modules, state_tree, tmp_path):
    """
    Test that check_cmd works for file.append
    and those states do not run.
    """
    # Plain string: the SLS body contains no interpolations, so the f-string
    # prefix it previously carried was unnecessary (and risky if literal
    # braces were ever added to the YAML).
    sls_contents = """
    append_in_file:
      file.append:
        - name: /tmp/test
        - text: "appended text"
        - check_cmd:
          - "djasjahj"
    """
    with pytest.helpers.temp_file(
        "file-append-check-cmd.sls", sls_contents, state_tree
    ):
        ret = modules.state.sls("file-append-check-cmd")
        for state_run in ret:
            # The check_cmd is a nonsense command, so the state must fail
            # without applying the append.
            assert state_run.result is False
            assert state_run.comment == "check_cmd determined the state failed"
|
1,243 |
apply overwrites to inputs
|
"""Utilities mainly used in helping `modes` like replay and others."""
import json
from ruamel.yaml import YAML
from ruamel.yaml.composer import ComposerError
import os
import logging
from tackle.exceptions import (
ContextDecodingException,
UnsupportedBaseFileTypeException,
TackleImportError,
)
from tackle.utils.paths import make_sure_path_exists
logger = logging.getLogger(__name__)
def get_file_name(replay_dir, template_name, suffix='yaml'):
    """Return the full path of a replay file inside *replay_dir*.

    The dotted *suffix* is appended unless *template_name* already ends
    with it.
    """
    if template_name.endswith('.' + suffix):
        file_name = template_name
    else:
        file_name = f'{template_name}.{suffix}'
    return os.path.join(replay_dir, file_name)
def dump(output_dir, output_name, output_dict, dump_output='yaml'):
    """Serialize *output_dict* to a replay file under *output_dir*.

    The format is selected by *dump_output*: 'json', 'yaml' or 'yml'.
    """
    if not make_sure_path_exists(output_dir):
        raise IOError('Unable to create replay dir at {}'.format(output_dir))
    # Resolve the destination file name (suffix appended when missing).
    replay_file = get_file_name(output_dir, output_name, dump_output)
    if dump_output == 'json':
        with open(replay_file, 'w') as f:
            json.dump(output_dict, f, indent=2)
    elif dump_output in ('yaml', 'yml'):
        serializer = YAML()
        serializer.indent(mapping=2, sequence=4, offset=2)
        with open(replay_file, 'w') as f:
            serializer.dump(output_dict, f)
def read_config_file(file, file_extension=None):
    """Read a JSON / YAML / TOML config file into a python object.

    :param file: Path to the file to read.
    :param file_extension: Optional override for the format; inferred from
        the file name when not given.
    :return: The parsed content (a list of documents for multi-document YAML).
    :raises FileNotFoundError: If *file* does not exist.
    :raises TackleImportError: If a TOML file is read without `toml` installed.
    :raises UnsupportedBaseFileTypeException: For unsupported extensions.
    :raises ContextDecodingException: If decoding the content fails.
    """
    if not file_extension:
        file_extension = file.split('.')[-1]
    if not os.path.exists(file):
        raise FileNotFoundError(f"Can't find the file {file}.")
    logger.debug(
        'Using \"{}\" as input file and \"{}\" as file extension'.format(
            file, file_extension
        )
    )
    try:
        if file_extension == 'json':
            with open(file) as f:
                return json.load(f)
        elif file_extension in ('yaml', 'yml'):
            # Try a single document first, then fall back to multi-document
            # parsing (which yields a list).
            yaml = YAML()
            try:
                with open(file, encoding='utf-8') as f:
                    return yaml.load(f)
            except ComposerError:
                with open(file, encoding='utf-8') as f:
                    return list(yaml.load_all(f.read()))
        elif file_extension == 'toml':
            # toml is an optional dependency; fail with actionable advice.
            try:
                import toml
            except ImportError:
                raise TackleImportError(
                    f"Error parsing {file} No toml package installed. Install it with "
                    "`pip install toml` and try again."
                ) from None
            with open(file) as f:
                return toml.load(f)
        else:
            # Bug fix: the message previously claimed "json/yaml only" even
            # though toml is supported above.
            raise UnsupportedBaseFileTypeException(
                'Unable to parse file {}. Error: Unsupported extension '
                '(json/yaml/toml only)'.format(file)
            )
    except ValueError as e:
        # JSON decoding error (note: toml's decode error also subclasses
        # ValueError). Raise a friendlier exception for the developer/user.
        message = (
            f'JSON decoding error while loading "{file}". Decoding'
            f' error details: "{str(e)}"'
        )
        raise ContextDecodingException(message) from None
def METHOD_NAME(input, overwrite_dict):
    """Modify *input* in place from the values in *overwrite_dict*.

    Keys absent from *input* are ignored (they are not used in the
    template). When the existing value is a list (a choice variable), a
    matching overwrite is moved to the front of the list, making it the
    default; otherwise the value is simply replaced.
    """
    for variable, overwrite in overwrite_dict.items():
        try:
            current = input[variable]
        except KeyError:
            # Variable is not used in the template - skip it.
            continue
        if not isinstance(current, list):
            # Plain value: replace outright.
            input[variable] = overwrite
        elif overwrite in current:
            # Choice variable and the overwrite is a valid choice:
            # promote it to the default (first) position.
            current.remove(overwrite)
            current.insert(0, overwrite)
|
1,244 |
close
|
# -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.cloud.location import locations_pb2 # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.servicedirectory_v1beta1 import gapic_version as package_version
from google.cloud.servicedirectory_v1beta1.types import lookup_service
# Default client info reported alongside API requests (carries this
# package's gapic version).
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
    gapic_version=package_version.__version__
)
class LookupServiceTransport(abc.ABC):
    """Abstract transport class for LookupService.

    Concrete transports (e.g. gRPC or REST) implement the RPC properties
    declared below; this base class handles credential resolution.
    """

    AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)

    DEFAULT_HOST: str = "servicedirectory.googleapis.com"

    def __init__(
        self,
        *,
        host: str = DEFAULT_HOST,
        credentials: Optional[ga_credentials.Credentials] = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
        api_audience: Optional[str] = None,
        **kwargs,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
            api_audience (Optional[str]): The intended audience for the
                credentials; defaults to *host* when not provided.
        """
        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}

        # Save the scopes.
        self._scopes = scopes

        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs(
                "'credentials_file' and 'credentials' are mutually exclusive"
            )

        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
            )
        elif credentials is None:
            credentials, _ = google.auth.default(
                **scopes_kwargs, quota_project_id=quota_project_id
            )
            # Don't apply audience if the credentials file passed from user.
            if hasattr(credentials, "with_gdch_audience"):
                credentials = credentials.with_gdch_audience(
                    api_audience if api_audience else host
                )

        # If the credentials are service account credentials, then always try to use self signed JWT.
        if (
            always_use_jwt_access
            and isinstance(credentials, service_account.Credentials)
            and hasattr(service_account.Credentials, "with_always_use_jwt_access")
        ):
            credentials = credentials.with_always_use_jwt_access(True)

        # Save the credentials.
        self._credentials = credentials

        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ":" not in host:
            host += ":443"
        self._host = host

    def _prep_wrapped_messages(self, client_info):
        """Precompute the gapic-wrapped RPC methods for this transport."""
        # Precompute the wrapped methods.
        self._wrapped_methods = {
            self.resolve_service: gapic_v1.method.wrap_method(
                self.resolve_service,
                default_timeout=None,
                client_info=client_info,
            ),
        }

    def METHOD_NAME(self):
        """Closes resources associated with the transport.

        .. warning::
             Only call this method if the transport is NOT shared
             with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    @property
    def resolve_service(
        self,
    ) -> Callable[
        [lookup_service.ResolveServiceRequest],
        Union[
            lookup_service.ResolveServiceResponse,
            Awaitable[lookup_service.ResolveServiceResponse],
        ],
    ]:
        """The callable for the ResolveService RPC; provided by subclasses."""
        raise NotImplementedError()

    @property
    def get_location(
        self,
    ) -> Callable[
        [locations_pb2.GetLocationRequest],
        Union[locations_pb2.Location, Awaitable[locations_pb2.Location]],
    ]:
        """The callable for the GetLocation RPC; provided by subclasses."""
        raise NotImplementedError()

    @property
    def list_locations(
        self,
    ) -> Callable[
        [locations_pb2.ListLocationsRequest],
        Union[
            locations_pb2.ListLocationsResponse,
            Awaitable[locations_pb2.ListLocationsResponse],
        ],
    ]:
        """The callable for the ListLocations RPC; provided by subclasses."""
        raise NotImplementedError()

    @property
    def kind(self) -> str:
        """A short string identifying the transport kind; provided by subclasses."""
        raise NotImplementedError()


__all__ = ("LookupServiceTransport",)
|
1,245 |
test failure invalid days string
|
import unittest
import hcl2
from checkov.terraform.checks.resource.azure.NetworkWatcherFlowLogPeriod import check
from checkov.common.models.enums import CheckResult
class TestNetworkWatcherFlowLogPeriod(unittest.TestCase):
    """Tests for the NetworkWatcherFlowLogPeriod check.

    Per the cases below, the check passes for retention of 90+ days or 0
    (presumably meaning indefinite retention - see the check implementation),
    and fails for short or non-numeric retention values.
    """

    def test_failure(self):
        # 7 days of retention is below the accepted minimum -> FAILED
        hcl_res = hcl2.loads("""
        resource "azurerm_network_watcher_flow_log" "test" {
          network_watcher_name = azurerm_network_watcher.test.name
          resource_group_name = azurerm_resource_group.test.name
          network_security_group_id = azurerm_network_security_group.test.id
          storage_account_id = azurerm_storage_account.test.id
          enabled = true
          retention_policy {
            enabled = true
            days = 7
          }
        }
        """)
        resource_conf = hcl_res['resource'][0]['azurerm_network_watcher_flow_log']['test']
        scan_result = check.scan_resource_conf(conf=resource_conf)
        self.assertEqual(CheckResult.FAILED, scan_result)

    def test_failure_no_retention_policy(self):
        # Missing retention_policy block entirely -> FAILED
        hcl_res = hcl2.loads("""
        resource "azurerm_network_watcher_flow_log" "test" {
          network_watcher_name = azurerm_network_watcher.test.name
          resource_group_name = azurerm_resource_group.test.name
          network_security_group_id = azurerm_network_security_group.test.id
          storage_account_id = azurerm_storage_account.test.id
          enabled = true
        }
        """)
        resource_conf = hcl_res['resource'][0]['azurerm_network_watcher_flow_log']['test']
        scan_result = check.scan_resource_conf(conf=resource_conf)
        self.assertEqual(CheckResult.FAILED, scan_result)

    def METHOD_NAME(self):
        # days set to an unresolved variable reference (non-numeric) -> FAILED
        hcl_res = hcl2.loads("""
        resource "azurerm_network_watcher_flow_log" "test" {
          network_watcher_name = azurerm_network_watcher.test.name
          resource_group_name = azurerm_resource_group.test.name
          network_security_group_id = azurerm_network_security_group.test.id
          storage_account_id = azurerm_storage_account.test.id
          enabled = true
          retention_policy {
            enabled = true
            days = var.watcher_flow_logs.days
          }
        }
        """)
        resource_conf = hcl_res['resource'][0]['azurerm_network_watcher_flow_log']['test']
        scan_result = check.scan_resource_conf(conf=resource_conf)
        self.assertEqual(CheckResult.FAILED, scan_result)

    def test_success(self):
        # 90 days of retention satisfies the check -> PASSED
        hcl_res = hcl2.loads("""
        resource "azurerm_network_watcher_flow_log" "test" {
          network_watcher_name = azurerm_network_watcher.test.name
          resource_group_name = azurerm_resource_group.test.name
          network_security_group_id = azurerm_network_security_group.test.id
          storage_account_id = azurerm_storage_account.test.id
          enabled = true
          retention_policy {
            enabled = true
            days = 90
          }
        }
        """)
        resource_conf = hcl_res['resource'][0]['azurerm_network_watcher_flow_log']['test']
        scan_result = check.scan_resource_conf(conf=resource_conf)
        self.assertEqual(CheckResult.PASSED, scan_result)

    def test_success_with_0_days(self):
        # 0 is accepted by the check as well -> PASSED
        hcl_res = hcl2.loads("""
        resource "azurerm_network_watcher_flow_log" "test" {
          network_watcher_name = azurerm_network_watcher.test.name
          resource_group_name = azurerm_resource_group.test.name
          network_security_group_id = azurerm_network_security_group.test.id
          storage_account_id = azurerm_storage_account.test.id
          enabled = true
          retention_policy {
            enabled = true
            days = 0
          }
        }
        """)
        resource_conf = hcl_res['resource'][0]['azurerm_network_watcher_flow_log']['test']
        scan_result = check.scan_resource_conf(conf=resource_conf)
        self.assertEqual(CheckResult.PASSED, scan_result)

    def test_success_with_valid_day_string(self):
        # Numeric value provided as a string is still accepted -> PASSED
        hcl_res = hcl2.loads("""
        resource "azurerm_network_watcher_flow_log" "test" {
          network_watcher_name = azurerm_network_watcher.test.name
          resource_group_name = azurerm_resource_group.test.name
          network_security_group_id = azurerm_network_security_group.test.id
          storage_account_id = azurerm_storage_account.test.id
          enabled = true
          retention_policy {
            enabled = true
            days = "100"
          }
        }
        """)
        resource_conf = hcl_res['resource'][0]['azurerm_network_watcher_flow_log']['test']
        scan_result = check.scan_resource_conf(conf=resource_conf)
        self.assertEqual(CheckResult.PASSED, scan_result)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
1,246 |
test get default
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
from beeswax.server.dbms import get_query_server_config
from desktop.lib.exceptions_renderable import PopupException
from desktop.settings import CACHES_HIVE_DISCOVERY_KEY
from django.core.cache import caches
from nose.tools import assert_equal, assert_raises
if sys.version_info[0] > 2:
from unittest.mock import patch, Mock
else:
from mock import patch, Mock
# Root logger for this test module.
LOG = logging.getLogger()
# Django cache backend used by Hive discovery; cleared before each test below.
cache = caches[CACHES_HIVE_DISCOVERY_KEY]
class TestGetQueryServerConfig():
    """Unit tests for get_query_server_config (Hive/Impala/LLAP discovery)."""

    def setUp(self):
        # Start from an empty discovery cache so cached server addresses
        # cannot leak between tests.
        cache.clear()

    def METHOD_NAME(self):
        """Default (no name): HiveServer2 config straight from beeswax conf."""
        with patch('beeswax.conf.HIVE_SERVER_HOST.get') as HIVE_SERVER_HOST:
            with patch('beeswax.conf.HIVE_SERVER_PORT.get') as HIVE_SERVER_PORT:
                HIVE_SERVER_HOST.return_value = 'hive.gethue.com'
                HIVE_SERVER_PORT.return_value = 10002

                query_server = get_query_server_config()

                assert_equal(query_server['server_name'], 'beeswax')
                assert_equal(query_server['server_host'], 'hive.gethue.com')
                assert_equal(query_server['server_port'], 10002)

    def test_get_impala(self):
        """name='impala': config comes from the impala conf section."""
        with patch('impala.conf.SERVER_HOST.get') as SERVER_HOST:
            with patch('impala.conf.SERVER_PORT.get') as SERVER_PORT:
                SERVER_HOST.return_value = 'impala.gethue.com'
                SERVER_PORT.return_value = 10002

                query_server = get_query_server_config(name='impala')

                assert_equal(query_server['server_name'], 'impala')
                assert_equal(query_server['server_host'], 'impala.gethue.com')
                assert_equal(query_server['server_port'], 10002)

    def test_get_llap(self):
        """name='llap' without discovery: static LLAP host/port from conf."""
        with patch('beeswax.conf.LLAP_SERVER_HOST.get') as LLAP_SERVER_HOST:
            with patch('beeswax.conf.LLAP_SERVER_PORT.get') as LLAP_SERVER_PORT:
                LLAP_SERVER_HOST.return_value = 'hive-llap.gethue.com'
                LLAP_SERVER_PORT.return_value = 10002

                query_server = get_query_server_config(name='llap')

                assert_equal(query_server['server_name'], 'beeswax')
                assert_equal(query_server['server_host'], 'hive-llap.gethue.com')
                assert_equal(query_server['server_port'], 10002)

    def test_get_llap_discovery(self):
        """LLAP discovery via ZooKeeper (non-HA): first registered host wins."""
        with patch('beeswax.conf.HIVE_DISCOVERY_LLAP.get') as HIVE_DISCOVERY_LLAP:
            with patch('beeswax.conf.HIVE_DISCOVERY_LLAP_HA.get') as HIVE_DISCOVERY_LLAP_HA:
                with patch('beeswax.server.dbms.KazooClient') as KazooClient:
                    with patch(
                            'beeswax.conf.LLAP_SERVER_PORT.get') as LLAP_SERVER_PORT:  # Workaround, to remove when assert
                        # server_port ok
                        HIVE_DISCOVERY_LLAP.return_value = True
                        HIVE_DISCOVERY_LLAP_HA.return_value = False
                        LLAP_SERVER_PORT.return_value = 25000
                        KazooClient.return_value = Mock(
                            exists=Mock(return_value=True),
                            # Bug "TypeError: expected string or buffer" if False, to add a new test case and fix
                            get_children=Mock(return_value=['llap1=hive-llap-1.gethue.com:20000;llap2=hive-llap-2.gethue.com:20000'])
                        )

                        query_server = get_query_server_config(name='llap')

                        assert_equal(query_server['server_name'], 'beeswax')
                        assert_equal(query_server['server_host'], 'hive-llap-1.gethue.com')
                        # assert_equal(query_server['server_port'], 20000)  # Bug Always set to LLAP_SERVER_PORT?
                        assert_equal(query_server['server_port'], 25000)  # To remove this line and comment above when fixed.

    def test_get_llap_ha_discovery_all_server_down(self):
        """LLAP HA discovery with no registered servers must raise PopupException."""
        with patch('beeswax.conf.HIVE_DISCOVERY_LLAP.get') as HIVE_DISCOVERY_LLAP:
            with patch('beeswax.conf.HIVE_DISCOVERY_LLAP_HA.get') as HIVE_DISCOVERY_LLAP_HA:
                with patch('beeswax.server.dbms.KazooClient') as KazooClient:
                    HIVE_DISCOVERY_LLAP.return_value = True
                    HIVE_DISCOVERY_LLAP_HA.return_value = True
                    KazooClient.return_value = Mock(
                        exists=Mock(return_value=True),
                        # Bug "TypeError: expected string or buffer" if False, to add a new test case and fix
                        get_children=Mock(return_value=[])
                    )

                    assert_raises(PopupException, get_query_server_config, name='llap')

                    try:
                        query_server = get_query_server_config(name='llap')
                    except PopupException as e:
                        assert_equal(e.message, 'There is no running Hive LLAP server available')

    def test_get_hive_ha_discovery_all_server_down(self):
        """HS2 discovery with no registered servers must raise PopupException."""
        with patch('beeswax.conf.HIVE_DISCOVERY_LLAP.get') as HIVE_DISCOVERY_LLAP:
            with patch('beeswax.conf.HIVE_DISCOVERY_LLAP_HA.get') as HIVE_DISCOVERY_LLAP_HA:
                with patch('beeswax.conf.HIVE_DISCOVERY_HS2.get') as HIVE_DISCOVERY_HS2:
                    with patch('beeswax.conf.HIVE_DISCOVERY_HIVESERVER2_ZNODE.get') as HIVE_DISCOVERY_HIVESERVER2_ZNODE:
                        with patch('beeswax.server.dbms.KazooClient') as KazooClient:
                            HIVE_DISCOVERY_LLAP.return_value = False
                            HIVE_DISCOVERY_LLAP_HA.return_value = False
                            HIVE_DISCOVERY_HS2.return_value = True
                            HIVE_DISCOVERY_HIVESERVER2_ZNODE.return_value = True
                            KazooClient.return_value = Mock(
                                exists=Mock(return_value=True),
                                # Bug "TypeError: expected string or buffer" if False, to add a new test case and fix
                                get_children=Mock(return_value=[])
                            )

                            assert_raises(PopupException, get_query_server_config, name='hive')

                            try:
                                query_server = get_query_server_config(name='hive')
                            except PopupException as e:
                                assert_equal(e.message, 'There are no running Hive server available')

    def test_get_hs2_discovery(self):
        """HS2 discovery via ZooKeeper: first registered serverUri is used."""
        with patch('beeswax.conf.HIVE_DISCOVERY_HS2.get') as HIVE_DISCOVERY_HS2:
            with patch('beeswax.conf.HIVE_DISCOVERY_HIVESERVER2_ZNODE.get') as HIVE_DISCOVERY_HIVESERVER2_ZNODE:
                with patch('beeswax.server.dbms.KazooClient') as KazooClient:
                    HIVE_DISCOVERY_HS2.return_value = True
                    HIVE_DISCOVERY_HIVESERVER2_ZNODE.return_value = True
                    KazooClient.return_value = Mock(
                        exists=Mock(return_value=True),
                        # Bug "TypeError: expected string or buffer" if False, to add a new test case and fix
                        get_children=Mock(return_value=[
                            'serverUri=hive-llap-1.gethue.com:10000;serverUri=hive-llap-2.gethue.com:10000'])
                    )

                    try:
                        query_server = get_query_server_config(name='hive')
                    except PopupException as e:
                        assert_equal(e.message, 'There are no running Hive server available')

                    assert_equal(query_server['server_name'], 'beeswax')
                    assert_equal(query_server['server_host'], 'hive-llap-1.gethue.com')
                    assert_equal(query_server['server_port'], 10000)
# TODO: all the combinations in new test methods, e.g.:
# HIVE_DISCOVERY_LLAP_HA.get() --> True
# ...
|
1,247 |
fix path
|
# Copyright 2020-2022 Capypara and the SkyTemple Contributors
#
# This file is part of SkyTemple.
#
# SkyTemple is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SkyTemple is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SkyTemple. If not, see <https://www.gnu.org/licenses/>.
from __future__ import annotations
import typing
from typing import Optional
from range_typed_integers import u8, u16
from skytemple_files.data.item_p.handler import ItemPHandler
from skytemple_files.data.item_p.protocol import ItemPProtocol, ItemPEntryProtocol
from skytemple_files_test.data.item_p.fixture import (
EXPECTED_ITEM_P_ENTRIES,
eq_item_p_protocol,
)
from skytemple_files_test.case import SkyTempleFilesTestCase, romtest, fixpath
class ItemPTestCase(
    SkyTempleFilesTestCase[ItemPHandler, ItemPProtocol[ItemPEntryProtocol]]
):
    """Round-trip and attribute tests for the item_p.bin handler."""

    handler = ItemPHandler

    def setUp(self) -> None:
        # Load the checked-in fixture file once per test.
        self.fixture = self._load_main_fixture(self.METHOD_NAME())

    def test_entries_read(self) -> None:
        """Deserialized fixture entries match the expected constant data."""
        self.assertEqual(len(self.fixture.item_list), len(EXPECTED_ITEM_P_ENTRIES))
        for entry_fixture, entry_expected in zip(
            self.fixture.item_list, EXPECTED_ITEM_P_ENTRIES
        ):
            self.assertItemPEntriesEqual(entry_expected, entry_fixture)

    def test_entries_write(self) -> None:
        """Serialize + re-deserialize preserves every entry."""
        item_p_after = self._save_and_reload_main_fixture(self.fixture)
        self.assertEqual(len(self.fixture.item_list), len(item_p_after.item_list))
        for entry_before, entry_after in zip(
            self.fixture.item_list, item_p_after.item_list
        ):
            self.assertItemPEntriesEqual(entry_before, entry_after)

    def test_entries_attrs(self):
        """All entry attributes are writable and read back unchanged."""
        e = self.fixture.item_list[0]
        e.buy_price = u16(0xFFF)
        e.sell_price = u16(0xFFF)
        e.category = u8(123)
        e.sprite = u8(123)
        e.item_id = u16(0xFFF)
        e.move_id = u16(0xFFF)
        e.range_min = u8(123)
        e.range_max = u8(123)
        e.palette = u8(123)
        e.action_name = u8(123)
        e.is_valid = True
        e.is_in_td = False
        e.ai_flag_1 = True
        e.ai_flag_2 = False
        e.ai_flag_3 = True
        self.assertEqual(e.buy_price, u16(0xFFF))
        self.assertEqual(e.sell_price, u16(0xFFF))
        self.assertEqual(e.category, u8(123))
        self.assertEqual(e.sprite, u8(123))
        self.assertEqual(e.item_id, u16(0xFFF))
        self.assertEqual(e.move_id, u16(0xFFF))
        self.assertEqual(e.range_min, u8(123))
        self.assertEqual(e.range_max, u8(123))
        self.assertEqual(e.palette, u8(123))
        self.assertEqual(e.action_name, u8(123))
        self.assertEqual(e.is_valid, True)
        self.assertEqual(e.is_in_td, False)
        self.assertEqual(e.ai_flag_1, True)
        self.assertEqual(e.ai_flag_2, False)
        self.assertEqual(e.ai_flag_3, True)

    def test_write_bin(self) -> None:
        """Serializing the (reloaded) fixture reproduces the file byte-for-byte."""
        item_p_after = self._save_and_reload_main_fixture(self.fixture)
        with open(self.METHOD_NAME(), "rb") as f:
            self.assertEqual(f.read(), self.handler.serialize(item_p_after))

    def test_entries_eq(self) -> None:
        """An entry equals itself and differs from its neighbor."""
        for entry_fixture, entry_fixture_plus_1 in zip(
            self.fixture.item_list, self.fixture.item_list[1:]
        ):
            self.assertEqual(entry_fixture, entry_fixture)
            self.assertNotEqual(entry_fixture, entry_fixture_plus_1)

    @romtest(file_names=["item_p.bin"], path="BALANCE/")
    def test_using_rom(self, _, file):
        """Round-trip the item_p.bin of a real ROM, when one is available."""
        item_p_before = self.handler.deserialize(file)
        item_p_after = self._save_and_reload_main_fixture(item_p_before)
        self.assertEqual(len(item_p_before.item_list), len(item_p_after.item_list))
        for entry_before, entry_after in zip(
            item_p_before.item_list, item_p_after.item_list
        ):
            self.assertItemPEntriesEqual(entry_before, entry_after)

    def assertItemPEntriesEqual(
        self,
        entry_before: ItemPEntryProtocol,
        entry_after: ItemPEntryProtocol,
        msg: Optional[str] = None,
    ):
        """Assert two entries are equal per the protocol comparison helper."""
        if msg is None:
            msg = ""
        else:
            msg += "\n"
        self.assertTrue(
            eq_item_p_protocol(entry_before, entry_after),
            f"{msg}Entries must be equal.\n1st: \n{entry_before}\n2nd:{entry_after}",
        )

    @typing.no_type_check
    @classmethod
    @fixpath
    def METHOD_NAME(cls):
        # Path components of the fixture file, resolved by @fixpath.
        return "fixtures", "fixture.bin"
|
1,248 |
test
|
# Copyright 2018-2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from http import HTTPStatus
from typing import Any, Generator, Tuple, cast
from unittest.mock import AsyncMock, Mock, call
from twisted.internet import defer, reactor as _reactor
from synapse.logging.context import SENTINEL_CONTEXT, LoggingContext, current_context
from synapse.rest.client.transactions import CLEANUP_PERIOD_MS, HttpTransactionCache
from synapse.types import ISynapseReactor, JsonDict
from synapse.util import Clock
from tests import unittest
from tests.utils import MockClock
reactor = cast(ISynapseReactor, _reactor)
class HttpTransactionCacheTestCase(unittest.TestCase):
    """Tests for HttpTransactionCache: execution, deduplication by transaction
    key, logcontext handling, non-caching of failures, and cache cleanup."""

    def setUp(self) -> None:
        self.clock = MockClock()
        self.hs = Mock()
        self.hs.get_clock = Mock(return_value=self.clock)
        self.hs.get_auth = Mock()
        self.cache = HttpTransactionCache(self.hs)
        # Canonical (status, body) pair returned by the mocked callbacks below.
        self.mock_http_response = (HTTPStatus.OK, {"result": "GOOD JOB!"})
        # Here we make sure that we're setting all the fields that HttpTransactionCache
        # uses to build the transaction key.
        self.mock_request = Mock()
        self.mock_request.path = b"/foo/bar"
        self.mock_requester = Mock()
        self.mock_requester.app_service = None
        self.mock_requester.is_guest = False
        self.mock_requester.access_token_id = 1234

    # The callback must be invoked with the extra positional/keyword args.
    @defer.inlineCallbacks
    def test_executes_given_function(
        self,
    ) -> Generator["defer.Deferred[Any]", object, None]:
        cb = AsyncMock(return_value=self.mock_http_response)
        res = yield self.cache.fetch_or_execute_request(
            self.mock_request, self.mock_requester, cb, "some_arg", keyword="arg"
        )
        cb.assert_called_once_with("some_arg", keyword="arg")
        self.assertEqual(res, self.mock_http_response)

    # Repeated requests with the same (request, requester) key must reuse the
    # first result, even when the callback arguments differ.
    @defer.inlineCallbacks
    def test_deduplicates_based_on_key(
        self,
    ) -> Generator["defer.Deferred[Any]", object, None]:
        cb = AsyncMock(return_value=self.mock_http_response)
        for i in range(3):  # invoke multiple times
            res = yield self.cache.fetch_or_execute_request(
                self.mock_request,
                self.mock_requester,
                cb,
                "some_arg",
                keyword="arg",
                changing_args=i,
            )
            self.assertEqual(res, self.mock_http_response)
        # expect only a single call to do the work
        cb.assert_called_once_with("some_arg", keyword="arg", changing_args=0)

    # The caller's logcontext must be restored after awaiting the cached
    # result, and the sentinel context must be active while we wait.
    @defer.inlineCallbacks
    def test_logcontexts_with_async_result(
        self,
    ) -> Generator["defer.Deferred[Any]", object, None]:
        @defer.inlineCallbacks
        def cb() -> Generator["defer.Deferred[object]", object, Tuple[int, JsonDict]]:
            # Force a reactor turn so the result really is asynchronous.
            yield Clock(reactor).sleep(0)
            return 1, {}

        @defer.inlineCallbacks
        def METHOD_NAME() -> Generator["defer.Deferred[Any]", object, None]:
            with LoggingContext("c") as c1:
                res = yield self.cache.fetch_or_execute_request(
                    self.mock_request, self.mock_requester, cb
                )
                # Our own context must be back in place after the yield.
                self.assertIs(current_context(), c1)
                self.assertEqual(res, (1, {}))

        # run the test twice in parallel
        d = defer.gatherResults([METHOD_NAME(), METHOD_NAME()])
        self.assertIs(current_context(), SENTINEL_CONTEXT)
        yield d
        self.assertIs(current_context(), SENTINEL_CONTEXT)

    @defer.inlineCallbacks
    def test_does_not_cache_exceptions(
        self,
    ) -> Generator["defer.Deferred[Any]", object, None]:
        """Checks that, if the callback throws an exception, it is called again
        for the next request.
        """
        called = [False]

        def cb() -> "defer.Deferred[Tuple[int, JsonDict]]":
            if called[0]:
                # return a valid result the second time
                return defer.succeed(self.mock_http_response)
            called[0] = True
            # First invocation raises synchronously.
            raise Exception("boo")

        with LoggingContext("test") as test_context:
            try:
                yield self.cache.fetch_or_execute_request(
                    self.mock_request, self.mock_requester, cb
                )
            except Exception as e:
                self.assertEqual(e.args[0], "boo")
            # The failure must not have leaked/lost our logcontext.
            self.assertIs(current_context(), test_context)
            # Second attempt re-runs the callback rather than replaying the error.
            res = yield self.cache.fetch_or_execute_request(
                self.mock_request, self.mock_requester, cb
            )
            self.assertEqual(res, self.mock_http_response)
            self.assertIs(current_context(), test_context)

    @defer.inlineCallbacks
    def test_does_not_cache_failures(
        self,
    ) -> Generator["defer.Deferred[Any]", object, None]:
        """Checks that, if the callback returns a failure, it is called again
        for the next request.
        """
        called = [False]

        def cb() -> "defer.Deferred[Tuple[int, JsonDict]]":
            if called[0]:
                # return a valid result the second time
                return defer.succeed(self.mock_http_response)
            called[0] = True
            # First invocation fails asynchronously (failed Deferred).
            return defer.fail(Exception("boo"))

        with LoggingContext("test") as test_context:
            try:
                yield self.cache.fetch_or_execute_request(
                    self.mock_request, self.mock_requester, cb
                )
            except Exception as e:
                self.assertEqual(e.args[0], "boo")
            self.assertIs(current_context(), test_context)
            res = yield self.cache.fetch_or_execute_request(
                self.mock_request, self.mock_requester, cb
            )
            self.assertEqual(res, self.mock_http_response)
            self.assertIs(current_context(), test_context)

    # Entries survive half a cleanup period but not a full one.
    @defer.inlineCallbacks
    def test_cleans_up(self) -> Generator["defer.Deferred[Any]", object, None]:
        cb = AsyncMock(return_value=self.mock_http_response)
        yield self.cache.fetch_or_execute_request(
            self.mock_request, self.mock_requester, cb, "an arg"
        )
        # should NOT have cleaned up yet
        self.clock.advance_time_msec(CLEANUP_PERIOD_MS / 2)
        yield self.cache.fetch_or_execute_request(
            self.mock_request, self.mock_requester, cb, "an arg"
        )
        # still using cache
        cb.assert_called_once_with("an arg")
        self.clock.advance_time_msec(CLEANUP_PERIOD_MS)
        yield self.cache.fetch_or_execute_request(
            self.mock_request, self.mock_requester, cb, "an arg"
        )
        # no longer using cache
        self.assertEqual(cb.call_count, 2)
        self.assertEqual(cb.call_args_list, [call("an arg"), call("an arg")])
|
1,249 |
delete marker version id
|
# -*- coding: utf-8 -*-
# MinIO Python Library for Amazon S3 Compatible Cloud Storage, (C)
# 2020 MinIO, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Request/response of DeleteObjects API."""
from __future__ import absolute_import
from .xml import Element, SubElement, findall, findtext
class DeleteObject:
    """A single object (optionally a specific version) to be deleted."""

    def __init__(self, name, version_id=None):
        self._name = name
        self._version_id = version_id

    def toxml(self, element):
        """Append this object as an <Object> child of *element* and return it."""
        obj_element = SubElement(element, "Object")
        SubElement(obj_element, "Key", self._name)
        if self._version_id is not None:
            SubElement(obj_element, "VersionId", self._version_id)
        return obj_element
class DeleteRequest:
    """Body of a DeleteObjects request."""

    def __init__(self, object_list, quiet=False):
        self._object_list = object_list
        self._quiet = quiet

    def toxml(self, element):
        """Build and return the <Delete> XML element for this request.

        Note: the *element* argument is ignored; a fresh <Delete> root is
        always created.
        """
        root = Element("Delete")
        if self._quiet:
            SubElement(root, "Quiet", "true")
        for obj in self._object_list:
            obj.toxml(root)
        return root
class DeletedObject:
    """Information about one successfully deleted object."""

    def __init__(self, name, version_id, delete_marker, METHOD_NAME):
        self._name = name
        self._version_id = version_id
        self._delete_marker = delete_marker
        self._delete_marker_version_id = METHOD_NAME

    @property
    def name(self):
        """Object key."""
        return self._name

    @property
    def version_id(self):
        """Version ID of the deleted object, if any."""
        return self._version_id

    @property
    def delete_marker(self):
        """Whether a delete marker was involved."""
        return self._delete_marker

    @property
    def METHOD_NAME(self):
        """Version ID of the delete marker, if any."""
        return self._delete_marker_version_id

    @classmethod
    def fromxml(cls, element):
        """Build an instance from a <Deleted> XML element."""
        name = findtext(element, "Key", True)
        version_id = findtext(element, "VersionId")
        marker_text = findtext(element, "DeleteMarker")
        is_marker = marker_text is not None and marker_text.title() == "True"
        marker_version_id = findtext(element, "DeleteMarkerVersionId")
        return cls(name, version_id, is_marker, marker_version_id)
class DeleteError:
    """Information about one object that failed to delete."""

    def __init__(self, code, message, name, version_id):
        self._code = code
        self._message = message
        self._name = name
        self._version_id = version_id

    @property
    def code(self):
        """Error code reported by the server."""
        return self._code

    @property
    def message(self):
        """Human-readable error message."""
        return self._message

    @property
    def name(self):
        """Object key the error refers to."""
        return self._name

    @property
    def version_id(self):
        """Version ID the error refers to, if any."""
        return self._version_id

    @classmethod
    def fromxml(cls, element):
        """Build an instance from an <Error> XML element."""
        return cls(
            findtext(element, "Code", True),
            findtext(element, "Message"),
            findtext(element, "Key"),
            findtext(element, "VersionId"),
        )
class DeleteResult:
    """Parsed response of a DeleteObjects call: successes and errors."""

    def __init__(self, object_list, error_list):
        self._object_list = object_list
        self._error_list = error_list

    @property
    def object_list(self):
        """Successfully deleted objects (DeletedObject instances)."""
        return self._object_list

    @property
    def error_list(self):
        """Per-object failures (DeleteError instances)."""
        return self._error_list

    @classmethod
    def fromxml(cls, element):
        """Build an instance from the response XML root element."""
        deleted = [DeletedObject.fromxml(tag) for tag in findall(element, "Deleted")]
        errors = [DeleteError.fromxml(tag) for tag in findall(element, "Error")]
        return cls(deleted, errors)
|
1,250 |
get pending payment transactions ids
|
import datetime
import logging
from decimal import Decimal
import sqlalchemy as sa
from aiohttp import web
from aiopg.sa.result import ResultProxy
from models_library.api_schemas_webserver.wallets import PaymentID
from models_library.basic_types import IDStr
from models_library.emails import LowerCaseEmailStr
from models_library.products import ProductName
from models_library.users import UserID
from models_library.wallets import WalletID
from pydantic import BaseModel, PositiveInt, parse_obj_as
from simcore_postgres_database.models.payments_transactions import payments_transactions
from sqlalchemy import literal_column
from sqlalchemy.sql import func
from ..db.plugin import get_database_engine
_logger = logging.getLogger(__name__)
#
# NOTE: this will be moved to the payments service
# NOTE: with https://sqlmodel.tiangolo.com/ we would only define this once!
class PaymentsTransactionsDB(BaseModel):
    """Pydantic model mirroring a row of the `payments_transactions` table."""

    payment_id: IDStr
    price_dollars: Decimal  # accepts negatives
    osparc_credits: Decimal  # accepts negatives
    product_name: ProductName
    user_id: UserID
    user_email: LowerCaseEmailStr
    wallet_id: WalletID
    comment: str | None
    initiated_at: datetime.datetime
    completed_at: datetime.datetime | None  # None while the payment is still pending
    success: bool | None  # None until completed; then True/False
    errors: str | None  # set on completion when an error message was provided
async def create_payment_transaction(  # noqa: PLR0913
    app: web.Application,
    *,
    payment_id: str,
    price_dollars: Decimal,
    osparc_credits: Decimal,
    product_name: str,
    user_id: UserID,
    user_email: str,
    wallet_id: WalletID,
    comment: str | None,
    initiated_at: datetime.datetime,
) -> PaymentsTransactionsDB:
    """Insert a new payment transaction row and return it.

    The row is created without `completed_at`/`success`, i.e. in a pending
    state, and completed later via `complete_payment_transaction`.
    """
    async with get_database_engine(app).acquire() as conn:
        result = await conn.execute(
            payments_transactions.insert()
            .values(
                payment_id=payment_id,
                price_dollars=price_dollars,
                osparc_credits=osparc_credits,
                product_name=product_name,
                user_id=user_id,
                user_email=user_email,
                wallet_id=wallet_id,
                comment=comment,
                initiated_at=initiated_at,
            )
            # RETURNING * so the freshly inserted row can be parsed back
            .returning(literal_column("*"))
        )
        row = await result.first()
        assert row  # nosec
        return PaymentsTransactionsDB.parse_obj(dict(row.items()))
async def list_user_payment_transactions(
    app,
    *,
    user_id: UserID,
    offset: PositiveInt,
    limit: PositiveInt,
) -> tuple[int, list[PaymentsTransactionsDB]]:
    """List payments done by a give user

    Sorted by newest-first.

    Returns (total_number_of_items, page_of_rows).
    Raises ValueError if `offset` exceeds the total number of rows.
    """
    async with get_database_engine(app).acquire() as conn:
        total_number_of_items = await conn.scalar(
            sa.select(sa.func.count())
            .select_from(payments_transactions)
            .where(payments_transactions.c.user_id == user_id)
        )
        assert total_number_of_items is not None  # nosec
        # NOTE: what if between these two calls there are new rows? can we get this in an atomic call?
        if offset > total_number_of_items:
            msg = f"{offset=} exceeds {total_number_of_items=}"
            raise ValueError(msg)
        result: ResultProxy = await conn.execute(
            payments_transactions.select()
            .where(payments_transactions.c.user_id == user_id)
            .order_by(payments_transactions.c.created.desc())  # newest first
            .offset(offset)
            .limit(limit)
        )
        rows = await result.fetchall() or []
        page = parse_obj_as(list[PaymentsTransactionsDB], rows)
        return total_number_of_items, page
async def METHOD_NAME(app: web.Application) -> list[PaymentID]:
    """Return the IDs of payments that have not been completed yet,
    ordered oldest-first (by `initiated_at`)."""
    async with get_database_engine(app).acquire() as conn:
        result = await conn.execute(
            sa.select(payments_transactions.c.payment_id)
            # `completed_at IS NULL` marks a pending payment; `== None` is
            # required by sqlalchemy, hence the noqa.
            .where(payments_transactions.c.completed_at == None)  # noqa: E711
            .order_by(payments_transactions.c.initiated_at.asc())  # oldest first
        )
        rows = await result.fetchall() or []
        return [parse_obj_as(PaymentID, row.payment_id) for row in rows]
async def complete_payment_transaction(
    app: web.Application, *, payment_id: PaymentID, success: bool, error_msg: str | None
) -> PaymentsTransactionsDB:
    """Mark the payment as completed (now) with the given outcome and return
    the updated row.

    `errors` is only written when a non-empty `error_msg` is provided.
    """
    optional = {}
    if error_msg:
        optional["errors"] = error_msg
    async with get_database_engine(app).acquire() as conn:
        result = await conn.execute(
            payments_transactions.update()
            .values(completed_at=func.now(), success=success, **optional)
            .where(payments_transactions.c.payment_id == payment_id)
            .returning(literal_column("*"))
        )
        row = await result.first()
        assert row  # nosec
        return PaymentsTransactionsDB.parse_obj(dict(row.items()))
|
1,251 |
entry status
|
# --- BEGIN COPYRIGHT BLOCK ---
# Copyright (C) 2019, Red Hat inc,
# Copyright (C) 2018, William Brown <[email protected]>
# All rights reserved.
#
# License: GPL (version 3 or any later version).
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
import json
import ldap
from lib389.idm.role import (
Role,
Roles,
ManagedRoles,
FilteredRoles,
NestedRoles,
MUST_ATTRIBUTES,
MUST_ATTRIBUTES_NESTED,
RDN,
)
from lib389.cli_base import (
populate_attr_arguments,
_get_arg,
_get_attributes,
_generic_get,
_generic_get_dn,
_generic_list,
_generic_delete,
_generic_modify_dn,
_generic_create,
_get_dn_arg,
_warn,
)
from lib389.cli_idm import _generic_rename_dn
# Collection/entry classes this CLI module operates on.
MANY = Roles
SINGULAR = Role
def list(inst, basedn, log, args):
    # NOTE: intentionally shadows the builtin `list` to match the CLI verb.
    _generic_list(inst, basedn, log.getChild('_generic_list'), MANY, args)
def get(inst, basedn, log, args):
    # Prompt for the RDN when it was not given on the command line.
    rdn = _get_arg( args.selector, msg="Enter %s to retrieve" % RDN)
    _generic_get(inst, basedn, log.getChild('_generic_get'), MANY, rdn, args)
def get_dn(inst, basedn, log, args):
    dn = _get_dn_arg(args.dn, msg="Enter dn to retrieve")
    _generic_get_dn(inst, basedn, log.getChild('_generic_get_dn'), MANY, dn, args)
def create_managed(inst, basedn, log, args):
    # Managed roles require the base set of attributes.
    kwargs = _get_attributes(args, MUST_ATTRIBUTES)
    _generic_create(inst, basedn, log.getChild('_generic_create'), ManagedRoles, kwargs, args)
def create_filtered(inst, basedn, log, args):
    kwargs = _get_attributes(args, MUST_ATTRIBUTES)
    _generic_create(inst, basedn, log.getChild('_generic_create'), FilteredRoles, kwargs, args)
def create_nested(inst, basedn, log, args):
    # Nested roles use an extended attribute set (see MUST_ATTRIBUTES_NESTED).
    kwargs = _get_attributes(args, MUST_ATTRIBUTES_NESTED)
    _generic_create(inst, basedn, log.getChild('_generic_create'), NestedRoles, kwargs, args)
def delete(inst, basedn, log, args, warn=True):
    dn = _get_dn_arg(args.dn, msg="Enter dn to delete")
    if warn:
        # Interactive confirmation before a destructive operation.
        _warn(dn, msg="Deleting %s %s" % (SINGULAR.__name__, dn))
    _generic_delete(inst, basedn, log.getChild('_generic_delete'), SINGULAR, dn, args)
def modify(inst, basedn, log, args, warn=True):
    dn = _get_dn_arg(args.dn, msg="Enter dn to modify")
    _generic_modify_dn(inst, basedn, log.getChild('_generic_modify_dn'), MANY, dn, args)
def rename(inst, basedn, log, args, warn=True):
    dn = _get_dn_arg(args.dn, msg="Enter dn to modify")
    _generic_rename_dn(inst, basedn, log.getChild('_generic_rename_dn'), MANY, dn, args)
def METHOD_NAME(inst, basedn, log, args):
    """Show the locked/activated state of a single role entry."""
    dn = _get_dn_arg(args.dn, msg="Enter dn to check")
    roles = Roles(inst, basedn)
    try:
        role = roles.get(dn=dn)
    except ldap.NO_SUCH_OBJECT:
        raise ValueError("Role \"{}\" is not found or the entry is not a role.".format(dn))
    status = role.status()
    # Human-readable description of the role state.
    state_text = f'{status["state"].describe(status["role_dn"])}'
    if args.json:
        info_dict = {"dn": dn, "state": state_text}
        log.info(json.dumps({"type": "status", "info": info_dict}, indent=4))
    else:
        log.info(f'Entry DN: {dn}')
        log.info(f'Entry State: {state_text}\n')
def subtree_status(inst, basedn, log, args):
    """Print the DN and state of every role entry found under *basedn*.

    Honors the optional ``-f/--filter`` and ``-s/--scope`` CLI options that
    `create_parser` registers for this sub-command (previously they were
    parsed but silently ignored, and the local name shadowed the builtin
    `filter`).

    Raises ValueError when the search returns no entries.
    """
    basedn = _get_dn_arg(args.basedn, msg="Enter basedn to check")
    # Fall back to the historical defaults (no filter, subtree scope) when the
    # optional arguments were not supplied.
    search_filter = getattr(args, "filter", None) or ""
    scope_map = {
        "base": ldap.SCOPE_BASE,
        "one": ldap.SCOPE_ONELEVEL,
        "sub": ldap.SCOPE_SUBTREE,
    }
    scope = scope_map[getattr(args, "scope", None) or "sub"]
    role_list = Roles(inst, basedn).filter(search_filter, scope)
    if not role_list:
        raise ValueError(f"No entries were found under {basedn} or the user doesn't have an access")
    for entry in role_list:
        status = entry.status()
        log.info(f'Entry DN: {entry.dn}')
        log.info(f'Entry State: {status["state"].describe(status["role_dn"])}\n')
def lock(inst, basedn, log, args):
    # Lock (deactivate) the role entry identified by dn.
    dn = _get_dn_arg(args.dn, msg="Enter dn to check")
    role = Role(inst, dn=dn)
    role.lock()
    log.info(f'Entry {dn} is locked')
def unlock(inst, basedn, log, args):
    # Unlock (re-activate) the role entry identified by dn.
    dn = _get_dn_arg(args.dn, msg="Enter dn to check")
    role = Role(inst, dn=dn)
    role.unlock()
    log.info(f'Entry {dn} is unlocked')
def create_parser(subparsers):
    """Register the `role` command and all of its sub-commands on *subparsers*."""
    role_parser = subparsers.add_parser('role', help='''Manage roles.''')
    subcommands = role_parser.add_subparsers(help='action')
    # --- read operations ---
    list_parser = subcommands.add_parser('list', help='list roles that could login to the directory')
    list_parser.set_defaults(func=list)
    get_parser = subcommands.add_parser('get', help='get')
    get_parser.set_defaults(func=get)
    get_parser.add_argument('selector', nargs='?', help='The term to search for')
    get_dn_parser = subcommands.add_parser('get-by-dn', help='get-by-dn <dn>')
    get_dn_parser.set_defaults(func=get_dn)
    get_dn_parser.add_argument('dn', nargs='?', help='The dn to get and display')
    # --- creation, one sub-command per role type ---
    create_managed_parser = subcommands.add_parser('create-managed', help='create')
    create_managed_parser.set_defaults(func=create_managed)
    populate_attr_arguments(create_managed_parser, MUST_ATTRIBUTES)
    create_filtered_parser = subcommands.add_parser('create-filtered', help='create')
    create_filtered_parser.set_defaults(func=create_filtered)
    populate_attr_arguments(create_filtered_parser, MUST_ATTRIBUTES)
    create_nested_parser = subcommands.add_parser('create-nested', help='create')
    create_nested_parser.set_defaults(func=create_nested)
    populate_attr_arguments(create_nested_parser, MUST_ATTRIBUTES_NESTED)
    # --- modification / renaming / deletion ---
    modify_dn_parser = subcommands.add_parser('modify-by-dn', help='modify-by-dn <dn> <add|delete|replace>:<attribute>:<value> ...')
    modify_dn_parser.set_defaults(func=modify)
    modify_dn_parser.add_argument('dn', nargs=1, help='The dn to modify')
    modify_dn_parser.add_argument('changes', nargs='+', help="A list of changes to apply in format: <add|delete|replace>:<attribute>:<value>")
    rename_dn_parser = subcommands.add_parser('rename-by-dn', help='rename the object')
    rename_dn_parser.set_defaults(func=rename)
    rename_dn_parser.add_argument('dn', help='The dn to rename')
    rename_dn_parser.add_argument('new_dn', help='A new account dn')
    rename_dn_parser.add_argument('--keep-old-rdn', action='store_true', help="Specify whether the old RDN (i.e. 'cn: old_account') should be kept as an attribute of the entry or not")
    delete_parser = subcommands.add_parser('delete', help='deletes the role')
    delete_parser.set_defaults(func=delete)
    delete_parser.add_argument('dn', nargs='?', help='The dn of the role to delete')
    # --- state management ---
    lock_parser = subcommands.add_parser('lock', help='lock')
    lock_parser.set_defaults(func=lock)
    lock_parser.add_argument('dn', nargs='?', help='The dn to lock')
    unlock_parser = subcommands.add_parser('unlock', help='unlock')
    unlock_parser.set_defaults(func=unlock)
    unlock_parser.add_argument('dn', nargs='?', help='The dn to unlock')
    status_parser = subcommands.add_parser('entry-status', help='status of a single entry')
    status_parser.set_defaults(func=METHOD_NAME)
    status_parser.add_argument('dn', nargs='?', help='The single entry dn to check')
    status_parser = subcommands.add_parser('subtree-status', help='status of a subtree')
    status_parser.set_defaults(func=subtree_status)
    status_parser.add_argument('basedn', help="Search base for finding entries")
    status_parser.add_argument('-f', '--filter', help="Search filter for finding entries")
    status_parser.add_argument('-s', '--scope', choices=['base', 'one', 'sub'], help="Search scope (base, one, sub - default is sub")
|
1,252 |
supports auto limit
|
import logging
from redash.query_runner import (
BaseSQLQueryRunner,
JobTimeoutException,
register,
)
from redash.query_runner.mssql import types_map
from redash.utils import json_dumps, json_loads
logger = logging.getLogger(__name__)
try:
import pyodbc
enabled = True
except ImportError:
enabled = False
class SQLServerODBC(BaseSQLQueryRunner):
    """Microsoft SQL Server query runner using pyodbc (ODBC Driver 17)."""

    should_annotate_query = False
    noop_query = "SELECT 1"

    @classmethod
    def configuration_schema(cls):
        """JSON schema describing this data source's configuration form."""
        return {
            "type": "object",
            "properties": {
                "server": {"type": "string"},
                "port": {"type": "number", "default": 1433},
                "user": {"type": "string"},
                "password": {"type": "string"},
                "db": {"type": "string", "title": "Database Name"},
                "charset": {
                    "type": "string",
                    "default": "UTF-8",
                    "title": "Character Set",
                },
                "use_ssl": {
                    "type": "boolean",
                    "title": "Use SSL",
                    "default": False,
                },
                "verify_ssl": {
                    "type": "boolean",
                    "title": "Verify SSL certificate",
                    "default": True,
                },
            },
            "order": [
                "server",
                "port",
                "user",
                "password",
                "db",
                "charset",
                "use_ssl",
                "verify_ssl",
            ],
            "required": ["server", "user", "password", "db"],
            "secret": ["password"],
            "extra_options": ["verify_ssl", "use_ssl"],
        }

    @classmethod
    def enabled(cls):
        # Only available when the optional pyodbc dependency imported cleanly.
        return enabled

    @classmethod
    def name(cls):
        return "Microsoft SQL Server (ODBC)"

    @classmethod
    def type(cls):
        return "mssql_odbc"

    @property
    def METHOD_NAME(self):
        # This runner does not support automatic LIMIT injection.
        return False

    def _get_tables(self, schema):
        """Populate *schema* with user tables/columns and return it as a list."""
        # System schemas are excluded so only user-visible tables are listed.
        query = """
        SELECT table_schema, table_name, column_name
        FROM INFORMATION_SCHEMA.COLUMNS
        WHERE table_schema NOT IN ('guest','INFORMATION_SCHEMA','sys','db_owner','db_accessadmin'
        ,'db_securityadmin','db_ddladmin','db_backupoperator','db_datareader'
        ,'db_datawriter','db_denydatareader','db_denydatawriter'
        );
        """
        results, error = self.run_query(query, None)
        if error is not None:
            self._handle_run_query_error(error)
        results = json_loads(results)
        for row in results["rows"]:
            # Qualify table names that live outside the configured database/schema.
            if row["table_schema"] != self.configuration["db"]:
                table_name = "{}.{}".format(row["table_schema"], row["table_name"])
            else:
                table_name = row["table_name"]
            if table_name not in schema:
                schema[table_name] = {"name": table_name, "columns": []}
            schema[table_name]["columns"].append(row["column_name"])
        return list(schema.values())

    def run_query(self, query, user):
        """Execute *query* and return (json_data, error), exactly one non-None."""
        connection = None
        try:
            server = self.configuration.get("server")
            user = self.configuration.get("user", "")
            password = self.configuration.get("password", "")
            db = self.configuration["db"]
            port = self.configuration.get("port", 1433)
            connection_string_fmt = "DRIVER={{ODBC Driver 17 for SQL Server}};SERVER={},{};DATABASE={};UID={};PWD={}"
            connection_string = connection_string_fmt.format(server, port, db, user, password)
            if self.configuration.get("use_ssl", False):
                connection_string += ";Encrypt=YES"
                if not self.configuration.get("verify_ssl"):
                    connection_string += ";TrustServerCertificate=YES"
            connection = pyodbc.connect(connection_string)
            cursor = connection.cursor()
            logger.debug("SQLServerODBC running query: %s", query)
            cursor.execute(query)
            data = cursor.fetchall()
            if cursor.description is not None:
                columns = self.fetch_columns([(i[0], types_map.get(i[1], None)) for i in cursor.description])
                rows = [dict(zip((column["name"] for column in columns), row)) for row in data]
                data = {"columns": columns, "rows": rows}
                json_data = json_dumps(data)
                error = None
            else:
                error = "No data was returned."
                json_data = None
            cursor.close()
        except pyodbc.Error as e:
            try:
                # Query errors are at `args[1]`
                error = e.args[1]
            except IndexError:
                # Connection errors are `args[0][1]`
                error = e.args[0][1]
            json_data = None
        except (KeyboardInterrupt, JobTimeoutException):
            # BUGFIX: guard the cancel — `connection` is still None if the
            # interrupt fired before pyodbc.connect() returned, and an
            # unguarded call would raise AttributeError and mask the
            # original exception (the `finally` below already guards).
            if connection:
                connection.cancel()
            raise
        finally:
            if connection:
                connection.close()
        return json_data, error


register(SQLServerODBC)
|
1,253 |
test with revealed submissions
|
import json
import pytest
from rest_framework import status
from usaspending_api.awards.models import Award
from usaspending_api.idvs.tests.data.idv_test_data import PARENTS, IDVS, AWARD_COUNT
AGGREGATE_ENDPOINT = "/api/v2/idvs/funding_rollup/"
CURRENT_YEAR = 2020
def _generate_expected_response(award_id):
    """
    Procedurally build the rollup response expected for *award_id*.

    Rather than manually generate an insane number of potential responses
    to test the various parameter combinations, we derive them from the
    PARENTS/IDVS relationships baked into the test data. If you change how
    test data is generated you will probably also have to change this.
    """
    # Direct children of the award (only meaningful when the award is an IDV).
    children = [c for c in PARENTS if PARENTS[c] == award_id and award_id in IDVS]
    # Children of those children, reached through IDV parents.
    grandchildren = [g for g in PARENTS if PARENTS[g] in children and PARENTS[g] in IDVS]
    # Only non-IDV descendants contribute to the rollup numbers.
    non_idv_children = [c for c in children if c not in IDVS]
    non_idv_grandchildren = [g for g in grandchildren if g not in IDVS]
    count = len(non_idv_children) + len(non_idv_grandchildren)
    summ = sum(non_idv_children) + sum(non_idv_grandchildren)
    return {
        "total_transaction_obligated_amount": count * 200000.0 + summ,
        "awarding_agency_count": count,
        "funding_agency_count": count,
        "federal_account_count": count,
    }
def _test_post(client, request, expected_response_parameters_tuple=None, expected_status_code=status.HTTP_200_OK):
    """
    Perform the actual request and interrogates the results.

    request is the Python dictionary that will be posted to the endpoint.
    expected_response_parameters are the values that you would normally
    pass into _generate_expected_response but we're going to do that
    for you so just pass the parameters as a tuple or list.

    expected_status_code is the HTTP status we expect to be returned from
    the call to the endpoint.

    Returns... nothing useful.
    """
    response = client.post(AGGREGATE_ENDPOINT, request)
    assert response.status_code == expected_status_code
    if expected_response_parameters_tuple is None:
        return
    expected = _generate_expected_response(*expected_response_parameters_tuple)
    assert json.loads(response.content.decode("utf-8")) == expected
@pytest.mark.django_db
def test_complete_queries(client, monkeypatch, basic_idvs):
    # Exercise every award id present in the generated test data.
    for _id in range(1, AWARD_COUNT + 1):
        _test_post(client, {"award_id": _id}, (_id,))
@pytest.mark.django_db
def test_with_nonexistent_id(client, monkeypatch, basic_idvs):
    # Unknown ids behave like an award with no children (all-zero rollup).
    _test_post(client, {"award_id": 0}, (0,))
    _test_post(client, {"award_id": "CONT_IDV_000"}, (0,))
@pytest.mark.django_db
def test_with_bogus_id(client, monkeypatch, basic_idvs):
    # A malformed generated id is tolerated and yields the empty rollup.
    _test_post(client, {"award_id": "BOGUS_ID"}, (0,))
@pytest.mark.django_db
def test_null_agencies_accounts(client, monkeypatch, basic_idvs):
    """
    We are going to null out some accounts/agencies to ensure our count is
    correct. According to the LOVELY drawing in idv_test_data.py, C14 will
    be a great candidate for this exercise. Its ultimate parent is I2.
    """
    # Grab the counts for I2.
    response = client.post(AGGREGATE_ENDPOINT, {"award_id": 2})
    awarding_agency_count = response.data["awarding_agency_count"]
    funding_agency_count = response.data["funding_agency_count"]
    # Grab the treasury appropriation account for C14 and null out its agency values.
    Award.objects.filter(pk=14).update(awarding_agency_id=None, funding_agency_id=None)
    # Now re-grab the rollup values and ensure they are decremented accordingly.
    response = client.post(AGGREGATE_ENDPOINT, {"award_id": 2})
    assert awarding_agency_count == response.data["awarding_agency_count"] + 1
    assert funding_agency_count == response.data["funding_agency_count"] + 1
@pytest.mark.django_db
def test_with_unrevealed_submissions(client, monkeypatch, idv_with_unreleased_submissions):
    # Unreleased submissions must not contribute anything to the rollup.
    response = client.post(AGGREGATE_ENDPOINT, {"award_id": 2})
    assert json.loads(response.content.decode("utf-8")) == {
        "awarding_agency_count": 0,
        "federal_account_count": 0,
        "funding_agency_count": 0,
        "total_transaction_obligated_amount": 0.0,
    }
@pytest.mark.django_db
def METHOD_NAME(client, monkeypatch, idv_with_released_submissions):
    # Released submissions do contribute to the rollup.
    response = client.post(AGGREGATE_ENDPOINT, {"award_id": 2})
    assert json.loads(response.content.decode("utf-8")) == {
        "awarding_agency_count": 7,
        "federal_account_count": 7,
        "funding_agency_count": 7,
        "total_transaction_obligated_amount": 1400084.0,
    }
|
1,254 |
create glossary
|
# Copyright © Michal Čihař <[email protected]>
#
# SPDX-License-Identifier: GPL-3.0-or-later
# Generated by Django 3.1.4 on 2021-02-01 14:12
import os
from django.conf import settings
from django.db import migrations
from django.utils.text import slugify
from translate.misc.xml_helpers import valid_chars_only
from weblate.formats.ttkit import TBXFormat
from weblate.utils.hash import calculate_hash
from weblate.utils.state import STATE_READONLY, STATE_TRANSLATED
from weblate.vcs.git import GitRepository, LocalRepository
def METHOD_NAME(project, name, slug, glossary, license):
    """Create a TBX-backed glossary component on *project* for legacy *glossary*."""
    return project.component_set.create(
        slug=slug,
        name=name,
        is_glossary=True,
        glossary_name=glossary.name,
        glossary_color=glossary.color,
        # Glossary entries must not propagate between translations.
        allow_translation_propagation=False,
        manage_units=True,
        file_format="tbx",
        filemask="*.tbx",
        # Backed by a Weblate-managed local repository, not an external VCS.
        vcs="local",
        repo="local:",
        branch="main",
        source_language=glossary.source_language,
        license=license,
    )
def migrate_glossaries(apps, schema_editor):
    """Convert legacy per-project glossaries into regular Weblate components.

    For every project, each glossary becomes a TBX file-backed component with
    its own local VCS repository; glossary terms are copied into translation
    units and their history entries are relinked to the new units.
    """
    Project = apps.get_model("trans", "Project")
    Language = apps.get_model("lang", "Language")
    db_alias = schema_editor.connection.alias
    GitRepository.global_setup()
    projects = Project.objects.using(db_alias).all()
    total = len(projects)
    processed = 0
    for processed, project in enumerate(projects):
        component_slugs = set(project.component_set.values_list("slug", flat=True))
        percent = int(100 * processed / total)
        print(f"Migrating glossaries {percent}% [{processed}/{total}]...{project.name}")
        glossaries = project.glossary_set.all()
        # Reuse an existing component license for the new glossary component.
        try:
            license = project.component_set.exclude(license="").values_list(
                "license", flat=True
            )[0]
        except IndexError:
            license = ""
        for glossary in glossaries:
            if len(glossaries) == 1:
                name = "Glossary"
                slug = "glossary"
            else:
                name = f"Glossary: {glossary.name}"
                slug = f"glossary-{slugify(glossary.name)}"
            base_name = name
            base_slug = slug
            # Create component, appending " - N" until the slug is unique.
            attempts = 0
            while True:
                if slug not in component_slugs:
                    component = METHOD_NAME(project, name, slug, glossary, license)
                    component_slugs.add(slug)
                    break
                attempts += 1
                name = f"{base_name} - {attempts}"
                slug = f"{base_slug}-{attempts}"
            repo_path = os.path.join(settings.DATA_DIR, "vcs", project.slug, slug)
            # Create VCS repository
            repo = LocalRepository.from_files(repo_path, {})
            # Migrate links
            component.links.set(glossary.links.all())
            # Create source translation (read-only: sources are not edited)
            source_translation = component.translation_set.create(
                language=glossary.source_language,
                check_flags="read-only",
                filename="",
                plural=glossary.source_language.plural_set.filter(source=0)[0],
                language_code=glossary.source_language.code,
            )
            source_units = {}
            # Get list of languages that actually have terms in this glossary
            languages = (
                Language.objects.filter(term__glossary=glossary)
                .exclude(pk=glossary.source_language.pk)
                .distinct()
            )
            # Migrate terms
            for language in languages:
                base_filename = f"{language.code}.tbx"
                filename = os.path.join(repo_path, base_filename)
                # Create translation object
                translation = component.translation_set.create(
                    language=language,
                    plural=language.plural_set.filter(source=0)[0],
                    filename=base_filename,
                    language_code=language.code,
                )
                # Create store file
                TBXFormat.create_new_file(filename, language.code, "")
                store = TBXFormat(
                    filename,
                    language_code=language.code,
                    source_language=glossary.source_language.code,
                )
                id_hashes = set()
                for position, term in enumerate(
                    glossary.term_set.filter(language=language)
                ):
                    source = valid_chars_only(term.source)
                    target = valid_chars_only(term.target)
                    context = ""
                    # Store to the file; disambiguate duplicate sources by
                    # bumping the context string until the hash is unique.
                    id_hash = calculate_hash(source, context)
                    offset = 0
                    while id_hash in id_hashes:
                        offset += 1
                        context = str(offset)
                        id_hash = calculate_hash(source, context)
                    id_hashes.add(id_hash)
                    if id_hash not in source_units:
                        source_units[id_hash] = source_translation.unit_set.create(
                            context=context,
                            source=source,
                            target=source,
                            state=STATE_READONLY,
                            position=position,
                            num_words=len(source.split()),
                            id_hash=id_hash,
                        )
                        # Source units reference themselves.
                        source_units[id_hash].source_unit = source_units[id_hash]
                        source_units[id_hash].save()
                    store.new_unit(context, source, target)
                    # Migrate database
                    unit = translation.unit_set.create(
                        context=context,
                        source=source,
                        target=target,
                        state=STATE_TRANSLATED,
                        position=position,
                        num_words=len(source.split()),
                        id_hash=id_hash,
                    )
                    # Adjust history entries to include unit details,
                    # language and project should be already set
                    term.change_set.update(
                        unit=unit,
                        translation=translation,
                        component=component,
                    )
                store.save()
                # Update translation hash
                translation.revision = repo.get_object_hash(filename)
                translation.save(update_fields=["revision"])
            # Commit files
            with repo.lock:
                repo.execute(["add", repo_path])
                if repo.needs_commit():
                    repo.commit("Migrate glossary content")
    if total:
        print(f"Migrating glossaries completed [{total}/{total}]")
class Migration(migrations.Migration):
    # Data-only migration: moves legacy glossaries into regular components.
    dependencies = [
        ("trans", "0115_auto_20210201_1305"),
        ("glossary", "0005_set_source_language"),
    ]
    # elidable=True: this migration may be dropped when squashing.
    operations = [migrations.RunPython(migrate_glossaries, elidable=True)]
|
1,255 |
process
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown copyright. The Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Script to calculate temperature lapse rates for given temperature and
orogrophy datasets."""
from improver import cli
from improver.constants import DALR
@cli.clizefy
@cli.with_output
def METHOD_NAME(
    temperature: cli.inputcube,
    orography: cli.inputcube = None,
    land_sea_mask: cli.inputcube = None,
    *,
    max_height_diff: float = 35,
    nbhood_radius: int = 7,
    max_lapse_rate: float = -3 * DALR,
    min_lapse_rate: float = DALR,
    dry_adiabatic=False,
    model_id_attr: str = None,
):
    """Calculate temperature lapse rates in units of K m-1 over orography grid.
    Args:
        temperature (iris.cube.Cube):
            Air temperature data. This is required even when returning DALR,
            as this defines the grid on which lapse rates are required.
        orography (iris.cube.Cube):
            Orography data.
        land_sea_mask (iris.cube.Cube):
            Binary land-sea mask data. True for land-points, False for sea.
        max_height_diff (float):
            Maximum allowable height difference between the central point and
            points in the neighbourhood over which the lapse rate will be
            calculated.
        nbhood_radius (int):
            Radius of neighbourhood in grid points around each point. The
            neighbourhood is a square array with side length
            2*nbhood_radius + 1. The default value of 7 is from the reference
            paper (see plugin documentation).
        max_lapse_rate (float):
            Maximum lapse rate allowed, in K m-1.
        min_lapse_rate (float):
            Minimum lapse rate allowed, in K m-1.
        dry_adiabatic (bool):
            If True, returns a cube containing the dry adiabatic lapse rate
            rather than calculating the true lapse rate.
        model_id_attr (str):
            Name of the attribute used to identify the source model for
            blending. This is inherited from the input temperature cube.
    Returns:
        iris.cube.Cube:
            Lapse rate (K m-1)
    Raises:
        ValueError: If minimum lapse rate is greater than maximum.
        ValueError: If Maximum height difference is less than zero.
        ValueError: If neighbourhood radius is less than zero.
        RuntimeError: If calculating the true lapse rate and orography or
            land mask arguments are not given.
    """
    import numpy as np
    from improver.lapse_rate import LapseRate
    from improver.metadata.utilities import (
        create_new_diagnostic_cube,
        generate_mandatory_attributes,
    )
    # Shortcut: a constant DALR field on the temperature grid, skipping all
    # orography-related validation below (orography is not needed here).
    if dry_adiabatic:
        attributes = generate_mandatory_attributes(
            [temperature], model_id_attr=model_id_attr
        )
        result = create_new_diagnostic_cube(
            "air_temperature_lapse_rate",
            "K m-1",
            temperature,
            attributes,
            data=np.full_like(temperature.data, DALR).astype(np.float32),
        )
        return result
    # Validate inputs before running the (expensive) lapse-rate calculation.
    if min_lapse_rate > max_lapse_rate:
        msg = "Minimum lapse rate specified is greater than the maximum."
        raise ValueError(msg)
    if max_height_diff < 0:
        msg = "Maximum height difference specified is less than zero."
        raise ValueError(msg)
    if nbhood_radius < 0:
        msg = "Neighbourhood radius specified is less than zero."
        raise ValueError(msg)
    if orography is None or land_sea_mask is None:
        msg = "Missing orography and/or land mask arguments."
        raise RuntimeError(msg)
    result = LapseRate(
        max_height_diff=max_height_diff,
        nbhood_radius=nbhood_radius,
        max_lapse_rate=max_lapse_rate,
        min_lapse_rate=min_lapse_rate,
    )(temperature, orography, land_sea_mask, model_id_attr=model_id_attr)
    return result
|
1,256 |
test throw on failure env var
|
#!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests Google Test's throw-on-failure mode with exceptions disabled.
This script invokes gtest_throw_on_failure_test_ (a program written with
Google Test) with different environments and command line flags.
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import gtest_test_utils
# Constants.
# The command line flag for enabling/disabling the throw-on-failure mode.
THROW_ON_FAILURE = 'gtest_throw_on_failure'
# Path to the gtest_throw_on_failure_test_ program, compiled with
# exceptions disabled.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_throw_on_failure_test_')
# Utilities.
def SetEnvVar(env_var, value):
  """Sets an environment variable to a given value; unsets it when the
  given value is None. The variable name is upper-cased first.
  """

  env_var = env_var.upper()
  if value is None:
    # Removing a variable that is not set is a no-op.
    os.environ.pop(env_var, None)
  else:
    os.environ[env_var] = value
def Run(command):
  """Runs a command; returns True/False if its exit code is/isn't 0."""
  print('Running "%s". . .' % ' '.join(command))
  p = gtest_test_utils.Subprocess(command)
  # Abnormal termination (p.exited falsy) counts as failure too.
  return p.exited and p.exit_code == 0
# The tests. TODO([email protected]): refactor the class to share common
# logic with code in gtest_break_on_failure_unittest.py.
class ThrowOnFailureTest(gtest_test_utils.TestCase):
  """Tests the throw-on-failure mode."""

  def RunAndVerify(self, env_var_value, flag_value, should_fail):
    """Runs gtest_throw_on_failure_test_ and verifies that it does
    (or does not) exit with a non-zero code.
    Args:
      env_var_value: value of the GTEST_THROW_ON_FAILURE environment
                     variable; None if the variable should be unset.
      flag_value:    value of the --gtest_throw_on_failure flag;
                     None if the flag should not be present.
      should_fail:   True iff the program is expected to fail.
    """
    SetEnvVar(THROW_ON_FAILURE, env_var_value)
    if env_var_value is None:
      env_var_value_msg = ' is not set'
    else:
      env_var_value_msg = '=' + env_var_value
    # Build the flag form: absent, explicitly disabled, or enabled.
    if flag_value is None:
      flag = ''
    elif flag_value == '0':
      flag = '--%s=0' % THROW_ON_FAILURE
    else:
      flag = '--%s' % THROW_ON_FAILURE
    command = [EXE_PATH]
    if flag:
      command.append(flag)
    if should_fail:
      should_or_not = 'should'
    else:
      should_or_not = 'should not'
    failed = not Run(command)
    # Restore a clean environment for the next invocation.
    SetEnvVar(THROW_ON_FAILURE, None)
    msg = ('when %s%s, an assertion failure in "%s" %s cause a non-zero '
           'exit code.' %
           (THROW_ON_FAILURE, env_var_value_msg, ' '.join(command),
            should_or_not))
    self.assert_(failed == should_fail, msg)

  def testDefaultBehavior(self):
    """Tests the behavior of the default mode."""
    self.RunAndVerify(env_var_value=None, flag_value=None, should_fail=False)

  def METHOD_NAME(self):
    """Tests using the GTEST_THROW_ON_FAILURE environment variable."""
    self.RunAndVerify(env_var_value='0',
                      flag_value=None,
                      should_fail=False)
    self.RunAndVerify(env_var_value='1',
                      flag_value=None,
                      should_fail=True)

  def testThrowOnFailureFlag(self):
    """Tests using the --gtest_throw_on_failure flag."""
    self.RunAndVerify(env_var_value=None,
                      flag_value='0',
                      should_fail=False)
    self.RunAndVerify(env_var_value=None,
                      flag_value='1',
                      should_fail=True)

  def testThrowOnFailureFlagOverridesEnvVar(self):
    """Tests that --gtest_throw_on_failure overrides GTEST_THROW_ON_FAILURE."""
    self.RunAndVerify(env_var_value='0',
                      flag_value='0',
                      should_fail=False)
    self.RunAndVerify(env_var_value='0',
                      flag_value='1',
                      should_fail=True)
    self.RunAndVerify(env_var_value='1',
                      flag_value='0',
                      should_fail=False)
    self.RunAndVerify(env_var_value='1',
                      flag_value='1',
                      should_fail=True)
if __name__ == '__main__':
gtest_test_utils.Main()
|
1,257 |
test select words output
|
import unittest
import numpy as np
from AnyQt.QtCore import QItemSelectionModel
from Orange.widgets.tests.base import WidgetTest
from Orange.data import StringVariable, Domain
from scipy.sparse import csr_matrix
from orangecontrib.text.corpus import Corpus
from orangecontrib.text.topics import Topic
from orangecontrib.text.widgets.owwordcloud import OWWordCloud
class TestWordCloudWidget(WidgetTest):
    """GUI tests for OWWordCloud covering corpus, BOW-weighted and topic inputs."""

    def setUp(self):
        self.widget = self.create_widget(OWWordCloud)
        self.corpus = Corpus.from_file('deerwester')
        self.topic = self.create_topic()

    def create_topic(self):
        """Build a small Topic table with words a0..a9 weighted 0..9."""
        words = [[f"a{i}"] for i in range(10)]
        weights = list(range(10))
        t = Topic.from_numpy(
            Domain([], metas=[
                StringVariable("Topic 1")
            ]),
            X=np.empty((10, 0)),
            metas=np.array(words),
            W=weights #np.array(weights).reshape(-1, 1)
        )
        t.attributes["topic-method-name"] = "LsiModel"
        return t

    def test_data(self):
        """
        Just basic test.
        GH-244
        """
        self.send_signal(self.widget.Inputs.corpus, self.corpus)
        self.send_signal(self.widget.Inputs.corpus, None)
        self.wait_until_finished()

    def test_empty_data(self):
        """
        Widget crashes when receives zero length data.
        GH-244
        """
        self.send_signal(self.widget.Inputs.corpus, self.corpus)
        self.send_signal(self.widget.Inputs.corpus, self.corpus[:0])
        self.wait_until_finished()

    def test_bow_features(self):
        """
        When bag of words features are at the input word cloud must be made
        based on BOW weights.
        """
        data = self.corpus[:3]
        data = data.extend_attributes(
            csr_matrix([[3, 2, 0], [0, 3, 6], [0, 1, 0]]),
            ["Word1", "Word2", "Word3"])
        for v in data.domain.attributes:
            v.attributes["bow-feature"] = True
        self.send_signal(self.widget.Inputs.corpus, data)
        self.wait_until_finished()
        weights = list(zip(*sorted(self.widget.corpus_counter.items())))[1]
        # due to computation error in computing mean use array_almost_equal
        np.testing.assert_array_almost_equal(weights, [1, 2, 2])
        output = self.get_output(self.widget.Outputs.word_counts)
        np.testing.assert_array_almost_equal([2, 2, 1], output.X.flatten())
        np.testing.assert_array_equal(
            ["Word3", "Word2", "Word1"], output.metas.flatten())
        self.assertTupleEqual(
            ("Word3", "Word2", "Word1"),
            list(zip(*self.widget.tablemodel[:]))[1])
        np.testing.assert_array_almost_equal(
            [2, 2, 1],
            list(zip(*self.widget.tablemodel[:]))[0])
        # try with one word not bow-feature
        data = self.corpus[:3]
        data = data.extend_attributes(
            csr_matrix([[3, 2, 0], [0, 3, 6], [0, 1, 0]]),
            ["Word1", "Word2", "Word3"])
        for v in data.domain.attributes[:2]:
            v.attributes["bow-feature"] = True
        self.send_signal(self.widget.Inputs.corpus, data)
        self.wait_until_finished()
        weights = list(zip(*sorted(self.widget.corpus_counter.items())))[1]
        np.testing.assert_array_almost_equal(weights, [1, 2])
        output = self.get_output(self.widget.Outputs.word_counts)
        np.testing.assert_array_almost_equal([2, 1], output.X.flatten())
        np.testing.assert_array_equal(
            ["Word2", "Word1"], output.metas.flatten())
        self.assertTupleEqual(
            ("Word2", "Word1"),
            list(zip(*self.widget.tablemodel[:]))[1])
        np.testing.assert_array_almost_equal(
            [2, 1],
            list(zip(*self.widget.tablemodel[:]))[0])

    def test_bow_info(self):
        """
        Widget shows info when bow-features used. This test tests this info.
        """
        data = self.corpus[:3]
        # no data no info
        self.assertFalse(self.widget.Info.bow_weights.is_shown())
        self.send_signal(self.widget.Inputs.corpus, data)
        self.wait_until_finished()
        self.assertFalse(self.widget.Info.bow_weights.is_shown())
        self.send_signal(self.widget.Inputs.corpus, None)
        self.wait_until_finished()
        self.assertFalse(self.widget.Info.bow_weights.is_shown())
        # send bow data
        data = data.extend_attributes(
            csr_matrix([[3, 2, 0], [0, 3, 6], [0, 1, 0]]),
            ["Word1", "Word2", "Word3"])
        for v in data.domain.attributes:
            v.attributes["bow-feature"] = True
        self.send_signal(self.widget.Inputs.corpus, data)
        self.wait_until_finished()
        self.assertTrue(self.widget.Info.bow_weights.is_shown())
        self.send_signal(self.widget.Inputs.corpus, None)
        self.wait_until_finished()
        self.assertFalse(self.widget.Info.bow_weights.is_shown())

    def test_topic(self):
        """Topic input populates the word list and shown words/weights."""
        self.send_signal(self.widget.Inputs.topic, self.topic)
        self.assertIsNotNone(self.widget.topic)
        self.assertEqual("a0", self.widget.wordlist[0][0])
        self.assertEqual(10, self.widget.wordlist[0][1])
        self.assertEqual("a9", self.widget.wordlist[9][0])
        self.assertEqual(40, self.widget.wordlist[9][1])
        self.assertListEqual(
            self.topic.metas[:, 0].tolist(), self.widget.shown_words.tolist())
        np.testing.assert_array_almost_equal(self.topic.W, self.widget.shown_weights)

    def test_no_tokens(self):
        """
        In some very rare cases (when all text strings empty) word cloud all
        token lists empty. Widget must work in those cases.
        """
        with self.corpus.unlocked():
            self.corpus.metas = np.array([[" "]] * len(self.corpus))
        self.send_signal(self.widget.Inputs.corpus, self.corpus)
        self.wait_until_finished()

    def METHOD_NAME(self):
        """Selected table rows are forwarded to the selected_words output."""
        self.send_signal(self.widget.Inputs.corpus, self.corpus)
        self.assertIsNone(self.get_output(self.widget.Outputs.selected_words))
        mode = QItemSelectionModel.Rows | QItemSelectionModel.Select
        view = self.widget.tableview
        view.clearSelection()
        view.selectionModel().select(self.widget.tablemodel.index(2, 0), mode)
        view.selectionModel().select(self.widget.tablemodel.index(3, 0), mode)
        output = self.get_output(self.widget.Outputs.selected_words)
        self.assertEqual(2, len(output))
        self.assertEqual("words", output.domain["Words"].attributes["type"])
if __name__ == "__main__":
unittest.main()
|
1,258 |
set
|
#A* -------------------------------------------------------------------
#B* This file contains source code for the PyMOL computer program
#C* copyright 1998-2000 by Warren Lyford Delano of DeLano Scientific.
#D* -------------------------------------------------------------------
#E* It is unlawful to modify or remove this copyright notice.
#F* -------------------------------------------------------------------
#G* Please see the accompanying LICENSE file for further information.
#H* -------------------------------------------------------------------
#I* Additional authors of this source file include:
#-*
#-*
#-*
#Z* -------------------------------------------------------------------
import re
import copy
from chempy import io
class SDFRec:
    """One record of an SD (structure-data) file: a MOL block plus named
    data fields, with field order preserved in self.kees."""

    def __init__(self,sdflist):
        """Parse *sdflist* (a list of text lines, one record without the
        '$$$$' separator) into this record. Raises RuntimeError on
        malformed input. NOTE: may insert a synthesized terminator line
        into the caller's *sdflist*."""
        # "> <KEY>" introduces a data field; an optional "(ref)" tag may follow.
        getkee = re.compile("^>\s+<([^>]*)>")
        gettag = re.compile("^>\s+<[^>]*>\s+\((.*)\)")
        ll = len(sdflist)
        if ll<4:
            print(" SDFRec: invalid SDF record format #1")
            raise RuntimeError
        self.kees = ['MOL'] # separate key list to preserve order
        self.data = {}
        self.ref_code = {}
        self.data['MOL'] = []
        mol = self.data['MOL']
        l = 0
        # Copy the MOL block up to its "M END" terminator.
        while l<ll:
            mol.append(sdflist[l])
            if (sdflist[l][0:6]=='M END') or (sdflist[l][0:5]=='M END'):
                break;
            if sdflist[l][0:1]=='>':
                # Missing terminator: synthesize one before the first data field.
                mol[len(mol)-1]='M END\n'
                sdflist.insert(l,'M END\n')
                ll=len(sdflist)
                break;
            l = l + 1
        if l>=ll:
            print(" SDFRec: invalid SDF record format #2")
            raise RuntimeError
        # Parse the "> <KEY>" data fields that follow the MOL block.
        while l<ll:
            if sdflist[l][0]=='>':
                sl = sdflist[l]
                kee_match = getkee.match(sl)
                if not kee_match:
                    print(" SDFRec: invalid SDF record format #3")
                    raise RuntimeError
                kee = kee_match.group(1)
                self.kees.append(kee)
                ref_code_match = gettag.match(sl)
                if ref_code_match:
                    self.ref_code[kee] = ref_code_match.group(1)
                else:
                    self.ref_code[kee] = ''
                self.data[kee] = []
                sd = self.data[kee]
                l = l + 1
                # Field data runs until the first blank line.
                while l<ll:
                    if sdflist[l].strip():
                        sd.append(sdflist[l])
                        l = l + 1
                    else:
                        break;
            else:
                l = l + 1
    def toList(self):
        """Serialize the record back into a list of lines (no '$$$$')."""
        r = []
        for k in self.kees:
            if k!='MOL':
                if self.ref_code[k]!='':
                    r.append("> <"+k+"> ("+self.ref_code[k]+")\n")
                else:
                    r.append("> <"+k+">\n")
            for a in self.data[k]:
                r.append(a)
            if k!='MOL':
                r.append("\n")
        return r
    def get(self,kee):
        """Return the list of data lines for *kee*, or None if absent."""
        if kee in self.data:
            return self.data[kee]
        else:
            return None
    def get_single(self,kee): # automatic stripping
        """Return the first data line for *kee*, stripped; None if absent or empty."""
        if kee in self.data:
            sdk = self.data[kee]
            if len(sdk):
                return sdk[0].strip()
            else:
                return None
        else:
            return None
    def set_single(self,kee,data,ref_code=None): # adds LF
        """Set field *kee* to a single line of data (newline appended)."""
        self.METHOD_NAME(kee,[data+'\n'],ref_code)
    def get_model(self):
        """Return the chempy model parsed from the MOL block."""
        return io.mol.fromList(self.get('MOL'))
    def set_model(self,model):
        """Replace the MOL block with the serialized chempy *model*."""
        self.METHOD_NAME('MOL',io.mol.toList(model))
    def METHOD_NAME(self,kee,data,ref_code=None):
        """Set field *kee* to a deep copy of *data* lines, registering the
        key (and optional reference code) on first use."""
        if kee not in self.kees:
            self.kees.append(kee)
            self.ref_code[kee]=''
        if ref_code is not None:
            self.ref_code[kee]=ref_code
        self.data[kee] = copy.deepcopy(data)
    def delete(self,kee):
        """Remove field *kee* entirely (KeyError/ValueError if absent)."""
        self.kees.remove(kee)
        del self.data[kee]
class SDF:
    """Reader/writer for SD files; also accepts pseudofiles and URLs."""

    def __init__(*args):
        """SDF(fname[, mode]); mode is one of 'r', 'w', 'wa', 'pf', 'url'."""
        mode = 'r'
        if len(args)<2:
            raise ValueError
        self = args[0]
        fname = args[1]
        if len(args)==3:
            mode = args[2]
        self.mode = mode
        self.at_eof = 0
        if mode not in ('w','r','wa','pf','url'):
            print(" SDF: bad mode")
            # NOTE(review): returning here leaves self.file unset; later
            # method calls will raise AttributeError.
            return None
        if mode=='pf': # pseudofile
            self.file = fname
        elif mode[0:1] == 'r' and '://' in fname:
            # does this look like a URL? (but not a DOS path)
            from urllib.request import urlopen
            self.file = urlopen(fname)
        else:
            self.file = open(fname,mode)
    def write(self,rec):
        """Write an SDFRec followed by the '$$$$' record separator."""
        lst = rec.toList()
        for a in lst:
            self.file.write(a)
        self.file.write('$$$$\n')
    def read(self): # returns SDFRec or None at end of file
        """Read and return the next SDFRec, or None at end of file."""
        cur = []
        while 1:
            s = self.file.readline()
            if not s:
                return None
            elif s[0:4]==r'$$$$':
                return SDFRec(cur)
            else:
                cur.append(s)
    def close(self):
        """Close the underlying file object."""
        self.file.close()
|
1,259 |
parse mixed kwargs
|
# Copyright 2019-2022 The ASReview Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from asreview.models.query.base import BaseQueryStrategy
from asreview.models.query.utils import get_query_model
from asreview.utils import get_random_state
def METHOD_NAME(kwargs, strategy_name):
    """Split out the keyword arguments addressed to one sub-strategy.

    Arguments whose key starts with ``"<strategy_name>_"`` are returned
    with that prefix stripped; all other arguments are ignored.

    Arguments
    ---------
    kwargs: dict
        Mixed keyword arguments for both strategies.
    strategy_name: str
        Name of the strategy whose arguments should be extracted.

    Returns
    -------
    dict
        Keyword arguments for the named strategy, prefix removed.
    """
    # Require the full "name_" prefix: a plain startswith(strategy_name)
    # would also match the bare strategy name (producing an empty key) and
    # keys of another strategy sharing the same stem, e.g. "max" matching
    # "maximum_iter" and mangling it into "imum_iter".
    prefix = strategy_name + "_"
    return {
        key[len(prefix):]: value
        for key, value in kwargs.items()
        if key.startswith(prefix)
    }
class MixedQuery(BaseQueryStrategy):
    """Mixed query strategy.
    Use two different query strategies at the same time with a
    ratio of one to the other. A mix of two query strategies is used. For
    example mixing max and random sampling with a mix ratio of 0.95 would mean
    that at each query 95% of the instances would be sampled with the max
    query strategy after which the remaining 5% would be sampled with the
    random query strategy. It would be called the `max_random` query strategy.
    Every combination of primitive query strategy is possible.
    Arguments
    ---------
    strategy_1: str
        Name of the first query strategy. Default 'max'.
    strategy_2: str
        Name of the second query strategy. Default 'random'
    mix_ratio: float
        Sampling from strategy_1 and strategy_2 according a Bernoulli
        distribution. E.g. for mix_ratio=0.95, this implies strategy_1
        with probability 0.95 and strategy_2 with probability 0.05.
        Default 0.95.
    random_state: float
        Seed for the numpy random number generator.
    **kwargs: dict
        Keyword arguments for the two strategy. To specify which of the
        strategies the argument is for, prepend with the name of the query
        strategy and an underscore, e.g. 'max' for maximal sampling.
    """
    def __init__(
        self,
        strategy_1="max",
        strategy_2="random",
        mix_ratio=0.95,
        random_state=None,
        **kwargs
    ):
        """Initialize the Mixed query strategy."""
        super(MixedQuery, self).__init__()
        self.strategy_1 = strategy_1
        self.strategy_2 = strategy_2
        self.mix_ratio = mix_ratio
        self._random_state = get_random_state(random_state)
        # Split the prefixed kwargs between the two sub-strategies.
        self.kwargs_1 = METHOD_NAME(kwargs, strategy_1)
        self.kwargs_2 = METHOD_NAME(kwargs, strategy_2)
        # Instantiate each sub-strategy; rebuild it with the shared random
        # state when the strategy accepts one.
        self.query_model1 = get_query_model(strategy_1, **self.kwargs_1)
        if "random_state" in self.query_model1.default_param:
            self.query_model1 = get_query_model(
                strategy_1, random_state=self._random_state, **self.kwargs_1
            )
        self.query_model2 = get_query_model(strategy_2, **self.kwargs_2)
        if "random_state" in self.query_model2.default_param:
            self.query_model2 = get_query_model(
                strategy_2, random_state=self._random_state, **self.kwargs_2
            )
    def query(self, X, classifier, n_instances=None, **kwargs):
        """Query up to *n_instances* records, interleaving both
        sub-strategies according to ``mix_ratio`` and deduplicating the
        result while keeping first-occurrence order."""
        # set the number of instances to len(X) if None
        if n_instances is None:
            n_instances = X.shape[0]
        # compute the predictions
        predictions = classifier.predict_proba(X)
        # Perform the query with strategy 1.
        try:
            query_idx_1 = self.query_model1._query(predictions, n_instances=n_instances)
        except AttributeError:
            # for random for example
            query_idx_1 = self.query_model1.query(X, classifier, n_instances)
        # Perform the query with strategy 2.
        try:
            query_idx_2 = self.query_model2._query(predictions, n_instances=n_instances)
        except AttributeError:
            # for random for example
            query_idx_2 = self.query_model2.query(X, classifier, n_instances)
        # mix the 2 query strategies into one list: at each step draw from
        # strategy 1 with probability mix_ratio, otherwise from strategy 2
        query_idx_mix = []
        i = 0
        j = 0
        while i < len(query_idx_1) and j < len(query_idx_2):
            if self._random_state.rand() < self.mix_ratio:
                query_idx_mix.append(query_idx_1[i])
                i = i + 1
            else:
                query_idx_mix.append(query_idx_2[j])
                j = j + 1
        # Deduplicate, preserving the order of first occurrence.
        indexes = np.unique(query_idx_mix, return_index=True)[1]
        return [query_idx_mix[i] for i in sorted(indexes)][0:n_instances]
    def full_hyper_space(self):
        """Return the combined hyperopt search space of both sub-strategies,
        re-prefixing each parameter with its strategy's name."""
        from hyperopt import hp
        space_1, choices_1 = self.query_model1.hyper_space()
        space_2, choices_2 = self.query_model2.hyper_space()
        parameter_space = {}
        hyper_choices = {}
        for key, value in space_1.items():
            new_key = "qry_" + self.strategy_1 + key[4:]
            parameter_space[new_key] = value
            hyper_choices[new_key] = choices_1[key]
        for key, value in space_2.items():
            new_key = "qry_" + self.strategy_2 + key[4:]
            parameter_space[new_key] = value
            hyper_choices[new_key] = choices_2[key]
        parameter_space["qry_mix_ratio"] = hp.uniform("qry_mix_ratio", 0, 1)
        return parameter_space, hyper_choices
    @property
    def name(self):
        """Combined strategy name, e.g. 'max_random'."""
        return "_".join([self.strategy_1, self.strategy_2])
class MaxRandomQuery(MixedQuery):
    """Mixed (95% Maximum and 5% Random) query strategy (``max_random``).

    Convenience subclass of :class:`MixedQuery` fixing the two
    sub-strategies to maximum and random sampling. With the default
    ``mix_ratio`` of 0.95, roughly 95% of queried instances come from the
    maximum strategy and the remaining 5% from the random strategy.
    """

    name = "max_random"
    label = "Mixed (95% Maximum and 5% Random)"

    def __init__(self, mix_ratio=0.95, random_state=None, **kwargs):
        """Initialize the Mixed (Maximum and Random) query strategy."""
        super().__init__(
            "max",
            "random",
            mix_ratio=mix_ratio,
            random_state=random_state,
            **kwargs
        )
class MaxUncertaintyQuery(MixedQuery):
    """Mixed (95% Maximum and 5% Uncertainty) query strategy (``max_uncertainty``).

    Convenience subclass of :class:`MixedQuery` fixing the two
    sub-strategies to maximum and uncertainty sampling. With the default
    ``mix_ratio`` of 0.95, roughly 95% of queried instances come from the
    maximum strategy and the remaining 5% from the uncertainty strategy.
    """

    name = "max_uncertainty"
    label = "Mixed (95% Maximum and 5% Uncertainty)"

    def __init__(self, mix_ratio=0.95, random_state=None, **kwargs):
        """Initialize the Mixed (Maximum and Uncertainty) query strategy."""
        super().__init__(
            "max",
            "uncertainty",
            mix_ratio=mix_ratio,
            random_state=random_state,
            **kwargs
        )
|
1,260 |
getbalance
|
# -*- coding: utf-8 -*-
#
# BitcoinLib - Python Cryptocurrency Library
# Chain.so client
# © 2017-2022 October - 1200 Web Development <http://1200wd.com/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import logging
from datetime import datetime
from bitcoinlib.main import MAX_TRANSACTIONS
from bitcoinlib.services.baseclient import BaseClient, ClientError
from bitcoinlib.transactions import Transaction
_logger = logging.getLogger(__name__)
PROVIDERNAME = 'chainso'
class ChainSo(BaseClient):
def __init__(self, network, base_url, denominator, *args):
super(self.__class__, self).__init__(network, PROVIDERNAME, base_url, denominator, *args)
    def compose_request(self, function, data='', parameter='', variables=None, method='get'):
        """Build the request path ``function/<coin>[/<data>[/<parameter>]]``
        and execute it via BaseClient.request.

        The configured API key, if any, is added to the query variables.
        """
        url_path = function
        url_path += '/' + self.provider_coin_id
        if data:
            url_path += '/' + data
        if parameter:
            url_path += '/' + parameter
        if variables is None:
            variables = {}
        if self.api_key:
            variables.update({'api_key': self.api_key})
        return self.request(url_path, variables, method)
    def sendrawtransaction(self, rawtx):
        """Broadcast a raw transaction hex via the ``send_tx`` endpoint.

        Returns a dict with the new 'txid' ('' when the API returned no
        data) and the full provider response under 'response_dict'.
        """
        res = self.compose_request('send_tx', variables={'tx_hex': rawtx}, method='post')
        return {
            'txid': '' if 'data' not in res else res['data']['txid'],
            'response_dict': res
        }
    def METHOD_NAME(self, addresslist):
        """Return the summed confirmed + unconfirmed balance of all addresses
        in *addresslist*, in the smallest network unit.

        NOTE(review): balances are accumulated as floats before scaling;
        very large amounts could lose precision — consider Decimal.
        """
        balance = 0.0
        for address in addresslist:
            res = self.compose_request('get_address_balance', address)
            balance += float(res['data']['confirmed_balance']) + float(res['data']['unconfirmed_balance'])
        return int(balance * self.units)
    def getutxos(self, address, after_txid='', limit=MAX_TRANSACTIONS):
        """Return unspent outputs for *address* (after *after_txid*, up to
        *limit*) as a list of dicts in bitcoinlib's UTXO format.
        """
        txs = []
        lasttx = after_txid
        res = self.compose_request('get_tx_unspent', address, lasttx)
        if res['status'] != 'success':
            # NOTE(review): a non-success status is silently ignored here;
            # the following 'data' access will then raise KeyError instead.
            pass
        for tx in res['data']['txs'][:limit]:
            txs.append({
                'address': address,
                'txid': tx['txid'],
                'confirmations': tx['confirmations'],
                'output_n': -1 if 'output_no' not in tx else tx['output_no'],
                'input_n': -1 if 'input_no' not in tx else tx['input_no'],
                'block_height': None,
                'fee': None,
                'size': 0,
                # Convert coin-denominated value to the smallest unit.
                'value': int(round(float(tx['value']) * self.units, 0)),
                'script': tx['script_hex'],
                'date': datetime.utcfromtimestamp(tx['time']),
            })
        if len(txs) >= 1000:
            _logger.warning("ChainSo: transaction list has been truncated, and thus is incomplete")
        return txs
def getrawtransaction(self, txid):
res = self.compose_request('get_tx', txid)
return res['data']['tx_hex']
def gettransaction(self, txid, block_height=None):
res = self.compose_request('get_tx', txid)
tx = res['data']
rawtx = tx['tx_hex']
t = Transaction.parse_hex(rawtx, strict=self.strict, network=self.network)
input_total = 0
output_total = 0
if not t.coinbase:
for n, i in enumerate(t.inputs):
i.value = int(round(float(tx['inputs'][n]['value']) * self.units, 0))
input_total += i.value
for o in t.outputs:
o.spent = None
output_total += o.value
if not t.block_height and tx['confirmations']:
t.block_height = self.getblock(tx['blockhash'], False, 1, 1)['height']
t.block_hash = tx['blockhash']
t.rawtx = bytes.fromhex(rawtx)
t.size = tx['size']
t.network = self.network
t.locktime = tx['locktime']
t.input_total = input_total
t.output_total = output_total
t.fee = 0
if t.input_total:
t.fee = t.input_total - t.output_total
t.confirmations = tx['confirmations']
if tx['confirmations']:
t.status = 'confirmed'
t.date = datetime.utcfromtimestamp(tx['time'])
else:
t.status = 'unconfirmed'
t.date = None
return t
def gettransactions(self, address, after_txid='', limit=MAX_TRANSACTIONS):
txs = []
res1 = self.compose_request('get_tx_received', address, after_txid)
if res1['status'] != 'success':
raise ClientError("Chainso get_tx_received request unsuccessful, status: %s" % res1['status'])
res2 = self.compose_request('get_tx_spent', address, after_txid)
if res2['status'] != 'success':
raise ClientError("Chainso get_tx_spent request unsuccessful, status: %s" % res2['status'])
res = res1['data']['txs'] + res2['data']['txs']
res = sorted(res, key=lambda x: x['time'])
tx_conf = []
for t in res:
tt = (t['confirmations'], t['txid'])
if tt not in tx_conf:
tx_conf.append(tt)
for tx in tx_conf[:limit]:
t = self.gettransaction(tx[1])
txs.append(t)
return txs
def blockcount(self):
return self.compose_request('get_info')['data']['blocks']
def mempool(self, txid):
res = self.compose_request('is_tx_confirmed', txid)
if res['status'] == 'success' and res['data']['confirmations'] == 0:
return [txid]
return []
def getblock(self, blockid, parse_transactions, page, limit):
if limit > 5:
limit = 5
bd = self.compose_request('get_block', str(blockid))['data']
if parse_transactions:
txs = []
for txid in bd['txs'][(page-1)*limit:page*limit]:
# try:
txs.append(self.gettransaction(txid, block_height=bd['block_no']))
# except Exception as e:
# raise ClientError("Could not parse tx %s with error %s" % (txid, e))
else:
txs = bd['txs']
n_txs = len(bd['txs'])
block = {
'bits': None,
'depth': bd['confirmations'],
'block_hash': bd['blockhash'],
'height': bd['block_no'],
'merkle_root': bd['merkleroot'],
'nonce': None,
'prev_block': bd['previous_blockhash'],
'time': bd['time'],
'tx_count': n_txs,
'txs': txs,
'version': b'',
'page': page,
'pages': None if not limit else int(n_txs // limit) + (n_txs % limit > 0),
'limit': limit
}
return block
# def getrawblock(self, blockid):
# def isspent(self, txid, output_n):
def getinfo(self):
info = self.compose_request('get_info')['data']
return {
'blockcount': info['blocks'],
'chain': info['name'],
'difficulty': int(float(info['mining_difficulty'])),
'hashrate': int(float(info['hashrate'])),
'mempool_size': int(info['unconfirmed_txs']),
}
|
1,261 |
make suggestion list
|
import warnings
import pytest
from ert.config import ConfigWarning, ErtConfig
from ert.config.parsing.config_schema_deprecations import (
JUST_REMOVE_KEYWORDS,
REPLACE_WITH_GEN_KW,
RSH_KEYWORDS,
USE_QUEUE_OPTION,
)
from ert.config.parsing.deprecation_info import DeprecationInfo
def test_is_angle_bracketed():
    """Only strings fully wrapped as '<...>' count as angle-bracketed keys."""
    accepted = ["<KEY>"]
    rejected = ["KEY", "K<E>Y", ""]
    for value in accepted:
        assert DeprecationInfo.is_angle_bracketed(value)
    for value in rejected:
        assert not DeprecationInfo.is_angle_bracketed(value)
def METHOD_NAME(path):
    """Load the ert config at *path* and return its deprecation warning messages."""
    with warnings.catch_warnings(record=True) as all_warnings:
        _ = ErtConfig.from_file(path)
    messages = []
    for warning in all_warnings:
        is_deprecation = (
            warning.category == ConfigWarning and warning.message.info.is_deprecation
        )
        if is_deprecation:
            messages.append(str(warning.message))
    return messages
@pytest.mark.parametrize("kw", JUST_REMOVE_KEYWORDS)
def test_that_suggester_gives_simple_migrations(tmp_path, kw):
    """Keywords that can just be removed yield a 'no longer' deprecation hint."""
    (tmp_path / "config.ert").write_text(f"NUM_REALIZATIONS 1\n{kw}\n")
    suggestions = METHOD_NAME(str(tmp_path / "config.ert"))
    assert any(f"The keyword {kw} no longer" in s for s in suggestions)
def test_that_suggester_gives_havana_fault_migration(tmp_path):
    """HAVANA_FAULT points the user at a replacement workflow."""
    (tmp_path / "config.ert").write_text("NUM_REALIZATIONS 1\nHAVANA_FAULT\n")
    suggestions = METHOD_NAME(str(tmp_path / "config.ert"))
    assert any(
        "The behavior of HAVANA_FAULT can be reproduced using" in s for s in suggestions
    )
@pytest.mark.parametrize("kw", REPLACE_WITH_GEN_KW)
def test_that_suggester_gives_gen_kw_migrations(tmp_path, kw):
    """Keywords replaced by GEN_KW link to the GEN_KW documentation."""
    (tmp_path / "config.ert").write_text(f"NUM_REALIZATIONS 1\n{kw}\n")
    suggestions = METHOD_NAME(str(tmp_path / "config.ert"))
    assert any(
        "ert.readthedocs.io/en/latest/reference/configuration/keywords.html#gen-kw" in s
        for s in suggestions
    )
@pytest.mark.parametrize("kw", RSH_KEYWORDS)
def test_that_suggester_gives_rsh_migrations(tmp_path, kw):
    """RSH queue keywords report that RSH support was removed."""
    (tmp_path / "config.ert").write_text(f"NUM_REALIZATIONS 1\n{kw}\n")
    suggestions = METHOD_NAME(str(tmp_path / "config.ert"))
    assert any(
        "deprecated and removed support for RSH queues." in s for s in suggestions
    )
@pytest.mark.parametrize("kw", USE_QUEUE_OPTION)
def test_that_suggester_gives_queue_option_migrations(tmp_path, kw):
    """Keywords superseded by QUEUE_OPTION report their removal."""
    (tmp_path / "config.ert").write_text(f"NUM_REALIZATIONS 1\n{kw}\n")
    suggestions = METHOD_NAME(str(tmp_path / "config.ert"))
    assert any(
        f"The {kw} keyword has been removed. For most cases " in s for s in suggestions
    )
def test_that_suggester_gives_refcase_list_migration(tmp_path):
    """REFCASE_LIST reports the 2015 removal of the plotting functionality."""
    (tmp_path / "config.ert").write_text("NUM_REALIZATIONS 1\nREFCASE_LIST case.DATA\n")
    suggestions = METHOD_NAME(str(tmp_path / "config.ert"))
    assert any(
        "The corresponding plotting functionality was removed in 2015" in s
        for s in suggestions
    )
def test_that_suggester_gives_rftpath_migration(tmp_path):
    """RFTPATH reports the 2015 removal of the plotting functionality."""
    (tmp_path / "config.ert").write_text("NUM_REALIZATIONS 1\nRFTPATH rfts/\n")
    suggestions = METHOD_NAME(str(tmp_path / "config.ert"))
    assert any(
        "The corresponding plotting functionality was removed in 2015" in s
        for s in suggestions
    )
def test_that_suggester_gives_end_date_migration(tmp_path):
    """END_DATE reports that it only produces a warning nowadays."""
    (tmp_path / "config.ert").write_text("NUM_REALIZATIONS 1\nEND_DATE 2023.01.01\n")
    suggestions = METHOD_NAME(str(tmp_path / "config.ert"))
    assert any("only display a warning in case of problems" in s for s in suggestions)
def test_that_suggester_gives_rerun_start_migration(tmp_path):
    """RERUN_START reports its tie to the removed ENKF_ASSIMILATION mode."""
    (tmp_path / "config.ert").write_text("NUM_REALIZATIONS 1\nRERUN_START 2023.01.01\n")
    suggestions = METHOD_NAME(str(tmp_path / "config.ert"))
    assert any(
        "used for the deprecated run mode ENKF_ASSIMILATION" in s for s in suggestions
    )
def test_that_suggester_gives_delete_runpath_migration(tmp_path):
    """DELETE_RUNPATH reports its 2017 removal."""
    (tmp_path / "config.ert").write_text("NUM_REALIZATIONS 1\nDELETE_RUNPATH TRUE\n")
    suggestions = METHOD_NAME(str(tmp_path / "config.ert"))
    assert any("It was removed in 2017" in s for s in suggestions)
def test_suggester_gives_runpath_deprecated_specifier_migration(tmp_path):
    """%d placeholders in RUNPATH warn and suggest the <IENS>/<ITER> form."""
    (tmp_path / "config.ert").write_text(
        "NUM_REALIZATIONS 1\nRUNPATH real-%d/iter-%d\n"
    )
    with pytest.warns(
        ConfigWarning,
        match="RUNPATH keyword contains deprecated value"
        r" placeholders: %d, instead use: .*real-<IENS>\/iter-<ITER>",
    ):
        _ = ErtConfig.from_file(tmp_path / "config.ert")
def test_suggester_gives_no_runpath_deprecated_specifier_migration(tmp_path):
    """The RUNPATH warning fires for %d placeholders only, not for <IENS>/<ITER>."""
    (tmp_path / "config.ert").write_text(
        "NUM_REALIZATIONS 1\nRUNPATH real-<IENS>/iter-<ITER>\n"
    )
    no_suggestions = METHOD_NAME(str(tmp_path / "config.ert"))
    (tmp_path / "config.wrong.ert").write_text(
        "NUM_REALIZATIONS 1\nRUNPATH real-%d/iter-%d\n"
    )
    suggestions = METHOD_NAME(str(tmp_path / "config.wrong.ert"))
    # Modern placeholders must be silent while %d must still warn.
    assert not any(
        "RUNPATH keyword contains deprecated value placeholders" in s
        for s in no_suggestions
    ) and any(
        "RUNPATH keyword contains deprecated value placeholders" in s
        for s in suggestions
    )
def test_suggester_gives_plot_settings_migration(tmp_path):
    """PLOT_SETTINGS reports its 2019 removal."""
    (tmp_path / "config.ert").write_text(
        "NUM_REALIZATIONS 1\nPLOT_SETTINGS some args\n"
    )
    suggestions = METHOD_NAME(str(tmp_path / "config.ert"))
    assert any(
        "The keyword PLOT_SETTINGS was removed in 2019 and has no effect" in s
        for s in suggestions
    )
def test_suggester_gives_update_settings_migration(tmp_path):
    """UPDATE_SETTINGS reports its removal."""
    (tmp_path / "config.ert").write_text(
        "NUM_REALIZATIONS 1\nUPDATE_SETTINGS some args\n"
    )
    suggestions = METHOD_NAME(str(tmp_path / "config.ert"))
    assert any(
        "The UPDATE_SETTINGS keyword has been removed and no longer" in s
        for s in suggestions
    )
@pytest.mark.parametrize("definer", ["DEFINE", "DATA_KW"])
def test_suggester_gives_deprecated_define_migration_hint(tmp_path, definer):
    """DEFINE/DATA_KW keys not shaped like '<KEY>' each get a rename hint."""
    (tmp_path / "config.ert").write_text(
        "NUM_REALIZATIONS 1\n"
        f"{definer} <KEY1> x1\n"
        f"{definer} A B\n"
        f"{definer} <A<B>> C\n"
        f"{definer} <A><B> C\n"
    )
    suggestions = METHOD_NAME(str(tmp_path / "config.ert"))
    # Only the three malformed keys warn; '<KEY1>' is well-formed.
    assert len(suggestions) == 3
    for suggestion, expected in zip(
        suggestions,
        [
            " Please change A to <A>",
            " Please change <A<B>> to <AB>",
            " Please change <A><B> to <AB>",
        ],
    ):
        assert (
            f"Using {definer} with substitution strings"
            " that are not of the form '<KEY>' is deprecated." in suggestion
        )
        assert suggestion.endswith(expected)
def test_suggester_does_not_report_non_existent_path_due_to_missing_pre_defines(
    tmp_path,
):
    """<CONFIG_PATH> in LOAD_WORKFLOW must not produce spurious path warnings."""
    (tmp_path / "workflow").write_text("")
    (tmp_path / "config.ert").write_text(
        "NUM_REALIZATIONS 1\nLOAD_WORKFLOW <CONFIG_PATH>/workflow\n"
    )
    assert [
        x
        for x in METHOD_NAME(str(tmp_path / "config.ert"))
        if "DATA_KW" not in x and "DEFINE" not in x
    ] == []
def test_that_suggester_gives_schedule_prediciton_migration(tmp_path):
    """SCHEDULE_PREDICTION_FILE reports its removal."""
    (tmp_path / "config.ert").write_text(
        "NUM_REALIZATIONS 1\nSCHEDULE_PREDICTION_FILE no no no\n"
    )
    suggestions = METHOD_NAME(str(tmp_path / "config.ert"))
    assert any(
        "The 'SCHEDULE_PREDICTION_FILE' config keyword has been removed" in s
        for s in suggestions
    )
|
1,262 |
visit primitive literal
|
# Generated from stix_shifter_utils/stix_translation/src/patterns/grammar/STIXPattern.g4 by ANTLR 4.8
from antlr4 import *
if __name__ is not None and "." in __name__:
from .STIXPatternParser import STIXPatternParser
else:
from STIXPatternParser import STIXPatternParser
# This class defines a complete generic visitor for a parse tree produced by STIXPatternParser.
class STIXPatternVisitor(ParseTreeVisitor):
    """ANTLR-generated base visitor for STIXPattern parse trees.

    Every visit method delegates to visitChildren(); subclass and override
    individual methods to implement pattern translation. Generated code --
    do not edit by hand.
    """
    # Visit a parse tree produced by STIXPatternParser#pattern.
    def visitPattern(self, ctx:STIXPatternParser.PatternContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by STIXPatternParser#observationExpressions.
    def visitObservationExpressions(self, ctx:STIXPatternParser.ObservationExpressionsContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by STIXPatternParser#observationExpressionOr.
    def visitObservationExpressionOr(self, ctx:STIXPatternParser.ObservationExpressionOrContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by STIXPatternParser#observationExpressionAnd.
    def visitObservationExpressionAnd(self, ctx:STIXPatternParser.ObservationExpressionAndContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by STIXPatternParser#observationExpressionRepeated.
    def visitObservationExpressionRepeated(self, ctx:STIXPatternParser.ObservationExpressionRepeatedContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by STIXPatternParser#observationExpressionSimple.
    def visitObservationExpressionSimple(self, ctx:STIXPatternParser.ObservationExpressionSimpleContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by STIXPatternParser#observationExpressionCompound.
    def visitObservationExpressionCompound(self, ctx:STIXPatternParser.ObservationExpressionCompoundContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by STIXPatternParser#observationExpressionWithin.
    def visitObservationExpressionWithin(self, ctx:STIXPatternParser.ObservationExpressionWithinContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by STIXPatternParser#observationExpressionStartStop.
    def visitObservationExpressionStartStop(self, ctx:STIXPatternParser.ObservationExpressionStartStopContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by STIXPatternParser#comparisonExpressionAnd_.
    def visitComparisonExpressionAnd_(self, ctx:STIXPatternParser.ComparisonExpressionAnd_Context):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by STIXPatternParser#comparisonExpressionOred.
    def visitComparisonExpressionOred(self, ctx:STIXPatternParser.ComparisonExpressionOredContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by STIXPatternParser#comparisonExpressionAndPropTest.
    def visitComparisonExpressionAndPropTest(self, ctx:STIXPatternParser.ComparisonExpressionAndPropTestContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by STIXPatternParser#comparisonExpressionAnded.
    def visitComparisonExpressionAnded(self, ctx:STIXPatternParser.ComparisonExpressionAndedContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by STIXPatternParser#propTestEqual.
    def visitPropTestEqual(self, ctx:STIXPatternParser.PropTestEqualContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by STIXPatternParser#propTestOrder.
    def visitPropTestOrder(self, ctx:STIXPatternParser.PropTestOrderContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by STIXPatternParser#propTestSet.
    def visitPropTestSet(self, ctx:STIXPatternParser.PropTestSetContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by STIXPatternParser#propTestLike.
    def visitPropTestLike(self, ctx:STIXPatternParser.PropTestLikeContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by STIXPatternParser#propTestRegex.
    def visitPropTestRegex(self, ctx:STIXPatternParser.PropTestRegexContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by STIXPatternParser#propTestIsSubset.
    def visitPropTestIsSubset(self, ctx:STIXPatternParser.PropTestIsSubsetContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by STIXPatternParser#propTestIsSuperset.
    def visitPropTestIsSuperset(self, ctx:STIXPatternParser.PropTestIsSupersetContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by STIXPatternParser#propTestParen.
    def visitPropTestParen(self, ctx:STIXPatternParser.PropTestParenContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by STIXPatternParser#orderingComparator.
    def visitOrderingComparator(self, ctx:STIXPatternParser.OrderingComparatorContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by STIXPatternParser#stringLiteral.
    def visitStringLiteral(self, ctx:STIXPatternParser.StringLiteralContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by STIXPatternParser#startStopQualifier.
    def visitStartStopQualifier(self, ctx:STIXPatternParser.StartStopQualifierContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by STIXPatternParser#withinQualifier.
    def visitWithinQualifier(self, ctx:STIXPatternParser.WithinQualifierContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by STIXPatternParser#repeatedQualifier.
    def visitRepeatedQualifier(self, ctx:STIXPatternParser.RepeatedQualifierContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by STIXPatternParser#objectPath.
    def visitObjectPath(self, ctx:STIXPatternParser.ObjectPathContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by STIXPatternParser#objectType.
    def visitObjectType(self, ctx:STIXPatternParser.ObjectTypeContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by STIXPatternParser#firstPathComponent.
    def visitFirstPathComponent(self, ctx:STIXPatternParser.FirstPathComponentContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by STIXPatternParser#indexPathStep.
    def visitIndexPathStep(self, ctx:STIXPatternParser.IndexPathStepContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by STIXPatternParser#pathStep.
    def visitPathStep(self, ctx:STIXPatternParser.PathStepContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by STIXPatternParser#keyPathStep.
    def visitKeyPathStep(self, ctx:STIXPatternParser.KeyPathStepContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by STIXPatternParser#setLiteral.
    def visitSetLiteral(self, ctx:STIXPatternParser.SetLiteralContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by STIXPatternParser#primitiveLiteral.
    def METHOD_NAME(self, ctx:STIXPatternParser.PrimitiveLiteralContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by STIXPatternParser#orderableLiteral.
    def visitOrderableLiteral(self, ctx:STIXPatternParser.OrderableLiteralContext):
        return self.visitChildren(ctx)
del STIXPatternParser  # BUGFIX: was 'del STIXPatternParse' (undefined name -> NameError at import)
|
1,263 |
test asarray chkfinite
|
import unittest
import pytest
import numpy
import cupy
from cupy import testing
class TestKind(unittest.TestCase):
    """Tests for cupy array-kind conversion routines:
    asarray_chkfinite, asfarray, asfortranarray and require.
    """
    @testing.for_orders('CFAK')
    @testing.for_all_dtypes()
    @testing.numpy_cupy_array_equal()
    def METHOD_NAME(self, xp, dtype, order):
        # Finite input: asarray_chkfinite must behave like asarray.
        a = [0, 4, 0, 5]
        return xp.asarray_chkfinite(a, dtype=dtype, order=order)
    @testing.for_orders('CFAK')
    @testing.for_all_dtypes(no_bool=True)
    def test_asarray_chkfinite_non_finite_vals(self, dtype, order):
        # Non-finite input must raise; integer conversion overflows first.
        a = [-numpy.inf, 0., numpy.inf, numpy.nan]
        for xp in (numpy, cupy):
            if xp.issubdtype(dtype, xp.integer):
                error = OverflowError
            else:
                error = ValueError
            with pytest.raises(error):
                xp.asarray_chkfinite(a, dtype=dtype, order=order)
    @testing.for_all_dtypes()
    def test_asfarray(self, dtype):
        a = cupy.asarray([1, 2, 3])
        a_gpu = cupy.asfarray(a, dtype)
        a_cpu = numpy.asfarray(a.get(), dtype)
        assert a_cpu.dtype == a_gpu.dtype
    @testing.for_all_dtypes()
    def test_asfortranarray1(self, dtype):
        # C-contiguous input becomes F-contiguous; strides match numpy.
        def func(xp):
            x = xp.zeros((2, 3), dtype)
            ret = xp.asfortranarray(x)
            assert x.flags.c_contiguous
            assert ret.flags.f_contiguous
            return ret.strides
        assert func(numpy) == func(cupy)
    @testing.for_all_dtypes()
    def test_asfortranarray2(self, dtype):
        # Same check for a 3-d array.
        def func(xp):
            x = xp.zeros((2, 3, 4), dtype)
            ret = xp.asfortranarray(x)
            assert x.flags.c_contiguous
            assert ret.flags.f_contiguous
            return ret.strides
        assert func(numpy) == func(cupy)
    @testing.for_all_dtypes()
    def test_asfortranarray3(self, dtype):
        # Idempotence: asfortranarray applied twice.
        def func(xp):
            x = xp.zeros((2, 3, 4), dtype)
            ret = xp.asfortranarray(xp.asfortranarray(x))
            assert x.flags.c_contiguous
            assert ret.flags.f_contiguous
            return ret.strides
        assert func(numpy) == func(cupy)
    @testing.for_all_dtypes()
    def test_asfortranarray4(self, dtype):
        # Transposed (already F-ordered) input.
        def func(xp):
            x = xp.zeros((2, 3), dtype)
            x = xp.transpose(x, (1, 0))
            ret = xp.asfortranarray(x)
            assert ret.flags.f_contiguous
            return ret.strides
        assert func(numpy) == func(cupy)
    @testing.for_all_dtypes()
    def test_asfortranarray5(self, dtype):
        # Non-zero data: values and strides must still agree with numpy.
        def func(xp):
            x = testing.shaped_arange((2, 3), xp, dtype)
            ret = xp.asfortranarray(x)
            assert x.flags.c_contiguous
            assert ret.flags.f_contiguous
            return ret.strides
        assert func(numpy) == func(cupy)
    @testing.for_all_dtypes()
    def test_require_flag_check(self, dtype):
        possible_flags = [['C_CONTIGUOUS'], ['F_CONTIGUOUS']]
        x = cupy.zeros((2, 3, 4), dtype)
        for flags in possible_flags:
            arr = cupy.require(x, dtype, flags)
            for parameter in flags:
                assert arr.flags[parameter]
            assert arr.dtype == dtype
    @testing.for_all_dtypes()
    def test_require_owndata(self, dtype):
        # 'O' requirement forces a copy for a view.
        x = cupy.zeros((2, 3, 4), dtype)
        arr = x.view()
        arr = cupy.require(arr, dtype, ['O'])
        assert arr.flags['OWNDATA']
    @testing.for_all_dtypes()
    def test_require_C_and_F_flags(self, dtype):
        # Requiring both C and F order at once is contradictory.
        x = cupy.zeros((2, 3, 4), dtype)
        with pytest.raises(ValueError):
            cupy.require(x, dtype, ['C', 'F'])
    @testing.for_all_dtypes()
    def test_require_incorrect_requirments(self, dtype):
        # 'W' (writeable) is not supported by cupy.require.
        x = cupy.zeros((2, 3, 4), dtype)
        with pytest.raises(ValueError):
            cupy.require(x, dtype, ['W'])
    @testing.for_all_dtypes()
    def test_require_incorrect_dtype(self, dtype):
        x = cupy.zeros((2, 3, 4), dtype)
        with pytest.raises(ValueError):
            cupy.require(x, 'random', 'C')
    @testing.for_all_dtypes()
    def test_require_empty_requirements(self, dtype):
        # No requirements: result defaults to C-contiguous.
        x = cupy.zeros((2, 3, 4), dtype)
        x = cupy.require(x, dtype, [])
        assert x.flags['C_CONTIGUOUS']
|
1,264 |
client
|
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common setup and fixtures for the pytest suite used by this service."""
import asyncio
import datetime
import os
import random
import time
from contextlib import contextmanager
import pytest
from flask import Flask
from flask_migrate import Migrate, upgrade
from legal_api import db as _db
from legal_api import jwt as _jwt
from nats.aio.METHOD_NAME import Client as Nats
from sqlalchemy import event, text
from sqlalchemy.schema import MetaData
from stan.aio.METHOD_NAME import Client as Stan
from entity_pay.config import get_named_config
from . import FROZEN_DATETIME
@contextmanager
def not_raises(exception):
    """Corollary to the pytest raises builtin.

    Assures that an exception is NOT thrown.
    """
    try:
        yield
    except exception:
        # pytest.fail() raises Failed itself; re-raising its (never returned)
        # result was dead code.
        pytest.fail(f'DID RAISE {exception}')
# fixture to freeze utcnow to a fixed date-time
@pytest.fixture
def freeze_datetime_utcnow(monkeypatch):
    """Fixture to return a static time for utcnow()."""
    class _Datetime:
        # Minimal stand-in: only utcnow() is replaced.
        @classmethod
        def utcnow(cls):
            return FROZEN_DATETIME
    monkeypatch.setattr(datetime, 'datetime', _Datetime)
@pytest.fixture(scope='session')
def app():
    """Return a session-wide application configured in TEST mode."""
    # _app = create_app('testing')
    _app = Flask(__name__)
    _app.config.from_object(get_named_config('testing'))
    _db.init_app(_app)
    return _app
@pytest.fixture
def config(app):
    """Return the application config."""
    return app.config
@pytest.fixture(scope='session')
def METHOD_NAME(app):  # pylint: disable=redefined-outer-name
    """Return a session-wide Flask test client."""
    return app.test_client()
@pytest.fixture(scope='session')
def jwt():
    """Return a session-wide jwt manager."""
    return _jwt
@pytest.fixture(scope='session')
def client_ctx(app):  # pylint: disable=redefined-outer-name
    """Return session-wide Flask test client."""
    with app.test_client() as _client:
        yield _client
@pytest.fixture(scope='function')
def client_id():
    """Return a unique client_id that can be used in tests."""
    # 0x58 == 88 random bits per id.
    _id = random.SystemRandom().getrandbits(0x58)
    # _id = (base64.urlsafe_b64encode(uuid.uuid4().bytes)).replace('=', '')
    return f'client-{_id}'
@pytest.fixture(scope='session')
def db(app):  # pylint: disable=redefined-outer-name, invalid-name
    """Return a session-wide initialised database.
    Drops all existing tables - Meta follows Postgres FKs
    """
    with app.app_context():
        # Clear out any existing tables
        metadata = MetaData(_db.engine)
        metadata.reflect()
        metadata.drop_all()
        _db.drop_all()
        # Also drop any leftover sequences so migrations can recreate them.
        sequence_sql = """SELECT sequence_name FROM information_schema.sequences
                          WHERE sequence_schema='public'
                       """
        sess = _db.session()
        for seq in [name for (name,) in sess.execute(text(sequence_sql))]:
            try:
                sess.execute(text('DROP SEQUENCE public.%s ;' % seq))
                print('DROP SEQUENCE public.%s ' % seq)
            except Exception as err:  # pylint: disable=broad-except # noqa: B902
                print(f'Error: {err}')
        sess.commit()
        # ##############################################
        # There are 2 approaches, an empty database, or the same one that the app will use
        #     create the tables
        #     _db.create_all()
        # or
        # Use Alembic to load all of the DB revisions including supporting lookup data
        # This is the path we'll use in legal_api!!
        # even though this isn't referenced directly, it sets up the internal configs that upgrade needs
        legal_api_dir = os.path.abspath('..').replace('queue_services', 'legal-api')
        legal_api_dir = os.path.join(legal_api_dir, 'migrations')
        Migrate(app, _db, directory=legal_api_dir)
        upgrade()
        return _db
@pytest.fixture(scope='function')
def session(app, db):  # pylint: disable=redefined-outer-name, invalid-name
    """Return a function-scoped session."""
    with app.app_context():
        conn = db.engine.connect()
        txn = conn.begin()
        options = dict(bind=conn, binds={})
        sess = db.create_scoped_session(options=options)
        # establish  a SAVEPOINT just before beginning the test
        # (http://docs.sqlalchemy.org/en/latest/orm/session_transaction.html#using-savepoint)
        sess.begin_nested()
        @event.listens_for(sess(), 'after_transaction_end')
        def restart_savepoint(sess2, trans):  # pylint: disable=unused-variable
            # Detecting whether this is indeed the nested transaction of the test
            if trans.nested and not trans._parent.nested:  # pylint: disable=protected-access
                # Handle where test DOESN'T session.commit(),
                sess2.expire_all()
                sess.begin_nested()
        db.session = sess
        sql = text('select 1')
        sess.execute(sql)
        yield sess
        # Cleanup
        sess.remove()
        # This instruction rollsback any commit that were executed in the tests.
        txn.rollback()
        conn.close()
@pytest.fixture(scope='session')
def stan_server(docker_services):
    """Create the nats / stan services that the integration tests will use."""
    if os.getenv('TEST_NATS_DOCKER'):
        docker_services.start('nats')
        time.sleep(2)
    # TODO get the wait part working, as opposed to sleeping for 2s
    # public_port = docker_services.wait_for_service("nats", 4222)
    # dsn = "{docker_services.docker_ip}:{public_port}".format(**locals())
    # return dsn
@pytest.fixture(scope='function')
@pytest.mark.asyncio
async def stan(event_loop, client_id):
    """Create a stan connection for each function, to be used in the tests."""
    nc = Nats()
    sc = Stan()
    cluster_name = 'test-cluster'
    await nc.connect(io_loop=event_loop, name='entity.filing.worker')
    await sc.connect(cluster_name, client_id, nats=nc)
    yield sc
    # Teardown: close streaming connection before the underlying NATS one.
    await sc.close()
    await nc.close()
@pytest.fixture(scope='function')
@pytest.mark.asyncio
async def entity_stan(app, event_loop, client_id):
    """Create a stan connection for each function.
    Uses environment variables for the cluster name.
    """
    nc = Nats()
    sc = Stan()
    await nc.connect(io_loop=event_loop)
    cluster_name = os.getenv('STAN_CLUSTER_NAME')
    if not cluster_name:
        raise ValueError('Missing env variable: STAN_CLUSTER_NAME')
    await sc.connect(cluster_name, client_id, nats=nc)
    yield sc
    await sc.close()
    await nc.close()
@pytest.fixture(scope='function')
def future(event_loop):
    """Return a future that is used for managing function tests."""
    _future = asyncio.Future(loop=event_loop)
    return _future
@pytest.fixture
def create_mock_coro(mocker, monkeypatch):
    """Return a mocked coroutine, and optionally patch-it in."""
    def _create_mock_patch_coro(to_patch=None):
        mock = mocker.Mock()
        async def _coro(*args, **kwargs):
            return mock(*args, **kwargs)
        if to_patch:  # <-- may not need/want to patch anything
            monkeypatch.setattr(to_patch, _coro)
        return mock, _coro
    return _create_mock_patch_coro
|
1,265 |
source
|
#!/usr/bin/env python3
#
"""
一般椭圆方程的任意次有限元方法。
作者:西安交通大学数学与统计学院 杨迪
说明:FEALPy短课程第三次作业
版本:1.0
日期:31/07/2020
"""
import argparse
import numpy as np
from scipy.sparse.linalg import spsolve
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from fealpy.decorator import cartesian, barycentric
from fealpy.mesh import MeshFactory as MF
from fealpy.functionspace import ParametricLagrangeFiniteElementSpace
from fealpy.boundarycondition import DirichletBC
from fealpy.tools.show import showmultirate, show_error_table
class PDE:
    """
    Equation:
        -\\nabla\cdot(A(x)\\nabla u + b(x)u) + cu = f in \Omega
    B.C.:
        u = g_D on \partial\Omega
    Exact Solution:
        u = cos(pi*x)*cos(pi*y)
    Coefficients:
        A(x) = [10.0, -1.0; -1.0, 2.0]
        b(x) = [-1; -1]
        c(x) = 1 + x^2 + y^2
    """
    def __init__(self):
        pass
    def domain(self):
        # Unit square [0, 1] x [0, 1] as (xmin, xmax, ymin, ymax).
        return np.array([0, 1, 0, 1])
    @cartesian
    def solution(self, p):
        """
        The exact solution u = cos(pi*x)*cos(pi*y).
        Parameters
        ---------
        p : array of cartesian points; last axis holds (x, y)
        Examples
        -------
        p = np.array([0, 1], dtype=np.float64)
        p = np.array([[0, 1], [0.5, 0.5]], dtype=np.float64)
        """
        x = p[..., 0]
        y = p[..., 1]
        pi = np.pi
        val = np.cos(pi*x)*np.cos(pi*y)
        return val  # val.shape == x.shape
    @cartesian
    def METHOD_NAME(self, p):
        """
        The right hand side of convection-diffusion-reaction equation,
        derived by applying the operator to the exact solution.
        INPUT:
            p: array object,
        """
        x = p[..., 0]
        y = p[..., 1]
        pi = np.pi
        # diffusion part: -div(A grad u)
        val = 12*pi*pi*np.cos(pi*x)*np.cos(pi*y)
        val += 2*pi*pi*np.sin(pi*x)*np.sin(pi*y)
        # reaction part: c*u
        val += np.cos(pi*x)*np.cos(pi*y)*(x**2 + y**2 + 1)
        # convection part: -div(b u)
        val -= pi*np.cos(pi*x)*np.sin(pi*y)
        val -= pi*np.cos(pi*y)*np.sin(pi*x)
        return val
    @cartesian
    def gradient(self, p):
        """
        The gradient of the exact solution
        """
        x = p[..., 0]
        y = p[..., 1]
        pi = np.pi
        val = np.zeros(p.shape, dtype=np.float64)
        val[..., 0] = -pi*np.sin(pi*x)*np.cos(pi*y)
        val[..., 1] = -pi*np.cos(pi*x)*np.sin(pi*y)
        return val  # val.shape == p.shape
    @cartesian
    def diffusion_coefficient(self, p):
        # Constant anisotropic diffusion tensor A; independent of p.
        return np.array([[10.0, -1.0], [-1.0, 2.0]], dtype=np.float64)
    @cartesian
    def convection_coefficient(self, p):
        # Constant convection field b; independent of p.
        return np.array([-1.0, -1.0], dtype=np.float64)
    @cartesian
    def reaction_coefficient(self, p):
        # Spatially varying reaction coefficient c(x, y) = 1 + x^2 + y^2.
        x = p[..., 0]
        y = p[..., 1]
        return 1 + x**2 + y**2
    @cartesian
    def dirichlet(self, p):
        # Dirichlet data g_D taken from the exact solution.
        return self.solution(p)
## Command-line argument parsing (help strings intentionally kept in Chinese)
parser = argparse.ArgumentParser(description=
        """
        三角形, 四边形网格上求解一般椭圆问题的任意次有限元方法
        """)
parser.add_argument('--degree',
        default=1, type=int,
        help='Lagrange 有限元空间的次数, 默认为 1 次.')
parser.add_argument('--mtype',
        default='tri', type=str,
        help='网格类型, 默认为 tri, 即三角形网格, 还可以选择 quad, 即四边形网格.')
parser.add_argument('--ns',
        default=10, type=int,
        help='初始网格 X 与 Y 方向剖分的段数, 默认 10 段.')
parser.add_argument('--maxit',
        default=4, type=int,
        help='默认网格加密求解的次数, 默认加密求解 4 次')
args = parser.parse_args()
degree = args.degree
ns = args.ns
maxit = args.maxit
mtype = args.mtype
# Build the PDE data and the initial structured mesh on the unit square.
pde = PDE()
domain = pde.domain()
mesh = MF.boxmesh2d(domain, nx=ns, ny=ns, meshtype=mtype, p=degree)
errorType = ['$|| u - u_h ||_0$', '$|| \\nabla u - \\nabla u_h||_0$']
errorMatrix = np.zeros((2, maxit), dtype=mesh.ftype)
NDof = np.zeros(maxit, dtype=mesh.itype)
# Solve on a sequence of uniformly refined meshes to observe convergence.
for i in range(maxit):
    print('Step:', i)
    space = ParametricLagrangeFiniteElementSpace(mesh, p=degree)
    NDof[i] = space.number_of_global_dofs()
    uh = space.function()  # returns a finite element function, all dof values initialised to 0
    # Assemble diffusion + convection + reaction into one system matrix.
    A = space.stiff_matrix(c=pde.diffusion_coefficient)
    B = space.convection_matrix(c=pde.convection_coefficient)
    M = space.mass_matrix(c=pde.reaction_coefficient)
    F = space.source_vector(pde.METHOD_NAME)
    A += B
    A += M
    # Apply Dirichlet boundary conditions, then solve the sparse system.
    bc = DirichletBC(space, pde.dirichlet)
    A, F = bc.apply(A, F, uh)
    uh[:] = spsolve(A, F)
    errorMatrix[0, i] = space.integralalg.error(pde.solution, uh.value)
    errorMatrix[1, i] = space.integralalg.error(pde.gradient, uh.grad_value)
    if i < maxit-1:
        mesh.uniform_refine()
# Plot of the finite element solution
uh.add_plot(plt, cmap='rainbow')
# Convergence-order plot
showmultirate(plt, 0, NDof, errorMatrix, errorType,
        propsize=40)
# Print the error table in latex format
show_error_table(NDof, errorType, errorMatrix)
plt.show()
|
1,266 |
run job
|
import copy

from . import helpers
def METHOD_NAME(config):
    """Run the data-processing job phase.

    Registers the filetypes relevant for this phase and evaluates the
    'data_recipe' of the 'dataprocess' component via the helpers module.
    Returns the (mutated) config.
    """
    config["general"]["relevant_filetypes"] = [
        "log",
        "mon",
        "outdata",
        "restart_out",
        "bin",
        "config",
        "forcing",
        "input",
        "restart_in",
        "ignore",
    ]
    helpers.evaluate(config, "dataprocess", "data_recipe")
    return config
def _assemble_dataprocess_tasks(config):
"""
Generates all tasks for data processing which will be written to the run file.
Parameters
----------
data_file
File handle to which information should be written.
Returns
-------
data_task_list : list
The list of post commands which will be executed. These are written
to the run file.
"""
datafile = config["general"]["post_file"]
data_task_list = []
# First find the correct subjob_cluster:
this_cluster = config["general"]["job_type"]
config["general"]["this_cluster"] = copy.deepcopy(
config["general"]["workflow"]["subjob_clusters"][this_cluster]
)
# gather the information on the clusters to be submitted by this job.
config["general"]["next_clusters"] = {}
for cluster in config["general"]["this_cluster"]["next_submit"]:
config["general"]["next_clusters"].update(
config["general"]["workflow"]["subjob_clusters"][cluster]
)
for subcluster in config["general"]["next_clusters"]:
data_task_list.append(tasks_of_one_cluster(config, subcluster))
return data_task_list
def add_environment(env, config):
    """Turn an environment-producing callable into shell ``export`` lines.

    NOTE(review): the original inline comment below says this is known to be
    wrong — callers pass a *file name* (see ``tasks_of_one_subjob``), which is
    not callable. Needs correction before use.
    """
    env_dict = env(config)  # that wont work and needs correction!
    return export_string(env_dict)
def assemble_srun_command(scriptcall, config):
    """Wrap *scriptcall* in a batch-system (srun) invocation.

    NOTE(review): still a stub — it returns None, so callers that submit to
    the batch system will lose their script call.
    """
    ...
def add_scriptcall(scriptcall, cluster, config):
    """Decorate *scriptcall* according to the cluster's submission settings.

    Bug fix: the original called ``assemble_srun_command(script)`` — ``script``
    is undefined in this scope and the ``config`` argument was missing, so any
    cluster with ``submit_to_batch_system`` set raised a NameError.

    Parameters
    ----------
    scriptcall : str
        The shell command to run.
    cluster : dict
        Subjob-cluster configuration; ``submit_to_batch_system`` and
        ``order_in_cluster`` are read.
    config : dict
        Full experiment configuration (forwarded to the srun assembly).

    Returns
    -------
    str
        The possibly srun-wrapped command, with a trailing ``;`` when the
        cluster runs its jobs concurrently.
    """
    submit = cluster.get("submit_to_batch_system", False)
    order = cluster.get("order_in_cluster", False)
    if submit:
        scriptcall = assemble_srun_command(scriptcall, config)
    if order == "concurrent":
        # Concurrent subjobs are chained with ';' so they detach in the shell.
        scriptcall = scriptcall + ";"
    return scriptcall
def tasks_of_one_subjob(config, cluster, subjob):
    """Build the task list (environment preparation + script call) for one subjob.

    Bug fixes relative to the original:
    * ``config["general"]["worksflow"]`` -> ``"workflow"`` (typo; every other
      access in this module reads ``config["general"]["workflow"]``).
    * ``subjob_config.get(script, False)`` -> ``get("script", False)`` (the
      original looked the value up under an undefined name, raising NameError).

    Parameters
    ----------
    config : dict
        Full experiment configuration.
    cluster : dict
        Subjob-cluster configuration, forwarded to ``add_scriptcall``.
    subjob : str
        Key of the subjob under ``config["general"]["workflow"]["subjobs"]``.

    Returns
    -------
    list
        Zero, one or two task strings for the run file.
    """
    task_list = []
    subjob_config = config["general"]["workflow"]["subjobs"][subjob]
    env_preparation = subjob_config.get("env_preparation", False)
    scriptdir = subjob_config.get("script_dir", False)
    script = subjob_config.get("script", False)
    if env_preparation:
        env_preparation = assemble_filename(env_preparation, scriptdir, config)
        task_list.append(add_environment(env_preparation, config))
    if script:
        script = assemble_filename(script, scriptdir, config)
        task_list.append(add_scriptcall(script, cluster, config))
    return task_list
def tasks_of_one_cluster(config, cluster):
    """Collect the task list of every subjob belonging to *cluster*.

    Parameters
    ----------
    config : dict
        Full experiment configuration.
    cluster : str
        Key of the cluster under ``config["general"]["next_clusters"]``.

    Returns
    -------
    list
        One ``tasks_of_one_subjob`` result per subjob, in cluster order.
    """
    cluster_cfg = config["general"]["next_clusters"][cluster]
    return [
        tasks_of_one_subjob(config, cluster, subjob)
        for subjob in cluster_cfg["subjobs"]
    ]
# NOTE(review): orphaned fragment — it references ``config``, ``post_file``
# and ``post_task_list`` that are not defined at module level, and it ends in
# a ``return`` outside any function. This looks like the body of a lost
# "assemble postprocess tasks" function; its ``def`` line must be restored
# before this file can even be imported.
for component in config["general"]["valid_model_names"]:
    post_file.write(40 * "+ " + "\n")
    post_file.write("Generating post-processing tasks for: %s \n" % component)
    post_task_list.append("\n#Postprocessing %s\n" % component)
    post_task_list.append(
        "cd " + config[component]["experiment_outdata_dir"] + "\n"
    )
    pconfig_tasks = config[component].get("postprocess_tasks", {})
    pconfig_scripts = config[component].get("postprocessing_scripts", {})
    post_file.write("Configuration for post processing: %s \n" % pconfig_tasks)
    for script in pconfig_scripts:
        # NOTE(review): these .get calls ignore the loop variable ``script``
        # and always read the same three keys — presumably the loop should
        # iterate over a list of script dicts; confirm against the config.
        postscript_name = pconfig_scripts.get("postprocessing_script_name", None)
        postscript_dir = pconfig_scripts.get("postprocessing_dir", None)
        envscript_name = pconfig_scripts.get("postprocessing_envscript_name", None)
        postscript_name = assemble_filename(postscript_name, postscript_dir, config)
        envscript_name = assemble_filename(envscript_name, postscript_dir, config)
        if envscript_name:
            # NOTE(review): ``envscript_name`` is a file-name string here, so
            # calling it will fail — same known issue as add_environment.
            environment_dict = envscript_name(config)
            post_task_list += export_string(environment_dict)
        if postscript_name:
            post_task_list.append(postscript_name)
    post_task_list.append("cd -\n")
config["general"]["post_task_list"] = post_task_list
return config
def assemble_filename(filename, dirname, config):
    """Resolve a (possibly relative) script file name to a full path.

    Bug fix: the original fallback branches read ``["general"]["started_from"]``
    — indexing a bare list literal with a string (TypeError). The intended
    expression is ``config["general"]["started_from"]``.

    Resolution rules, in order:
    1. absolute *filename* is returned unchanged;
    2. a *filename* starting with ``.`` or a *dirname* of ``.``/``./`` is
       resolved against the directory the run was started from;
    3. a non-empty *dirname* is prepended;
    4. otherwise fall back to the start directory.
    """
    if filename.startswith("/"):
        return filename
    if filename.startswith(".") or dirname == "." or dirname == "./":
        return os.path.join(config["general"]["started_from"], filename)
    if dirname:
        return os.path.join(dirname, filename)
    return os.path.join(config["general"]["started_from"], filename)
def export_string(environment_dict):
    """Turn an environment dict into shell ``export NAME=value`` lines.

    Bug fixes relative to the original:
    * it appended *one-element lists* (``[f"export ..."]``) instead of plain
      strings, so callers doing ``post_task_list += export_string(...)``
      received nested lists;
    * the local accumulator shadowed the function's own name.

    Returns
    -------
    list[str]
        One ``export`` statement per dict entry, in insertion order.
    """
    export_lines = []
    for name, value in environment_dict.items():
        export_lines.append(f"export {name}={value}")
    return export_lines
# TODO(review): decide whether the commented-out write_simple_postscript below
# is still needed or can be deleted.
# def write_simple_postscript(config):
# batch_system.write_simple_runscript(config)
# return config
|
1,267 |
test timeout overflow
|
# Test case for the select.devpoll() function
# Initial tests are copied as is from "test_poll.py"
import os
import random
import select
import unittest
from test.support import cpython_only
if not hasattr(select, 'devpoll') :
raise unittest.SkipTest('test works only on Solaris OS family')
def find_ready_matching(ready, flag):
    """Return the fds from *ready* ``(fd, eventmask)`` pairs whose mask
    contains *flag*, preserving order."""
    return [fd for fd, mode in ready if mode & flag]
class DevPollTests(unittest.TestCase):
    """Tests for ``select.devpoll`` objects (Solaris ``/dev/poll``); the
    module-level guard above skips the whole file on other platforms."""

    def test_devpoll1(self):
        # Basic functional test of poll object
        # Create a bunch of pipe and test that poll works with them.
        p = select.devpoll()

        NUM_PIPES = 12
        MSG = b" This is a test."
        MSG_LEN = len(MSG)

        readers = []
        writers = []
        r2w = {}  # read-end fd -> matching write-end fd
        w2r = {}  # write-end fd -> matching read-end fd

        for i in range(NUM_PIPES):
            rd, wr = os.pipe()
            p.register(rd)
            # register + modify exercises both registration paths.
            p.modify(rd, select.POLLIN)
            p.register(wr, select.POLLOUT)
            readers.append(rd)
            writers.append(wr)
            r2w[rd] = wr
            w2r[wr] = rd

        bufs = []

        # Drain one random writable pipe per iteration until all are closed.
        while writers:
            ready = p.poll()
            ready_writers = find_ready_matching(ready, select.POLLOUT)
            if not ready_writers:
                self.fail("no pipes ready for writing")
            wr = random.choice(ready_writers)
            os.write(wr, MSG)

            ready = p.poll()
            ready_readers = find_ready_matching(ready, select.POLLIN)
            if not ready_readers:
                self.fail("no pipes ready for reading")
            # Exactly the pipe just written to must be readable.
            self.assertEqual([w2r[wr]], ready_readers)
            rd = ready_readers[0]
            buf = os.read(rd, MSG_LEN)
            self.assertEqual(len(buf), MSG_LEN)
            bufs.append(buf)
            os.close(r2w[rd]) ; os.close(rd)
            p.unregister(r2w[rd])
            p.unregister(rd)
            writers.remove(r2w[rd])

        self.assertEqual(bufs, [MSG] * NUM_PIPES)

    def METHOD_NAME(self):
        # Timeouts must fit the underlying C int; -1 means "wait forever"
        # and is the only accepted negative value.
        pollster = select.devpoll()
        w, r = os.pipe()
        pollster.register(w)

        pollster.poll(-1)
        self.assertRaises(OverflowError, pollster.poll, -2)
        self.assertRaises(OverflowError, pollster.poll, -1 << 31)
        self.assertRaises(OverflowError, pollster.poll, -1 << 64)

        pollster.poll(0)
        pollster.poll(1)
        pollster.poll(1 << 30)
        self.assertRaises(OverflowError, pollster.poll, 1 << 31)
        self.assertRaises(OverflowError, pollster.poll, 1 << 63)
        self.assertRaises(OverflowError, pollster.poll, 1 << 64)

    def test_close(self):
        open_file = open(__file__, "rb")
        self.addCleanup(open_file.close)
        fd = open_file.fileno()
        devpoll = select.devpoll()

        # test fileno() method and closed attribute
        self.assertIsInstance(devpoll.fileno(), int)
        self.assertFalse(devpoll.closed)

        # test close()
        devpoll.close()
        self.assertTrue(devpoll.closed)
        self.assertRaises(ValueError, devpoll.fileno)

        # close() can be called more than once
        devpoll.close()

        # operations must fail with ValueError("I/O operation on closed ...")
        self.assertRaises(ValueError, devpoll.modify, fd, select.POLLIN)
        self.assertRaises(ValueError, devpoll.poll)
        self.assertRaises(ValueError, devpoll.register, fd, select.POLLIN)
        self.assertRaises(ValueError, devpoll.unregister, fd)

    def test_fd_non_inheritable(self):
        # The /dev/poll fd must not leak into child processes.
        devpoll = select.devpoll()
        self.addCleanup(devpoll.close)
        self.assertEqual(os.get_inheritable(devpoll.fileno()), False)

    def test_events_mask_overflow(self):
        pollster = select.devpoll()
        w, r = os.pipe()
        pollster.register(w)
        # Issue #17919
        self.assertRaises(ValueError, pollster.register, 0, -1)
        self.assertRaises(OverflowError, pollster.register, 0, 1 << 64)
        self.assertRaises(ValueError, pollster.modify, 1, -1)
        self.assertRaises(OverflowError, pollster.modify, 1, 1 << 64)

    @cpython_only
    def test_events_mask_overflow_c_limits(self):
        # The event mask is an unsigned short at the C level.
        from _testcapi import USHRT_MAX
        pollster = select.devpoll()
        w, r = os.pipe()
        pollster.register(w)
        # Issue #17919
        self.assertRaises(OverflowError, pollster.register, 0, USHRT_MAX + 1)
        self.assertRaises(OverflowError, pollster.modify, 1, USHRT_MAX + 1)
if __name__ == '__main__':
unittest.main()
|
1,268 |
test update bc b month
|
# Copyright 2019 Akretion - Renato Lima <[email protected]>
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
import logging
from datetime import datetime
from os import environ
from unittest import mock
from dateutil.relativedelta import relativedelta
from decorator import decorate
from odoo import fields
from odoo.tests import SavepointCase
_logger = logging.getLogger(__name__)
def _not_every_day_test(method, self, modulo=7, remaining=1):
    """Run *method* roughly one day out of *modulo* (or when CI_FORCE_IBPT
    is set); on other days return a no-op callable that only logs the skip."""
    run_today = datetime.now().day % modulo == remaining
    if run_today or environ.get("CI_FORCE_IBPT"):
        return method(self)
    return lambda: _logger.info(
        "Skipping test today because datetime.now().day %% %s != %s"
        % (modulo, remaining)
    )
def not_every_day_test(method):
    """
    Decorate test methods to query the remote web service only
    1 day out of 7 and skip tests otherwise.
    Indeed the IBPT webservice often returns errors and it sucks
    to crash the entire l10n-brazil test suite because of this.
    The CI_FORCE_IBPT env var can be set to force the test anyhow.

    ``decorator.decorate`` is used (rather than functools.wraps) so the
    wrapped method keeps its exact signature for the test runner.
    """
    return decorate(method, _not_every_day_test)
def mocked_requests_get(*args, **kwargs):
    """Stand-in for ``requests.get`` returning a fixed BCB quote payload
    (opening and closing rates for two consecutive days)."""

    class MockResponse:
        """Minimal duck-type of a requests response as used by the provider."""

        def __init__(self, json_data, status_code):
            self.json_data = json_data
            self.status_code = status_code

        def ok(self):
            return True

        def json(self):
            return self.json_data

    # (cotacaoCompra, cotacaoVenda, dataHoraCotacao, tipoBoletim)
    quotes = [
        (4.9786, 4.9792, "2023-05-29 10:10:20.119", "Abertura"),
        (4.9948, 4.9954, "2023-05-29 13:10:18.54", "Fechamento"),
        (5.0497, 5.0503, "2023-05-30 10:09:35.311", "Abertura"),
        (5.0587, 5.0593, "2023-05-30 13:11:51.392", "Fechamento"),
    ]
    payload = {
        "value": [
            {
                "paridadeCompra": 1.0,
                "paridadeVenda": 1.0,
                "cotacaoCompra": buy,
                "cotacaoVenda": sell,
                "dataHoraCotacao": quoted_at,
                "tipoBoletim": bulletin,
            }
            for buy, sell, quoted_at, bulletin in quotes
        ]
    }
    return MockResponse(payload, 200)
class TestCurrencyRateUpdateBCB(SavepointCase):
    """Tests for the BCB currency-rate provider on a BRL-based company.

    Most tests hit the real web service, so they are throttled by the
    ``not_every_day_test`` decorator; ``test_mock`` runs unconditionally
    against the canned payload above.
    """

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.Company = cls.env["res.company"]
        cls.CurrencyRate = cls.env["res.currency.rate"]
        cls.CurrencyRateProvider = cls.env["res.currency.rate.provider"]
        cls.today = fields.Date.today()
        cls.brl_currency = cls.env.ref("base.BRL")
        cls.brl_currency.write({"active": True})
        cls.eur_currency = cls.env.ref("base.EUR")
        cls.usd_currency = cls.env.ref("base.USD")
        cls.usd_currency.write({"active": True})
        # A dedicated BRL company so BCB (which quotes against BRL) applies.
        cls.company = cls.Company.create(
            {"name": "Test company BRL", "currency_id": cls.brl_currency.id}
        )
        cls.env.user.company_ids += cls.company
        cls.env.user.company_id = cls.company
        cls.bcb_provider = cls.CurrencyRateProvider.create(
            {
                "service": "BCB",
                "currency_ids": [
                    (4, cls.usd_currency.id),
                    (4, cls.eur_currency.id),
                ],
            }
        )
        # Start from a clean slate so assertions only see provider-created rates.
        cls.CurrencyRate.search([]).unlink()

    @mock.patch("requests.get", side_effect=mocked_requests_get)
    def test_mock(self, mock_get):
        self.bcb_provider._update(self.today - relativedelta(days=2), self.today)
        rates = self.CurrencyRate.search(
            [("currency_id", "=", self.usd_currency.id)], limit=1
        )
        self.assertTrue(rates)
        self.CurrencyRate.search([("currency_id", "=", self.usd_currency.id)]).unlink()

    @not_every_day_test
    def test_get_supported_currencies(self):
        currencies = self.bcb_provider._get_supported_currencies()
        self.assertTrue(currencies)

    @not_every_day_test
    def test_update_BCB_today(self):
        """No checks are made since today may not be a banking day"""
        self.bcb_provider._update(self.today, self.today)
        self.CurrencyRate.search([("currency_id", "=", self.usd_currency.id)]).unlink()

    @not_every_day_test
    def METHOD_NAME(self):
        # One month back is guaranteed to contain banking days.
        self.bcb_provider._update(self.today - relativedelta(months=1), self.today)
        rates = self.CurrencyRate.search(
            [("currency_id", "=", self.usd_currency.id)], limit=1
        )
        self.assertTrue(rates)
        self.CurrencyRate.search([("currency_id", "=", self.usd_currency.id)]).unlink()

    @not_every_day_test
    def test_update_BCB_year(self):
        self.bcb_provider._update(self.today - relativedelta(years=1), self.today)
        rates = self.CurrencyRate.search(
            [("currency_id", "=", self.usd_currency.id)], limit=1
        )
        self.assertTrue(rates)
        self.CurrencyRate.search([("currency_id", "=", self.usd_currency.id)]).unlink()

    @not_every_day_test
    def test_update_BCB_scheduled(self):
        # Force the cron path (_scheduled_update) instead of a manual update.
        self.bcb_provider.interval_type = "days"
        self.bcb_provider.interval_number = 14
        self.bcb_provider.next_run = self.today - relativedelta(days=1)
        self.bcb_provider._scheduled_update()
        rates = self.CurrencyRate.search(
            [("currency_id", "=", self.usd_currency.id)], limit=1
        )
        self.assertTrue(rates)
        self.CurrencyRate.search([("currency_id", "=", self.usd_currency.id)]).unlink()

    @not_every_day_test
    def test_update_BCB_no_base_update(self):
        self.bcb_provider.interval_type = "days"
        self.bcb_provider.interval_number = 14
        self.bcb_provider.next_run = self.today - relativedelta(days=1)
        self.bcb_provider._scheduled_update()
        # Rates must be created for the test company only.
        rates = self.CurrencyRate.search(
            [
                ("company_id", "=", self.company.id),
                (
                    "currency_id",
                    "in",
                    [self.usd_currency.id, self.eur_currency.id],
                ),
            ],
            limit=1,
        )
        self.assertTrue(rates)
        self.CurrencyRate.search([("company_id", "=", self.company.id)]).unlink()
|
1,269 |
release
|
"""
Copyright (c) 2018-2023 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from contextlib import contextmanager
import platform
import sys
import importlib
from pathlib import Path
from .base_evaluator import BaseEvaluator
from ..presenters import generate_csv_report
from ..metrics import MetricsExecutor
class ModuleEvaluator(BaseEvaluator):
    """Evaluator that loads a custom evaluator class at runtime (named by
    ``config['module']``, optionally found via ``config['python_path']``)
    and delegates every operation to that wrapped instance."""

    def __init__(self, internal_module, config):
        super().__init__()
        # The wrapped evaluator instance; all public methods forward to it.
        self._internal_module = internal_module
        self._config = config

    @classmethod
    def from_configs(cls, config, *args, **kwargs):
        """Alternate constructor: import the custom module and build the
        wrapped evaluator from ``config['module_config']``."""
        module = config['module']
        module_config = config.get('module_config')
        python_path = config.get('python_path')
        kwargs['orig_config'] = config
        return cls(load_module(module, python_path).from_configs(module_config, *args, **kwargs), config)

    def process_dataset(self, *args, stored_predictions=None, progress_reporter=None, **kwargs):
        self._internal_module.process_dataset(
            *args, stored_predictions=stored_predictions, progress_reporter=progress_reporter, **kwargs
        )

    def compute_metrics(self, print_results=True, ignore_results_formatting=False, ignore_metric_reference=False):
        return self._internal_module.compute_metrics(print_results, ignore_results_formatting, ignore_metric_reference)

    def print_metrics_results(self, ignore_results_formatting=False, ignore_metric_reference=False):
        self._internal_module.print_metrics(ignore_results_formatting, ignore_metric_reference)

    def extract_metrics_results(self, print_results=True, ignore_results_formatting=False,
                                ignore_metric_reference=False):
        return self._internal_module.extract_metrics_results(print_results, ignore_results_formatting,
                                                             ignore_metric_reference)

    def METHOD_NAME(self):
        # Release the wrapped module's resources, then drop our reference so
        # it can be garbage collected.
        self._internal_module.METHOD_NAME()
        del self._internal_module

    def reset(self):
        self._internal_module.reset()

    def load_network(self, network=None):
        self._internal_module.load_network(network)

    def load_network_from_ir(self, models_dict):
        self._internal_module.load_network_from_ir(models_dict)

    def get_network(self):
        return self._internal_module.get_network()

    def get_metrics_attributes(self):
        return self._internal_module.get_metrics_attributes()

    def register_metric(self, metric_config):
        self._internal_module.register_metric(metric_config)

    def register_postprocessor(self, postprocessing_config):
        self._internal_module.register_postprocessor(postprocessing_config)

    def register_dumped_annotations(self):
        self._internal_module.register_dumped_annotations()

    def select_dataset(self, dataset_tag):
        self._internal_module.select_dataset(dataset_tag)

    def process_dataset_async(
            self,
            nreq=None,
            subset=None,
            num_images=None,
            check_progress=False,
            dataset_tag='',
            output_callback=None,
            allow_pairwise_subset=False,
            dump_prediction_to_annotation=False,
            **kwargs
    ):
        # NOTE(review): ``nreq`` is accepted but not forwarded, and the call
        # lands on the synchronous process_dataset — confirm that custom
        # modules are expected to handle async-ness themselves.
        self._internal_module.process_dataset(
            subset=subset,
            num_images=num_images,
            check_progress=check_progress,
            dataset_tag=dataset_tag,
            output_callback=output_callback,
            allow_pairwise_subset=allow_pairwise_subset,
            dump_prediction_to_annotation=dump_prediction_to_annotation,
            **kwargs
        )

    @property
    def dataset(self):
        return self._internal_module.dataset

    @staticmethod
    def get_processing_info(config):
        module = config['module']
        python_path = config.get('python_path')
        return load_module(module, python_path).get_processing_info(config)

    def send_processing_info(self, sender):
        """Collect telemetry details; a no-op (empty dict) without a sender."""
        if sender is None:
            return {}
        module_config = self._config['module_config']
        launcher_config = module_config['launchers'][0]
        framework = launcher_config['framework']
        device = launcher_config.get('device', 'CPU')
        details = {
            'custom_evaluator': self._config['module'],
            'platform': platform.system(),
            'framework': framework if framework != 'dlsdk' else 'openvino',
            'device': device.upper(),
            'inference_mode': 'sync'
        }
        details.update(self._internal_module.send_processing_info(sender))
        return details

    def set_profiling_dir(self, profiler_dir):
        self._internal_module.set_profiling_dir(profiler_dir)

    @property
    def dataset_size(self):
        return self._internal_module.dataset_size

    @classmethod
    def provide_metric_references(cls, conf, return_header=True):
        """Build a CSV report of metric reference values for the first
        dataset of the custom module's config."""
        processing_info = cls.get_processing_info(conf)
        dataset_config = conf['module_config']['datasets'][0]
        metric_dispatcher = MetricsExecutor(dataset_config.get('metrics', []), postpone_metrics=True)
        extracted_results, extracted_meta = [], []
        for result_presenter, metric_result in metric_dispatcher.get_metric_result_template(
                dataset_config.get('metrics', []), False):
            result, metadata = result_presenter.extract_result(metric_result, names_from_refs=True)
            if isinstance(result, list):
                extracted_results.extend(result)
                extracted_meta.extend(metadata)
            else:
                extracted_results.append(result)
                extracted_meta.append(metadata)
        header, report = generate_csv_report(processing_info, extracted_results, 0, extracted_meta)
        if not return_header:
            return report
        return header, report

    def set_launcher_property(self, property_dict):
        self._internal_module.set_launcher_property(property_dict)
def load_module(model_cls, python_path=None):
    """Resolve a dotted path ``pkg.module.ClassName`` to the class object.

    When ``pkg/module.py`` exists next to this file the import is performed
    relative to this package; otherwise an absolute import is attempted,
    optionally with *python_path* temporarily appended to ``sys.path``.
    """
    *module_parts, class_name = model_cls.split(".")
    dotted = ".".join(module_parts)
    local_file = Path(__file__).parent / ("/".join(module_parts) + ".py")
    if local_file.exists():
        # Module shipped alongside this package: import relatively.
        with append_to_path(python_path):
            package = ".".join(__name__.split(".")[:-1])
            module = importlib.import_module("." + dotted, package=package)
            return getattr(module, class_name)
    with append_to_path(python_path):
        module = importlib.import_module(dotted)
        return getattr(module, class_name)
@contextmanager
def append_to_path(path):
    """Temporarily append *path* to ``sys.path`` (no-op when *path* is falsy).

    Bug fix: the original removed the entry after a bare ``yield``, so an
    exception raised by the wrapped code left *path* on ``sys.path``. The
    removal now runs in a ``finally`` block.
    """
    if path:
        sys.path.append(path)
    try:
        yield
    finally:
        if path:
            sys.path.remove(path)
|
1,270 |
get framework config for customer
|
"""Framework Configuration utilities"""
import configparser
import time
import os
import random
import string
import datetime
import threading
from processor.helper.file.file_utils import exists_file, exists_dir
# Process-wide caches, resolved lazily on first use (see framework_dir,
# framework_config and framework_currentdata below).
FRAMEWORKDIR = None
FRAMEWORKCONFIG = None
CURRENTDATA = None
# Config-file section and key names.
DATABASE = 'MONGODB'
TESTS = 'TESTS'
DBTESTS = 'database'
DBNAME = 'dbname'
DBURL = 'dburl'
CFGFILE = 'config.ini'
# Database-usage modes for test runs.
NONE = 'NONE'
SNAPSHOT = 'SNAPSHOT'
FULL = 'FULL'
REMOTE = 'REMOTE'
SINGLETEST = 'singletest'
EXCLUSION = 'exclusion'
CUSTOMER = "customer"
DBVALUES = [NONE, SNAPSHOT, FULL, REMOTE]
# Run types controlling whether crawling and/or compliance checks execute.
CRAWL_AND_COMPLIANCE = "CRAWL_AND_COMPLIANCE"
CRAWL = "CRAWL"
COMPLIANCE = "COMPLIANCE"
RUN_TYPE = [CRAWL_AND_COMPLIANCE, CRAWL, COMPLIANCE]
# Process-wide cache dict (see get_cache_data / set_cache_data).
CACHEDATA = None
def generateid(name):
    """Return a random lowercase id.

    Format is ``<name>_<5 letters>_<4 digits>`` (or ``<letters>_<digits>``
    when *name* is falsy). Note the original local names suggested the
    opposite order — the first random segment is letters, the second digits.
    """
    letters = ''.join(random.choice(string.ascii_letters) for _ in range(5))
    digits = ''.join(random.choice(string.digits) for _ in range(4))
    if name:
        raw = f"{name}_{letters}_{digits}"
    else:
        raw = f"{letters}_{digits}"
    return raw.lower()
def parseint(value, default=0):
    """Parse *value* as an int, returning *default* on failure.

    Improvement: the original used a bare ``except`` that swallowed every
    exception (including KeyboardInterrupt); only the exceptions ``int()``
    can raise are caught now.
    """
    try:
        return int(value)
    except (TypeError, ValueError, OverflowError):
        return default
def parsebool(val, defval=False):
    """Parse a boolean from *val*; falsy input yields *defval*.

    The strings 'true'/'false' (any case) map directly; any other truthy
    value is parsed as an int and converted to bool.
    """
    if not val:
        return defval
    if isinstance(val, str) and val.lower() in ('false', 'true'):
        return val.lower() == 'true'
    return bool(parseint(val))
def get_framework_currentdata_for_customer(space_id):
    """Return the framework currentdata file path for customer."""
    # NOTE(review): cached process-wide — the cache ignores ``space_id``, so
    # after the first call every customer gets the same path. Confirm this is
    # intended for multi-tenant runs.
    global CURRENTDATA
    if CURRENTDATA:
        return CURRENTDATA
    # Microsecond timestamp plus a random suffix keeps run files unique.
    CURRENTDATA = '%s/config/%s/rundata_%d_%s' % (framework_dir(), space_id, int(time.time() * 1000000), generateid(None))
    return CURRENTDATA
def get_cache_data():
    """Return the module-level cache dict, creating it when unset/empty."""
    global CACHEDATA
    if not CACHEDATA:
        CACHEDATA = {}
    return CACHEDATA
def set_cache_data(ctxdata):
    """Replace the module-level cache with *ctxdata*.

    Bug fix: the original lacked the ``global`` declaration, so the
    assignment created a function-local name and the module-level CACHEDATA
    was never updated (the call was a silent no-op).
    """
    global CACHEDATA
    CACHEDATA = ctxdata
def framework_currentdata():
    """Return the framework current data."""
    # Per-thread customer override: a "<thread-id>_SPACE_ID" env var routes
    # the lookup to the customer-specific path.
    # NOTE(review): threading.currentThread() is deprecated in favor of
    # threading.current_thread() — consider updating module-wide.
    space_id = os.getenv(str(threading.currentThread().ident) + "_SPACE_ID", None)
    if space_id:
        return get_framework_currentdata_for_customer(space_id)
    else:
        global CURRENTDATA
        if CURRENTDATA:
            return CURRENTDATA
        timestamp_now = int(time.time())
        dt_object = datetime.datetime.fromtimestamp(timestamp_now)
        # Shard run files into year/month/day directories.
        path_add = '/%s/%s/%s' %(dt_object.year, dt_object.month, dt_object.day)
        full_path = "".join([framework_dir(), "/rundata", path_add])
        if not os.path.exists(full_path):
            os.makedirs(full_path)
        CURRENTDATA = '%s/rundata_%d_%s' % (full_path, (timestamp_now * 100000), generateid(None))
        return CURRENTDATA
def framework_config():
    """Return the framework config file."""
    # Per-thread customer override, same mechanism as framework_currentdata().
    space_id = os.getenv(str(threading.currentThread().ident) + "_SPACE_ID", None)
    if space_id:
        return METHOD_NAME(space_id)
    else:
        global FRAMEWORKCONFIG
        if FRAMEWORKCONFIG:
            return FRAMEWORKCONFIG
        FRAMEWORKCONFIG = '%s/%s' % (framework_dir(), CFGFILE)
        return FRAMEWORKCONFIG
def METHOD_NAME(space_id):
    """Return the per-customer config.ini path (config/<space_id>/config.ini).

    Unlike framework_config(), the result is recomputed on every call; the
    module-level FRAMEWORKCONFIG cache is deliberately left untouched.
    """
    return '%s/config/%s/%s' % (framework_dir(), space_id, CFGFILE)
def get_base_log_dir():
    """Return the LOGDIR environment variable, or None when unset."""
    return os.environ.get('LOGDIR')
def framework_dir():
    """Return (and cache) the top-level framework directory.

    The FRAMEWORKDIR environment variable wins when it names an existing
    directory; otherwise the current working directory is used.
    """
    global FRAMEWORKDIR
    if FRAMEWORKDIR:
        return FRAMEWORKDIR
    env_dir = os.getenv('FRAMEWORKDIR')
    FRAMEWORKDIR = env_dir if env_dir and exists_dir(env_dir) else os.getcwd()
    return FRAMEWORKDIR
def get_config_data(config_file):
    """Parse *config_file* into a ConfigParser; None when the file is absent."""
    if not exists_file(config_file):
        return None
    parser = configparser.ConfigParser(allow_no_value=True)
    parser.read(config_file)
    return parser
def config_value(section, key, configfile=None, default=None):
    """Look up *key* in *section* of the given config file.

    Falls back to the framework's default config file when *configfile* is
    not supplied, and to *default* when the file, section or key is missing.
    """
    cfg = get_config_data(configfile or framework_config())
    if cfg and section in cfg:
        return cfg.get(section, key, fallback=default)
    return default
def get_test_json_dir():
    """Path holding the test containers, with any '//' collapsed to '/'."""
    container_folder = config_value('TESTS', 'containerFolder')
    joined = '%s/%s' % (framework_dir(), container_folder)
    return joined.replace('//', '/')
def container_exists(container):
    """Return True when *container*'s directory exists under the test dir.

    Improvement: replaced the redundant ``True if X else False`` with a plain
    ``bool()`` conversion.
    """
    container_dir = '%s/%s' % (get_test_json_dir(), container)
    return bool(exists_dir(container_dir))
|
1,271 |
hostname or filename
|
import jinja2
from dateutil import parser
from h.presenters.document_html import DocumentHTMLPresenter
class AnnotationHTMLPresenter:
    """Wraps Annotation model objects and adds some HTML properties."""

    def __init__(self, annotation):
        self.annotation = annotation

        # Document presentation is delegated when the annotation has one.
        if self.annotation.document:
            self.document = DocumentHTMLPresenter(self.annotation.document)
        else:
            self.document = None

    @property
    def uri(self):
        # HTML-escaped so the value can be interpolated into templates safely.
        return jinja2.escape(self.annotation.target_uri)

    @property
    def text_rendered(self):
        """
        Get the body text of this annotation.

        This return value of this field is marked safe because it is rendered
        to HTML on write by :py:func:`h.util.markdown.render`, which must take
        care of all necessary escaping.
        """
        if self.annotation.text_rendered:
            return jinja2.Markup(self.annotation.text_rendered)
        return jinja2.Markup("")

    @property
    def quote(self):
        """Get the text in the document which this annotation refers to."""
        selection = self._get_selection()
        if selection:
            return jinja2.escape(selection)
        return ""

    @property
    def description(self):
        """
        Get an HTML-formatted description of this annotation.

        The description contains the target text that the user selected to
        annotate, as a <blockquote>, and the body text of the annotation
        itself.
        """
        description = ""
        selection = self._get_selection()
        if selection:
            selection = jinja2.escape(selection)
            description += f"<blockquote>{selection}</blockquote>"
        text = self.annotation.text
        if text:
            text = jinja2.escape(text)
            description += f"{text}"
        return description

    @property
    def created_day_string(self):
        """
        Get a simple created day string for this annotation.

        Returns a day string like '2015-03-11' from the annotation's 'created'
        date.
        """
        created_string = jinja2.escape(self.annotation.created)
        return parser.parse(created_string).strftime("%Y-%m-%d")

    @property
    def document_link(self):
        """Return a link to this annotation's document."""
        if self.document:
            return self.document.link
        return ""

    @property
    def filename(self):
        """Return the filename of this annotation's document."""
        if self.document:
            return self.document.filename
        return ""

    @property
    def METHOD_NAME(self):
        """Return the hostname of this annotation's document."""
        if self.document:
            return self.document.METHOD_NAME
        return ""

    @property
    def href(self):
        """Return an href for this annotation's document, or ''."""
        if self.document:
            return self.document.href
        return ""

    @property
    def link_text(self):
        """Return some link text for this annotation's document."""
        if self.document:
            return self.document.link_text
        return ""

    @property
    def title(self):
        """Return a title for this annotation."""
        if self.document:
            return self.document.title
        return ""

    # Explicitly forward some annotation properties for convenient access.

    @property
    def id(self):
        return self.annotation.id

    @property
    def created(self):
        return self.annotation.created

    @property
    def updated(self):
        return self.annotation.updated

    @property
    def userid(self):
        return self.annotation.userid

    @property
    def username(self):
        # Assumes userid shaped like 'scheme:username@authority' — TODO confirm.
        return self.annotation.userid.split(":")[1].split("@")[0]

    @property
    def shared(self):
        return self.annotation.shared

    @property
    def tags(self):
        return self.annotation.tags

    def _get_selection(self):
        # First selector with an "exact" quote wins; None when there is none.
        selectors = self.annotation.target_selectors
        for selector in selectors:
            if "exact" in selector:
                return selector["exact"]

        return None
|
1,272 |
url
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
    "nginx deployment certificate show",
)
class Show(AAZCommand):
    """Get the properties of a specific Nginx certificate.

    :example: Certificate Get
        az nginx deployment certificate show --certificate-name myCertificate --deployment-name myDeployment --resource-group myResourceGroup
    """

    # NOTE: generated by aaz-dev-tools — prefer regenerating over hand edits.
    _aaz_info = {
        "version": "2022-08-01",
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/nginx.nginxplus/nginxdeployments/{}/certificates/{}", "2022-08-01"],
        ]
    }

    def _handler(self, command_args):
        super()._handler(command_args)
        self._execute_operations()
        return self._output()

    _args_schema = None

    @classmethod
    def _build_arguments_schema(cls, *args, **kwargs):
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super()._build_arguments_schema(*args, **kwargs)

        # define Arg Group ""

        _args_schema = cls._args_schema
        _args_schema.certificate_name = AAZStrArg(
            options=["-n", "--name", "--certificate-name"],
            help="The name of certificate",
            required=True,
            id_part="child_name_1",
        )
        _args_schema.deployment_name = AAZStrArg(
            options=["--deployment-name"],
            help="The name of targeted Nginx deployment",
            required=True,
            id_part="name",
        )
        _args_schema.resource_group = AAZResourceGroupNameArg(
            required=True,
        )
        return cls._args_schema

    def _execute_operations(self):
        self.CertificatesGet(ctx=self.ctx)()

    def _output(self, *args, **kwargs):
        result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
        return result

    class CertificatesGet(AAZHttpOperation):
        # Single GET against the management plane; only 200 is a success.
        CLIENT_TYPE = "MgmtClient"

        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            if session.http_response.status_code in [200]:
                return self.on_200(session)

            return self.on_error(session.http_response)

        @property
        def METHOD_NAME(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Nginx.NginxPlus/nginxDeployments/{deploymentName}/certificates/{certificateName}",
                **self.url_parameters
            )

        @property
        def method(self):
            return "GET"

        @property
        def error_format(self):
            return "ODataV4Format"

        @property
        def query_parameters(self):
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2022-08-01",
                    required=True,
                ),
            }
            return parameters

        @property
        def url_parameters(self):
            parameters = {
                **self.serialize_url_param(
                    "certificateName", self.ctx.args.certificate_name,
                    required=True,
                ),
                **self.serialize_url_param(
                    "deploymentName", self.ctx.args.deployment_name,
                    required=True,
                ),
                **self.serialize_url_param(
                    "resourceGroupName", self.ctx.args.resource_group,
                    required=True,
                ),
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
            }
            return parameters

        @property
        def header_parameters(self):
            parameters = {
                **self.serialize_header_param(
                    "Accept", "application/json",
                ),
            }
            return parameters

        def on_200(self, session):
            data = self.deserialize_http_content(session)
            self.ctx.set_var(
                "instance",
                data,
                schema_builder=self._build_schema_on_200
            )

        _schema_on_200 = None

        @classmethod
        def _build_schema_on_200(cls):
            # Response schema is built once and cached on the class.
            if cls._schema_on_200 is not None:
                return cls._schema_on_200

            cls._schema_on_200 = AAZObjectType()

            _schema_on_200 = cls._schema_on_200
            _schema_on_200.id = AAZStrType(
                flags={"read_only": True},
            )
            _schema_on_200.location = AAZStrType()
            _schema_on_200.name = AAZStrType(
                flags={"read_only": True},
            )
            _schema_on_200.properties = AAZObjectType()
            _schema_on_200.system_data = AAZObjectType(
                serialized_name="systemData",
                flags={"read_only": True},
            )
            _schema_on_200.tags = AAZDictType()
            _schema_on_200.type = AAZStrType(
                flags={"read_only": True},
            )

            properties = cls._schema_on_200.properties
            properties.certificate_virtual_path = AAZStrType(
                serialized_name="certificateVirtualPath",
            )
            properties.key_vault_secret_id = AAZStrType(
                serialized_name="keyVaultSecretId",
            )
            properties.key_virtual_path = AAZStrType(
                serialized_name="keyVirtualPath",
            )
            properties.provisioning_state = AAZStrType(
                serialized_name="provisioningState",
            )

            system_data = cls._schema_on_200.system_data
            system_data.created_at = AAZStrType(
                serialized_name="createdAt",
                flags={"read_only": True},
            )
            system_data.created_by = AAZStrType(
                serialized_name="createdBy",
                flags={"read_only": True},
            )
            system_data.created_by_type = AAZStrType(
                serialized_name="createdByType",
                flags={"read_only": True},
            )
            system_data.last_modified_at = AAZStrType(
                serialized_name="lastModifiedAt",
                flags={"read_only": True},
            )
            system_data.last_modified_by = AAZStrType(
                serialized_name="lastModifiedBy",
                flags={"read_only": True},
            )
            system_data.last_modified_by_type = AAZStrType(
                serialized_name="lastModifiedByType",
                flags={"read_only": True},
            )

            tags = cls._schema_on_200.tags
            tags.Element = AAZStrType()

            return cls._schema_on_200
__all__ = ["Show"]
|
1,273 |
acquire
|
import inspect
from ..utils.common import get_current_user
from ..common.trex_api_annotators import client_api
from ..common.trex_client import TRexClient
from ..common.trex_ctx import TRexCtx
from ..common.trex_logger import ScreenLogger
class DummyConnection():
    """
    A dummy connection for compatibility only.

    State is held as a ``(status, disconnection_cause)`` tuple.
    """
    DISCONNECTED = 1
    CONNECTED = 2
    MARK_FOR_DISCONNECT = 3

    def __init__(self):
        self.state = (self.DISCONNECTED, None)

    def connect(self):
        """
        Connect
        """
        self.state = (self.CONNECTED, None)

    def disconnect(self):
        """
        Disconnect
        """
        self.state = (self.DISCONNECTED, None)

    def is_connected(self):
        """
        Is Connected?

        :returns:
            bool: Is connected?
        """
        return self.state[0] == self.CONNECTED

    is_any_connected = is_connected

    def is_marked_for_disconnect(self):
        """
        Is marked for disconnect?

        :returns:
            bool: Is marked for disconnect?
        """
        return self.state[0] == self.MARK_FOR_DISCONNECT

    def get_disconnection_cause(self):
        """
        Get disconnection cause.

        :returns:
            string: Disconnection cause.
        """
        return self.state[1]

    def mark_for_disconnect(self, cause):
        """
        A multithread safe call
        any thread can mark the current connection
        as not valid
        and will require the main thread to reconnect.

        Bug fix: the original assigned ``self.MARKED_FOR_DISCONNECT`` — a
        misspelling of the ``MARK_FOR_DISCONNECT`` class constant — so every
        call raised AttributeError.
        """
        self.state = (self.MARK_FOR_DISCONNECT, cause)

    # as long as it is connected, it is alive.
    is_alive = is_connected

    def sigint_on_conn_lost_enable(self):
        """
        when enabled, if connection
        is lost a SIGINT will be sent
        to the main thread.
        Declared for compatibility only.
        """
        pass

    def sigint_on_conn_lost_disable(self):
        """
        disable SIGINT dispatching
        on case of connection lost.
        Declared for compatibility only.
        """
        pass
class ConsoleDummyClient(TRexClient):
    # NOTE(review): the username default below is evaluated once at class
    # definition (import) time, not per instantiation — confirm intended.
    def __init__(self,
                 username = get_current_user(),
                 server = "localhost",
                 verbose_level = "error",
                 logger = None,
                 ):
        """
        TRex Dummy Client for Console purposes only.

        We use this client to be able to load the console without having to start
        a TRex instance. The capabilities of this client are very limited, by design.

        :parameters:
             username : string
                the user name, for example bdollma

             server : string
                the server name or ip

             verbose_level: str
                one of "none", "critical", "error", "info", "debug"

             logger: instance of AbstractLogger
                if None, will use ScreenLogger
        """
        api_ver = {'name': 'Dummy', 'major': 0, 'minor': 1}

        # logger
        logger = logger if logger is not None else ScreenLogger()
        logger.set_verbose(verbose_level)

        # first create a TRex context
        self.ctx = TRexCtx(api_ver,
                           username,
                           server,
                           None,
                           None,
                           logger,
                           None,
                           None)

        # Dummy connection stands in for the real server link.
        self.conn = DummyConnection()

        # Populate context fields normally filled from the server; here both
        # come from local stubs (see probe_server / get_system_info below).
        self.ctx.server_version = self.probe_server()
        self.ctx.system_info = self.get_system_info()

        self.supported_cmds = [] # Server supported cmds, no server -> no supported commands.
        self.ports = {} # No ports.

    def get_mode(self):
        """
        Returns running mode of TRex.
        """
        return "Dummy"

    def _register_events(self):
        # overload register event so that classic port events aren't registered.
        pass

    def get_system_info(self):
        """
        Get System Info returns some system information for the Console to show upon
        introduction.
        """
        # All values are placeholders; there is no server to query.
        return {
            'hostname': 'N/A',
            'uptime': 'N/A',
            'dp_core_count': 'N/A',
            'dp_core_count_per_port': 'N/A',
            'core_type': 'N/A',
            'is_multiqueue_mode': False,
            'advanced_per_stream_stats': False,
            'port_count': 'N/A',
            'ports': []
        }

    def get_console_methods(self):
        """
        Get Console Methods decides which methods are shown in the console help section.
        The parent function decides that each function that has @console_api decorator
        is shown.
        Here we override that, since all those functions are not relevant in this mode.
        """
        # Predicate rejects everything, so the resulting dict is empty.
        def predicate (x):
            return False
        return {cmd[1].name : cmd[1] for cmd in inspect.getmembers(self, predicate = predicate)}

    #######################################################
    #            Overriding TRexClient Getters            #
    #######################################################

    @client_api('getter', False)
    def probe_server(self):
        """
        Probe the server for the version / mode

        Can be used to determine mode.

        :parameters:
          None

        :return:
          dictionary describing server version and configuration

        :raises:
          None
        """
        # Synthesized locally from the dummy API version; no network involved.
        return {'version': "v{}.{}".format(self.ctx.api_ver["major"], self.ctx.api_ver["minor"]),
                'mode': 'Dummy',
                "build_date": 'N/A',
                "build_time": 'N/A',
                "build_by": self.ctx.username}

    #######################################################
    #           Overriding TRexClient Commands            #
    #######################################################

    @client_api('command', False)
    def connect(self):
        """

        Connects to the TRex server

        :parameters:
            None

        """
        # Nothing to do here.
        self.conn.connect()

    @client_api('command', False)
    def disconnect(self, stop_traffic = True, release_ports = True):
        """
        Disconnects from the server

        :parameters:
            stop_traffic : bool
                Attempts to stop traffic before disconnecting.
            release_ports : bool
                Attempts to release all the acquired ports.

        """
        # no ports, nothing to do here.
        self.conn.disconnect()

    @client_api('command', True)
    def METHOD_NAME(self, ports = None, force = False, sync_streams = True):
        """
        Acquires ports for executing commands

        :parameters:
            ports : list
                Ports on which to execute the command

            force : bool
                Force acquire the ports.

            sync_streams: bool
                sync with the server about the configured streams

        """
        # All ports are acquired by default.
        pass
|
1,274 |
ftp
|
import os
import subprocess
import sys
import time
import pytest
import fsspec
from fsspec import open_files
from fsspec.implementations.METHOD_NAME import FTPFileSystem
# Skip the whole module when ftplib is unavailable in this Python build.
ftplib = pytest.importorskip("ftplib")
# Directory containing this test file; the fixture serves it as the FTP root.
here = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture()
def METHOD_NAME():
    """Start a local pyftpdlib server rooted at this directory; yield (host, port)."""
    pytest.importorskip("pyftpdlib")
    # Run the FTP server as a subprocess so it has its own event loop.
    P = subprocess.Popen(
        [sys.executable, "-m", "pyftpdlib", "-d", here],
        stderr=subprocess.STDOUT,
        stdout=subprocess.PIPE,
    )
    try:
        # Give the server a moment to start accepting connections
        # (presumably; there is no readiness handshake here).
        time.sleep(1)
        # 2121 is pyftpdlib's default listening port.
        yield "localhost", 2121
    finally:
        # Always tear the server down, even if the test failed.
        P.terminate()
        P.wait()
def test_basic(METHOD_NAME):
    """Listing and reading through FTPFileSystem match the local directory."""
    server_host, server_port = METHOD_NAME
    remote = FTPFileSystem(server_host, server_port)
    assert remote.ls("/", detail=False) == sorted(os.listdir(here))
    with open(__file__, "rb") as local_file:
        expected = local_file.read()
    assert remote.cat("/" + os.path.basename(__file__)) == expected
def test_not_cached(METHOD_NAME):
    """Two FTPFileSystem instances for the same endpoint are distinct objects."""
    server_host, server_port = METHOD_NAME
    first = FTPFileSystem(server_host, server_port)
    second = FTPFileSystem(server_host, server_port)
    assert first is not second
@pytest.mark.parametrize("cache_type", ["bytes", "mmap"])
def test_complex(ftp_writable, cache_type):
    """Partial reads through open_files populate the read cache as expected."""
    from fsspec.core import BytesCache
    host, port, user, pw = ftp_writable
    # Glob matching a single remote file on the writable server fixture.
    files = open_files(
        "ftp:///ou*",
        host=host,
        port=port,
        username=user,
        password=pw,
        block_size=10000,
        cache_type=cache_type,
    )
    assert len(files) == 1
    with files[0] as fo:
        assert fo.read(10) == b"hellohello"
        if isinstance(fo.cache, BytesCache):
            # 10010 — presumably one full block (10000) plus the 10 bytes
            # already consumed; TODO confirm against the cache implementation.
            assert len(fo.cache.cache) == 10010
        assert fo.read(2) == b"he"
        assert fo.tell() == 12
def test_write_small(ftp_writable):
    """A short write round-trips through the server."""
    host, port, user, pw = ftp_writable
    filesystem = FTPFileSystem(host, port, user, pw)
    with filesystem.open("/out2", "wb") as stream:
        stream.write(b"oi")
    assert filesystem.cat("/out2") == b"oi"
def test_with_url(ftp_writable):
    """fsspec.open understands fully-qualified ftp:// URLs for write and read."""
    host, port, user, pw = ftp_writable
    url = "ftp://{}:{}@{}:{}/out".format(user, pw, host, port)
    with fsspec.open(url, "wb") as f:
        f.write(b"hello")
    with fsspec.open(url, "rb") as f:
        assert f.read() == b"hello"
@pytest.mark.parametrize("cache_type", ["bytes", "mmap"])
def test_write_big(ftp_writable, cache_type):
    """Writes are buffered per block_size; only flushed data becomes visible."""
    host, port, user, pw = ftp_writable
    fs = FTPFileSystem(host, port, user, pw, block_size=1000, cache_type=cache_type)
    fn = "/bigger"
    with fs.open(fn, "wb") as f:
        f.write(b"o" * 500)
        # Still below block_size: nothing uploaded yet, so the file must not exist.
        assert not fs.exists(fn)
        f.write(b"o" * 1000)
        fs.invalidate_cache()
        # Crossing block_size makes the file visible on the server.
        assert fs.exists(fn)
        f.write(b"o" * 200)
        f.flush()
    # 500 + 1000 + 200 bytes in total once the file is closed/flushed.
    assert fs.info(fn)["size"] == 1700
    assert fs.cat(fn) == b"o" * 1700
def test_transaction(ftp_writable):
    """Files written inside a transaction only appear once it commits."""
    host, port, user, pw = ftp_writable
    filesystem = FTPFileSystem(host, port, user, pw)
    filesystem.mkdir("/tmp")
    path = "/tr"
    with filesystem.transaction:
        with filesystem.open(path, "wb") as stream:
            stream.write(b"not")
        # Not committed yet — invisible to other operations.
        assert not filesystem.exists(path)
    assert filesystem.exists(path)
    assert filesystem.cat(path) == b"not"
    filesystem.rm(path)
    assert not filesystem.exists(path)
def test_transaction_with_cache(ftp_writable, tmpdir):
    """Directory removal inside a transaction is reflected in later listings."""
    host, port, user, pw = ftp_writable
    filesystem = FTPFileSystem(host, port, user, pw)
    filesystem.mkdir("/tmp")
    filesystem.mkdir("/tmp/dir")
    assert "dir" in filesystem.ls("/tmp", detail=False)
    with filesystem.transaction:
        filesystem.rmdir("/tmp/dir")
    assert "dir" not in filesystem.ls("/tmp", detail=False)
    assert not filesystem.exists("/tmp/dir")
def test_cat_get(ftp_writable, tmpdir):
    """pipe/cat_file/get_file round-trip data larger than the block size."""
    host, port, user, pw = ftp_writable
    filesystem = FTPFileSystem(host, port, user, pw, block_size=500)
    filesystem.mkdir("/tmp")
    payload = b"hello" * 500
    filesystem.pipe("/tmp/myfile", payload)
    assert filesystem.cat_file("/tmp/myfile") == payload
    local_path = os.path.join(tmpdir, "lfile")
    filesystem.get_file("/tmp/myfile", local_path)
    with open(local_path, "rb") as downloaded:
        assert downloaded.read() == payload
def test_mkdir(ftp_writable):
    """mkdir/makedirs honour create_parents and exist_ok semantics."""
    host, port, user, pw = ftp_writable
    fs = FTPFileSystem(host, port, user, pw)
    # Without create_parents, missing intermediate dirs raise a server error.
    with pytest.raises(ftplib.error_perm):
        fs.mkdir("/tmp/not/exist", create_parents=False)
    fs.mkdir("/tmp/not/exist")
    assert fs.exists("/tmp/not/exist")
    # exist_ok=True is a no-op on an existing path; False raises.
    fs.makedirs("/tmp/not/exist", exist_ok=True)
    with pytest.raises(FileExistsError):
        fs.makedirs("/tmp/not/exist", exist_ok=False)
    fs.makedirs("/tmp/not/exist/inner/inner")
    assert fs.isdir("/tmp/not/exist/inner/inner")
def test_rm_get_recursive(ftp_writable, tmpdir):
    """Recursive download and recursive delete of a populated directory tree."""
    tmpdir = str(tmpdir)
    host, port, user, pw = ftp_writable
    fs = FTPFileSystem(host, port, user, pw)
    fs.mkdir("/tmp/topdir")
    fs.mkdir("/tmp/topdir/underdir")
    fs.touch("/tmp/topdir/afile")
    fs.touch("/tmp/topdir/underdir/afile")
    fs.get("/tmp/topdir", tmpdir, recursive=True)
    # Plain rmdir refuses to remove a non-empty directory.
    with pytest.raises(ftplib.error_perm):
        fs.rmdir("/tmp/topdir")
    fs.rm("/tmp/topdir", recursive=True)
    assert not fs.exists("/tmp/topdir")
|
1,275 |
setup
|
import logging
import random
from discord import Embed
from discord.ext import commands
from bot.bot import Bot
from bot.constants import Colours, NEGATIVE_REPLIES, Tokens
log = logging.getLogger(__name__)
class ScaryMovie(commands.Cog):
    """Selects a random scary movie and embeds info into Discord chat."""

    def __init__(self, bot: Bot):
        self.bot = bot

    # BUG FIX: discord.py's command decorator takes `aliases` (a list of names),
    # not `alias`; the old keyword was silently ignored so `.smovie` never worked.
    @commands.command(name="scarymovie", aliases=["smovie"])
    async def random_movie(self, ctx: commands.Context) -> None:
        """Randomly select a scary movie and display information about it."""
        async with ctx.typing():
            selection = await self.select_movie()

            if not selection:
                await ctx.send(embed=Embed(
                    title=random.choice(NEGATIVE_REPLIES),
                    description=":warning: Failed to select a movie from the API",
                    color=Colours.soft_red
                ))
                return

            movie_details = await self.format_metadata(selection)

        await ctx.send(embed=movie_details)

    async def select_movie(self) -> dict:
        """Selects a random movie and returns a JSON of movie details from TMDb.

        Returns an empty dict when the API response is unusable.
        """
        url = "https://api.themoviedb.org/3/discover/movie"
        params = {
            "api_key": Tokens.tmdb.get_secret_value(),
            "with_genres": "27",
            "vote_count.gte": "5",
            "include_adult": "false"
        }
        headers = {
            "Content-Type": "application/json;charset=utf-8"
        }

        # Get total page count of horror movies
        async with self.bot.http_session.get(url=url, params=params, headers=headers) as response:
            data = await response.json()
            # Guard against a missing or zero page count so randint(1, 0)
            # can't raise below.
            total_pages = data.get("total_pages") or 1

        # Get movie details from one random result on a random page.
        # TMDb caps pagination at page 500.
        params["page"] = random.randint(1, min(total_pages, 500))
        async with self.bot.http_session.get(url=url, params=params, headers=headers) as response:
            data = await response.json()
            if (results := data.get("results")) is None:
                log.warning("Failed to select a movie - data returned from API has no 'results' key")
                return {}
            selection_id = random.choice(results).get("id")
            if selection_id is None:
                log.warning("Failed to select a movie - selected film didn't have an id")
                return {}

        # Get full details and credits
        async with self.bot.http_session.get(
            url=f"https://api.themoviedb.org/3/movie/{selection_id}",
            params={"api_key": Tokens.tmdb.get_secret_value(), "append_to_response": "credits"}
        ) as selection:
            return await selection.json()

    @staticmethod
    async def format_metadata(movie: dict) -> Embed:
        """Formats raw TMDb data to be embedded in Discord chat."""
        # Build the relevant URLs.
        movie_id = movie.get("id")
        poster_path = movie.get("poster_path")
        tmdb_url = f"https://www.themoviedb.org/movie/{movie_id}" if movie_id else None
        poster = f"https://image.tmdb.org/t/p/original{poster_path}" if poster_path else None

        # Up to three top-billed cast member names.
        cast = [actor.get("name") for actor in movie.get("credits", {}).get("cast", [])[:3]]

        # Get director name (first crew entry, when present).
        director = movie.get("credits", {}).get("crew", [])
        if director:
            director = director[0].get("name")

        # Determine the spookiness rating: one skull per whole point of the
        # halved vote average, plus a bat for a remaining half point.
        rating_count = movie.get("vote_average", 0) / 2
        rating = ":skull:" * int(rating_count)
        if (rating_count % 1) >= .5:
            rating += ":bat:"

        # Try to get year of release and runtime.
        # BUG FIX: the default for a missing release_date is now "" (a string,
        # matching the present-case type) instead of [].
        year = movie.get("release_date", "")[:4]
        runtime = movie.get("runtime")
        runtime = f"{runtime} minutes" if runtime else None

        # Not all these attributes will always be present
        movie_attributes = {
            "Directed by": director,
            "Starring": ", ".join(cast),
            "Running time": runtime,
            "Release year": year,
            "Spookiness rating": rating,
        }

        embed = Embed(
            colour=0x01d277,
            title=f"**{movie.get('title')}**",
            url=tmdb_url,
            description=movie.get("overview")
        )
        if poster:
            embed.set_image(url=poster)

        # Add the attributes that we actually have data for, but not the others.
        for name, value in movie_attributes.items():
            if value:
                embed.add_field(name=name, value=value)

        embed.set_footer(text="This product uses the TMDb API but is not endorsed or certified by TMDb.")
        embed.set_thumbnail(url="https://i.imgur.com/LtFtC8H.png")

        return embed
async def METHOD_NAME(bot: Bot) -> None:
    """Load the Scary Movie Cog, skipping it when no TMDB token is configured."""
    if Tokens.tmdb:
        await bot.add_cog(ScaryMovie(bot))
    else:
        log.warning("No TMDB Token. Not loading ScaryMovie Cog.")
|
1,276 |
get params
|
#!/usr/bin/env python3
from cereal import car
from panda import Panda
from openpilot.selfdrive.car import get_safety_config
from openpilot.selfdrive.car.chrysler.values import CAR, RAM_HD, RAM_DT, RAM_CARS, ChryslerFlags
from openpilot.selfdrive.car.interfaces import CarInterfaceBase
class CarInterface(CarInterfaceBase):
    """Chrysler/Jeep/Ram car interface: static platform parameters plus state updates."""

    @staticmethod
    def METHOD_NAME(ret, candidate, fingerprint, car_fw, experimental_long, docs):
        """Populate the CarParams object `ret` for the fingerprinted platform `candidate`."""
        ret.carName = "chrysler"
        ret.dashcamOnly = candidate in RAM_HD

        # radar parsing needs some work, see https://github.com/commaai/openpilot/issues/26842
        ret.radarUnavailable = True  # DBC[candidate]['radar'] is None
        ret.steerActuatorDelay = 0.1
        ret.steerLimitTimer = 0.4

        # safety config
        ret.safetyConfigs = [get_safety_config(car.CarParams.SafetyModel.chrysler)]
        if candidate in RAM_HD:
            ret.safetyConfigs[0].safetyParam |= Panda.FLAG_CHRYSLER_RAM_HD
        elif candidate in RAM_DT:
            ret.safetyConfigs[0].safetyParam |= Panda.FLAG_CHRYSLER_RAM_DT

        ret.minSteerSpeed = 3.8  # m/s
        CarInterfaceBase.configure_torque_tune(candidate, ret.lateralTuning)
        if candidate not in RAM_CARS:
            # Newer FW versions standard on the following platforms, or flashed by a dealer onto older platforms have a higher minimum steering speed.
            new_eps_platform = candidate in (CAR.PACIFICA_2019_HYBRID, CAR.PACIFICA_2020, CAR.JEEP_CHEROKEE_2019)
            new_eps_firmware = any(fw.ecu == 'eps' and fw.fwVersion[:4] >= b"6841" for fw in car_fw)
            if new_eps_platform or new_eps_firmware:
                ret.flags |= ChryslerFlags.HIGHER_MIN_STEERING_SPEED.value

        # Chrysler
        if candidate in (CAR.PACIFICA_2017_HYBRID, CAR.PACIFICA_2018, CAR.PACIFICA_2018_HYBRID, CAR.PACIFICA_2019_HYBRID, CAR.PACIFICA_2020):
            # NOTE(review): mass presumably in kg, wheelbase in m — confirm
            # against the rest of the interfaces.
            ret.mass = 2242.
            ret.wheelbase = 3.089
            ret.steerRatio = 16.2  # Pacifica Hybrid 2017
            ret.lateralTuning.init('pid')
            ret.lateralTuning.pid.kpBP, ret.lateralTuning.pid.kiBP = [[9., 20.], [9., 20.]]
            ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.15, 0.30], [0.03, 0.05]]
            ret.lateralTuning.pid.kf = 0.00006

        # Jeep
        elif candidate in (CAR.JEEP_CHEROKEE, CAR.JEEP_CHEROKEE_2019):
            ret.mass = 1778
            ret.wheelbase = 2.71
            ret.steerRatio = 16.7
            ret.steerActuatorDelay = 0.2
            ret.lateralTuning.init('pid')
            ret.lateralTuning.pid.kpBP, ret.lateralTuning.pid.kiBP = [[9., 20.], [9., 20.]]
            ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.15, 0.30], [0.03, 0.05]]
            ret.lateralTuning.pid.kf = 0.00006

        # Ram
        elif candidate == CAR.RAM_1500:
            ret.steerActuatorDelay = 0.2
            ret.wheelbase = 3.88
            ret.steerRatio = 16.3
            ret.mass = 2493.
            ret.minSteerSpeed = 14.5
            # Older EPS FW allow steer to zero
            if any(fw.ecu == 'eps' and fw.fwVersion[:4] <= b"6831" for fw in car_fw):
                ret.minSteerSpeed = 0.

        elif candidate == CAR.RAM_HD:
            ret.steerActuatorDelay = 0.2
            ret.wheelbase = 3.785
            ret.steerRatio = 15.61
            ret.mass = 3405.
            ret.minSteerSpeed = 16
            CarInterfaceBase.configure_torque_tune(candidate, ret.lateralTuning, 1.0, False)

        else:
            raise ValueError(f"Unsupported car: {candidate}")

        if ret.flags & ChryslerFlags.HIGHER_MIN_STEERING_SPEED:
            # TODO: allow these cars to steer down to 13 m/s if already engaged.
            ret.minSteerSpeed = 17.5  # m/s 17 on the way up, 13 on the way down once engaged.

        ret.centerToFront = ret.wheelbase * 0.44
        # NOTE(review): 720 presumably is the blind spot monitor CAN message id
        # — confirm against the platform DBC.
        ret.enableBsm = 720 in fingerprint[0]

        return ret

    def _update(self, c):
        """Read new car state from the CAN parsers and derive events."""
        ret = self.CS.update(self.cp, self.cp_cam)

        # events
        events = self.create_common_events(ret, extra_gears=[car.CarState.GearShifter.low])

        # Low speed steer alert hysteresis logic: latch the alert just below
        # minSteerSpeed and only clear it once comfortably above it.
        if self.CP.minSteerSpeed > 0. and ret.vEgo < (self.CP.minSteerSpeed + 0.5):
            self.low_speed_alert = True
        elif ret.vEgo > (self.CP.minSteerSpeed + 1.):
            self.low_speed_alert = False
        if self.low_speed_alert:
            events.add(car.CarEvent.EventName.belowSteerSpeed)

        ret.events = events.to_msg()

        return ret

    def apply(self, c, now_nanos):
        """Forward the control request to the car controller."""
        return self.CC.update(c, self.CS, now_nanos)
|
1,277 |
load graph
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Runs a trained audio graph against a WAVE file and reports the results.
The model, labels and .wav file specified in the arguments will be loaded, and
then the predictions from running the model against the audio data will be
printed to the console. This is a useful script for sanity checking trained
models, and as an example of how to use an audio model from Python.
Here's an example of running it:
python tensorflow/examples/speech_commands/label_wav.py \
--graph=/tmp/my_frozen_graph.pb \
--labels=/tmp/speech_commands_train/conv_labels.txt \
--wav=/tmp/speech_dataset/left/a5d485dc_nohash_0.wav
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import tensorflow as tf
# pylint: disable=unused-import
from tensorflow.contrib.framework.python.ops import audio_ops as contrib_audio
# pylint: enable=unused-import
FLAGS = None
def METHOD_NAME(filename):
    """Unpersists graph from file as default graph."""
    graph_def = tf.compat.v1.GraphDef()
    with tf.io.gfile.GFile(filename, 'rb') as graph_file:
        graph_def.ParseFromString(graph_file.read())
    # Import with an empty name prefix so tensor names match the frozen graph.
    tf.import_graph_def(graph_def, name='')
def load_labels(filename):
    """Read in labels, one label per line."""
    labels = []
    for line in tf.io.gfile.GFile(filename):
        labels.append(line.rstrip())
    return labels
def run_graph(wav_data, labels, input_layer_name, output_layer_name,
              num_top_predictions):
    """Runs the audio data through the graph and prints predictions."""
    with tf.compat.v1.Session() as sess:
        # Feed the raw WAVE bytes to the graph's input; the softmax output
        # holds one score per label for the single input clip.
        softmax_tensor = sess.graph.get_tensor_by_name(output_layer_name)
        predictions, = sess.run(softmax_tensor, {input_layer_name: wav_data})

    # Sort to show labels in order of confidence
    best_nodes = predictions.argsort()[-num_top_predictions:][::-1]
    for node_id in best_nodes:
        human_string = labels[node_id]
        score = predictions[node_id]
        print('%s (score = %.5f)' % (human_string, score))

    return 0
def label_wav(wav, labels, graph, input_name, output_name, how_many_labels):
    """Loads the model and labels, and runs the inference to print predictions."""
    if not wav or not tf.io.gfile.exists(wav):
        tf.compat.v1.logging.fatal('Audio file does not exist %s', wav)
    if not labels or not tf.io.gfile.exists(labels):
        tf.compat.v1.logging.fatal('Labels file does not exist %s', labels)
    if not graph or not tf.io.gfile.exists(graph):
        tf.compat.v1.logging.fatal('Graph file does not exist %s', graph)

    label_names = load_labels(labels)

    # The frozen graph is imported into the default TF graph as a side effect.
    METHOD_NAME(graph)

    with open(wav, 'rb') as wav_file:
        audio_bytes = wav_file.read()
    run_graph(audio_bytes, label_names, input_name, output_name, how_many_labels)
def main(_):
    """Entry point for script, converts flags to arguments."""
    label_wav(
        wav=FLAGS.wav,
        labels=FLAGS.labels,
        graph=FLAGS.graph,
        input_name=FLAGS.input_name,
        output_name=FLAGS.output_name,
        how_many_labels=FLAGS.how_many_labels,
    )
if __name__ == '__main__':
    # Command-line flags for the script; parse_known_args lets unrecognised
    # flags pass through to tf.app.run below.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--wav', type=str, default='', help='Audio file to be identified.')
    parser.add_argument(
        '--graph', type=str, default='', help='Model to use for identification.')
    parser.add_argument(
        '--labels', type=str, default='', help='Path to file containing labels.')
    parser.add_argument(
        '--input_name',
        type=str,
        default='wav_data:0',
        help='Name of WAVE data input node in model.')
    parser.add_argument(
        '--output_name',
        type=str,
        default='labels_softmax:0',
        help='Name of node outputting a prediction in the model.')
    parser.add_argument(
        '--how_many_labels',
        type=int,
        default=3,
        help='Number of results to show.')

    FLAGS, unparsed = parser.parse_known_args()
    # Forward the remaining argv to the TF app runner unchanged.
    tf.compat.v1.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
1,278 |
test rb prev
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# SPDX-License-Identifier: LGPL-2.1-or-later
import collections
from drgn import NULL
from drgn.helpers import ValidationError
from drgn.helpers.linux.rbtree import (
RB_EMPTY_NODE,
RB_EMPTY_ROOT,
rb_find,
rb_first,
rb_last,
rb_next,
rb_parent,
rb_prev,
rbtree_inorder_for_each,
rbtree_inorder_for_each_entry,
validate_rbtree,
validate_rbtree_inorder_for_each_entry,
)
from tests.linux_kernel import LinuxKernelTestCase, skip_unless_have_test_kmod
@skip_unless_have_test_kmod
class TestRbtree(LinuxKernelTestCase):
    """Exercise the drgn rbtree helpers against fixtures exported by the test kmod."""

    @classmethod
    def setUpClass(cls):
        # Kernel-side test objects created by the drgn test kernel module.
        cls.root = cls.prog["drgn_test_rb_root"].address_of_()
        cls.entries = cls.prog["drgn_test_rb_entries"]
        cls.num_entries = 4
        cls.empty_root = cls.prog["drgn_test_empty_rb_root"].address_of_()

    def node(self, n):
        # struct rb_node * embedded in the n-th test entry.
        return self.entries[n].node.address_of_()

    def entry(self, n):
        # struct drgn_test_rb_entry * for the n-th test entry.
        return self.entries[n].address_of_()

    def test_RB_EMPTY_ROOT(self):
        self.assertTrue(RB_EMPTY_ROOT(self.empty_root))
        self.assertFalse(RB_EMPTY_ROOT(self.root))

    def test_RB_EMPTY_NODE(self):
        self.assertTrue(
            RB_EMPTY_NODE(self.prog["drgn_test_empty_rb_node"].address_of_())
        )
        self.assertFalse(RB_EMPTY_NODE(self.node(0)))

    def test_rb_parent(self):
        # Both children of the root (when present) must report the root as parent.
        if self.root.rb_node.rb_left:
            self.assertEqual(rb_parent(self.root.rb_node.rb_left), self.root.rb_node)
        if self.root.rb_node.rb_right:
            self.assertEqual(rb_parent(self.root.rb_node.rb_right), self.root.rb_node)

    def test_rb_first(self):
        self.assertEqual(rb_first(self.root), self.node(0))

    def test_rb_last(self):
        self.assertEqual(rb_last(self.root), self.node(self.num_entries - 1))

    def test_rb_next(self):
        # In-order successor of entry i is entry i + 1; the last has none.
        for i in range(self.num_entries - 1):
            self.assertEqual(rb_next(self.node(i)), self.node(i + 1))
        self.assertEqual(
            rb_next(self.node(self.num_entries - 1)),
            NULL(self.prog, "struct rb_node *"),
        )

    def METHOD_NAME(self):
        # In-order predecessor of entry i is entry i - 1; the first has none.
        for i in range(1, self.num_entries):
            self.assertEqual(rb_prev(self.node(i)), self.node(i - 1))
        self.assertEqual(rb_prev(self.node(0)), NULL(self.prog, "struct rb_node *"))

    def test_rbtree_inorder_for_each(self):
        self.assertEqual(
            list(rbtree_inorder_for_each(self.root)),
            [self.node(i) for i in range(self.num_entries)],
        )

    def test_rbtree_inorder_for_each_entry(self):
        self.assertEqual(
            list(
                rbtree_inorder_for_each_entry(
                    "struct drgn_test_rb_entry", self.root, "node"
                )
            ),
            [self.entry(i) for i in range(self.num_entries)],
        )

    def test_rb_find(self):
        # Comparator contract: negative/zero/positive like cmp(key, node value).
        def cmp(key, obj):
            value = obj.value.value_()
            return key - value

        for i in range(self.num_entries):
            self.assertEqual(
                rb_find("struct drgn_test_rb_entry", self.root, "node", i, cmp),
                self.entry(i),
            )
        # A key past the last entry is not found.
        self.assertEqual(
            rb_find(
                "struct drgn_test_rb_entry", self.root, "node", self.num_entries, cmp
            ),
            NULL(self.prog, "struct drgn_test_rb_entry *"),
        )

    @staticmethod
    def cmp_entries(a, b):
        # Entry-to-entry comparator used by the validation helpers.
        return a.value.value_() - b.value.value_()

    def test_validate_rbtree_success(self):
        # Valid trees (including one with equal keys, when allowed) must pass
        # and the validating iterator must agree with the plain iterator.
        for root, allow_equal in (
            (self.root, False),
            (self.empty_root, False),
            (self.prog["drgn_test_rbtree_with_equal"].address_of_(), True),
        ):
            validate_rbtree(
                "struct drgn_test_rb_entry", root, "node", self.cmp_entries, allow_equal
            )
            self.assertEqual(
                list(
                    validate_rbtree_inorder_for_each_entry(
                        "struct drgn_test_rb_entry",
                        root,
                        "node",
                        self.cmp_entries,
                        allow_equal,
                    )
                ),
                list(
                    rbtree_inorder_for_each_entry(
                        "struct drgn_test_rb_entry", root, "node"
                    )
                ),
            )

    def assert_validation_error(self, regex, name):
        # Both the one-shot validator and the validating iterator must raise
        # a ValidationError matching regex for the named broken tree.
        self.assertRaisesRegex(
            ValidationError,
            regex,
            validate_rbtree,
            "struct drgn_test_rb_entry",
            self.prog[name].address_of_(),
            "node",
            self.cmp_entries,
            False,
        )
        # deque(iterator, 0) exhausts the generator without storing results.
        self.assertRaisesRegex(
            ValidationError,
            regex,
            collections.deque,
            validate_rbtree_inorder_for_each_entry(
                "struct drgn_test_rb_entry",
                self.prog[name].address_of_(),
                "node",
                self.cmp_entries,
                False,
            ),
            0,
        )

    def test_validate_rbtree_has_equal(self):
        self.assert_validation_error("compares equal", "drgn_test_rbtree_with_equal")

    def test_validate_rbtree_out_of_order(self):
        self.assert_validation_error(
            "compares (greater|less) than", "drgn_test_rbtree_out_of_order"
        )

    def test_validate_rbtree_null_root_parent(self):
        self.assert_validation_error(
            "root node .* has parent", "drgn_test_rbtree_with_bad_root_parent"
        )

    def test_validate_rbtree_red_root(self):
        self.assert_validation_error(
            "root node .* is red", "drgn_test_rbtree_with_red_root"
        )

    def test_validate_rbtree_inconsistent_parents(self):
        self.assert_validation_error(
            "rb_parent", "drgn_test_rbtree_with_inconsistent_parents"
        )

    def test_validate_rbtree_red_violation(self):
        self.assert_validation_error(
            "red node .* has red child", "drgn_test_rbtree_with_red_violation"
        )

    def test_validate_rbtree_black_violation(self):
        self.assert_validation_error(
            "unequal black heights", "drgn_test_rbtree_with_black_violation"
        )
|
1,279 |
update current contact
|
#!/usr/bin/env python3
from asciimatics.widgets import Frame, ListBox, Layout, Divider, Text, \
Button, TextBox, Widget
from asciimatics.scene import Scene
from asciimatics.screen import Screen
from asciimatics.exceptions import ResizeScreenError, NextScene, StopApplication
import sys
import sqlite3
class ContactModel(object):
    """In-memory SQLite store for contact records."""

    def __init__(self):
        # Create a database in RAM; data lives only as long as the process.
        self._db = sqlite3.connect(':memory:')
        self._db.row_factory = sqlite3.Row

        # Create the basic contact table.
        self._db.execute('''
            CREATE TABLE contacts(
                id INTEGER PRIMARY KEY,
                name TEXT,
                phone TEXT,
                address TEXT,
                email TEXT,
                notes TEXT)
        ''')
        self._db.commit()

        # Id of the contact currently being edited (None means "new contact").
        self.current_id = None

    def add(self, contact):
        """Insert a new contact from a mapping with name/phone/address/email/notes."""
        self._db.execute('''
            INSERT INTO contacts(name, phone, address, email, notes)
            VALUES(:name, :phone, :address, :email, :notes)''',
            contact)
        self._db.commit()

    def get_summary(self):
        """Return (name, id) rows for every stored contact."""
        return self._db.execute(
            "SELECT name, id from contacts").fetchall()

    def get_contact(self, contact_id):
        """Return the full row for one contact id."""
        return self._db.execute(
            "SELECT * from contacts WHERE id=:id", {"id": contact_id}).fetchone()

    def get_current_contact(self):
        """Return the contact being edited, or a blank template for a new one."""
        if self.current_id is None:
            return {"name": "", "address": "", "phone": "", "email": "", "notes": ""}
        return self.get_contact(self.current_id)

    def METHOD_NAME(self, details):
        """Persist *details*: insert when no contact is selected, else update in place."""
        if self.current_id is None:
            self.add(details)
        else:
            self._db.execute('''
                UPDATE contacts SET name=:name, phone=:phone, address=:address,
                email=:email, notes=:notes WHERE id=:id''',
                details)
            self._db.commit()

    def delete_contact(self, contact_id):
        """Remove the contact with the given id."""
        self._db.execute('''
            DELETE FROM contacts WHERE id=:id''', {"id": contact_id})
        self._db.commit()
class ListView(Frame):
    """Main frame: shows the contact list with Add/Edit/Delete/Quit actions."""

    def __init__(self, screen, model):
        super(ListView, self).__init__(screen,
                                       screen.height * 2 // 3,
                                       screen.width * 2 // 3,
                                       on_load=self._reload_list,
                                       hover_focus=True,
                                       can_scroll=False,
                                       title="Contact List")
        # Save off the model that accesses the contacts database.
        self._model = model

        # Create the form for displaying the list of contacts.
        self._list_view = ListBox(
            Widget.FILL_FRAME,
            model.get_summary(),
            name="contacts",
            add_scroll_bar=True,
            on_change=self._on_pick,
            on_select=self._edit)
        self._edit_button = Button("Edit", self._edit)
        self._delete_button = Button("Delete", self._delete)
        layout = Layout([100], fill_frame=True)
        self.add_layout(layout)
        layout.add_widget(self._list_view)
        layout.add_widget(Divider())
        # Button row: four equal-width columns.
        layout2 = Layout([1, 1, 1, 1])
        self.add_layout(layout2)
        layout2.add_widget(Button("Add", self._add), 0)
        layout2.add_widget(self._edit_button, 1)
        layout2.add_widget(self._delete_button, 2)
        layout2.add_widget(Button("Quit", self._quit), 3)
        self.fix()
        self._on_pick()

    def _on_pick(self):
        # Edit/Delete only make sense when a contact is highlighted.
        self._edit_button.disabled = self._list_view.value is None
        self._delete_button.disabled = self._list_view.value is None

    def _reload_list(self, new_value=None):
        # Refresh the options from the DB and optionally restore a selection.
        self._list_view.options = self._model.get_summary()
        self._list_view.value = new_value

    def _add(self):
        # A None current_id tells the model the next save is an insert.
        self._model.current_id = None
        raise NextScene("Edit Contact")

    def _edit(self):
        self.save()
        self._model.current_id = self.data["contacts"]
        raise NextScene("Edit Contact")

    def _delete(self):
        self.save()
        self._model.delete_contact(self.data["contacts"])
        self._reload_list()

    @staticmethod
    def _quit():
        raise StopApplication("User pressed quit")
class ContactView(Frame):
    """Edit frame: form for viewing and editing a single contact's details."""

    def __init__(self, screen, model):
        super(ContactView, self).__init__(screen,
                                          screen.height * 2 // 3,
                                          screen.width * 2 // 3,
                                          hover_focus=True,
                                          can_scroll=False,
                                          title="Contact Details",
                                          reduce_cpu=True)
        # Save off the model that accesses the contacts database.
        self._model = model

        # Create the form for displaying the list of contacts.
        # Widget `name`s match the column names used by the model's SQL.
        layout = Layout([100], fill_frame=True)
        self.add_layout(layout)
        layout.add_widget(Text("Name:", "name"))
        layout.add_widget(Text("Address:", "address"))
        layout.add_widget(Text("Phone number:", "phone"))
        layout.add_widget(Text("Email address:", "email"))
        layout.add_widget(TextBox(
            Widget.FILL_FRAME, "Notes:", "notes", as_string=True, line_wrap=True))
        layout2 = Layout([1, 1, 1, 1])
        self.add_layout(layout2)
        layout2.add_widget(Button("OK", self._ok), 0)
        layout2.add_widget(Button("Cancel", self._cancel), 3)
        self.fix()

    def reset(self):
        # Do standard reset to clear out form, then populate with new data.
        super(ContactView, self).reset()
        self.data = self._model.get_current_contact()

    def _ok(self):
        # Persist the edited form data, then return to the list scene.
        self.save()
        self._model.METHOD_NAME(self.data)
        raise NextScene("Main")

    @staticmethod
    def _cancel():
        raise NextScene("Main")
def demo(screen, scene):
    """Build the two application scenes and run the event loop on *screen*."""
    all_scenes = [
        Scene([ListView(screen, contacts)], -1, name="Main"),
        Scene([ContactView(screen, contacts)], -1, name="Edit Contact"),
    ]
    screen.play(all_scenes, stop_on_resize=True, start_scene=scene, allow_int=True)
# Shared model instance used by both scenes.
contacts = ContactModel()
last_scene = None
# Re-enter the UI after terminal resizes, restarting on the scene that was
# active when the resize happened.
while True:
    try:
        Screen.wrapper(demo, catch_interrupt=True, arguments=[last_scene])
        sys.exit(0)
    except ResizeScreenError as e:
        last_scene = e.scene
|
1,280 |
get queue
|
import boto3
import itertools
import json
from toolz import dicttoolz
from typing import Any, Iterable, Mapping, Optional
def redrive_queue(
    queue_name: str,
    to_queue_name: Optional[str] = None,
    limit: Optional[int] = None,
    dryrun: bool = False,
    max_wait: int = 5,
    messages_per_request: int = 10,
):
    """
    Redrive messages from one queue to another. Default usage is to define
    a "deadletter" queue, and pick its "alive" counterpart, and redrive
    messages to that queue.

    :param queue_name: queue to drain (usually the deadletter queue)
    :param to_queue_name: explicit target queue; when None, the single
        source queue of the deadletter queue is used
    :param limit: maximum number of messages to move (None for all)
    :param dryrun: when True, only report the approximate message count
    :param max_wait: wait time passed through to the message fetcher
    :param messages_per_request: receive batch size passed to the fetcher
    :return: number of messages that were (or, for a dryrun, would be) re-driven
    """

    def post_messages(to_queue, messages):
        # Batch-send to the target queue, then delete from the source only
        # after the send call returned, so a send failure can't lose messages.
        message_bodies = [
            {"Id": str(n), "MessageBody": m.body} for n, m in enumerate(messages)
        ]
        to_queue.send_messages(Entries=message_bodies)
        # Delete after sending, not before
        for message in messages:
            message.delete()
        return []

    dead_queue = METHOD_NAME(queue_name)

    if to_queue_name is not None:
        alive_queue = METHOD_NAME(to_queue_name)
    else:
        source_queues = list(dead_queue.dead_letter_source_queues.all())
        if len(source_queues) == 0:
            raise Exception(
                "No alive queue found for the deadletter queue, please check your configuration."
            )
        if len(source_queues) > 1:
            raise Exception(
                "Deadletter queue has more than one source, please specify the target queue name."
            )
        alive_queue = source_queues[0]

    messages = get_messages(
        dead_queue,
        limit=limit,
        max_wait=max_wait,
        messages_per_request=messages_per_request,
    )
    count_messages = 0
    approx_n_messages = dead_queue.attributes.get("ApproximateNumberOfMessages")
    try:
        count_messages = int(approx_n_messages)
    except (TypeError, ValueError):
        # BUG FIX: the attribute value is a string (or None when absent);
        # int() raises ValueError for a non-numeric string, which the old
        # code did not catch.
        print("Couldn't get approximate number of messages, setting to 0")

    # If there's no messages then there's no work to do. If it's a dryrun, we
    # don't do anything either.
    if count_messages == 0 or dryrun:
        return count_messages

    count = 0
    message_group = []
    for message in messages:
        message_group.append(message)
        count += 1
        # SQS limits send_messages to batches of 10 entries, so flush every 10.
        if count % 10 == 0:
            message_group = post_messages(alive_queue, message_group)

    # Post the last few messages
    if len(message_group) > 0:
        message_group = post_messages(alive_queue, message_group)

    # Return the number of messages that were re-driven.
    return count
def METHOD_NAME(queue_name: str):
    """Look up and return an SQS queue resource by name.

    :param queue_name: queue name, e.g. ``alex-really-secret-queue``
    :return: boto3 ``Queue`` resource
    """
    return boto3.resource("sqs").get_queue_by_name(QueueName=queue_name)
def get_queues(prefix: Optional[str] = None, contains: Optional[str] = None):
    """
    Yield the SQS queues the caller is allowed to see, filtered by the
    parameters provided.

    :param prefix: only yield queues whose name starts with this prefix
    :param contains: only yield queues whose name contains this substring
    :return: generator of boto3 ``Queue`` resources
    """
    sqs = boto3.resource("sqs")
    queues = sqs.queues.all()
    if prefix is not None:
        queues = queues.filter(QueueNamePrefix=prefix)
    if contains is not None:
        for queue in queues:
            # The queue name is the last component of its ARN.
            if contains in queue.attributes.get("QueueArn").split(":")[-1]:
                yield queue
    else:
        yield from queues
def publish_message(
    queue, message: str, message_attributes: Optional[Mapping[str, Any]] = None
):
    """Publish a single message to a queue resource.

    :param queue: boto3 ``Queue`` resource
    :param message: message body, normally a JSON object dumped as a string
    :param message_attributes: optional SQS message attributes mapping
    """
    attrs = {} if message_attributes is None else message_attributes
    queue.send_message(
        QueueUrl=queue.url, MessageBody=message, MessageAttributes=attrs
    )
def publish_messages(queue, messages):
    """
    Publish messages to a queue resource.

    :param queue: boto3 ``Queue`` resource
    :param messages: pre-built batch entries as expected by ``send_messages``
        (each entry needs at least ``Id`` and ``MessageBody``)
    """
    queue.send_messages(Entries=messages)
def _sqs_message_stream(queue, **kw):
    """Yield messages from *queue* until a receive call returns an empty batch."""
    while True:
        batch = queue.receive_messages(**kw)
        if not batch:
            return
        yield from batch
def get_messages(
    queue,
    limit: Optional[int] = None,
    visibility_timeout: int = 60,
    message_attributes: Optional[Iterable[str]] = None,
    max_wait: int = 1,
    messages_per_request: int = 1,
    **kw,
):
    """
    Get messages from SQS queue resource. Returns a lazy sequence of message objects.

    :param queue: boto3 ``Queue`` resource to read from
    :param limit: the maximum number of messages to return from the queue (default to all)
    :param visibility_timeout: A period of time in seconds during which Amazon SQS prevents other consumers
                               from receiving and processing the message
    :param message_attributes: Select what attributes to include in the messages, default All
    :param max_wait: Longest to wait in seconds before assuming queue is empty (default: 1)
    :param messages_per_request: number of messages fetched per receive call
    :param kw: Any other arguments are passed to ``.receive_messages()`` boto3 call
    :return: Iterator of sqs messages
    :raises ValueError: if ``limit`` is given but smaller than 1
    """
    # Validate up front so misuse fails fast, before any queue traffic.
    if limit is not None and limit < 1:
        raise ValueError(f"Limit {limit} is not valid.")
    if message_attributes is None:
        message_attributes = ["All"]
    messages = _sqs_message_stream(
        queue,
        VisibilityTimeout=visibility_timeout,
        MaxNumberOfMessages=messages_per_request,
        WaitTimeSeconds=max_wait,
        MessageAttributeNames=message_attributes,
        **kw,
    )
    if limit is None:
        return messages
    return itertools.islice(messages, limit)
def capture_attributes(action: str, stac: dict):
    """Determine SNS message attributes for a STAC document.

    :param action: event type, e.g. ``"added"`` or ``"archived"``
    :param stac: STAC item as a dictionary
    :return: mapping suitable for SNS ``MessageAttributes``
    """
    # Plain nested dict lookups replace the dicttoolz.get_in dependency;
    # STAC items carry their metadata under the "properties" object.
    properties = stac.get("properties") or {}
    product = properties.get("odc:product")
    date_time = properties.get("datetime")
    maturity = properties.get("dea:dataset_maturity")

    # Fall back to the collection name when no ODC product is recorded.
    if not product:
        product = stac.get("collection")

    # NOTE(review): product/datetime may still be None here, which SNS
    # rejects as a StringValue — presumably upstream guarantees them; verify.
    return {
        "action": {"DataType": "String", "StringValue": action},
        "product": {"DataType": "String", "StringValue": product},
        "datetime": {"DataType": "String", "StringValue": date_time},
        **(
            {"maturity": {"DataType": "String", "StringValue": maturity}}
            if maturity
            else {}
        ),
    }
def publish_to_topic(arn: str, action: str, stac: dict):
    """
    Publish 'added' or 'archived' action to the provided sns topic

    :param arn: ARN of the target SNS topic
    :param action: event type recorded in the message attributes
    :param stac: STAC item, serialized to JSON as the message body
    """
    sns = boto3.client("sns")
    attrs = capture_attributes(action, stac)
    sns.publish(
        TopicArn=arn,
        Message=json.dumps(stac),
        MessageAttributes=attrs,
    )
|
1,281 |
prepare git
|
import salt.exceptions
import logging
import os
from tempfile import mkdtemp
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
log = logging.getLogger(__name__)
# valid prefixes taken from Docker-CE to be compatible
valid_git_prefixes = ['http://', 'https://', 'git://', 'github.com/', 'git@']
valid_url_prefixes = ['http://', 'https://']
valid_url_suffixes = ['.tar.gz', '.tar.xz', '.tar.bz2', '.tgz', '.tar']
def _isLocal(source):
    # A source counts as local when it is an existing directory on the minion.
    return __salt__['file.directory_exists'](source)
def _isGit(source):
    '''
    Return True when `source` looks like a git repository reference
    (one of the Docker-compatible prefixes in valid_git_prefixes).
    '''
    # str.startswith accepts a tuple of prefixes - one call instead of a loop.
    return source.startswith(tuple(valid_git_prefixes))
def _isTarball(source):
    '''
    Return True when `source` is an HTTP(S) URL pointing at a tarball,
    i.e. it has a valid URL prefix and a recognized archive suffix.
    '''
    # startswith/endswith both accept tuples, replacing the two manual loops.
    return (source.startswith(tuple(valid_url_prefixes))
            and source.endswith(tuple(valid_url_suffixes)))
def _prepareDestDir(dest):
    '''
    Ensure the target directory does not exist yet.

    Raises a SaltException when `dest` is already present, since sources
    must always be prepared into a fresh directory.
    '''
    if os.path.isdir(dest):
        raise salt.exceptions.SaltException('Working directory "{0}" exists before sources are prepared'.format(dest))
def _prepareLocal(source, dest):
    '''
    Make link from `source` to `dest`
    '''
    log.debug('Source is local directory')
    _prepareDestDir(dest)
    # Symlink instead of copying: the local tree is used in place.
    __salt__['file.symlink'](source, dest)
    return dest
def _prepareHTTP(source, dest):
    '''
    Download tarball and extract to the directory
    '''
    log.debug('Source is HTTP')
    _prepareDestDir(dest)
    filename = os.path.join(dest, source.split("/")[-1])
    # Download the archive; skip_verify because no checksum is available.
    res = __salt__['state.single']('file.managed', filename, source=source, makedirs=True, skip_verify=True)
    for s, r in list(res.items()):
        if not r['result']:
            raise salt.exceptions.SaltException(r['comment'])
    # Extract the archive into the same directory, overwriting stale files.
    res = __salt__['state.single']('archive.extracted', name=dest, source=filename, skip_verify=True, overwrite=True)
    for s, r in list(res.items()):
        if not r['result']:
            raise salt.exceptions.SaltException(r['comment'])
    return dest
def METHOD_NAME(source, dest, root):
    '''
    Check out a git repository (optionally a single subtree) into `dest`.

    source -- git URI, optionally with a `#revision[:subdirectory]` fragment
    dest -- final path the prepared sources are linked to
    root -- build root in which the temporary checkout is created
    '''
    _prepareDestDir(dest)
    # checkout git into temporary directory in our build root
    # this is needed if we are interested only in git subtree
    tmpdir = __salt__['temp.dir'](parent=root)
    rev = 'master'
    subdir = None
    url = None
    # parse git uri - i.e. [email protected]/repo/#rev:sub
    # compatible with docker as per https://docs.docker.com/engine/reference/commandline/build/#git-repositories
    try:
        url, fragment = source.split('#', 1)
        try:
            rev, subdir = fragment.split(':', 1)
        except ValueError:
            # no ':' in the fragment - it is a bare revision
            rev = fragment
    except ValueError:
        # no '#' in the source - plain URL, keep default revision
        url = source
    # omitted rev means default 'master' branch revision
    if rev == '':
        rev = 'master'
    log.debug('GIT URL: {0}, Revision: {1}, subdir: {2}'.format(url, rev, subdir))
    __salt__['git.init'](tmpdir)
    __salt__['git.remote_set'](tmpdir, url)
    __salt__['git.fetch'](tmpdir)
    __salt__['git.checkout'](tmpdir, rev=rev)
    if subdir:
        if _isLocal(os.path.join(tmpdir, subdir)):
            __salt__['file.symlink'](os.path.join(tmpdir, subdir), dest)
        else:
            raise salt.exceptions.SaltException('Directory is not present in checked out source: {}'.format(subdir))
    else:
        __salt__['file.symlink'](tmpdir, dest)
    return dest
def prepare_source(source, root):
    '''
    Prepare source directory based on different source types.

    source -- string with either local directory path, remote http(s) archive or git repository
    root -- local directory where to store processed source files

    For git repository following format is understood:

    [http[s]://|git://][user@]hostname/repository[#revision[:subdirectory]]
    '''
    dest = os.path.join(root, 'source')
    log.debug('Preparing build source for {0} to {1}'.format(source, dest))
    # Dispatch on source type; a local directory wins over URL-like strings.
    if _isLocal(source):
        return _prepareLocal(source, dest)
    elif _isTarball(source):
        return _prepareHTTP(source, dest)
    elif _isGit(source):
        return METHOD_NAME(source, dest, root)
    else:
        raise salt.exceptions.SaltException('Unknown source format "{0}"'.format(source))
|
1,282 |
html zip
|
# SPDX-FileCopyrightText: Florian Bruhin (The Compiler) <[email protected]>
#
# SPDX-License-Identifier: GPL-3.0-or-later
"""Tests for qutebrowser.utils.resources."""
import os.path
import zipfile
import pytest
from qutebrowser.utils import utils, resources
@pytest.mark.usefixtures('freezer')
class TestReadFile:
    """Tests for reading bundled resource files, parametrized over an
    on-disk package layout and a zipped one."""

    @pytest.fixture
    def package_path(self, tmp_path):
        # Fake package root mirroring the real 'qutebrowser' package dir.
        return tmp_path / 'qutebrowser'

    @pytest.fixture
    def html_path(self, package_path):
        # Populate qutebrowser/html with matching and non-matching files so
        # the glob tests can discriminate by suffix and directory.
        path = package_path / 'html'
        path.mkdir(parents=True)
        for filename in ['test1.html', 'test2.html', 'README', 'unrelatedhtml']:
            (path / filename).touch()
        subdir = path / 'subdir'
        subdir.mkdir()
        (subdir / 'subdir-file.html').touch()
        return path

    @pytest.fixture
    def METHOD_NAME(self, tmp_path, html_path):
        # Same tree as html_path, but packaged inside a zip file.
        if not hasattr(zipfile, 'Path'):
            pytest.skip("Needs zipfile.Path")
        zip_path = tmp_path / 'qutebrowser.zip'
        with zipfile.ZipFile(zip_path, 'w') as zf:
            for path in html_path.rglob('*'):
                zf.write(path, path.relative_to(tmp_path))
            # Sanity-check the archive layout before handing it out.
            assert sorted(zf.namelist()) == [
                'qutebrowser/html/README',
                'qutebrowser/html/subdir/',
                'qutebrowser/html/subdir/subdir-file.html',
                'qutebrowser/html/test1.html',
                'qutebrowser/html/test2.html',
                'qutebrowser/html/unrelatedhtml',
            ]
        return zipfile.Path(zip_path) / 'qutebrowser'

    @pytest.fixture(params=['pathlib', 'zipfile'])
    def resource_root(self, request):
        """Resource files packaged either directly or via a zip."""
        if request.param == 'pathlib':
            # html_path is requested for its side effect of creating files.
            request.getfixturevalue('html_path')
            return request.getfixturevalue('package_path')
        elif request.param == 'zipfile':
            return request.getfixturevalue('html_zip')
        raise utils.Unreachable(request.param)

    def test_glob_resources(self, resource_root):
        # Only .html files directly in html/ should match.
        files = sorted(resources._glob(resource_root, 'html', '.html'))
        assert files == ['html/test1.html', 'html/test2.html']

    def test_glob_resources_subdir(self, resource_root):
        files = sorted(resources._glob(resource_root, 'html/subdir', '.html'))
        assert files == ['html/subdir/subdir-file.html']

    def test_readfile(self):
        """Read a test file."""
        content = resources.read_file(os.path.join('utils', 'testfile'))
        assert content.splitlines()[0] == "Hello World!"

    @pytest.mark.parametrize('filename', ['javascript/scroll.js',
                                          'html/error.html'])
    def test_read_cached_file(self, mocker, filename):
        # After preload(), reads must be served from cache without touching
        # importlib_resources again.
        resources.preload()
        m = mocker.patch('qutebrowser.utils.resources.importlib_resources.files')
        resources.read_file(filename)
        m.assert_not_called()

    def test_readfile_binary(self):
        """Read a test file in binary mode."""
        content = resources.read_file_binary(os.path.join('utils', 'testfile'))
        assert content.splitlines()[0] == b"Hello World!"

    @pytest.mark.parametrize('name', ['read_file', 'read_file_binary'])
    @pytest.mark.parametrize('fake_exception', [KeyError, FileNotFoundError, None])
    def test_not_found(self, name, fake_exception, monkeypatch):
        """Test behavior when a resources file wasn't found.

        With fake_exception, we emulate the rather odd error handling of certain Python
        versions: https://bugs.python.org/issue43063
        """
        class BrokenFileFake:
            # Mimics an importlib resource handle that always fails to read.
            def __init__(self, exc):
                self.exc = exc

            def read_bytes(self):
                raise self.exc("File does not exist")

            def read_text(self, encoding):
                raise self.exc("File does not exist")

            def __truediv__(self, _other):
                return self

        if fake_exception is not None:
            monkeypatch.setattr(resources.importlib_resources, 'files',
                                lambda _pkg: BrokenFileFake(fake_exception))
        meth = getattr(resources, name)
        with pytest.raises(FileNotFoundError):
            meth('doesnotexist')
|
1,283 |
render mc2
|
import abc
from _typeshed import Incomplete
from typing import Any, ClassVar
from typing_extensions import Self
from passlib.ifc import PasswordHash
from passlib.utils.binary import BASE64_CHARS, HASH64_CHARS, LOWER_HEX_CHARS, PADDED_BASE64_CHARS, UPPER_HEX_CHARS
# Backwards-compatible aliases for the character-set constants re-exported
# from passlib.utils.binary.
H64_CHARS = HASH64_CHARS
B64_CHARS = BASE64_CHARS
PADDED_B64_CHARS = PADDED_BASE64_CHARS
UC_HEX_CHARS = UPPER_HEX_CHARS
LC_HEX_CHARS = LOWER_HEX_CHARS

# Stubs for the helpers that parse / render modular-crypt-format hashes with
# two ("mc2") or three ("mc3") separator-delimited fields.
def parse_mc2(hash, prefix, sep="$", handler: Incomplete | None = None): ...
def parse_mc3(
    hash, prefix, sep="$", rounds_base: int = 10, default_rounds: Incomplete | None = None, handler: Incomplete | None = None
): ...
def METHOD_NAME(ident, salt, checksum, sep="$"): ...
def render_mc3(ident, rounds, salt, checksum, sep="$", rounds_base: int = 10): ...
class MinimalHandler(PasswordHash, metaclass=abc.ABCMeta):
    # Base class adding the `using()` customization hook on top of PasswordHash.
    @classmethod
    def using(cls, relaxed: bool = False) -> type[Self]: ...  # type: ignore[override]

class TruncateMixin(MinimalHandler, metaclass=abc.ABCMeta):
    # Mixin for handlers that truncate over-long secrets.
    truncate_error: ClassVar[bool]
    truncate_verify_reject: ClassVar[bool]
    @classmethod
    def using(cls, truncate_error: object = None, *, relaxed: bool = ...) -> type[Self]: ...  # type: ignore[override]
class GenericHandler(MinimalHandler):
    # Common framework shared by most hash handlers: checksum storage and
    # validation plus the hash/verify/genconfig/genhash front-end methods.
    setting_kwds: ClassVar[tuple[str, ...]]
    context_kwds: ClassVar[tuple[str, ...]]
    ident: ClassVar[str | None]
    checksum_size: ClassVar[int | None]
    checksum_chars: ClassVar[str | None]
    checksum: str | None
    use_defaults: bool
    def __init__(self, checksum: str | None = None, use_defaults: bool = False) -> None: ...
    @classmethod
    def identify(cls, hash: str | bytes) -> bool: ...
    @classmethod
    def from_string(cls, hash: str | bytes, **context: Any) -> Self: ...
    def to_string(self) -> str: ...
    @classmethod
    def hash(cls, secret: str | bytes, **kwds: Any) -> str: ...
    @classmethod
    def verify(cls, secret: str | bytes, hash: str | bytes, **context: Any) -> bool: ...
    @classmethod
    def genconfig(cls, **kwds: Any) -> str: ...
    @classmethod
    def genhash(cls, secret: str | bytes, config: str, **context: Any) -> str: ...
    @classmethod
    def needs_update(cls, hash: str | bytes, secret: str | bytes | None = None, **kwds: Any) -> bool: ...
    @classmethod
    def parsehash(cls, hash: str | bytes, checksum: bool = True, sanitize: bool = False) -> dict[str, Any]: ...
    @classmethod
    def bitsize(cls, **kwds: Any) -> dict[str, Any]: ...
class StaticHandler(GenericHandler):
    # Handler for hashes that take no settings (no salt, no rounds).
    setting_kwds: ClassVar[tuple[str, ...]]

class HasEncodingContext(GenericHandler):
    # Mixin adding an 'encoding' context keyword.
    default_encoding: ClassVar[str]
    encoding: str
    def __init__(self, encoding: str | None = None, **kwds) -> None: ...

class HasUserContext(GenericHandler):
    # Mixin for hashes whose digest depends on the account name.
    user: Incomplete | None
    def __init__(self, user: Incomplete | None = None, **kwds) -> None: ...
    @classmethod
    def hash(cls, secret, user: Incomplete | None = None, **context): ...
    @classmethod
    def verify(cls, secret, hash, user: Incomplete | None = None, **context): ...
    @classmethod
    def genhash(cls, secret, config, user: Incomplete | None = None, **context): ...

class HasRawChecksum(GenericHandler): ...

class HasManyIdents(GenericHandler):
    # Mixin for hashes that accept multiple identifier prefixes.
    default_ident: ClassVar[str | None]
    ident_values: ClassVar[tuple[str, ...] | None]
    ident_aliases: ClassVar[dict[str, str] | None]
    ident: str  # type: ignore[misc]
    @classmethod
    def using(cls, default_ident: Incomplete | None = None, ident: Incomplete | None = None, **kwds): ...  # type: ignore[override]
    def __init__(self, ident: Incomplete | None = None, **kwds) -> None: ...
class HasSalt(GenericHandler):
    # Mixin managing a salt setting and its size/charset constraints.
    min_salt_size: ClassVar[int]
    max_salt_size: ClassVar[int | None]
    salt_chars: ClassVar[str | None]
    default_salt_size: ClassVar[int | None]
    default_salt_chars: ClassVar[str | None]
    salt: str | bytes | None
    @classmethod
    def using(cls, default_salt_size: int | None = None, salt_size: int | None = None, salt: str | bytes | None = None, **kwds): ...  # type: ignore[override]
    def __init__(self, salt: str | bytes | None = None, **kwds) -> None: ...
    @classmethod
    def bitsize(cls, salt_size: int | None = None, **kwds): ...

class HasRawSalt(HasSalt):
    # Variant of HasSalt whose salt is raw bytes rather than text.
    salt_chars: ClassVar[bytes]  # type: ignore[assignment]

class HasRounds(GenericHandler):
    # Mixin managing a variable cost ("rounds") setting.
    min_rounds: ClassVar[int]
    max_rounds: ClassVar[int | None]
    rounds_cost: ClassVar[str]
    using_rounds_kwds: ClassVar[tuple[str, ...]]
    min_desired_rounds: ClassVar[int | None]
    max_desired_rounds: ClassVar[int | None]
    default_rounds: ClassVar[int | None]
    vary_rounds: ClassVar[Incomplete | None]
    rounds: int
    @classmethod
    def using(  # type: ignore[override]
        cls,
        min_desired_rounds: Incomplete | None = None,
        max_desired_rounds: Incomplete | None = None,
        default_rounds: Incomplete | None = None,
        vary_rounds: Incomplete | None = None,
        min_rounds: Incomplete | None = None,
        max_rounds: Incomplete | None = None,
        rounds: Incomplete | None = None,
        **kwds,
    ): ...
    def __init__(self, rounds: Incomplete | None = None, **kwds) -> None: ...
    @classmethod
    def bitsize(cls, rounds: Incomplete | None = None, vary_rounds: float = 0.1, **kwds): ...

class ParallelismMixin(GenericHandler):
    # Mixin adding a 'parallelism' setting (e.g. argon2, scrypt).
    parallelism: int
    @classmethod
    def using(cls, parallelism: Incomplete | None = None, **kwds): ...  # type: ignore[override]
    def __init__(self, parallelism: Incomplete | None = None, **kwds) -> None: ...
class BackendMixin(PasswordHash, metaclass=abc.ABCMeta):
    # Mixin managing selectable computation backends for a handler.
    backends: ClassVar[tuple[str, ...] | None]
    @classmethod
    def get_backend(cls): ...
    @classmethod
    def has_backend(cls, name: str = "any") -> bool: ...
    @classmethod
    def set_backend(cls, name: str = "any", dryrun: bool = False): ...

class SubclassBackendMixin(BackendMixin, metaclass=abc.ABCMeta): ...
class HasManyBackends(BackendMixin, GenericHandler): ...
class PrefixWrapper:
    # Wraps another handler, translating between an outer prefix and the
    # wrapped handler's original prefix on all hashes.
    name: Any
    prefix: Any
    orig_prefix: Any
    __doc__: Any
    def __init__(
        self,
        name,
        wrapped,
        prefix="",
        orig_prefix="",
        lazy: bool = False,
        doc: Incomplete | None = None,
        ident: Incomplete | None = None,
    ) -> None: ...
    @property
    def wrapped(self): ...
    @property
    def ident(self): ...
    @property
    def ident_values(self): ...
    def __dir__(self): ...
    def __getattr__(self, attr: str): ...
    def __setattr__(self, attr: str, value) -> None: ...
    def using(self, **kwds): ...
    def needs_update(self, hash, **kwds): ...
    def identify(self, hash): ...
    def genconfig(self, **kwds): ...
    def genhash(self, secret, config, **kwds): ...
    def encrypt(self, secret, **kwds): ...
    def hash(self, secret, **kwds): ...
    def verify(self, secret, hash, **kwds): ...
|
1,284 |
payload image
|
import json
import cv2
import base64
import threading
import time
from datetime import datetime
from websocket_server import WebsocketServer
# Graphical User Interface Class
class GUI:
    """Streams camera images to a browser client over a websocket.

    Two independent image channels are exposed (main + left); each frame is
    JPEG-encoded, base64-wrapped and shipped inside a JSON payload.
    """

    # Initialization function
    # The actual initialization
    def __init__(self, host):
        # Server thread is created first but started at the end, after all
        # shared state has been initialized.
        t = threading.Thread(target=self.run_server)
        self.payload = {'image': ''}
        self.left_payload = {'image': ''}
        self.server = None
        self.client = None
        self.host = host
        # Image variables
        self.image_to_be_shown = None
        self.image_to_be_shown_updated = False
        self.image_show_lock = threading.Lock()
        self.left_image_to_be_shown = None
        self.left_image_to_be_shown_updated = False
        self.left_image_show_lock = threading.Lock()
        self.acknowledge = False
        self.acknowledge_lock = threading.Lock()
        # Take the console object to set the same websocket and client
        t.start()

    # Explicit initialization function
    # Class method, so user can call it without instantiation
    @classmethod
    def initGUI(cls, host):
        # self.payload = {'image': '', 'shape': []}
        new_instance = cls(host)
        return new_instance

    # Function to prepare image payload
    # Encodes the image as a JSON string and sends through the WS
    def METHOD_NAME(self):
        # Snapshot shared state under the lock, then encode outside it.
        self.image_show_lock.acquire()
        image_to_be_shown_updated = self.image_to_be_shown_updated
        image_to_be_shown = self.image_to_be_shown
        self.image_show_lock.release()
        image = image_to_be_shown
        payload = {'image': '', 'shape': ''}
        # Nothing new to send: return an empty payload.
        if not image_to_be_shown_updated:
            return payload
        shape = image.shape
        frame = cv2.imencode('.JPEG', image)[1]
        encoded_image = base64.b64encode(frame)
        payload['image'] = encoded_image.decode('utf-8')
        payload['shape'] = shape
        # Mark the frame as consumed so it is sent only once.
        self.image_show_lock.acquire()
        self.image_to_be_shown_updated = False
        self.image_show_lock.release()
        return payload

    # Function to prepare image payload
    # Encodes the image as a JSON string and sends through the WS
    def payloadLeftImage(self):
        # Same protocol as the main channel, using the left-image state.
        self.left_image_show_lock.acquire()
        left_image_to_be_shown_updated = self.left_image_to_be_shown_updated
        left_image_to_be_shown = self.left_image_to_be_shown
        self.left_image_show_lock.release()
        image = left_image_to_be_shown
        payload = {'image': '', 'shape': ''}
        if not left_image_to_be_shown_updated:
            return payload
        shape = image.shape
        frame = cv2.imencode('.JPEG', image)[1]
        encoded_image = base64.b64encode(frame)
        payload['image'] = encoded_image.decode('utf-8')
        payload['shape'] = shape
        self.left_image_show_lock.acquire()
        self.left_image_to_be_shown_updated = False
        self.left_image_show_lock.release()
        return payload

    # Function for student to call
    def showImage(self, image):
        self.image_show_lock.acquire()
        self.image_to_be_shown = image
        self.image_to_be_shown_updated = True
        self.image_show_lock.release()

    # Function for student to call
    def showLeftImage(self, image):
        self.left_image_show_lock.acquire()
        self.left_image_to_be_shown = image
        self.left_image_to_be_shown_updated = True
        self.left_image_show_lock.release()

    # Function to get the client
    # Called when a new client is received
    def get_client(self, client, server):
        self.client = client

    # Function to get value of Acknowledge
    def get_acknowledge(self):
        self.acknowledge_lock.acquire()
        acknowledge = self.acknowledge
        self.acknowledge_lock.release()
        return acknowledge

    # Function to get value of Acknowledge
    def set_acknowledge(self, value):
        self.acknowledge_lock.acquire()
        self.acknowledge = value
        self.acknowledge_lock.release()

    # Update the gui
    def update_gui(self):
        # Payload Image Message
        payload = self.METHOD_NAME()
        self.payload["image"] = json.dumps(payload)
        message = "#gui" + json.dumps(self.payload)
        self.server.send_message(self.client, message)

        # Payload Left Image Message
        left_payload = self.payloadLeftImage()
        self.left_payload["image"] = json.dumps(left_payload)
        message = "#gul" + json.dumps(self.left_payload)
        self.server.send_message(self.client, message)

    # Function to read the message from websocket
    # Gets called when there is an incoming message from the client
    def get_message(self, client, server, message):
        # Acknowledge Message for GUI Thread
        if message[:4] == "#ack":
            self.set_acknowledge(True)

    # Activate the server
    def run_server(self):
        self.server = WebsocketServer(port=2303, host=self.host)
        self.server.set_fn_new_client(self.get_client)
        self.server.set_fn_message_received(self.get_message)

        # Write a readiness marker file; retry until the path is writable.
        logged = False
        while not logged:
            try:
                f = open("/ws_gui.log", "w")
                f.write("websocket_gui=ready")
                f.close()
                logged = True
            except:
                time.sleep(0.1)

        self.server.run_forever()

    # Function to reset
    def reset_gui(self):
        # No per-exercise state to reset for this GUI.
        pass
# This class decouples the user thread
# and the GUI update thread
class ThreadGUI:
    """Runs the GUI update loop (and its frequency measurement) in
    dedicated threads, decoupled from the user's code."""

    def __init__(self, gui):
        self.gui = gui

        # Time variables (milliseconds per GUI update cycle)
        self.ideal_cycle = 80
        self.measured_cycle = 80
        self.iteration_counter = 0

    # Function to start the execution of threads
    def start(self):
        # NOTE(review): this assignment shadows the measure_thread METHOD
        # with a Thread object; it works because the target reference is
        # captured first, but the method becomes unreachable afterwards.
        self.measure_thread = threading.Thread(target=self.measure_thread)
        self.thread = threading.Thread(target=self.run)
        self.measure_thread.start()
        self.thread.start()
        print("GUI Thread Started!")

    # The measuring thread to measure frequency
    def measure_thread(self):
        # Busy-wait until a websocket client connects.
        while self.gui.client is None:
            pass

        previous_time = datetime.now()
        while True:
            # Sleep for 2 seconds
            time.sleep(2)

            # Measure the current time and subtract from previous time to get real time interval
            current_time = datetime.now()
            dt = current_time - previous_time
            ms = (dt.days * 24 * 60 * 60 + dt.seconds) * 1000 + dt.microseconds / 1000.0
            previous_time = current_time

            # Get the time period
            try:
                # Division by zero
                self.measured_cycle = ms / self.iteration_counter
            except:
                self.measured_cycle = 0

            # Reset the counter
            self.iteration_counter = 0

    # The main thread of execution
    def run(self):
        # Busy-wait until a websocket client connects.
        while self.gui.client is None:
            pass

        while True:
            start_time = datetime.now()

            self.gui.update_gui()
            # Spin until the client acknowledges the previous frame.
            acknowledge_message = self.gui.get_acknowledge()
            while not acknowledge_message:
                acknowledge_message = self.gui.get_acknowledge()
            self.gui.set_acknowledge(False)

            finish_time = datetime.now()
            self.iteration_counter = self.iteration_counter + 1

            # Throttle to the ideal cycle time when the update was faster.
            dt = finish_time - start_time
            ms = (dt.days * 24 * 60 * 60 + dt.seconds) * 1000 + dt.microseconds / 1000.0
            if ms < self.ideal_cycle:
                time.sleep((self.ideal_cycle - ms) / 1000.0)
|
1,285 |
te matmul
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Default legalization function for linear algebra operators."""
from tvm import topi, tir, relax, te
from ...block_builder import BlockBuilder
from ...expr import Call, Expr, Var, Tuple, TupleGetItem
from .common import register_legalize
@register_legalize("relax.matmul")
def _matmul(bb: BlockBuilder, call: Call) -> Expr:
    """Legalize relax.matmul into a TE compute with NumPy-style
    batch-dimension broadcasting and 1-D operand promotion."""
    def METHOD_NAME(a: te.Tensor, b: te.Tensor) -> te.Tensor:
        a_shape = list(a.shape)
        b_shape = list(b.shape)
        a_prepended = False
        b_appended = False
        if len(a_shape) == 1:
            # Vector lhs: promote to (1, k); the unit dim is dropped from
            # the inferred output shape.
            a_prepended = True
            a_shape.insert(0, 1)
        if len(b_shape) == 1:
            # Vector rhs: promote to (k, 1).
            b_appended = True
            b_shape.append(1)
        # The longer-ranked operand determines how many leading batch dims
        # the shorter one lacks.
        is_a_larger = len(a_shape) > len(b_shape)
        offset = len(a_shape) - len(b_shape) if is_a_larger else len(b_shape) - len(a_shape)

        # Reuse matmul's own struct-info inference to derive the broadcast
        # output shape instead of recomputing it here.
        a_relax = relax.Var("a", relax.TensorStructInfo(a.shape))
        b_relax = relax.Var("b", relax.TensorStructInfo(b.shape))
        f_infer_sinfo = call.op.get_attr("FInferStructInfo")
        output_shape = f_infer_sinfo(relax.op.matmul(a_relax, b_relax), bb).shape

        def matmul_compute(*idx_spatial):
            # Reduction over the shared contraction axis.
            k = te.reduce_axis((0, a_shape[-1]), name="k")

            def multiply_compute(idx_reduce):
                a_indices = []
                b_indices = []

                # Leading dims present only in the longer-ranked operand.
                for i in range(offset):
                    if is_a_larger:
                        a_indices.append(idx_spatial[i])
                    else:
                        b_indices.append(idx_spatial[i])
                # Shared batch dims: index 0 on any broadcast (size-1) dim.
                for i in range(offset, len(output_shape) - (2 - a_prepended - b_appended)):
                    a_dim = a_shape[i if is_a_larger else i - offset]
                    b_dim = b_shape[i if not is_a_larger else i - offset]
                    # Statically unequal (or dynamic) dims: one side must be
                    # a broadcast dim of size 1.
                    dim_equal = a_dim == b_dim
                    if not isinstance(dim_equal, tir.IntImm) or dim_equal == 0:
                        a_dim_is_one = isinstance(a_dim, tir.IntImm) and a_dim == 1
                        b_dim_is_one = isinstance(b_dim, tir.IntImm) and b_dim == 1
                        a_indices.append(0 if a_dim_is_one else idx_spatial[i])
                        b_indices.append(0 if b_dim_is_one else idx_spatial[i])
                    else:
                        a_indices.append(idx_spatial[i])
                        b_indices.append(idx_spatial[i])

                # Matrix dims: skipped when the operand was a promoted vector.
                if not a_prepended:
                    a_indices.append(idx_spatial[-2 + b_appended])
                a_indices.append(idx_reduce)
                b_indices.append(idx_reduce)
                if not b_appended:
                    b_indices.append(idx_spatial[-1])

                # Cast operands when an explicit output dtype is requested.
                dtype = call.attrs.out_dtype
                if dtype != "":
                    return a(*a_indices).astype(dtype) * b(*b_indices).astype(dtype)
                return a(*a_indices) * b(*b_indices)

            return te.sum(multiply_compute(k), axis=k)

        return te.compute(
            output_shape,
            lambda *idx: matmul_compute(*idx),  # pylint: disable=unnecessary-lambda
            name="matmul",
        )

    return bb.call_te(METHOD_NAME, call.args[0], call.args[1], primfunc_name_hint="matmul")
@register_legalize("relax.einsum")
def _einsum(bb: BlockBuilder, call: Call) -> Expr:
    """Legalize relax.einsum by unpacking its tuple argument into topi.einsum."""
    t = call.args[0]
    n_field = len(t.struct_info.fields)
    # Chase variable bindings back to the tuple expression they were bound
    # to, so its fields can be used directly when available.
    while isinstance(t, Var):
        binding = bb.lookup_binding(t)
        if not isinstance(binding, (Tuple, Var)):
            break
        t = binding

    assert isinstance(t, (Tuple, Var))
    # If the tuple stayed opaque (still a Var), extract each field with an
    # explicit TupleGetItem.
    fields = (
        t.fields if isinstance(t, Tuple) else [bb.emit(TupleGetItem(t, i)) for i in range(n_field)]
    )
    return bb.call_te(topi.einsum, call.attrs.subscripts, *fields)
|
1,286 |
chebyshevu
|
"""Efficient functions for generating orthogonal polynomials."""
from ..core import Dummy
from ..domains import QQ, ZZ
from .constructor import construct_domain
from .polytools import Poly, PurePoly
def _jacobi(n, a, b, K):
    """Low-level implementation of Jacobi polynomials.

    Builds the degree-``n`` polynomial over domain ``K`` via the standard
    three-term recurrence, keeping only two rolling values.
    """
    ring = K.poly_ring('_0')
    x = ring._0
    j0 = ring.one
    if n < 1:
        return j0
    j1 = ((a + b + K(2))*x + (a - b))/K(2)
    for i in range(2, n + 1):
        # Recurrence coefficients for degree i.
        den = K(i)*(a + b + i)*(a + b + K(2)*i - K(2))
        f0 = (a + b + K(2)*i - K.one) * (a*a - b*b) / (K(2)*den)
        f1 = (a + b + K(2)*i - K.one) * (a + b + K(2)*i - K(2)) * (a + b + K(2)*i) / (K(2)*den)
        f2 = (a + i - K.one)*(b + i - K.one)*(a + b + K(2)*i) / den
        j0, j1 = j1, j1*f0 + j1*x*f1 - j0*f2
    return j1
def jacobi_poly(n, a, b, x=None, **args):
    """Generates Jacobi polynomial of degree `n` in `x`.

    ``a`` and ``b`` are coerced into a common field domain; pass
    ``polys=True`` to get a Poly/PurePoly instead of an expression.
    """
    if n < 0:
        raise ValueError(f"can't generate Jacobi polynomial of degree {n}")
    K, v = construct_domain([a, b], field=True)
    poly = _jacobi(int(n), v[0], v[1], K)
    if x is not None:
        poly = Poly(poly, x, domain=K)
    else:
        # No variable given: use an anonymous Dummy symbol.
        poly = PurePoly(poly, Dummy('x'), domain=K)
    if not args.get('polys', False):
        return poly.as_expr()
    return poly
def _gegenbauer(n, a, K):
    """Low-level implementation of Gegenbauer polynomials.

    Uses the three-term recurrence with two rolling values.
    """
    ring = K.poly_ring('_0')
    x = ring._0
    g0 = ring.one
    if n < 1:
        return g0
    g1 = K(2)*a*x
    for i in range(2, n + 1):
        f1 = K(2) * (i + a - K.one) / i
        f2 = (i + K(2)*a - K(2)) / i
        g0, g1 = g1, g1*x*f1 - g0*f2
    return g1
def gegenbauer_poly(n, a, x=None, **args):
    """Generates Gegenbauer polynomial of degree `n` in `x`.

    ``a`` is coerced into a field domain; pass ``polys=True`` to get a
    Poly/PurePoly instead of an expression.
    """
    if n < 0:
        raise ValueError(
            f"can't generate Gegenbauer polynomial of degree {n}")
    K, a = construct_domain(a, field=True)
    poly = _gegenbauer(int(n), a, K)
    if x is not None:
        poly = Poly(poly, x, domain=K)
    else:
        poly = PurePoly(poly, Dummy('x'), domain=K)
    if not args.get('polys', False):
        return poly.as_expr()
    return poly
def _chebyshevt(n, K):
    """Low-level implementation of Chebyshev polynomials of the 1st kind.

    Recurrence: T_i = 2*x*T_{i-1} - T_{i-2}, with T_0 = 1, T_1 = x.
    """
    ring = K.poly_ring('_0')
    x = ring._0
    c0 = ring.one
    if n < 1:
        return c0
    c1 = x
    for _ in range(2, n + 1):
        a = c1*x*K(2)
        c0, c1 = c1, a - c0
    return c1
def chebyshevt_poly(n, x=None, **args):
    """Generates Chebyshev polynomial of the first kind of degree `n` in `x`.

    Pass ``polys=True`` to get a Poly/PurePoly instead of an expression.
    """
    if n < 0:
        raise ValueError(
            f"can't generate 1st kind Chebyshev polynomial of degree {n}")
    poly = _chebyshevt(int(n), ZZ)
    if x is not None:
        poly = Poly(poly, x, domain=ZZ)
    else:
        poly = PurePoly(poly, Dummy('x'), domain=ZZ)
    if not args.get('polys', False):
        return poly.as_expr()
    return poly
def METHOD_NAME(n, K):
    """Low-level implementation of Chebyshev polynomials of the 2nd kind.

    Recurrence: U_i = 2*x*U_{i-1} - U_{i-2}, with U_0 = 1, U_1 = 2*x.
    Uses two rolling values (like ``_chebyshevt``) instead of keeping the
    whole sequence in a list, so memory stays O(1) in ``n``.
    """
    ring = K.poly_ring('_0')
    x = ring._0
    c0 = ring.one
    if n < 1:
        return c0
    c1 = K(2)*x
    for _ in range(2, n + 1):
        a = c1*x*K(2)
        c0, c1 = c1, a - c0
    return c1
def chebyshevu_poly(n, x=None, **args):
    """Generates Chebyshev polynomial of the second kind of degree `n` in `x`.

    Pass ``polys=True`` to get a Poly/PurePoly instead of an expression.
    """
    if n < 0:
        raise ValueError(
            f"can't generate 2nd kind Chebyshev polynomial of degree {n}")
    poly = METHOD_NAME(int(n), ZZ)
    if x is not None:
        poly = Poly(poly, x, domain=ZZ)
    else:
        poly = PurePoly(poly, Dummy('x'), domain=ZZ)
    if not args.get('polys', False):
        return poly.as_expr()
    return poly
def _hermite(n, K):
    """Low-level implementation of Hermite polynomials.

    Recurrence: H_i = 2*(x*H_{i-1} - (i-1)*H_{i-2}), with H_0 = 1, H_1 = 2*x.
    """
    ring = K.poly_ring('_0')
    x = ring._0
    h0 = ring.one
    if n < 1:
        return h0
    h1 = K(2)*x
    for i in range(2, n + 1):
        a = h1*x
        b = h0*K(i - 1)
        h0, h1 = h1, (a - b)*K(2)
    return h1
def hermite_poly(n, x=None, **args):
    """Generates Hermite polynomial of degree `n` in `x`."""
    if n < 0:
        raise ValueError(f"can't generate Hermite polynomial of degree {n}")
    poly = _hermite(int(n), ZZ)
    # Wrap in a concrete Poly when a generator was supplied, else a PurePoly.
    wrapped = (Poly(poly, x, domain=ZZ) if x is not None
               else PurePoly(poly, Dummy('x'), domain=ZZ))
    return wrapped if args.get('polys', False) else wrapped.as_expr()
def _legendre(n, K):
    """Low-level implementation of Legendre polynomials.

    Bonnet's recurrence i*P_i = (2*i - 1)*x*P_{i-1} - (i - 1)*P_{i-2}
    over the domain ``K``, with P_0 = 1 and P_1 = x.  ``K(p, q)`` builds
    the fraction p/q, so ``K`` must be a field (QQ in practice).
    """
    ring = K.poly_ring('_0')
    x = ring._0
    l0 = ring.one  # P_0
    if n < 1:
        return l0
    l1 = x  # P_1
    for i in range(2, n + 1):
        l0, l1 = l1, l1*x*K(2*i - 1, i) - l0*K(i - 1, i)
    return l1
def legendre_poly(n, x=None, **args):
    """Generates Legendre polynomial of degree `n` in `x`."""
    if n < 0:
        raise ValueError(f"can't generate Legendre polynomial of degree {n}")
    # Legendre coefficients are rational, hence the QQ domain.
    poly = _legendre(int(n), QQ)
    wrapped = (Poly(poly, x, domain=QQ) if x is not None
               else PurePoly(poly, Dummy('x'), domain=QQ))
    return wrapped if args.get('polys', False) else wrapped.as_expr()
def _laguerre(n, alpha, K):
    """Low-level implementation of Laguerre polynomials.

    Generalized Laguerre recurrence
    i*L_i = (2*i - 1 + alpha - x)*L_{i-1} - (i - 1 + alpha)*L_{i-2},
    seeded with L_{-1} = 0 (``l0``) and L_0 = 1 (``l1``), which is why the
    loop starts at i = 1.
    """
    ring = K.poly_ring('_0')
    x = ring._0
    l0, l1 = ring.zero, ring.one
    for i in range(1, n + 1):
        l0, l1 = l1, l1*(-K.one/i*x + alpha/i + K(2*i - 1)/i) - l0*(alpha/i + K(i - 1)/i)
    return l1
def laguerre_poly(n, x=None, alpha=None, **args):
    """Generates Laguerre polynomial of degree `n` in `x`.

    ``alpha`` is the generalized-Laguerre parameter (defaults to 0 over QQ).
    """
    if n < 0:
        raise ValueError(f"can't generate Laguerre polynomial of degree {n}")
    if alpha is None:
        K, alpha = QQ, QQ(0)
    else:
        K, alpha = construct_domain(
            alpha, field=True)  # XXX: ground_field=True
    poly = _laguerre(int(n), alpha, K)
    wrapped = (Poly(poly, x, domain=K) if x is not None
               else PurePoly(poly, Dummy('x'), domain=K))
    return wrapped if args.get('polys', False) else wrapped.as_expr()
def _spherical_bessel_fn(n, K):
    """Low-level implementation of fn(n, x).

    The polynomial generator stands for 1/z, so the trailing multiplications
    by ``x`` shift every power of 1/z up by one (fn(0, z) = 1/z).
    """
    ring = K.poly_ring('_0')
    x = ring._0
    s0 = ring.one
    if n < 1:
        return s0*x  # fn(0, z) = 1/z
    s1 = x
    for i in range(2, n + 1):
        # fn(i) = (2*i - 1)/z * fn(i - 1) - fn(i - 2)
        s0, s1 = s1, s1*x*K(2*i - 1) - s0
    return s1*x
def _spherical_bessel_fn_minus(n, K):
    """Low-level implementation of fn(-n, x).

    Companion recurrence for negative orders; as in ``_spherical_bessel_fn``
    the polynomial generator stands for 1/z.
    """
    ring = K.poly_ring('_0')
    x = ring._0
    s0, s1 = x, ring.zero
    for i in range(2, n + 1):
        # coefficient (3 - 2*i)/z at each downward step
        s0, s1 = s1, s1*x*K(3 - 2*i) - s0
    return s1
def spherical_bessel_fn(n, x=None, **args):
    """
    Coefficients for the spherical Bessel functions.
    Those are only needed in the jn() function.
    The coefficients are calculated from:
    fn(0, z) = 1/z
    fn(1, z) = 1/z**2
    fn(n-1, z) + fn(n+1, z) == (2*n+1)/z * fn(n, z)
    Examples
    ========
    >>> spherical_bessel_fn(1, z)
    z**(-2)
    >>> spherical_bessel_fn(2, z)
    -1/z + 3/z**3
    >>> spherical_bessel_fn(3, z)
    -6/z**2 + 15/z**4
    >>> spherical_bessel_fn(4, z)
    1/z - 45/z**3 + 105/z**5
    """
    # Negative orders use the companion recurrence.  Note the sign test is on
    # the original ``n`` (before int() truncation), so e.g. n = -0.5 takes the
    # minus branch with order 0.
    if n < 0:
        poly = _spherical_bessel_fn_minus(-int(n), ZZ)
    else:
        poly = _spherical_bessel_fn(int(n), ZZ)
    # The generator is 1/x: the low-level polynomials are in powers of 1/z.
    if x is not None:
        poly = Poly(poly, 1/x, domain=ZZ)
    else:
        poly = PurePoly(poly, 1/Dummy('x'), domain=ZZ)
    if not args.get('polys', False):
        return poly.as_expr()
    return poly
|
1,287 |
check tensor parallel rank
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from pathlib import Path
import pytest
import torch
from colossalai import launch
from colossalai.context import reset_seeds
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.global_variables import tensor_parallel_env as tp_env
from colossalai.testing import free_port, rerun_if_address_is_in_use, spawn
CONFIG_PATH_LIST = list(Path(__file__).parent.glob('configs/*.py'))
def check_data_parallel_rank(rank):
    """Verify this process's data-parallel local rank matches its DP group index."""
    global_world_size = gpc.get_world_size(ParallelMode.GLOBAL)
    mp_size = gpc.get_world_size(ParallelMode.MODEL)
    num_dp_groups = global_world_size // mp_size
    dp_local_rank = gpc.get_local_rank(ParallelMode.DATA)
    assert gpc.get_world_size(ParallelMode.DATA) == num_dp_groups
    # Consecutive blocks of mp_size global ranks form one DP group.
    owning_group = rank // mp_size
    if owning_group < num_dp_groups:
        assert dp_local_rank == owning_group
def check_pipeline_parallel_rank(rank):
    """Verify this process's pipeline local rank matches its pipeline stage."""
    mp_world_size = gpc.get_world_size(ParallelMode.MODEL)
    tp_world_size = gpc.get_world_size(ParallelMode.TENSOR)
    num_pipeline_stage = mp_world_size // tp_world_size
    pipeline_local_rank = gpc.get_local_rank(ParallelMode.PIPELINE)
    # Each stage owns a contiguous block of tp_world_size ranks.
    owning_stage = rank // tp_world_size
    if owning_stage < num_pipeline_stage:
        assert pipeline_local_rank == owning_stage
def check_model_parallel_rank(rank):
    """The model-parallel local rank is the global rank modulo the MP group size."""
    mp_size = gpc.get_world_size(ParallelMode.MODEL)
    expected = rank % mp_size
    actual = gpc.get_local_rank(ParallelMode.MODEL)
    assert expected == actual
def METHOD_NAME(rank):
    """Dispatch to the rank check matching the configured tensor-parallel mode.

    Bug fix: the 2.5d/3d branches previously compared the ``tp_env`` object
    itself to a string (``tp_env == '2.5d'``), which is always False, so the
    2.5D and 3D checks never ran.  All branches now test ``tp_env.mode``.
    """
    if tp_env.mode == '2d':
        check_2d_tensor_parallel_rank(rank)
    elif tp_env.mode == '2.5d':
        check_2p5d_tensor_parallel_rank(rank)
    elif tp_env.mode == '3d':
        check_3d_tensor_parallel_rank(rank)
def get_tp_info():
    """Return (tp_local_rank, tp_world_size, number of tensor-parallel groups)."""
    world = gpc.get_world_size(ParallelMode.GLOBAL)
    tp_world_size = gpc.get_world_size(ParallelMode.TENSOR)
    tp_local_rank = gpc.get_local_rank(ParallelMode.TENSOR)
    return tp_local_rank, tp_world_size, world // tp_world_size
def check_2d_tensor_parallel_rank(rank):
    """Check row/column local ranks for the 2D (SUMMA) tensor-parallel layout."""
    tp_local_rank, tp_world_size, num_tp_groups = get_tp_info()
    for group_id in range(num_tp_groups):
        ranks_in_current_tp_group = range(group_id * tp_world_size, (group_id + 1) * tp_world_size)
        if rank in ranks_in_current_tp_group:
            col_local_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL)
            row_local_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2D_ROW)
            # TP ranks are laid out on a summa_dim x summa_dim grid:
            # quotient selects the column group, remainder the row group.
            assert col_local_rank == tp_local_rank // tp_env.summa_dim
            assert row_local_rank == tp_local_rank % tp_env.summa_dim
def check_2p5d_tensor_parallel_rank(rank):
    """Check row/column/depth/xz local ranks for the 2.5D (tesseract) layout."""
    tp_local_rank, tp_world_size, num_tp_groups = get_tp_info()
    for group_id in range(num_tp_groups):
        ranks_in_current_tp_group = range(group_id * tp_world_size, (group_id + 1) * tp_world_size)
        if rank in ranks_in_current_tp_group:
            rp_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_ROW)
            cp_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_COL)
            dp_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_DEP)
            xp_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_XZ)
            # NOTE(review): the column check divides by tesseract_dim while the
            # dep/xz checks use summa_dim — confirm this matches the tesseract
            # layout convention used by the initializer.
            assert rp_rank == tp_local_rank % tp_env.summa_dim
            assert cp_rank == tp_local_rank // tp_env.tesseract_dim
            assert dp_rank == tp_local_rank // (tp_env.summa_dim**2)
            assert xp_rank == tp_local_rank // tp_env.summa_dim
def check_3d_tensor_parallel_rank(rank):
    """Check input/weight/output local ranks for the 3D tensor-parallel layout."""
    tp_local_rank, tp_world_size, num_tp_groups = get_tp_info()
    for group_id in range(num_tp_groups):
        ranks_in_current_tp_group = range(group_id * tp_world_size, (group_id + 1) * tp_world_size)
        if rank in ranks_in_current_tp_group:
            ip_rank = gpc.get_local_rank(ParallelMode.PARALLEL_3D_INPUT)
            wp_rank = gpc.get_local_rank(ParallelMode.PARALLEL_3D_WEIGHT)
            op_rank = gpc.get_local_rank(ParallelMode.PARALLEL_3D_OUTPUT)
            # Expected ranks derive from the TP rank by remainder/quotients of
            # depth_3d (the cube edge length of the 3D decomposition).
            assert ip_rank == tp_local_rank % tp_env.depth_3d
            assert wp_rank == tp_local_rank // tp_env.depth_3d
            assert op_rank == tp_local_rank // (tp_env.depth_3d**2)
def init_context(config_path, rank, world_size, backend, port, host):
    """Launch one distributed context from ``config_path``, run all rank checks,
    then tear the context down so the next config starts fresh."""
    launch(config=config_path,
           rank=rank,
           world_size=world_size,
           backend=backend,
           port=port,
           host=host,
           verbose=True)
    METHOD_NAME(rank)
    check_data_parallel_rank(rank)
    check_pipeline_parallel_rank(rank)
    check_model_parallel_rank(rank)
    gpc.destroy()
    torch.cuda.empty_cache()
def run_dist(rank, world_size, port, backend, port_list, host):
    """Exercise every config in CONFIG_PATH_LIST, one context per pre-allocated port."""
    for config_path, current_port in zip(CONFIG_PATH_LIST, port_list):
        init_context(config_path=config_path,
                     rank=rank,
                     world_size=world_size,
                     backend=backend,
                     port=current_port,
                     host=host)
        # Re-seed between configs so each context starts deterministically.
        reset_seeds()
@rerun_if_address_is_in_use()
def test_context():
    """
    As no computation or communication is done, we can run this test on CPU.
    """
    world_size = 32
    # Reserve one distinct free port per config file.
    port_list = []
    while len(port_list) < len(CONFIG_PATH_LIST):
        candidate = free_port()
        if candidate not in port_list:
            port_list.append(candidate)
    spawn(run_dist, world_size, backend='gloo', port_list=port_list, host='localhost')
# Allow running this test module directly without pytest.
if __name__ == '__main__':
    test_context()
|
1,288 |
test flow with header
|
from unittest import mock
import pytest
import requests
from settings import TEST_DATA
from suite.utils.resources_utils import ensure_response_from_backend, wait_before_test
from suite.utils.vs_vsr_resources_utils import patch_virtual_server_from_yaml
# Module-level response holders shared across the tests below; each test
# rebinds them via ``global``.  They start as Mocks only so that attribute
# assignment (status_code) works before the first real request is made.
resp_1 = mock.Mock()
resp_2 = mock.Mock()
resp_3 = mock.Mock()
def execute_assertions(resp_1, resp_2, resp_3):
    """Each response must be HTTP 200 and routed to the expected backend pod."""
    markers = ("Server name: backend1-", "Server name: backend3-", "Server name: backend4-")
    for resp, marker in zip((resp_1, resp_2, resp_3), markers):
        assert resp.status_code == 200
        assert marker in resp.text
def ensure_responses_from_backends(req_url, host) -> None:
    """Wait until the service answers for every routing-header variant used below."""
    for headers in ({"x-version": "future"},
                    {"x-version": "deprecated"},
                    {"x-version-invalid": "deprecated"}):
        ensure_response_from_backend(req_url, host, headers)
@pytest.mark.vs
@pytest.mark.smoke
@pytest.mark.parametrize(
    "crd_ingress_controller, virtual_server_setup",
    [
        (
            {"type": "complete", "extra_args": [f"-enable-custom-resources"]},
            {"example": "virtual-server-advanced-routing", "app_type": "advanced-routing"},
        )
    ],
    indirect=True,
)
class TestAdvancedRouting:
    """Advanced-routing VirtualServer tests: each test patches the VS to route
    on a different request attribute (header, query argument, cookie, variable,
    complex condition) and asserts the three probe requests land on the
    expected backends.

    Each test retries the probes while all three responses are 502 (backends
    may still be coming up), mutating the module-level resp_1/resp_2/resp_3
    via ``global``.
    """

    def METHOD_NAME(self, kube_apis, crd_ingress_controller, virtual_server_setup):
        """Route on the ``x-version`` request header."""
        ensure_responses_from_backends(virtual_server_setup.backend_1_url, virtual_server_setup.vs_host)
        wait_before_test()
        global resp_1, resp_2, resp_3
        resp_1.status_code = resp_2.status_code = resp_3.status_code = 502
        while resp_1.status_code == 502 and resp_2.status_code == 502 and resp_3.status_code == 502:
            resp_1 = requests.get(
                virtual_server_setup.backend_1_url,
                headers={"host": virtual_server_setup.vs_host, "x-version": "future"},
            )
            resp_2 = requests.get(
                virtual_server_setup.backend_1_url,
                headers={"host": virtual_server_setup.vs_host, "x-version": "deprecated"},
            )
            resp_3 = requests.get(
                virtual_server_setup.backend_1_url,
                headers={"host": virtual_server_setup.vs_host, "x-version-invalid": "deprecated"},
            )
        execute_assertions(resp_1, resp_2, resp_3)

    def test_flow_with_argument(self, kube_apis, crd_ingress_controller, virtual_server_setup):
        """Route on the ``arg1`` query-string argument."""
        patch_virtual_server_from_yaml(
            kube_apis.custom_objects,
            virtual_server_setup.vs_name,
            f"{TEST_DATA}/virtual-server-advanced-routing/virtual-server-argument.yaml",
            virtual_server_setup.namespace,
        )
        ensure_response_from_backend(virtual_server_setup.backend_1_url, virtual_server_setup.vs_host)
        wait_before_test()
        global resp_1, resp_2, resp_3
        resp_1.status_code = resp_2.status_code = resp_3.status_code = 502
        while resp_1.status_code == 502 and resp_2.status_code == 502 and resp_3.status_code == 502:
            resp_1 = requests.get(
                virtual_server_setup.backend_1_url + "?arg1=v1", headers={"host": virtual_server_setup.vs_host}
            )
            resp_2 = requests.get(
                virtual_server_setup.backend_1_url + "?arg1=v2", headers={"host": virtual_server_setup.vs_host}
            )
            resp_3 = requests.get(
                virtual_server_setup.backend_1_url + "?argument1=v1", headers={"host": virtual_server_setup.vs_host}
            )
        execute_assertions(resp_1, resp_2, resp_3)

    def test_flow_with_cookie(self, kube_apis, crd_ingress_controller, virtual_server_setup):
        """Route on the ``user`` cookie value."""
        patch_virtual_server_from_yaml(
            kube_apis.custom_objects,
            virtual_server_setup.vs_name,
            f"{TEST_DATA}/virtual-server-advanced-routing/virtual-server-cookie.yaml",
            virtual_server_setup.namespace,
        )
        ensure_response_from_backend(virtual_server_setup.backend_1_url, virtual_server_setup.vs_host)
        wait_before_test()
        global resp_1, resp_2, resp_3
        resp_1.status_code = resp_2.status_code = resp_3.status_code = 502
        while resp_1.status_code == 502 and resp_2.status_code == 502 and resp_3.status_code == 502:
            resp_1 = requests.get(
                virtual_server_setup.backend_1_url,
                headers={"host": virtual_server_setup.vs_host},
                cookies={"user": "some"},
            )
            resp_2 = requests.get(
                virtual_server_setup.backend_1_url,
                headers={"host": virtual_server_setup.vs_host},
                cookies={"user": "bad"},
            )
            resp_3 = requests.get(
                virtual_server_setup.backend_1_url,
                headers={"host": virtual_server_setup.vs_host},
                cookies={"user": "anonymous"},
            )
        execute_assertions(resp_1, resp_2, resp_3)

    def test_flow_with_variable(self, kube_apis, crd_ingress_controller, virtual_server_setup):
        """Route on the ``$request_method`` variable (GET/POST/PUT)."""
        patch_virtual_server_from_yaml(
            kube_apis.custom_objects,
            virtual_server_setup.vs_name,
            f"{TEST_DATA}/virtual-server-advanced-routing/virtual-server-variable.yaml",
            virtual_server_setup.namespace,
        )
        ensure_response_from_backend(virtual_server_setup.backend_1_url, virtual_server_setup.vs_host)
        wait_before_test()
        global resp_1, resp_2, resp_3
        resp_1.status_code = resp_2.status_code = resp_3.status_code = 502
        while resp_1.status_code == 502 and resp_2.status_code == 502 and resp_3.status_code == 502:
            resp_1 = requests.get(virtual_server_setup.backend_1_url, headers={"host": virtual_server_setup.vs_host})
            resp_2 = requests.post(virtual_server_setup.backend_1_url, headers={"host": virtual_server_setup.vs_host})
            resp_3 = requests.put(virtual_server_setup.backend_1_url, headers={"host": virtual_server_setup.vs_host})
        execute_assertions(resp_1, resp_2, resp_3)

    def test_flow_with_complex_conditions(self, kube_apis, crd_ingress_controller, virtual_server_setup):
        """Route on a combination of method, argument, header and cookie."""
        patch_virtual_server_from_yaml(
            kube_apis.custom_objects,
            virtual_server_setup.vs_name,
            f"{TEST_DATA}/virtual-server-advanced-routing/virtual-server-complex.yaml",
            virtual_server_setup.namespace,
        )
        ensure_response_from_backend(virtual_server_setup.backend_1_url, virtual_server_setup.vs_host)
        wait_before_test()
        global resp_1, resp_2, resp_3
        resp_1.status_code = resp_2.status_code = resp_3.status_code = 502
        while resp_1.status_code == 502 and resp_2.status_code == 502 and resp_3.status_code == 502:
            resp_1 = requests.get(
                virtual_server_setup.backend_1_url + "?arg1=v1",
                headers={"host": virtual_server_setup.vs_host, "x-version": "future"},
                cookies={"user": "some"},
            )
            resp_2 = requests.post(
                virtual_server_setup.backend_1_url + "?arg1=v2",
                headers={"host": virtual_server_setup.vs_host, "x-version": "deprecated"},
                cookies={"user": "bad"},
            )
            resp_3 = requests.get(
                virtual_server_setup.backend_1_url + "?arg1=v2",
                headers={"host": virtual_server_setup.vs_host, "x-version": "deprecated"},
                cookies={"user": "bad"},
            )
        execute_assertions(resp_1, resp_2, resp_3)
|
1,289 |
test uninstalled non existing
|
import pytest
import salt.states.win_wusa as wusa
from salt.exceptions import SaltInvocationError
from tests.support.mock import MagicMock, patch
@pytest.fixture
def kb():
    # Representative KB article id used by every test in this module.
    return "KB123456"
@pytest.fixture
def configure_loader_modules():
    # Minimal loader dunders for the wusa state module: non-test mode and
    # the "base" salt environment.
    return {wusa: {"__opts__": {"test": False}, "__env__": "base"}}
def test_installed_no_source():
    """
    test wusa.installed without passing source
    """
    with pytest.raises(SaltInvocationError) as excinfo:
        wusa.installed(name="KB123456", source=None)
    # Bug fix: pytest's ExceptionInfo exposes the raised exception as
    # ``.value`` — ``.exception`` is the unittest API and raises
    # AttributeError here, so the original assertion could never run.
    assert excinfo.value.strerror == 'Must specify a "source" file to install'
def test_installed_existing(kb):
    """
    test wusa.installed when the kb is already installed
    """
    source = "salt://{}.msu".format(kb)
    with patch.dict(wusa.__salt__, {"wusa.is_installed": MagicMock(return_value=True)}):
        result = wusa.installed(name=kb, source=source)
    assert result == dict(
        changes={},
        comment="{} already installed".format(kb),
        name=kb,
        result=True,
    )
def test_installed_test_true(kb):
    """
    test wusa.installed with test=True
    """
    source = "salt://{}.msu".format(kb)
    mocks = {"wusa.is_installed": MagicMock(return_value=False)}
    with patch.dict(wusa.__salt__, mocks), patch.dict(wusa.__opts__, {"test": True}):
        result = wusa.installed(name=kb, source=source)
    assert result == dict(
        changes={},
        comment="{} would be installed".format(kb),
        name=kb,
        result=None,
    )
def test_installed_cache_fail(kb):
    """
    test wusa.install when it fails to cache the file
    """
    source = "salt://{}.msu".format(kb)
    mocks = {
        "wusa.is_installed": MagicMock(return_value=False),
        # Empty string from cp.cache_file signals a caching failure.
        "cp.cache_file": MagicMock(return_value=""),
    }
    with patch.dict(wusa.__salt__, mocks):
        result = wusa.installed(name=kb, source=source)
    assert result == dict(
        changes={},
        comment='Unable to cache salt://{}.msu from saltenv "base"'.format(kb),
        name=kb,
        result=False,
    )
def test_installed(kb):
    """
    test wusa.installed assuming success
    """
    source = "salt://{}.msu".format(kb)
    mocks = {
        # First call: not installed yet; second call (post-install): installed.
        "wusa.is_installed": MagicMock(side_effect=[False, True]),
        "cp.cache_file": MagicMock(return_value="C:\\{}.msu".format(kb)),
        "wusa.install": MagicMock(),
    }
    with patch.dict(wusa.__salt__, mocks):
        result = wusa.installed(name=kb, source=source)
    assert result == dict(
        changes={"new": True, "old": False},
        comment="{} was installed. ".format(kb),
        name=kb,
        result=True,
    )
def test_installed_failed(kb):
    """
    test wusa.installed with a failure
    """
    source = "salt://{}.msu".format(kb)
    mocks = {
        # Still reported as not installed after the install attempt.
        "wusa.is_installed": MagicMock(side_effect=[False, False]),
        "cp.cache_file": MagicMock(return_value="C:\\{}.msu".format(kb)),
        "wusa.install": MagicMock(),
    }
    with patch.dict(wusa.__salt__, mocks):
        result = wusa.installed(name=kb, source=source)
    assert result == dict(
        changes={},
        comment="{} failed to install. ".format(kb),
        name=kb,
        result=False,
    )
def METHOD_NAME(kb):
    """
    test wusa.uninstalled when the kb is not installed
    """
    with patch.dict(wusa.__salt__, {"wusa.is_installed": MagicMock(return_value=False)}):
        result = wusa.uninstalled(name=kb)
    assert result == dict(
        changes={},
        comment="{} already uninstalled".format(kb),
        name=kb,
        result=True,
    )
def test_uninstalled_test_true(kb):
    """
    test wusa.uninstalled with test=True
    """
    mocks = {"wusa.is_installed": MagicMock(return_value=True)}
    with patch.dict(wusa.__salt__, mocks), patch.dict(wusa.__opts__, {"test": True}):
        result = wusa.uninstalled(name=kb)
    assert result == dict(
        changes={},
        comment="{} would be uninstalled".format(kb),
        name=kb,
        result=None,
    )
def test_uninstalled(kb):
    """
    test wusa.uninstalled assuming success
    """
    mocks = {
        # Installed before the call, gone afterwards.
        "wusa.is_installed": MagicMock(side_effect=[True, False]),
        "wusa.uninstall": MagicMock(),
    }
    with patch.dict(wusa.__salt__, mocks):
        result = wusa.uninstalled(name=kb)
    assert result == dict(
        changes={"new": False, "old": True},
        comment="{} was uninstalled".format(kb),
        name=kb,
        result=True,
    )
def test_uninstalled_failed(kb):
    """
    test wusa.uninstalled with a failure
    """
    mocks = {
        # Still installed after the uninstall attempt.
        "wusa.is_installed": MagicMock(side_effect=[True, True]),
        "wusa.uninstall": MagicMock(),
    }
    with patch.dict(wusa.__salt__, mocks):
        result = wusa.uninstalled(name=kb)
    assert result == dict(
        changes={},
        comment="{} failed to uninstall".format(kb),
        name=kb,
        result=False,
    )
|
1,290 |
stack
|
"""pytorch backend implementation"""
from packaging.version import Version
import torch
if Version(torch.__version__) < Version("1.9.0"):
raise RuntimeError("DeepXDE requires PyTorch>=1.9.0.")
# To write device-agnostic (CPU or GPU) code, a common pattern is to first determine
# torch.device and then use it for all the tensors.
# https://pytorch.org/docs/stable/notes/cuda.html
# >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# >>> tensor.to(device=device)
# But, taking care of all tensors requires a lot of work.
# An alternative way is to use GPU by default if GPU is available, which is similar to
# TensorFlow.
if torch.cuda.is_available():
torch.set_default_tensor_type(torch.cuda.FloatTensor)
lib = torch
def data_type_dict():
    """Map DeepXDE dtype names onto the corresponding torch dtypes."""
    # Every supported name matches the identically-named torch attribute.
    names = ("float16", "float32", "float64", "uint8", "int8",
             "int16", "int32", "int64", "bool")
    return {name: getattr(torch, name) for name in names}
def is_gpu_available():
return torch.cuda.is_available()
def is_tensor(obj):
return torch.is_tensor(obj)
def shape(input_tensor):
return list(input_tensor.shape)
def size(tensor):
return torch.numel(tensor)
def ndim(input_tensor):
return input_tensor.dim()
def transpose(tensor, axes=None):
    """Permute the tensor's dimensions; with no ``axes``, reverse them all."""
    if axes is None:
        axes = tuple(reversed(range(tensor.dim())))
    return torch.permute(tensor, axes)
def reshape(tensor, shape):
return torch.reshape(tensor, shape)
def Variable(initial_value, dtype=None):
return torch.tensor(initial_value, dtype=dtype, requires_grad=True)
def as_tensor(data, dtype=None):
    """Convert ``data`` to a tensor, returning it unchanged when possible."""
    if not isinstance(data, torch.Tensor):
        return torch.as_tensor(data, dtype=dtype)
    # Already a tensor: only convert when a different dtype was requested.
    if dtype is not None and data.dtype != dtype:
        return data.type(dtype=dtype)
    return data
def sparse_tensor(indices, values, shape):
return torch.sparse_coo_tensor(list(zip(*indices)), values, shape, requires_grad=True)
def from_numpy(np_array):
# Both torch.from_numpy and torch.as_tensor work without memory copy.
# https://discuss.pytorch.org/t/from-numpy-vs-as-tensor/79932
# https://stackoverflow.com/questions/48482787/pytorch-memory-model-torch-from-numpy-vs-torch-tensor
# But torch.from_numpy cannot handle device.
return torch.as_tensor(np_array)
def to_numpy(input_tensor):
return input_tensor.detach().cpu().numpy()
def concat(values, axis):
return torch.cat(values, axis)
def METHOD_NAME(values, axis):
    # Stack a sequence of equal-shape tensors along a new dimension ``axis``.
    return torch.METHOD_NAME(values, axis)
def expand_dims(tensor, axis):
return torch.unsqueeze(tensor, axis)
def reverse(tensor, axis):
return torch.flip(tensor, axis)
def roll(tensor, shift, axis):
return torch.roll(tensor, shift, axis)
def lgamma(x):
return torch.lgamma(x)
def elu(x):
return torch.nn.functional.elu(x)
def relu(x):
return torch.nn.functional.relu(x)
def gelu(x):
return torch.nn.functional.gelu(x)
def selu(x):
return torch.nn.functional.selu(x)
def sigmoid(x):
return torch.nn.functional.sigmoid(x)
def silu(x):
return torch.nn.functional.silu(x)
def sin(x):
return torch.sin(x)
def cos(x):
return torch.cos(x)
def exp(x):
return torch.exp(x)
def square(x):
return torch.square(x)
# pylint: disable=redefined-builtin
def abs(x):
return torch.abs(x)
def minimum(x, y):
return torch.minimum(x, y)
def tanh(x):
return torch.tanh(x)
def pow(x, y):
return torch.pow(x, y)
def mean(input_tensor, dim, keepdims=False):
return torch.mean(input_tensor, dim, keepdim=keepdims)
def reduce_mean(input_tensor):
return torch.mean(input_tensor)
def sum(input_tensor, dim, keepdims=False):
return torch.sum(input_tensor, dim, keepdim=keepdims)
def reduce_sum(input_tensor):
return torch.sum(input_tensor)
def prod(input_tensor, dim, keepdims=False):
return torch.prod(input_tensor, dim, keepdim=keepdims)
def reduce_prod(input_tensor):
return torch.prod(input_tensor)
# pylint: disable=redefined-builtin
def min(input_tensor, dim, keepdims=False):
return torch.amin(input_tensor, dim, keepdim=keepdims)
def reduce_min(input_tensor):
return torch.min(input_tensor)
# pylint: disable=redefined-builtin
def max(input_tensor, dim, keepdims=False):
return torch.amax(input_tensor, dim, keepdim=keepdims)
def reduce_max(input_tensor):
return torch.max(input_tensor)
def norm(tensor, ord=None, axis=None, keepdims=False):
return torch.linalg.norm(tensor, ord=ord, dim=axis, keepdim=keepdims)
def zeros(shape, dtype):
return torch.zeros(shape, dtype=dtype)
def zeros_like(input_tensor):
return torch.zeros_like(input_tensor)
def matmul(x, y):
    """Matrix product of two tensors.

    Generalization: ``torch.matmul`` is used instead of ``torch.mm`` so that
    batched (>2-D) operands and broadcasting are supported; for 2-D inputs
    the result is identical to ``torch.mm``.
    """
    return torch.matmul(x, y)
def sparse_dense_matmul(x, y):
return torch.sparse.mm(x, y)
|
1,291 |
downgrade
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""refractor_alerting
Revision ID: 2e5a0ee25ed4
Revises: f80a3b88324b
Create Date: 2020-08-31 20:30:30.781478
"""
# revision identifiers, used by Alembic.
revision = "2e5a0ee25ed4"
down_revision = "f80a3b88324b"
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import mysql
def upgrade():
    """Split alerting into validator/observer/observation tables and move the
    alert SQL off the ``alerts`` table itself."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Per-alert validator configuration.
    op.create_table(
        "alert_validators",
        sa.Column("created_on", sa.DateTime(), nullable=True),
        sa.Column("changed_on", sa.DateTime(), nullable=True),
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("validator_type", sa.String(length=100), nullable=False),
        sa.Column("config", sa.Text(), nullable=True),
        sa.Column("created_by_fk", sa.Integer(), nullable=True),
        sa.Column("changed_by_fk", sa.Integer(), nullable=True),
        sa.Column("alert_id", sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(
            ["alert_id"],
            ["alerts.id"],
        ),
        sa.ForeignKeyConstraint(
            ["changed_by_fk"],
            ["ab_user.id"],
        ),
        sa.ForeignKeyConstraint(
            ["created_by_fk"],
            ["ab_user.id"],
        ),
        sa.PrimaryKeyConstraint("id"),
    )
    # The SQL each alert observes, tied to a database (replaces alerts.sql).
    op.create_table(
        "sql_observers",
        sa.Column("created_on", sa.DateTime(), nullable=True),
        sa.Column("changed_on", sa.DateTime(), nullable=True),
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("sql", sa.Text(), nullable=False),
        sa.Column("created_by_fk", sa.Integer(), nullable=True),
        sa.Column("changed_by_fk", sa.Integer(), nullable=True),
        sa.Column("alert_id", sa.Integer(), nullable=False),
        sa.Column("database_id", sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(
            ["alert_id"],
            ["alerts.id"],
        ),
        sa.ForeignKeyConstraint(
            ["changed_by_fk"],
            ["ab_user.id"],
        ),
        sa.ForeignKeyConstraint(
            ["created_by_fk"],
            ["ab_user.id"],
        ),
        sa.ForeignKeyConstraint(
            ["database_id"],
            ["dbs.id"],
        ),
        sa.PrimaryKeyConstraint("id"),
    )
    # Observed values over time for each observer.
    op.create_table(
        "sql_observations",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("dttm", sa.DateTime(), nullable=True),
        sa.Column("observer_id", sa.Integer(), nullable=False),
        sa.Column("alert_id", sa.Integer(), nullable=True),
        sa.Column("value", sa.Float(), nullable=True),
        sa.Column("error_msg", sa.String(length=500), nullable=True),
        sa.ForeignKeyConstraint(
            ["alert_id"],
            ["alerts.id"],
        ),
        sa.ForeignKeyConstraint(
            ["observer_id"],
            ["sql_observers.id"],
        ),
        sa.PrimaryKeyConstraint("id"),
    )
    op.create_index(
        op.f("ix_sql_observations_dttm"), "sql_observations", ["dttm"], unique=False
    )
    # Add audit columns to alerts and drop the columns relocated to sql_observers.
    with op.batch_alter_table("alerts") as batch_op:
        batch_op.add_column(sa.Column("changed_by_fk", sa.Integer(), nullable=True))
        batch_op.add_column(sa.Column("changed_on", sa.DateTime(), nullable=True))
        batch_op.add_column(sa.Column("created_by_fk", sa.Integer(), nullable=True))
        batch_op.add_column(sa.Column("created_on", sa.DateTime(), nullable=True))
        batch_op.alter_column(
            "crontab", existing_type=mysql.VARCHAR(length=50), nullable=False
        )
        batch_op.create_foreign_key(
            "alerts_ibfk_3", "ab_user", ["changed_by_fk"], ["id"]
        )
        batch_op.create_foreign_key(
            "alerts_ibfk_4", "ab_user", ["created_by_fk"], ["id"]
        )
        batch_op.drop_column("sql")
        batch_op.drop_column("database_id")
    # ### end Alembic commands ###
def METHOD_NAME():
    """Reverse ``upgrade``: restore alerts.sql/database_id, drop audit columns,
    then drop the three new alerting tables (dependents first)."""
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table("alerts") as batch_op:
        batch_op.add_column(
            sa.Column(
                "database_id", mysql.INTEGER(), autoincrement=False, nullable=False
            )
        )
        batch_op.add_column(sa.Column("sql", mysql.TEXT(), nullable=True))
        batch_op.drop_constraint("alerts_ibfk_3", type_="foreignkey")
        batch_op.drop_constraint("alerts_ibfk_4", type_="foreignkey")
        batch_op.alter_column(
            "crontab", existing_type=mysql.VARCHAR(length=50), nullable=True
        )
        batch_op.drop_column("created_on")
        batch_op.drop_column("created_by_fk")
        batch_op.drop_column("changed_on")
        batch_op.drop_column("changed_by_fk")
    op.drop_index(op.f("ix_sql_observations_dttm"), table_name="sql_observations")
    # Drop order respects FKs: observations -> observers -> validators.
    op.drop_table("sql_observations")
    op.drop_table("sql_observers")
    op.drop_table("alert_validators")
    # ### end Alembic commands ###
|
1,292 |
reference deployment
|
import os
import shutil
import subprocess
import pytest
import yaml
from dagster import file_relative_path
pytest_plugins = ["dagster_test.fixtures"]
@pytest.fixture
def docker_context(test_id, monkeypatch, tmpdir):
    """Create a Docker ECS local-simulation context named after ``test_id``
    and remove it after the test."""
    # Docker contexts are stored in $HOME/.docker/contexts
    # Buildkite doesn't have $HOME set by default. When it's not set,
    # `docker create context` will save the context in the current directory.
    # But `docker compose` will fail if $HOME isn't set and you try to pass a
    # context.
    monkeypatch.setenv("HOME", str(tmpdir))
    # The ECS Local Simulation context expects to mount a $HOME/.aws volume
    # when you call `docker compose up`
    (tmpdir / ".aws").mkdir()
    # Use ecs --local-simulation
    # https://docs.docker.com/cloud/ecs-integration/#local-simulation
    subprocess.call(["docker", "context", "create", "ecs", "--local-simulation", test_id])
    yield test_id
    subprocess.call(["docker", "context", "rm", test_id])
@pytest.fixture
def METHOD_NAME(tmpdir):
    """Copy the reference ECS deployment into a tmpdir and chdir into it for
    the duration of the test."""
    destination = tmpdir / "deploy_ecs"
    # Copy the reference deployment into tmpdir
    shutil.copytree(file_relative_path(__file__, ".."), destination)
    with destination.as_cwd():
        yield destination
@pytest.fixture
def source_code(METHOD_NAME, tmpdir):
    # Typically, the requirements files install from pypi. For tests, we want to
    # build from source. This fixture copies any required Dagster source code
    # into tmpdir and rewrites requirements*.txt to install from the local copy.
    python_modules = file_relative_path(__file__, "../../../python_modules/")
    libraries = file_relative_path(python_modules, "libraries/")
    modules = METHOD_NAME / "modules"
    # Rewrite every requirements*.txt file in the copied deployment.
    for path in METHOD_NAME.listdir():
        if not path.basename.startswith("requirements"):
            continue
        if not path.ext == ".txt":
            continue
        overrides = []
        for requirement in path.read().splitlines():
            # The source code lives in dagster/python_modules
            if requirement in os.listdir(python_modules):
                source = file_relative_path(python_modules, requirement)
            # The source code lives in dagster/python_modules/libraries
            elif requirement in os.listdir(libraries):
                source = file_relative_path(libraries, requirement)
            # It's not a dagster library; continue to install from pypi
            else:
                overrides.append(requirement + "\n")
                continue
            # Vendor the package and point the requirement at the local path.
            shutil.copytree(source, modules / requirement, ignore=shutil.ignore_patterns(".tox"))
            overrides.append(f"./modules/{requirement}\n")
        with open(path, "w", encoding="utf8") as f:
            f.writelines(overrides)
    return modules
@pytest.fixture
def overridden_dockerfile(source_code):
    """Rewrite the copied Dockerfile so the vendored source modules are COPYd
    into the image right after WORKDIR is set."""
    # Override Dockerfile to copy our source code into the container
    with open("Dockerfile", "r", encoding="utf8") as f:
        dockerfile = f.readlines()
    # Copy the files in directly after we set the WORKDIR
    index = dockerfile.index("WORKDIR $DAGSTER_HOME\n") + 1
    copy = ["RUN mkdir -p $DAGSTER_HOME/modules\n", "COPY modules $DAGSTER_HOME/modules\n"]
    dockerfile = dockerfile[:index] + copy + dockerfile[index:]
    with open("Dockerfile", "w", encoding="utf8") as f:
        f.writelines(dockerfile)
@pytest.fixture
def overridden_dagster_yaml(METHOD_NAME):
    # Override dagster.yaml to use DefaultRunLauncher; EcsRunLauncher can only
    # run on a real ECS cluster whereas DefaultRunLauncher can successfully run
    # end-to-end on a local ECS simulation. This is because the local ECS
    # simulation doesn't mock out the ECS API in its entirety.
    with open("dagster.yaml", "r", encoding="utf8") as f:
        config = yaml.safe_load(f)
    config["run_launcher"] = {
        "module": "dagster.core.launcher",
        "class": "DefaultRunLauncher",
    }
    with open("dagster.yaml", "w", encoding="utf8") as f:
        yaml.safe_dump(config, f)
@pytest.fixture
def docker_compose(
    source_code,
    overridden_dockerfile,
    overridden_dagster_yaml,
    docker_context,
    docker_compose_cm,
    monkeypatch,
    test_id,
):
    """Bring the overridden deployment up with docker compose in the ECS
    local-simulation context and yield the running services."""
    # docker-compose.yml expects this envvar to be set so it can tag images
    monkeypatch.setenv("REGISTRY_URL", "test")
    with docker_compose_cm(docker_context=docker_context) as docker_compose:
        yield docker_compose
@pytest.mark.xfail
def test_deploy(docker_compose, retrying_requests):
    # The webserver container should come up and answer its info endpoint.
    url = f'http://{docker_compose["webserver"]}:3000/server_info'
    assert retrying_requests.get(url).ok
|
1,293 |
test representation
|
from unittest.mock import Mock
from unittest.mock import patch
import numpy as np
import tensorflow as tf
from keras_core import backend
from keras_core import ops
from keras_core import testing
from keras_core.backend.common import keras_tensor
class KerasTensorTest(testing.TestCase):
    """Tests for the symbolic ``keras_tensor.KerasTensor`` placeholder type."""
    def test_attributes(self):
        # shape/dtype/sparse are plain attributes captured at construction.
        x = keras_tensor.KerasTensor(shape=(3,), dtype="float32", sparse=True)
        self.assertEqual(x.dtype, "float32")
        self.assertEqual(x.shape, (3,))
        self.assertEqual(x.sparse, True)
    def test_numpy_methods(self):
        # Numpy-style shape manipulation must track the symbolic shape.
        x = keras_tensor.KerasTensor(shape=(3, 2), dtype="float32")
        # reshape
        x = x.reshape((6,))
        self.assertEqual(x.shape, (6,))
        # expand_dims, squeeze
        x = ops.expand_dims(x, -1)
        self.assertEqual(x.shape, (6, 1))
        x = x.squeeze()
        self.assertEqual(x.shape, (6,))
        x = ops.expand_dims(x, axis=0)
        self.assertEqual(x.shape, (1, 6))
        x = x.squeeze(axis=0)
        self.assertEqual(x.shape, (6,))
    def test_invalid_usage(self):
        # A KerasTensor is symbolic: converting it to a concrete array in
        # numpy, JAX, or TensorFlow must raise with a descriptive message.
        x = keras_tensor.KerasTensor(shape=(3,), dtype="float32")
        with self.assertRaisesRegex(
            ValueError, "doesn't have any actual numerical value"
        ):
            np.array(x)
        if backend.backend() == "jax":
            from jax import numpy as jnp
            with self.assertRaisesRegex(
                ValueError, "cannot be used as input to a JAX function"
            ):
                jnp.array(x)
        with self.assertRaisesRegex(
            ValueError, "cannot be used as input to a TensorFlow function"
        ):
            tf.convert_to_tensor(x)
    def test_bool(self):
        # Truthiness is undefined for symbolic tensors.
        tensor = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
        with self.assertRaisesRegex(TypeError, "cannot be used as a boolean."):
            bool(tensor)
    def METHOD_NAME(self):
        # repr() should expose the symbolic shape for debugging.
        x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
        self.assertIn("<KerasTensor shape=(3, 4)", repr(x))
    def test_iterating(self):
        x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
        with self.assertRaises(NotImplementedError):
            iter(x)
    def test_any_symbolic_tensors(self):
        x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
        y = np.array([1, 2, 3])
        self.assertTrue(keras_tensor.any_symbolic_tensors(args=[x, y]))
        self.assertFalse(keras_tensor.any_symbolic_tensors(args=[y]))
    def test_is_keras_tensor(self):
        x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
        self.assertTrue(keras_tensor.is_keras_tensor(x))
        y = np.array([1, 2, 3])
        self.assertFalse(keras_tensor.is_keras_tensor(y))
    @patch("keras_core.ops.Absolute.symbolic_call")
    def test_abs_method(self, mock_symbolic_call):
        mock_tensor = Mock()
        mock_symbolic_call.return_value = mock_tensor
        x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
        abs_x = abs(x)  # this will internally call x.__abs__()
        mock_symbolic_call.assert_called_once_with(x)
        self.assertEqual(abs_x, mock_tensor)
    @patch("keras_core.ops.Negative.symbolic_call")
    def test_neg_method(self, mock_method):
        self._test_unary_op_method(mock_method, lambda x: -x)
    @patch("keras_core.ops.Subtract.symbolic_call")
    def test_sub_method(self, mock_method):
        y = Mock()
        self._test_binary_op_method(mock_method, y, lambda x, y: x - y)
    @patch("keras_core.ops.Multiply.symbolic_call")
    def test_mul_method(self, mock_method):
        y = Mock()
        self._test_binary_op_method(mock_method, y, lambda x, y: x * y)
    @patch("keras_core.ops.Matmul.symbolic_call")
    def test_matmul_method(self, mock_method):
        y = Mock()
        self._test_binary_op_method(mock_method, y, lambda x, y: x @ y)
    @patch("keras_core.ops.Power.symbolic_call")
    def test_pow_method(self, mock_method):
        y = Mock()
        self._test_binary_op_method(mock_method, y, lambda x, y: x**y)
    @patch("keras_core.ops.Mod.symbolic_call")
    def test_mod_method(self, mock_method):
        y = Mock()
        self._test_binary_op_method(mock_method, y, lambda x, y: x % y)
    @patch("keras_core.ops.Less.symbolic_call")
    def test_lt_method(self, mock_method):
        y = Mock()
        self._test_binary_op_method(mock_method, y, lambda x, y: x < y)
    @patch("keras_core.ops.LogicalAnd.symbolic_call")
    def test_and_method(self, mock_method):
        y = Mock()
        self._test_binary_op_method(mock_method, y, lambda x, y: x & y)
    @patch("keras_core.ops.LogicalOr.symbolic_call")
    def test_or_method(self, mock_method):
        y = Mock()
        self._test_binary_op_method(mock_method, y, lambda x, y: x | y)
    @patch("keras_core.ops.GetItem.symbolic_call")
    def test_getitem_method(self, mock_method):
        y = Mock()
        self._test_binary_op_method(mock_method, y, lambda x, y: x[y])
    def _test_unary_op_method(self, mock_method, operator):
        """Assert `operator(x)` dispatches to the patched symbolic_call."""
        mock_tensor = Mock()
        mock_method.return_value = mock_tensor
        x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
        result = operator(x)
        mock_method.assert_called_once_with(x)
        self.assertEqual(result, mock_tensor)
    def _test_binary_op_method(self, mock_method, other, operator):
        """Assert `operator(x, other)` dispatches to the patched symbolic_call."""
        mock_tensor = Mock()
        mock_method.return_value = mock_tensor
        x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
        result = operator(x, other)
        mock_method.assert_called_once_with(x, other)
        self.assertEqual(result, mock_tensor)
|
1,294 |
control registers
|
# Copyright 2023 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Collection, Optional, Sequence, Tuple, Union
from numpy.typing import NDArray
import attr
import cirq
from cirq._compat import cached_property
from cirq_ft import infra
from cirq_ft.algos import reflection_using_prepare, select_and_prepare
@attr.frozen(cache_hash=True)
class QubitizationWalkOperator(infra.GateWithRegisters):
    r"""Constructs a Szegedy Quantum Walk operator using LCU oracles SELECT and PREPARE.
    Constructs a Szegedy quantum walk operator $W = R_{L} . SELECT$, which is a product of
    two reflections $R_{L} = (2|L><L| - I)$ and $SELECT=\sum_{l}|l><l|H_{l}$.
    The action of $W$ partitions the Hilbert space into a direct sum of two-dimensional irreducible
    vector spaces. For an arbitrary eigenstate $|k>$ of $H$ with eigenvalue $E_k$, $|\ell>|k>$ and
    an orthogonal state $\phi_{k}$ span the irreducible two-dimensional space that $|\ell>|k>$ is
    in under the action of $W$. In this space, $W$ implements a Pauli-Y rotation by an angle of
    $-2arccos(E_{k} / \lambda)$ s.t. $W = e^{i arccos(E_k / \lambda) Y}$.
    Thus, the walk operator $W$ encodes the spectrum of $H$ as a function of eigenphases of $W$
    s.t. $spectrum(H) = \lambda cos(arg(spectrum(W)))$ where $arg(e^{i\phi}) = \phi$.
    Args:
        select: The SELECT lcu gate implementing $SELECT=\sum_{l}|l><l|H_{l}$.
        prepare: Then PREPARE lcu gate implementing
            $PREPARE|00...00> = \sum_{l=0}^{L - 1}\sqrt{\frac{w_{l}}{\lambda}} |l> = |\ell>$
        control_val: If 0/1, a controlled version of the walk operator is constructed. Defaults to
            None, in which case the resulting walk operator is not controlled.
        power: Constructs $W^{power}$ by repeatedly decomposing into `power` copies of $W$.
            Defaults to 1.
    References:
        [Encoding Electronic Spectra in Quantum Circuits with Linear T Complexity]
        (https://arxiv.org/abs/1805.03662).
            Babbush et. al. (2018). Figure 1.
    """
    select: select_and_prepare.SelectOracle
    prepare: select_and_prepare.PrepareOracle
    control_val: Optional[int] = None
    power: int = 1
    def __attrs_post_init__(self):
        # SELECT and the derived reflection must act on identical control
        # registers, otherwise the composite walk operator is ill-formed.
        assert self.select.METHOD_NAME == self.reflect.METHOD_NAME
    @cached_property
    def METHOD_NAME(self) -> infra.Registers:
        # Control registers are inherited directly from the SELECT oracle.
        return self.select.METHOD_NAME
    @cached_property
    def selection_registers(self) -> infra.SelectionRegisters:
        # Selection registers come from PREPARE (they index the LCU terms).
        return self.prepare.selection_registers
    @cached_property
    def target_registers(self) -> infra.Registers:
        # Target (system) registers come from SELECT.
        return self.select.target_registers
    @cached_property
    def registers(self) -> infra.Registers:
        # Full signature: control + selection + target, in that order.
        return infra.Registers(
            [*self.METHOD_NAME, *self.selection_registers, *self.target_registers]
        )
    @cached_property
    def reflect(self) -> reflection_using_prepare.ReflectionUsingPrepare:
        # $R_L = (2|L><L| - I)$ built from PREPARE; shares our control value.
        return reflection_using_prepare.ReflectionUsingPrepare(
            self.prepare, control_val=self.control_val
        )
    def decompose_from_registers(
        self,
        context: cirq.DecompositionContext,
        **quregs: NDArray[cirq.Qid],  # type:ignore[type-var]
    ) -> cirq.OP_TREE:
        # Each power of W is one SELECT followed by one reflection.
        select_reg = {reg.name: quregs[reg.name] for reg in self.select.registers}
        select_op = self.select.on_registers(**select_reg)
        reflect_reg = {reg.name: quregs[reg.name] for reg in self.reflect.registers}
        reflect_op = self.reflect.on_registers(**reflect_reg)
        for _ in range(self.power):
            yield select_op
            yield reflect_op
    def _circuit_diagram_info_(self, args: cirq.CircuitDiagramInfoArgs) -> cirq.CircuitDiagramInfo:
        # '@' for control-on-1, '@(0)' for control-on-0; 'W' on all other wires,
        # with the last wire annotated with the power when power != 1.
        wire_symbols = ['@' if self.control_val else '@(0)'] * self.METHOD_NAME.total_bits()
        wire_symbols += ['W'] * (self.registers.total_bits() - self.METHOD_NAME.total_bits())
        wire_symbols[-1] = f'W^{self.power}' if self.power != 1 else 'W'
        return cirq.CircuitDiagramInfo(wire_symbols=wire_symbols)
    def controlled(
        self,
        num_controls: Optional[int] = None,
        control_values: Optional[
            Union[cirq.ops.AbstractControlValues, Sequence[Union[int, Collection[int]]]]
        ] = None,
        control_qid_shape: Optional[Tuple[int, ...]] = None,
    ) -> 'QubitizationWalkOperator':
        # Only a single-qubit control on an as-yet-uncontrolled operator is
        # supported; it is implemented by controlling the SELECT oracle.
        if num_controls is None:
            num_controls = 1
        if control_values is None:
            control_values = [1] * num_controls
        if (
            isinstance(control_values, Sequence)
            and isinstance(control_values[0], int)
            and len(control_values) == 1
            and self.control_val is None
        ):
            c_select = self.select.controlled(control_values=control_values)
            assert isinstance(c_select, select_and_prepare.SelectOracle)
            return QubitizationWalkOperator(
                c_select, self.prepare, control_val=control_values[0], power=self.power
            )
        raise NotImplementedError(
            f'Cannot create a controlled version of {self} with control_values={control_values}.'
        )
    def with_power(self, new_power: int) -> 'QubitizationWalkOperator':
        """Return a copy of this walk operator with `power` replaced."""
        return QubitizationWalkOperator(
            self.select, self.prepare, control_val=self.control_val, power=new_power
        )
    def __repr__(self) -> str:
        return (
            f'cirq_ft.QubitizationWalkOperator('
            f'{self.select}, '
            f'{self.prepare}, '
            f'{self.control_val}, '
            f'{self.power})'
        )
    def __pow__(self, power: int):
        # Powers compose multiplicatively: (W^a)^b == W^(a*b).
        return self.with_power(self.power * power)
    def _t_complexity_(self):
        # For power > 1, the cost is `power` times the single-step cost;
        # the power == 1 case falls through to the default protocol.
        if self.power > 1:
            return self.power * infra.t_complexity(self.with_power(1))
        return NotImplemented
|
1,295 |
battle 0
|
from .campaign_base import CampaignBase
from module.map.map_base import CampaignMap
from module.map.map_grids import SelectedGrids, RoadGrids
from module.logger import logger
from .d1 import Config as ConfigBase
MAP = CampaignMap('D3')
MAP.shape = 'N10'
MAP.camera_data = ['G6', 'F3', 'H4']
MAP.camera_data_spawn_point = ['G8']
MAP.map_data = """
-- -- -- -- -- ME -- -- ME -- -- -- -- ++
-- -- -- ME -- -- MB MB -- -- ME -- -- --
++ -- ME -- Me ME -- -- ME Me -- ME -- --
++ -- -- ME -- -- -- -- -- -- ME -- -- --
-- -- -- -- Me MS -- -- MS Me -- -- ++ --
-- ++ -- ME ++ ++ __ __ ++ ++ ME -- -- --
-- ++ -- ME ++ ++ -- -- ++ ++ ME -- -- --
-- -- -- -- Me MS -- -- MS Me -- -- ++ ++
++ -- -- -- -- -- -- -- -- -- -- -- -- ++
-- -- -- -- -- -- SP SP -- -- -- -- -- --
"""
MAP.weight_data = """
50 50 50 50 50 50 50 50 50 50 50 50 50 50
50 50 50 50 50 50 50 50 50 50 50 50 50 50
50 50 50 50 50 50 50 50 50 50 50 50 50 50
50 50 50 50 50 50 50 50 50 50 50 50 50 50
50 50 50 50 50 50 50 50 50 50 50 50 50 50
50 50 50 50 50 50 50 50 50 50 50 50 50 50
50 50 50 50 50 50 50 50 50 50 50 50 50 50
50 50 50 50 50 50 50 50 50 50 50 50 50 50
50 50 50 50 50 50 50 50 50 50 50 50 50 50
50 50 50 50 50 50 50 50 50 50 50 50 50 50
"""
MAP.wall_data = """
· · · · · | · · · · | · · · · · ,
+-----------+ +-----------+ ,
· · | · · · | · · · · | · · · | · · ,
| | | | ,
· · | · · · | · · · · | · · · | · · ,
+---+ +---+ +----+ +---+ ,
· · · | · · · · · · · · | · · · ,
| | ,
· · · | · · · · · · · · | · · · ,
| | ,
· · · | · · · · · · · · | · · · ,
| | ,
· · · | · · · · · · · · | · · · ,
| | ,
· · · | · · · · · · · · | · · · ,
| | ,
· · · | · · · · · · · · | · · · ,
+-----------+ +-----------+ ,
· · · · · · | · · | · · · · · · ,
"""
MAP.spawn_data = [
{'battle': 0, 'enemy': 2, 'siren': 2},
{'battle': 1, 'enemy': 1},
{'battle': 2, 'enemy': 2, 'siren': 1},
{'battle': 3, 'enemy': 1},
{'battle': 4, 'enemy': 2},
{'battle': 5, 'enemy': 1},
{'battle': 6, 'boss': 1},
]
A1, B1, C1, D1, E1, F1, G1, H1, I1, J1, K1, L1, M1, N1, \
A2, B2, C2, D2, E2, F2, G2, H2, I2, J2, K2, L2, M2, N2, \
A3, B3, C3, D3, E3, F3, G3, H3, I3, J3, K3, L3, M3, N3, \
A4, B4, C4, D4, E4, F4, G4, H4, I4, J4, K4, L4, M4, N4, \
A5, B5, C5, D5, E5, F5, G5, H5, I5, J5, K5, L5, M5, N5, \
A6, B6, C6, D6, E6, F6, G6, H6, I6, J6, K6, L6, M6, N6, \
A7, B7, C7, D7, E7, F7, G7, H7, I7, J7, K7, L7, M7, N7, \
A8, B8, C8, D8, E8, F8, G8, H8, I8, J8, K8, L8, M8, N8, \
A9, B9, C9, D9, E9, F9, G9, H9, I9, J9, K9, L9, M9, N9, \
A10, B10, C10, D10, E10, F10, G10, H10, I10, J10, K10, L10, M10, N10, \
= MAP.flatten()
class Config(ConfigBase):
    """Stage-specific configuration for D3, layered on the D1 base config."""
    # ===== Start of generated config =====
    MAP_SIREN_TEMPLATE = ['SirenBoss15', 'SirenBoss16']
    MOVABLE_ENEMY_TURN = (2,)
    MAP_HAS_SIREN = True
    MAP_HAS_MOVABLE_ENEMY = True
    MAP_HAS_MAP_STORY = True
    MAP_HAS_FLEET_STEP = True
    MAP_HAS_AMBUSH = False
    MAP_HAS_MYSTERY = False
    # ===== End of generated config =====
    MAP_HAS_WALL = True
    # Swipe distance multipliers per touch backend — presumably device-tuned
    # calibration constants; confirm semantics in the base map module.
    MAP_SWIPE_MULTIPLY = (1.001, 1.020)
    MAP_SWIPE_MULTIPLY_MINITOUCH = (0.968, 0.986)
    MAP_SWIPE_MULTIPLY_MAATOUCH = (0.940, 0.957)
class Campaign(CampaignBase):
    """Battle logic for map D3.

    Defect fixed: ``METHOD_NAME`` (battle 0) and ``battle_5`` had identical,
    copy-pasted bodies; the shared logic is factored into one private helper
    so future tweaks cannot drift between the two battles. Public method
    names and behavior are unchanged.
    """
    MAP = MAP
    # Enemy-clearing priority string consumed by clear_filter_enemy.
    ENEMY_FILTER = '1L > 1M > 1E > 1C > 2L > 2M > 2E > 2C > 3L > 3M > 3E > 3C'

    def _clear_trash(self):
        """Shared step: sirens first, then filtered enemies, else default battle."""
        if self.clear_siren():
            return True
        if self.clear_filter_enemy(self.ENEMY_FILTER, preserve=0):
            return True
        return self.battle_default()

    def METHOD_NAME(self):
        """Battle 0: identical trash-clearing logic as battle 5."""
        return self._clear_trash()

    def battle_5(self):
        """Battle 5: identical trash-clearing logic as battle 0."""
        return self._clear_trash()

    def battle_6(self):
        """Boss battle: send the boss fleet."""
        return self.fleet_boss.clear_boss()
|
1,296 |
collect data
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from __future__ import annotations
from collections import defaultdict
import functools
import logging
from typing import Callable, Dict, List, Literal, Tuple, overload
import torch
from .tools import _METRICS, _MASKS, norm_metrics, generate_sparsity, is_active_target
from ..base.compressor import Compressor, Pruner
from ..base.target_space import TargetType
from ..base.wrapper import ModuleWrapper
from ..utils.docstring import _EVALUATOR_DOCSTRING
from ..utils.evaluator import Evaluator, TensorHook
_logger = logging.getLogger(__name__)
class TaylorPruner(Pruner):
    __doc__ = r"""
    Taylor pruner is a pruner which prunes on the first weight dimension,
    based on estimated importance calculated from the first order taylor expansion on weights to achieve a preset level of network sparsity.
    The estimated importance is defined as the paper `Importance Estimation for Neural Network Pruning <http://jankautz.com/publications/Importance4NNPruning_CVPR19.pdf>`__.
    :math:`\widehat{\mathcal{I}}_{\mathcal{S}}^{(1)}(\mathbf{W}) \triangleq \sum_{s \in \mathcal{S}} \mathcal{I}_{s}^{(1)}(\mathbf{W})=\sum_{s \in \mathcal{S}}\left(g_{s} w_{s}\right)^{2}`
    """ + r"""
    Parameters
    ----------
    model
        Model to be pruned.
    config_list
        A list of dict, each dict configure which module need to be pruned, and how to prune.
        Please refer :doc:`Compression Config Specification </compression/config_list>` for more information.
    evaluator
        {evaluator_docstring}
    training_steps
        The step number used to collect gradients, the masks will be generated after training_steps training.
    Examples
    --------
    Please refer to
    :githublink:`examples/compression/pruning/taylor_pruning.py <examples/compression/pruning/taylor_pruning.py>`.
    """.format(evaluator_docstring=_EVALUATOR_DOCSTRING)
    @overload
    def __init__(self, model: torch.nn.Module, config_list: List[Dict], evaluator: Evaluator, training_steps: int):
        ...
    @overload
    def __init__(self, model: torch.nn.Module, config_list: List[Dict], evaluator: Evaluator, training_steps: int,
                 existed_wrappers: Dict[str, ModuleWrapper]):
        ...
    def __init__(self, model: torch.nn.Module, config_list: List[Dict], evaluator: Evaluator, training_steps: int,
                 existed_wrappers: Dict[str, ModuleWrapper] | None = None):
        super().__init__(model=model, config_list=config_list, evaluator=evaluator,
                         existed_wrappers=existed_wrappers)
        self.evaluator: Evaluator
        self.training_steps = training_steps
        # trigger masks generation when self._current_step == self.training_steps
        self._current_step = 0
        # save all target hooks with format {module_name: {target_name: hook}}
        self.hooks: Dict[str, Dict[str, TensorHook]] = defaultdict(dict)
        # `interval_steps` and `total_times` are used by `register_trigger`.
        # `interval_steps` is the optimize step interval for generating masks.
        # `total_times` is the total generation times of masks.
        self.interval_steps = training_steps
        self.total_times: int | Literal['unlimited'] = 1
    @classmethod
    def from_compressor(cls, compressor: Compressor, new_config_list: List[Dict], training_steps: int, evaluator: Evaluator | None = None):
        """Build a TaylorPruner on top of an existing compressor's wrappers."""
        return super().from_compressor(compressor, new_config_list, training_steps=training_steps, evaluator=evaluator)
    def METHOD_NAME(self) -> Dict[str, Dict[str, torch.Tensor]]:
        """Collect accumulated (g*w)^2 buffers, averaged over training_steps."""
        data = defaultdict(dict)
        for module_name, hooks in self.hooks.items():
            for target_name, hook in hooks.items():
                if len(hook.buffer) > 0:
                    data[module_name][target_name] = hook.buffer[0] / self.training_steps
        return data
    def _calculate_metrics(self, data: Dict[str, Dict[str, torch.Tensor]]) -> _METRICS:
        # Reduce per-element importance to per-target metrics with an L1 norm.
        return norm_metrics(p=1, data=data, target_spaces=self._target_spaces)
    def _generate_sparsity(self, metrics: _METRICS) -> _MASKS:
        return generate_sparsity(metrics, self._target_spaces)
    def _register_hooks(self, evaluator: Evaluator):
        """Attach gradient hooks that accumulate (weight * grad)^2 per target."""
        def collector(buffer: List, target: torch.Tensor) -> Callable[[torch.Tensor], None]:
            # a factory function, return a tensor hook function for target
            assert len(buffer) == 0, 'Buffer pass to taylor pruner collector is not empty.'
            def collect_taylor(grad: torch.Tensor):
                if len(buffer) == 0:
                    buffer.append(torch.zeros_like(grad))
                if self._current_step < self.training_steps:
                    buffer[0] += (target.detach() * grad.detach()).pow(2)
            return collect_taylor
        hook_list = []
        for module_name, ts in self._target_spaces.items():
            for target_name, target_space in ts.items():
                if is_active_target(target_space):
                    # TODO: add input/output
                    if target_space.type is TargetType.PARAMETER:
                        assert target_space.target is not None
                        hook = TensorHook(target_space.target,
                                          target_name,
                                          functools.partial(collector, target=target_space.target))
                        hook_list.append(hook)
                        self.hooks[module_name][target_name] = hook
                    else:
                        raise NotImplementedError()
        evaluator.register_hooks(hook_list)
    def _register_trigger(self, evaluator: Evaluator):
        """Patch the optimizer step to generate masks every interval_steps."""
        assert self.interval_steps >= self.training_steps or self.interval_steps < 0
        self._remaining_times = self.total_times
        def optimizer_task():
            self._current_step += 1
            if self._current_step == self.training_steps:
                masks = self.generate_masks()
                self.update_masks(masks)
                if isinstance(self._remaining_times, int):
                    self._remaining_times -= 1
                debug_msg = f'{self.__class__.__name__} generate masks, remaining times {self._remaining_times}'
                _logger.debug(debug_msg)
            if self._current_step == self.interval_steps and \
                    (self._remaining_times == 'unlimited' or self._remaining_times > 0):  # type: ignore
                # Reset the step counter and clear buffers for the next round.
                self._current_step = 0
                for _, hooks in self.hooks.items():
                    for _, hook in hooks.items():
                        hook.buffer.clear()
        evaluator.patch_optimizer_step(before_step_tasks=[], after_step_tasks=[optimizer_task])
    def _single_compress(self, max_steps: int | None, max_epochs: int | None):
        assert max_steps is None and max_epochs is None
        self._fusion_compress(self.training_steps, None)
    def _fuse_preprocess(self, evaluator: Evaluator) -> None:
        # Install both the gradient collectors and the mask-generation trigger.
        self._register_hooks(evaluator)
        self._register_trigger(evaluator)
    def _fuse_postprocess(self, evaluator: Evaluator) -> None:
        pass
    @overload
    def compress(self) -> Tuple[torch.nn.Module, _MASKS]:
        ...
    @overload
    def compress(self, max_steps: int | None, max_epochs: int | None) -> Tuple[torch.nn.Module, _MASKS]:
        ...
    def compress(self, max_steps: int | None = None, max_epochs: int | None = None):
        """Run compression and return the (model, masks) pair."""
        return super().compress(max_steps, max_epochs)
|
1,297 |
test nacl
|
"""Tests for displacements."""
from copy import deepcopy
import numpy as np
from phonopy import Phonopy
def METHOD_NAME(ph_nacl: Phonopy):
    """Test displacements of NaCl 2x2x2."""
    saved_dataset = deepcopy(ph_nacl.dataset)
    expected = [[0, 0.01, 0.0, 0.0], [32, 0.01, 0.0, 0.0]]
    # Pre-built and freshly generated displacements must both match the reference.
    np.testing.assert_allclose(ph_nacl.displacements, expected, atol=1e-8)
    ph_nacl.generate_displacements()
    np.testing.assert_allclose(ph_nacl.displacements, expected, atol=1e-8)
    ph_nacl.dataset = saved_dataset
def test_si(ph_si: Phonopy):
    """Test displacements of Si."""
    saved_dataset = deepcopy(ph_si.dataset)
    expected = [[0, 0.0, 0.0070710678118655, 0.0070710678118655]]
    # Pre-built and freshly generated displacements must both match the reference.
    np.testing.assert_allclose(ph_si.displacements, expected, atol=1e-8)
    ph_si.generate_displacements()
    np.testing.assert_allclose(ph_si.displacements, expected, atol=1e-8)
    ph_si.dataset = saved_dataset
def test_sno2(ph_sno2: Phonopy):
    """Test displacements of SnO2."""
    # Snapshot the fixture's dataset so it can be restored at the end.
    dataset = deepcopy(ph_sno2.dataset)
    disp_ref = [
        [0, 0.01, 0.0, 0.0],
        [0, -0.01, 0.0, 0.0],
        [0, 0.0, 0.0, 0.01],
        [48, 0.01, 0.0, 0.0],
        [48, 0.0, 0.0, 0.01],
    ]
    np.testing.assert_allclose(ph_sno2.displacements, disp_ref, atol=1e-8)
    # Regenerating yields a different (symmetry-reduced) displacement set.
    ph_sno2.generate_displacements()
    disp_gen = [
        [0, 0.007032660602415084, 0.0, 0.007109267532681459],
        [0, -0.007032660602415084, 0.0, -0.007109267532681459],
        [48, 0.007032660602415084, 0.0, 0.007109267532681459],
    ]
    np.testing.assert_allclose(ph_sno2.displacements, disp_gen, atol=1e-8)
    ph_sno2.dataset = dataset
def test_tio2(ph_tio2: Phonopy):
    """Test displacements of TiO2."""
    # Snapshot the fixture's dataset so it can be restored at the end.
    dataset = deepcopy(ph_tio2.dataset)
    disp_ref = [
        [0, 0.01, 0.0, 0.0],
        [0, 0.0, 0.01, 0.0],
        [0, 0.0, 0.0, 0.01],
        [0, 0.0, 0.0, -0.01],
        [72, 0.01, 0.0, 0.0],
        [72, 0.0, 0.0, 0.01],
    ]
    np.testing.assert_allclose(ph_tio2.displacements, disp_ref, atol=1e-8)
    # Regenerating yields a different (symmetry-reduced) displacement set.
    ph_tio2.generate_displacements()
    disp_gen = [
        [0, 0.0060687317141537135, 0.0060687317141537135, 0.0051323474905008],
        [0, -0.0060687317141537135, -0.0060687317141537135, -0.0051323474905008],
        [72, 0.007635558297727332, 0.0, 0.006457418174627326],
        [72, -0.007635558297727332, 0.0, -0.006457418174627326],
    ]
    np.testing.assert_allclose(ph_tio2.displacements, disp_gen, atol=1e-8)
    ph_tio2.dataset = dataset
def test_tio2_random_disp(ph_tio2: Phonopy):
    """Test random displacements of TiO2."""
    # Snapshot the fixture's dataset so it can be restored at the end.
    dataset = deepcopy(ph_tio2.dataset)
    disp_ref = [
        [0, 0.01, 0.0, 0.0],
        [0, 0.0, 0.01, 0.0],
        [0, 0.0, 0.0, 0.01],
        [0, 0.0, 0.0, -0.01],
        [72, 0.01, 0.0, 0.0],
        [72, 0.0, 0.0, 0.01],
    ]
    np.testing.assert_allclose(ph_tio2.displacements, disp_ref, atol=1e-8)
    ph_tio2.generate_displacements(number_of_snapshots=4, distance=0.03)
    # Random directions, but every displacement vector must have norm 0.03.
    d = ph_tio2.displacements
    np.testing.assert_allclose(np.linalg.norm(d, axis=2).ravel(), 0.03, atol=1e-8)
    ph_tio2.dataset = dataset
def test_tio2_random_disp_plusminus(ph_tio2: Phonopy):
    """Test random plus-minus displacements of TiO2.
    Note
    ----
    Displacements of last 4 supercells are minus of those of first 4 supercells.
    """
    # Snapshot the fixture's dataset so it can be restored at the end.
    dataset = deepcopy(ph_tio2.dataset)
    disp_ref = [
        [0, 0.01, 0.0, 0.0],
        [0, 0.0, 0.01, 0.0],
        [0, 0.0, 0.0, 0.01],
        [0, 0.0, 0.0, -0.01],
        [72, 0.01, 0.0, 0.0],
        [72, 0.0, 0.0, 0.01],
    ]
    np.testing.assert_allclose(ph_tio2.displacements, disp_ref, atol=1e-8)
    ph_tio2.generate_displacements(
        number_of_snapshots=4, distance=0.03, is_plusminus=True
    )
    d = ph_tio2.displacements
    # Second half is the exact negation of the first half (plus-minus pairs),
    # and every displacement vector has norm 0.03.
    np.testing.assert_allclose(d[:4], -d[4:], atol=1e-8)
    np.testing.assert_allclose(np.linalg.norm(d, axis=2).ravel(), 0.03, atol=1e-8)
    ph_tio2.dataset = dataset
def test_zr3n4(ph_zr3n4: Phonopy):
    """Test displacements of Zr3N4."""
    # Snapshot the fixture's dataset so it can be restored at the end.
    dataset = deepcopy(ph_zr3n4.dataset)
    disp_ref = [
        [0, 0.01, 0.0, 0.0],
        [0, -0.01, 0.0, 0.0],
        [16, 0.01, 0.0, 0.0],
        [16, 0.0, 0.01, 0.0],
    ]
    np.testing.assert_allclose(ph_zr3n4.displacements, disp_ref, atol=1e-8)
    # Regenerating yields a different (symmetry-reduced) displacement set.
    ph_zr3n4.generate_displacements()
    disp_gen = [
        [0, 0.01, 0.0, 0.0],
        [0, -0.01, 0.0, 0.0],
        [16, 0.007071067811865475, 0.007071067811865475, 0.0],
        [16, -0.007071067811865475, -0.007071067811865475, 0.0],
    ]
    np.testing.assert_allclose(ph_zr3n4.displacements, disp_gen, atol=1e-8)
    ph_zr3n4.dataset = dataset
def test_tipn3(ph_tipn3: Phonopy):
    """Test displacements of TiPN3."""
    # Snapshot the fixture's dataset so it can be restored at the end.
    dataset = deepcopy(ph_tipn3.dataset)
    disp_ref = [
        [0, 0.01, 0.0, 0.0],
        [0, 0.0, 0.01, 0.0],
        [0, 0.0, 0.0, 0.01],
        [0, 0.0, 0.0, -0.01],
        [16, 0.01, 0.0, 0.0],
        [16, 0.0, 0.01, 0.0],
        [16, 0.0, 0.0, 0.01],
        [16, 0.0, 0.0, -0.01],
        [32, 0.01, 0.0, 0.0],
        [32, 0.0, 0.01, 0.0],
        [32, 0.0, -0.01, 0.0],
        [32, 0.0, 0.0, 0.01],
        [32, 0.0, 0.0, -0.01],
        [40, 0.01, 0.0, 0.0],
        [40, 0.0, 0.01, 0.0],
        [40, 0.0, 0.0, 0.01],
        [40, 0.0, 0.0, -0.01],
    ]
    np.testing.assert_allclose(ph_tipn3.displacements, disp_ref, atol=1e-8)
    # Regenerating yields a different (symmetry-reduced) displacement set.
    ph_tipn3.generate_displacements()
    disp_gen = [
        [0, 0.006370194270018462, 0.006021020526083804, 0.00481330829956917],
        [0, -0.006370194270018462, -0.006021020526083804, -0.00481330829956917],
        [16, 0.006370194270018462, 0.006021020526083804, 0.00481330829956917],
        [16, -0.006370194270018462, -0.006021020526083804, -0.00481330829956917],
        [32, 0.007267439570389398, 0.0068690845162028965, 0.0],
        [32, -0.007267439570389398, -0.0068690845162028965, 0.0],
        [32, 0.0, 0.0, 0.01],
        [32, 0.0, 0.0, -0.01],
        [40, 0.006370194270018462, 0.006021020526083804, 0.00481330829956917],
        [40, -0.006370194270018462, -0.006021020526083804, -0.00481330829956917],
    ]
    np.testing.assert_allclose(ph_tipn3.displacements, disp_gen, atol=1e-8)
    ph_tipn3.dataset = dataset
|
1,298 |
get private link for azure ad
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
# Public API of this generated module.
__all__ = [
    'GetPrivateLinkForAzureAdResult',
    'AwaitableGetPrivateLinkForAzureAdResult',
    'get_private_link_for_azure_ad',
    'get_private_link_for_azure_ad_output',
]
@pulumi.output_type
class GetPrivateLinkForAzureAdResult:
    """
    PrivateLink Policy configuration object.
    """
    def __init__(__self__, all_tenants=None, id=None, name=None, owner_tenant_id=None, resource_group=None, resource_name=None, subscription_id=None, tags=None, tenants=None, type=None):
        # Generated code: validate each output property's primitive type,
        # then store it via pulumi.set for the @pulumi.getter accessors below.
        if all_tenants and not isinstance(all_tenants, bool):
            raise TypeError("Expected argument 'all_tenants' to be a bool")
        pulumi.set(__self__, "all_tenants", all_tenants)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if owner_tenant_id and not isinstance(owner_tenant_id, str):
            raise TypeError("Expected argument 'owner_tenant_id' to be a str")
        pulumi.set(__self__, "owner_tenant_id", owner_tenant_id)
        if resource_group and not isinstance(resource_group, str):
            raise TypeError("Expected argument 'resource_group' to be a str")
        pulumi.set(__self__, "resource_group", resource_group)
        if resource_name and not isinstance(resource_name, str):
            raise TypeError("Expected argument 'resource_name' to be a str")
        pulumi.set(__self__, "resource_name", resource_name)
        if subscription_id and not isinstance(subscription_id, str):
            raise TypeError("Expected argument 'subscription_id' to be a str")
        pulumi.set(__self__, "subscription_id", subscription_id)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if tenants and not isinstance(tenants, list):
            raise TypeError("Expected argument 'tenants' to be a list")
        pulumi.set(__self__, "tenants", tenants)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter(name="allTenants")
    def all_tenants(self) -> Optional[bool]:
        """
        Flag indicating whether all tenants are allowed
        """
        return pulumi.get(self, "all_tenants")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        String Id used to locate any resource on Azure.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        Name of this resource.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="ownerTenantId")
    def owner_tenant_id(self) -> Optional[str]:
        """
        Guid of the owner tenant
        """
        return pulumi.get(self, "owner_tenant_id")
    @property
    @pulumi.getter(name="resourceGroup")
    def resource_group(self) -> Optional[str]:
        """
        Name of the resource group
        """
        return pulumi.get(self, "resource_group")
    @property
    @pulumi.getter(name="resourceName")
    def resource_name(self) -> Optional[str]:
        """
        Name of the private link policy resource
        """
        return pulumi.get(self, "resource_name")
    @property
    @pulumi.getter(name="subscriptionId")
    def subscription_id(self) -> Optional[str]:
        """
        Subscription Identifier
        """
        return pulumi.get(self, "subscription_id")
    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def tenants(self) -> Optional[Sequence[str]]:
        """
        The list of tenantIds.
        """
        return pulumi.get(self, "tenants")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Type of this resource.
        """
        return pulumi.get(self, "type")
class AwaitableGetPrivateLinkForAzureAdResult(GetPrivateLinkForAzureAdResult):
    """Awaitable wrapper so the result can be used with ``await`` (generated)."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The dead `yield` makes this method a generator, which is what
        # `await` requires; it resolves immediately to a plain result copy.
        if False:
            yield self
        return GetPrivateLinkForAzureAdResult(
            all_tenants=self.all_tenants,
            id=self.id,
            name=self.name,
            owner_tenant_id=self.owner_tenant_id,
            resource_group=self.resource_group,
            resource_name=self.resource_name,
            subscription_id=self.subscription_id,
            tags=self.tags,
            tenants=self.tenants,
            type=self.type)
def METHOD_NAME(policy_name: Optional[str] = None,
                resource_group_name: Optional[str] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPrivateLinkForAzureAdResult:
    """
    Gets a private link policy with a given name.
    :param str policy_name: The name of the private link policy in Azure AD.
    :param str resource_group_name: Name of an Azure resource group.
    """
    # Generated code: marshal the arguments and invoke the provider function.
    __args__ = dict()
    __args__['policyName'] = policy_name
    __args__['resourceGroupName'] = resource_group_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:aadiam/v20200301preview:getPrivateLinkForAzureAd', __args__, opts=opts, typ=GetPrivateLinkForAzureAdResult).value
    return AwaitableGetPrivateLinkForAzureAdResult(
        all_tenants=pulumi.get(__ret__, 'all_tenants'),
        id=pulumi.get(__ret__, 'id'),
        name=pulumi.get(__ret__, 'name'),
        owner_tenant_id=pulumi.get(__ret__, 'owner_tenant_id'),
        resource_group=pulumi.get(__ret__, 'resource_group'),
        resource_name=pulumi.get(__ret__, 'resource_name'),
        subscription_id=pulumi.get(__ret__, 'subscription_id'),
        tags=pulumi.get(__ret__, 'tags'),
        tenants=pulumi.get(__ret__, 'tenants'),
        type=pulumi.get(__ret__, 'type'))
# Output-typed variant: the implementation body is supplied by lift_output_func,
# which wraps the plain function above to accept/return pulumi Outputs.
@_utilities.lift_output_func(METHOD_NAME)
def get_private_link_for_azure_ad_output(policy_name: Optional[pulumi.Input[str]] = None,
                                         resource_group_name: Optional[pulumi.Input[str]] = None,
                                         opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetPrivateLinkForAzureAdResult]:
    """
    Gets a private link policy with a given name.
    :param str policy_name: The name of the private link policy in Azure AD.
    :param str resource_group_name: Name of an Azure resource group.
    """
    ...
|
1,299 |
test 02 class methods
|
"""
This test file tests the lib.tokens.yubicotoken
This depends on lib.tokenclass
"""
from .base import MyTestCase
from privacyidea.lib.tokens.yubicotoken import (YubicoTokenClass, YUBICO_URL)
from privacyidea.models import Token
import responses
import json
from privacyidea.lib.config import set_privacyidea_config
class YubicoTokenTestCase(MyTestCase):
    """
    Unit tests for the Yubico cloud token class
    (privacyidea.lib.tokens.yubicotoken).

    All HTTP traffic to the Yubico validation server is mocked with the
    ``responses`` library using the canned reply bodies below.
    """

    otppin = "topsecret"
    serial1 = "ser1"
    params1 = {"yubico.tokenid": "vvbgidlghkhgfbvetefnbrfibfctu"}

    # A validation-server reply with status=OK.  Its nonce/hash do not
    # match what the token class sends, so check_otp() is expected to
    # return -2 ("nonce and hash do not match").
    success_body = """h=Ndk+Lx678Tb4nZjPCi1+geki9vU=
t=2015-01-28T15:22:57Z0508
otp=vvbgidlghkhgndujklhhudbcuttkcklhvjktrjbfukrt
nonce=5e1cdbcbb798af7445b60376aaf2c17b2f064f41
sl=25
status=OK"""

    # A reply with status=REPLAYED_OTP, i.e. an authentication failure,
    # for which check_otp() is expected to return -1.
    fail_body = """h=3+BO86TdIuhg1gFpLj+PDyyxxu4=
t=2015-01-28T15:27:01Z0978
otp=vvbgidlghkhgndujklhhudbcuttkcklhvjktrjbfukrt
nonce=fbbfd6fead1f16372b493e7515396363cec90c00
status=REPLAYED_OTP"""

    def test_01_create_token(self):
        """A yubico token can be created and carries the right metadata."""
        db_token = Token(self.serial1, tokentype="remote")
        db_token.save()
        token = YubicoTokenClass(db_token)
        token.update(self.params1)
        token.set_pin(self.otppin)
        # assertEqual reports both values on failure, unlike
        # assertTrue(a == b), which only says "False is not true".
        self.assertEqual(token.token.serial, self.serial1)
        self.assertEqual(token.token.tokentype, "yubico")
        self.assertEqual(token.type, "yubico")
        self.assertEqual(token.get_class_prefix(), "UBCM")
        self.assertEqual(token.get_class_type(), "yubico")

    def METHOD_NAME(self):
        """get_class_info() returns the full dict or a single entry."""
        db_token = Token.query.filter(Token.serial == self.serial1).first()
        token = YubicoTokenClass(db_token)
        info = token.get_class_info()
        self.assertEqual(info.get("title"), "Yubico Token")
        info = token.get_class_info("title")
        self.assertEqual(info, "Yubico Token")

    @responses.activate
    def test_04_check_otp_success(self):
        """A status=OK reply with a mismatching nonce/hash yields -2."""
        responses.add(responses.GET, YUBICO_URL,
                      body=self.success_body)
        db_token = Token.query.filter(Token.serial == self.serial1).first()
        token = YubicoTokenClass(db_token)
        otpcount = token.check_otp("vvbgidlghkhgndujklhhudbcuttkcklhvjktrjrt")
        # Nonce and hash do not match
        self.assertEqual(otpcount, -2)

    @responses.activate
    def test_04_check_otp_success_with_post_request(self):
        """The same check works when the POST transport is configured."""
        set_privacyidea_config("yubico.do_post", True)
        responses.add(responses.POST, YUBICO_URL,
                      body=self.success_body)
        db_token = Token.query.filter(Token.serial == self.serial1).first()
        token = YubicoTokenClass(db_token)
        otpcount = token.check_otp("vvbgidlghkhgndujklhhudbcuttkcklhvjktrjrt")
        # Nonce and hash do not match
        self.assertEqual(otpcount, -2)
        # Restore the default transport so later tests are unaffected.
        set_privacyidea_config("yubico.do_post", False)

    @responses.activate
    def test_05_check_otp_fail(self):
        """A non-OK status from the server yields -1."""
        responses.add(responses.POST, YUBICO_URL,
                      body=self.fail_body)
        db_token = Token.query.filter(Token.serial == self.serial1).first()
        token = YubicoTokenClass(db_token)
        otpcount = token.check_otp("vvbgidlghkhgndujklhhudbcuttkcklhvjktrjrt")
        # Status != "OK".
        self.assertEqual(otpcount, -1)

    def test_06_check_otp_ID_too_short(self):
        """An OTP shorter than a full Yubikey OTP is rejected locally."""
        db_token = Token.query.filter(Token.serial == self.serial1).first()
        token = YubicoTokenClass(db_token)
        otpcount = token.check_otp("vvbgidlg")
        self.assertEqual(otpcount, -1)

    def test_07_check_otp_ID_wrong(self):
        """An OTP with a foreign public ID is rejected locally."""
        db_token = Token.query.filter(Token.serial == self.serial1).first()
        token = YubicoTokenClass(db_token)
        otpcount = token.check_otp("Xvbgidlghkhgndujklhhudbcuttkcklhvjktrjrt")
        self.assertEqual(otpcount, -1)

    def test_08_init_ID_too_short(self):
        """update() must reject a tokenid shorter than a full OTP."""
        db_token = Token("neuYT", tokentype="remote")
        db_token.save()
        token = YubicoTokenClass(db_token)
        # Context-manager form keeps the failing call visually distinct
        # from the assertion machinery.
        with self.assertRaises(Exception):
            token.update({"yubico.tokenid": "vvbgidlg"})
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.