id (int64, 0-300k) | label (stringlengths 1-74, ⌀) | text (stringlengths 4k-8k)
---|---|---|
2,200 |
test kill job
|
#!/usr/bin/env python
"""
tests for HTCondorCEComputingElement module
"""
import uuid
import pytest
from DIRAC.Resources.Computing import HTCondorCEComputingElement as HTCE
from DIRAC.Resources.Computing.BatchSystems import Condor
from DIRAC import S_OK
MODNAME = "DIRAC.Resources.Computing.HTCondorCEComputingElement"
STATUS_LINES = """
123.2 5
123.1 3
""".strip().split(
"\n"
)
HISTORY_LINES = """
123 0 4
""".strip().split(
"\n"
)
@pytest.fixture
def setUp():
return {"Queue": "espresso", "GridEnv": "/dev/null"}
def test_parseCondorStatus():
statusLines = """
104097.9 2
104098.0 1
104098.1 4
foo bar
104098.2 3
104098.3 5
104098.4 7
""".strip().split(
"\n"
)
# force there to be an empty line
expectedResults = {
"104097.9": "Running",
"104098.0": "Waiting",
"104098.1": "Done",
"104098.2": "Aborted",
"104098.3": "HELD",
"104098.4": "Unknown",
}
for jobID, expected in expectedResults.items():
assert HTCE.parseCondorStatus(statusLines, jobID) == expected
def test_getJobStatus(mocker):
"""Test HTCondorCE getJobStatus"""
mocker.patch(
MODNAME + ".HTCondorCEComputingElement._executeCondorCommand",
side_effect=[
S_OK((0, "\n".join(STATUS_LINES), "")),
S_OK((0, "\n".join(HISTORY_LINES), "")),
S_OK((0, "", "")),
],
)
mocker.patch(MODNAME + ".HTCondorCEComputingElement._HTCondorCEComputingElement__cleanup")
htce = HTCE.HTCondorCEComputingElement(12345)
ret = htce.getJobStatus(
[
"htcondorce://condorce.foo.arg/123.0:::abc321",
"htcondorce://condorce.foo.arg/123.1:::c3b2a1",
"htcondorce://condorce.foo.arg/123.2:::c3b2a2",
"htcondorce://condorce.foo.arg/333.3:::c3b2a3",
]
)
expectedResults = {
"htcondorce://condorce.foo.arg/123.0": "Done",
"htcondorce://condorce.foo.arg/123.1": "Aborted",
"htcondorce://condorce.foo.arg/123.2": "Aborted",
"htcondorce://condorce.foo.arg/333.3": "Unknown",
}
assert ret["OK"] is True
assert expectedResults == ret["Value"]
def test_getJobStatusBatchSystem(mocker):
"""Test Condor Batch System plugin getJobStatus"""
patchPopen = mocker.patch("DIRAC.Resources.Computing.BatchSystems.Condor.subprocess.Popen")
patchPopen.return_value.communicate.side_effect = [("\n".join(STATUS_LINES), ""), ("\n".join(HISTORY_LINES), "")]
patchPopen.return_value.returncode = 0
ret = Condor.Condor().getJobStatus(JobIDList=["123.0", "123.1", "123.2", "333.3"])
expectedResults = {
"123.0": "Done",
"123.1": "Aborted",
"123.2": "Unknown", # HELD is treated as Unknown
"333.3": "Unknown",
}
assert ret["Status"] == 0
assert expectedResults == ret["Jobs"]
@pytest.mark.parametrize(
"localSchedd, optionsNotExpected, optionsExpected",
[
(False, ["ShouldTransferFiles = YES", "WhenToTransferOutput = ON_EXIT_OR_EVICT"], ["universe = vanilla"]),
(True, [], ["ShouldTransferFiles = YES", "WhenToTransferOutput = ON_EXIT_OR_EVICT", "universe = grid"]),
],
)
def test__writeSub(mocker, localSchedd, optionsNotExpected, optionsExpected):
htce = HTCE.HTCondorCEComputingElement(12345)
htce.useLocalSchedd = localSchedd
subFileMock = mocker.Mock()
mocker.patch(MODNAME + ".os.fdopen", return_value=subFileMock)
mocker.patch(MODNAME + ".tempfile.mkstemp", return_value=("os", "pilotName"))
mocker.patch(MODNAME + ".mkDir")
jobStamps = []
commonJobStampPart = uuid.uuid4().hex[:3]
for _i in range(42):
jobStamp = commonJobStampPart + uuid.uuid4().hex[:29]
jobStamps.append(jobStamp)
htce._HTCondorCEComputingElement__writeSub("dirac-install", 42, "", 1, jobStamps) # pylint: disable=E1101
for option in optionsNotExpected:
# the three [0] are: call_args_list[firstCall][ArgsArgumentsTuple][FirstArgsArgument]
assert option not in subFileMock.write.call_args_list[0][0][0]
for option in optionsExpected:
assert option in subFileMock.write.call_args_list[0][0][0]
@pytest.mark.parametrize(
"localSchedd, expected", [(False, "-pool condorce.cern.ch:9619 -name condorce.cern.ch"), (True, "")]
)
def test_reset(setUp, localSchedd, expected):
ceParameters = setUp
htce = HTCE.HTCondorCEComputingElement(12345)
htce.ceParameters = ceParameters
htce.useLocalSchedd = localSchedd
ceName = "condorce.cern.ch"
htce.ceName = ceName
htce._reset()
assert htce.remoteScheddOptions == ""
@pytest.mark.parametrize(
"localSchedd, expected",
[
(False, "condor_submit -terse -pool condorce.cern.ch:9619 -remote condorce.cern.ch dirac_pilot"),
(True, "condor_submit -terse dirac_pilot"),
],
)
def test_submitJob(setUp, mocker, localSchedd, expected):
ceParameters = setUp
htce = HTCE.HTCondorCEComputingElement(12345)
htce.ceParameters = ceParameters
htce.useLocalSchedd = localSchedd
ceName = "condorce.cern.ch"
htce.ceName = ceName
execMock = mocker.patch(
MODNAME + ".HTCondorCEComputingElement._executeCondorCommand", return_value=S_OK((0, "123.0 - 123.0", ""))
)
mocker.patch(
MODNAME + ".HTCondorCEComputingElement._HTCondorCEComputingElement__writeSub", return_value="dirac_pilot"
)
mocker.patch(MODNAME + ".os")
result = htce.submitJob("pilot", "proxy", 1)
assert result["OK"] is True
assert " ".join(execMock.call_args_list[0][0][0]) == expected
@pytest.mark.parametrize(
"jobIDList, jobID, ret, success, local",
[
([], "", 0, True, True),
("", "", 0, True, True),
(["htcondorce://condorce.foo.arg/123.0:::abc321"], "123.0", 0, True, True),
("htcondorce://condorce.foo.arg/123.0:::abc321", "123.0", 0, True, True),
("htcondorce://condorce.foo.arg/123.0:::abc321", "123.0", 1, False, True),
(["htcondorce://condorce.foo.arg/333.3"], "333.3", 0, True, True),
("htcondorce://condorce.foo.arg/333.3", "333.3", 0, True, False),
],
)
def METHOD_NAME(setUp, mocker, jobIDList, jobID, ret, success, local):
ceParameters = setUp
htce = HTCE.HTCondorCEComputingElement(12345)
htce.ceName = "condorce.foo.arg"
htce.useLocalSchedd = local
htce.ceParameters = ceParameters
htce._reset()
execMock = mocker.patch(
MODNAME + ".HTCondorCEComputingElement._executeCondorCommand", return_value=S_OK((ret, "", ""))
)
ret = htce.killJob(jobIDList=jobIDList)
assert ret["OK"] == success
if jobID:
expected = f"condor_rm {htce.remoteScheddOptions.strip()} {jobID}"
assert " ".join(execMock.call_args_list[0][0][0]) == expected
|
2,201 |
resolve definition
|
"""Core utilities and constants."""
import inspect
import os
import re
from typing import Optional, Tuple
from ..cache.base import BaseCache
from ..core.profile import Profile
from ..messaging.agent_message import AgentMessage
from ..utils.classloader import ClassLoader
from .error import ProtocolMinorVersionNotSupported, ProtocolDefinitionValidationError
CORE_EVENT_PREFIX = "acapy::core::"
STARTUP_EVENT_TOPIC = CORE_EVENT_PREFIX + "startup"
STARTUP_EVENT_PATTERN = re.compile(f"^{STARTUP_EVENT_TOPIC}?$")
SHUTDOWN_EVENT_TOPIC = CORE_EVENT_PREFIX + "shutdown"
SHUTDOWN_EVENT_PATTERN = re.compile(f"^{SHUTDOWN_EVENT_TOPIC}?$")
WARNING_DEGRADED_FEATURES = "version-with-degraded-features"
WARNING_VERSION_MISMATCH = "fields-ignored-due-to-version-mismatch"
WARNING_VERSION_NOT_SUPPORTED = "version-not-supported"
async def validate_get_response_version(
profile: Profile, rec_version: str, msg_class: type
) -> Tuple[str, Optional[str]]:
"""Return a tuple with version to respond with and warnings.
Process received version and protocol version definition,
returns the tuple.
Args:
profile: Profile
rec_version: received version from message
msg_class: type
Returns:
Tuple with response version and any warnings
"""
resp_version = rec_version
warning = None
version_string_tokens = rec_version.split(".")
rec_major_version = int(version_string_tokens[0])
rec_minor_version = int(version_string_tokens[1])
version_definition = await get_version_def_from_msg_class(
profile, msg_class, rec_major_version
)
proto_major_version = int(version_definition["major_version"])
proto_curr_minor_version = int(version_definition["current_minor_version"])
proto_min_minor_version = int(version_definition["minimum_minor_version"])
if rec_minor_version < proto_min_minor_version:
warning = WARNING_VERSION_NOT_SUPPORTED
elif (
rec_minor_version >= proto_min_minor_version
and rec_minor_version < proto_curr_minor_version
):
warning = WARNING_DEGRADED_FEATURES
elif rec_minor_version > proto_curr_minor_version:
warning = WARNING_VERSION_MISMATCH
if proto_major_version == rec_major_version:
if (
proto_min_minor_version <= rec_minor_version
and proto_curr_minor_version >= rec_minor_version
):
resp_version = f"{str(proto_major_version)}.{str(rec_minor_version)}"
elif rec_minor_version > proto_curr_minor_version:
resp_version = f"{str(proto_major_version)}.{str(proto_curr_minor_version)}"
elif rec_minor_version < proto_min_minor_version:
raise ProtocolMinorVersionNotSupported(
"Minimum supported minor version is "
+ f"{proto_min_minor_version}."
+ f" Received {rec_minor_version}."
)
else:
raise ProtocolMinorVersionNotSupported(
f"Supported major version {proto_major_version}"
" is not same as received major version"
f" {rec_major_version}."
)
return (resp_version, warning)
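# Illustrative walk-through of validate_get_response_version (the numbers are
# hypothetical, not taken from any real protocol definition): with
# major_version 1, minimum_minor_version 1 and current_minor_version 2, a
# received "1.1" is answered with "1.1" plus a "version-with-degraded-features"
# warning, "1.5" is answered with "1.2" plus a
# "fields-ignored-due-to-version-mismatch" warning, and "1.0" raises
# ProtocolMinorVersionNotSupported because it falls below the minimum minor.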
def get_version_from_message_type(msg_type: str) -> str:
"""Return version from provided message_type."""
return (re.search(r"(\d+\.)?(\*|\d+)", msg_type)).group()
def get_version_from_message(msg: AgentMessage) -> str:
"""Return version from provided AgentMessage."""
msg_type = msg._type
return get_version_from_message_type(msg_type)
async def get_proto_default_version_from_msg_class(
profile: Profile, msg_class: type, major_version: int = 1
) -> str:
"""Return default protocol version from version_definition."""
version_definition = await get_version_def_from_msg_class(
profile, msg_class, major_version
)
return _get_default_version_from_version_def(version_definition)
def get_proto_default_version(def_path: str, major_version: int = 1) -> str:
"""Return default protocol version from version_definition."""
version_definition = _get_version_def_from_path(def_path, major_version)
return _get_default_version_from_version_def(version_definition)
def METHOD_NAME(search_path: str, msg_class: type) -> str:
try:
path = os.path.normpath(inspect.getfile(msg_class))
path = search_path + path.rsplit(search_path, 1)[1]
version = (re.search(r"v(\d+\_)?(\*|\d+)", path)).group()
path = path.split(version, 1)[0]
definition_path = (path.replace("/", ".")) + "definition"
if ClassLoader.load_module(definition_path):
return definition_path
except Exception:
# we expect some exceptions resolving paths
pass
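# Illustrative example of the definition-path resolution above (the module
# path is made up): for a message class defined in
# aries_cloudagent/protocols/example/v1_0/messages/ping.py and search_path
# "aries_cloudagent", the file path is trimmed at the version segment "v1_0",
# leaving "aries_cloudagent/protocols/example/", which becomes the module name
# "aries_cloudagent.protocols.example.definition" that is then checked with
# ClassLoader.load_module().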
def _get_path_from_msg_class(msg_class: type) -> str:
search_paths = ["aries_cloudagent", msg_class.__module__.split(".", 1)[0]]
if os.getenv("ACAPY_HOME"):
search_paths.insert(0, os.getenv("ACAPY_HOME"))
definition_path = None
searches = 0
while not definition_path and searches < len(search_paths):
definition_path = METHOD_NAME(search_paths[searches], msg_class)
searches = searches + 1
# we could throw an exception here if no definition path was found
return definition_path
def _get_version_def_from_path(definition_path: str, major_version: int = 1):
version_definition = None
definition = ClassLoader.load_module(definition_path)
for protocol_version in definition.versions:
if major_version == protocol_version["major_version"]:
version_definition = protocol_version
break
return version_definition
def _get_default_version_from_version_def(version_definition) -> str:
default_major_version = version_definition["major_version"]
default_minor_version = version_definition["current_minor_version"]
return f"{default_major_version}.{default_minor_version}"
async def get_version_def_from_msg_class(
profile: Profile, msg_class: type, major_version: int = 1
):
"""Return version_definition of a protocol from msg_class."""
cache = profile.inject_or(BaseCache)
version_definition = None
if cache:
version_definition = await cache.get(
f"version_definition::{str(msg_class).lower()}"
)
if version_definition:
return version_definition
definition_path = _get_path_from_msg_class(msg_class)
version_definition = _get_version_def_from_path(definition_path, major_version)
if not version_definition:
raise ProtocolDefinitionValidationError(
f"Unable to load protocol version_definition for {str(msg_class)}"
)
if cache:
await cache.set(
f"version_definition::{str(msg_class).lower()}", version_definition
)
return version_definition
|
2,202 |
set up
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2018 Satpy developers
#
# This file is part of satpy.
#
# satpy is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# satpy is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# satpy. If not, see <http://www.gnu.org/licenses/>.
"""Module for testing the satpy.readers.hdf5_utils module."""
import os
import unittest
import numpy as np
try:
from satpy.readers.hdf5_utils import HDF5FileHandler
except ImportError:
# fake the import so we can at least run the tests in this file
HDF5FileHandler = object # type: ignore
class FakeHDF5FileHandler(HDF5FileHandler):
"""Swap HDF5 File Handler for reader tests to use."""
def __init__(self, filename, filename_info, filetype_info, **kwargs):
"""Get fake file content from 'get_test_content'."""
if HDF5FileHandler is object:
raise ImportError("Base 'HDF5FileHandler' could not be "
"imported.")
filename = str(filename)
super(HDF5FileHandler, self).__init__(filename, filename_info, filetype_info)
self.file_content = self.get_test_content(filename, filename_info, filetype_info)
self.file_content.update(kwargs)
def get_test_content(self, filename, filename_info, filetype_info):
"""Mimic reader input file content.
Args:
filename (str): input filename
filename_info (dict): Dict of metadata pulled from filename
filetype_info (dict): Dict of metadata from the reader's yaml config for this file type
Returns: dict of file content with keys like:
- 'dataset'
- '/attr/global_attr'
- 'dataset/attr/global_attr'
- 'dataset/shape'
"""
raise NotImplementedError("Fake File Handler subclass must implement 'get_test_content'")
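# A minimal sketch of what a concrete subclass might return (the names and
# values are made up for illustration, not taken from any real satpy reader
# test):
#
#     class FakeMyReaderHandler(FakeHDF5FileHandler):
#         def get_test_content(self, filename, filename_info, filetype_info):
#             data = np.zeros((10, 100), dtype=np.float32)
#             return {
#                 'dataset': data,                     # the dataset itself
#                 'dataset/shape': data.shape,         # its shape
#                 'dataset/attr/units': 'K',           # a dataset attribute
#                 '/attr/global_attr': 'fake_value',   # a global file attribute
#             }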
class TestHDF5FileHandler(unittest.TestCase):
"""Test HDF5 File Handler Utility class."""
def METHOD_NAME(self):
"""Create a test HDF5 file."""
import h5py
h = h5py.File('test.h5', 'w')
# Create Group
g1 = h.create_group('test_group')
# Add datasets
ds1_f = g1.create_dataset('ds1_f',
shape=(10, 100),
dtype=np.float32,
data=np.arange(10. * 100).reshape((10, 100)))
ds1_i = g1.create_dataset('ds1_i',
shape=(10, 100),
dtype=np.int32,
data=np.arange(10 * 100).reshape((10, 100)))
ds2_f = h.create_dataset('ds2_f',
shape=(10, 100),
dtype=np.float32,
data=np.arange(10. * 100).reshape((10, 100)))
ds2_i = h.create_dataset('ds2_i',
shape=(10, 100),
dtype=np.int32,
data=np.arange(10 * 100).reshape((10, 100)))
# Add attributes
# shows up as a scalar array of bytes (shape=(), size=1)
h.attrs['test_attr_str'] = 'test_string'
h.attrs['test_attr_byte'] = b'test_byte'
h.attrs['test_attr_int'] = 0
h.attrs['test_attr_float'] = 1.2
# shows up as a numpy bytes object
h.attrs['test_attr_str_arr'] = np.array(b"test_string2")
g1.attrs['test_attr_str'] = 'test_string'
g1.attrs['test_attr_byte'] = b'test_byte'
g1.attrs['test_attr_int'] = 0
g1.attrs['test_attr_float'] = 1.2
for d in [ds1_f, ds1_i, ds2_f, ds2_i]:
d.attrs['test_attr_str'] = 'test_string'
d.attrs['test_attr_byte'] = b'test_byte'
d.attrs['test_attr_int'] = 0
d.attrs['test_attr_float'] = 1.2
d.attrs['test_ref'] = d.ref
self.var_attrs = list(d.attrs.keys())
h.close()
def tearDown(self):
"""Remove the previously created test file."""
os.remove('test.h5')
def test_all_basic(self):
"""Test everything about the HDF5 class."""
import xarray as xr
from satpy.readers.hdf5_utils import HDF5FileHandler
file_handler = HDF5FileHandler('test.h5', {}, {})
for ds_name in ('test_group/ds1_f', 'test_group/ds1_i', 'ds2_f', 'ds2_i'):
ds = file_handler[ds_name]
attrs = ds.attrs
self.assertEqual(ds.dtype, np.float32 if ds_name.endswith('f') else np.int32)
self.assertTupleEqual(file_handler[ds_name + '/shape'], (10, 100))
self.assertEqual(attrs['test_attr_str'], 'test_string')
self.assertEqual(attrs['test_attr_byte'], 'test_byte')
self.assertEqual(attrs['test_attr_int'], 0)
self.assertEqual(attrs['test_attr_float'], 1.2)
self.assertEqual(file_handler[ds_name + '/attr/test_attr_str'], 'test_string')
self.assertEqual(file_handler[ds_name + '/attr/test_attr_byte'], 'test_byte')
self.assertEqual(file_handler[ds_name + '/attr/test_attr_int'], 0)
self.assertEqual(file_handler[ds_name + '/attr/test_attr_float'], 1.2)
self.assertEqual(file_handler['/attr/test_attr_str'], 'test_string')
self.assertEqual(file_handler['/attr/test_attr_byte'], 'test_byte')
self.assertEqual(file_handler['/attr/test_attr_str_arr'], 'test_string2')
self.assertEqual(file_handler['/attr/test_attr_int'], 0)
self.assertEqual(file_handler['/attr/test_attr_float'], 1.2)
self.assertIsInstance(file_handler.get('ds2_f'), xr.DataArray)
self.assertIsNone(file_handler.get('fake_ds'))
self.assertEqual(file_handler.get('fake_ds', 'test'), 'test')
self.assertTrue('ds2_f' in file_handler)
self.assertFalse('fake_ds' in file_handler)
self.assertIsInstance(file_handler['ds2_f/attr/test_ref'], np.ndarray)
|
2,203 |
set visibility
|
from collections import OrderedDict
from typing import Iterable, TYPE_CHECKING, Callable, List, Tuple, Dict, Union, TypeVar, Any
from gi.repository import Gio, GLib, Pango
from blueman.main.DbusService import DbusService
from blueman.main.Tray import BluemanTray
from blueman.main.indicators.IndicatorInterface import IndicatorInterface, IndicatorNotAvailable
if TYPE_CHECKING:
from blueman.plugins.applet.Menu import MenuItemDict, SubmenuItemDict
from blueman.main.indicators.GtkStatusIcon import MenuItemActivator
class MenuService(DbusService):
def __init__(self, on_activate_menu_item: "MenuItemActivator") -> None:
super().__init__(None, "com.canonical.dbusmenu", "/org/blueman/sni/menu", Gio.BusType.SESSION)
self._items: OrderedDict[int, "MenuItemDict"] = OrderedDict()
self._revision = 0
self._revision_advertised = -1
self._on_activate = on_activate_menu_item
self.add_method("GetLayout", ("i", "i", "as"), ("u", "(ia{sv}av)"), self._get_layout)
self.add_method("Event", ("i", "s", "v", "u"), (), self._on_event)
self.add_method("AboutToShow", ("i",), ("b",), lambda _: self._revision > self._revision_advertised)
self.add_method("GetGroupProperties", ("ai", "as"), ("a(ia{sv})",),
lambda ids, props: [(idx, self._render_item(item)) for idx, item in self._iterate_items()
if idx in ids])
self.add_signal("LayoutUpdated", ("u", "i"))
GLib.timeout_add(100, self._advertise_revision)
def set_items(self, items: Iterable["MenuItemDict"]) -> None:
self._items = OrderedDict((item["id"], item) for item in items)
self._revision += 1
def _advertise_revision(self) -> bool:
if self._revision != self._revision_advertised:
self.emit_signal("LayoutUpdated", self._revision, 0)
self._revision_advertised = self._revision
return True
def _get_layout(self, parent_id: int, _recursion_depth: int, _property_names: List[str]
) -> Tuple[int, Tuple[int, Dict[str, GLib.Variant], List[GLib.Variant]]]:
if parent_id == 0:
return self._revision, (0, {}, self._render_menu(((item["id"] << 8, item) for item in self._items.values()),
self._render_submenu))
else:
item = self._items[parent_id >> 8]
if "submenu" in item and _recursion_depth != 0:
return self._revision, (parent_id, self._render_item(item), self._render_submenu(item, parent_id))
return self._revision, (parent_id, self._render_item(item), [])
def _render_submenu(self, item: "MenuItemDict", idx: int) -> List[GLib.Variant]:
if "submenu" in item:
return self._render_menu(enumerate(item["submenu"], idx + 1), lambda _item, _isx: [])
else:
return []
_T = TypeVar("_T", bound="SubmenuItemDict")
def _render_menu(self, items: Iterable[Tuple[int, _T]], submenu_callback: Callable[[_T, int], List[GLib.Variant]]
) -> List[GLib.Variant]:
return [GLib.Variant("(ia{sv}av)", (idx, self._render_item(item), submenu_callback(item, idx)))
for (idx, item) in items]
def _iterate_items(self) -> Iterable[Tuple[int, "SubmenuItemDict"]]:
for item in self._items.values():
yield item["id"] << 8, item
if "submenu" in item:
yield from enumerate(item["submenu"], (item["id"] << 8) + 1)
def _render_item(self, item: Union["MenuItemDict", "SubmenuItemDict"]) -> Dict[str, GLib.Variant]:
if "text" in item and "icon_name" in item:
label = Pango.parse_markup(item["text"], -1, "\0")[2] if item.get("markup", False) else item["text"]
props = {
"label": GLib.Variant("s", label),
"icon-name": GLib.Variant("s", item["icon_name"]),
"enabled": GLib.Variant("b", item["sensitive"]),
}
if "submenu" in item:
props["children-display"] = GLib.Variant("s", "submenu")
return props
else:
return {"type": GLib.Variant("s", "separator")}
def _on_event(self, idx: int, event_id: str, _data: GLib.Variant, _timestamp: int) -> None:
if event_id == "clicked":
if idx % (1 << 8) == 0:
self._on_activate(idx >> 8)
else:
self._on_activate(idx >> 8, idx % (1 << 8) - 1)
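# Illustrative note on the id packing used above (the example ids are made up):
# menu item ids are exposed as (item_id << 8) + submenu_position, so a
# top-level item with id 3 becomes 768, its second submenu entry becomes 770,
# and _on_event decodes 770 back into self._on_activate(3, 1) because
# 770 % 256 == 2 and 770 >> 8 == 3.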
class StatusNotifierItemService(DbusService):
Category = "Hardware"
Id = "blueman"
Title = "blueman"
ItemIsMenu = False
def __init__(self, tray: BluemanTray, icon_name: str) -> None:
super().__init__(None, "org.kde.StatusNotifierItem", "/org/blueman/sni", Gio.BusType.SESSION,
{"Category": "s", "Id": "s", "IconName": "s", "Status": "s", "Title": "s",
"ToolTip": "(sa(iiay)ss)", "Menu": "o", "ItemIsMenu": "b"})
self.add_method("Activate", ("i", "i"), "", lambda x, y: tray.activate_status_icon())
self.menu = MenuService(tray.activate_menu_item)
self.IconName = icon_name
self.Status = "Active"
self.ToolTip: Tuple[str, List[Tuple[int, int, List[int]]], str, str] = ("", [], "", "")
self.Menu = "/org/blueman/sni/menu"
self.add_signal("NewIcon", "")
self.add_signal("NewStatus", "s")
self.add_signal("NewToolTip", "")
def register(self) -> None:
self.menu.register()
super().register()
def unregister(self) -> None:
super().unregister()
self.menu.unregister()
class StatusNotifierItem(IndicatorInterface):
_SNI_BUS_NAME = _SNI_INTERFACE_NAME = "org.kde.StatusNotifierWatcher"
def __init__(self, tray: BluemanTray, icon_name: str) -> None:
self._sni = StatusNotifierItemService(tray, icon_name)
self._sni.register()
self._bus = Gio.bus_get_sync(Gio.BusType.SESSION)
watcher_expected: bool
def on_watcher_appeared(*args: Any) -> None:
nonlocal watcher_expected
if watcher_expected:
watcher_expected = False
else:
tray.activate()
Gio.bus_watch_name(Gio.BusType.SESSION, self._SNI_BUS_NAME, Gio.BusNameWatcherFlags.NONE,
on_watcher_appeared, None)
try:
Gio.bus_get_sync(Gio.BusType.SESSION).call_sync(
self._SNI_BUS_NAME, "/StatusNotifierWatcher", self._SNI_INTERFACE_NAME,
"RegisterStatusNotifierItem", GLib.Variant("(s)", ("/org/blueman/sni",)),
None, Gio.DBusCallFlags.NONE, -1)
watcher_expected = True
except GLib.Error:
watcher_expected = False
raise IndicatorNotAvailable
def set_icon(self, icon_name: str) -> None:
self._sni.IconName = icon_name
self._sni.emit_signal("NewIcon")
def set_tooltip_title(self, title: str) -> None:
self._sni.ToolTip = ("", [], title, self._sni.ToolTip[3])
self._sni.emit_signal("NewToolTip")
def set_tooltip_text(self, text: str) -> None:
self._sni.ToolTip = ("", [], self._sni.ToolTip[2], text)
self._sni.emit_signal("NewToolTip")
def METHOD_NAME(self, visible: bool) -> None:
self._sni.Status = status = "Active" if visible else "Passive"
self._sni.emit_signal("NewStatus", status)
def set_menu(self, menu: Iterable["MenuItemDict"]) -> None:
self._sni.menu.set_items(menu)
|
2,204 |
serialization type
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import logging
from typing import Dict, Any, Optional, TYPE_CHECKING
from .base import BaseType
from .imports import FileImport, ImportType, TypingSection
from .primitive_types import IntegerType, BinaryType, StringType, BooleanType
from .utils import add_to_description
if TYPE_CHECKING:
from .code_model import CodeModel
_LOGGER = logging.getLogger(__name__)
class ConstantType(BaseType):
"""Schema for constants that will be serialized.
:param yaml_data: the yaml data for this schema
:type yaml_data: dict[str, Any]
:param str value: The actual value of this constant.
:param schema: The schema for the value of this constant.
:type schema: ~autorest.models.PrimitiveType
"""
def __init__(
self,
yaml_data: Dict[str, Any],
code_model: "CodeModel",
value_type: BaseType,
value: Optional[str],
) -> None:
super().__init__(yaml_data=yaml_data, code_model=code_model)
self.value_type = value_type
self.value = value
def get_declaration(self, value=None):
if value and value != self.value:
_LOGGER.warning(
"Passed in value of %s differs from constant value of %s. Choosing constant value",
str(value),
str(self.value),
)
if self.value is None:
return "None"
return self.value_type.get_declaration(self.value)
def description(self, *, is_operation_file: bool) -> str:
if is_operation_file:
return ""
return add_to_description(
self.yaml_data.get("description", ""),
f"Default value is {self.get_declaration()}.",
)
@property
def METHOD_NAME(self) -> str:
"""Returns the serialization value for msrest.
:return: The serialization value for msrest
:rtype: str
"""
return self.value_type.METHOD_NAME
def docstring_text(self, **kwargs: Any) -> str:
return "constant"
def docstring_type(self, **kwargs: Any) -> str:
"""The python type used for RST syntax input and type annotation.
:param str namespace: Optional. The namespace for the models.
"""
return self.value_type.docstring_type(**kwargs)
def type_annotation(self, **kwargs: Any) -> str:
return (
f"Literal[{self.get_declaration()}]"
if self._is_literal
else self.value_type.type_annotation(**kwargs)
)
@property
def _is_literal(self) -> bool:
return isinstance(
self.value_type, (IntegerType, BinaryType, StringType, BooleanType)
)
@classmethod
def from_yaml(
cls, yaml_data: Dict[str, Any], code_model: "CodeModel"
) -> "ConstantType":
"""Constructs a ConstantType from yaml data.
:param yaml_data: the yaml data from which we will construct this schema
:type yaml_data: dict[str, Any]
:return: A created ConstantType
:rtype: ~autorest.models.ConstantType
"""
from . import build_type
return cls(
yaml_data=yaml_data,
code_model=code_model,
value_type=build_type(yaml_data["valueType"], code_model),
value=yaml_data["value"],
)
def get_json_template_representation(
self,
*,
optional: bool = True,
# pylint: disable=unused-argument
client_default_value_declaration: Optional[str] = None,
description: Optional[str] = None,
) -> Any:
return self.value_type.get_json_template_representation(
optional=optional,
client_default_value_declaration=self.get_declaration(),
description=description,
)
def _imports_shared(self, **kwargs: Any):
file_import = FileImport()
file_import.merge(self.value_type.imports(**kwargs))
return file_import
def imports_for_multiapi(self, **kwargs: Any) -> FileImport:
return self._imports_shared(**kwargs)
def imports(self, **kwargs: Any) -> FileImport:
file_import = self._imports_shared(**kwargs)
if self._is_literal:
file_import.add_import("sys", ImportType.STDLIB)
file_import.add_submodule_import(
"typing_extensions",
"Literal",
ImportType.BYVERSION,
TypingSection.REGULAR,
None,
(
(
(3, 8),
"typing",
"pylint: disable=no-name-in-module, ungrouped-imports",
),
),
)
return file_import
@property
def instance_check_template(self) -> str:
return self.value_type.instance_check_template
|
2,205 |
test apply
|
# This file is part of the pyMOR project (https://www.pymor.org).
# Copyright pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (https://opensource.org/licenses/BSD-2-Clause)
import numpy as np
import pytest
import scipy.linalg as spla
from pymor.operators.block import BlockDiagonalOperator, BlockOperator
from pymor.operators.numpy import NumpyMatrixOperator
from pymor.vectorarrays.block import BlockVectorSpace
from pymor.vectorarrays.numpy import NumpyVectorSpace
pytestmark = pytest.mark.builtin
def METHOD_NAME():
A11 = np.random.randn(2, 3)
A12 = np.random.randn(2, 4)
A21 = np.zeros((5, 3))
A22 = np.random.randn(5, 4)
A = np.vstack((np.hstack((A11, A12)),
np.hstack((A21, A22))))
A11op = NumpyMatrixOperator(A11)
A12op = NumpyMatrixOperator(A12)
A22op = NumpyMatrixOperator(A22)
Aop = BlockOperator(np.array([[A11op, A12op], [None, A22op]]))
v1 = np.random.randn(3)
v2 = np.random.randn(4)
v = np.hstack((v1, v2))
v1va = NumpyVectorSpace.from_numpy(v1)
v2va = NumpyVectorSpace.from_numpy(v2)
vva = BlockVectorSpace.make_array((v1va, v2va))
wva = Aop.apply(vva)
w = np.hstack((wva.blocks[0].to_numpy(), wva.blocks[1].to_numpy()))
assert np.allclose(A.dot(v), w)
def test_apply_adjoint():
A11 = np.random.randn(2, 3)
A12 = np.random.randn(2, 4)
A21 = np.zeros((5, 3))
A22 = np.random.randn(5, 4)
A = np.vstack((np.hstack((A11, A12)),
np.hstack((A21, A22))))
A11op = NumpyMatrixOperator(A11)
A12op = NumpyMatrixOperator(A12)
A22op = NumpyMatrixOperator(A22)
Aop = BlockOperator(np.array([[A11op, A12op], [None, A22op]]))
v1 = np.random.randn(2)
v2 = np.random.randn(5)
v = np.hstack((v1, v2))
v1va = NumpyVectorSpace.from_numpy(v1)
v2va = NumpyVectorSpace.from_numpy(v2)
vva = BlockVectorSpace.make_array((v1va, v2va))
wva = Aop.apply_adjoint(vva)
w = np.hstack((wva.blocks[0].to_numpy(), wva.blocks[1].to_numpy()))
assert np.allclose(A.T.dot(v), w)
def test_block_diagonal():
A = np.random.randn(2, 3)
B = np.random.randn(4, 5)
Aop = NumpyMatrixOperator(A)
Bop = NumpyMatrixOperator(B)
Cop = BlockDiagonalOperator((Aop, Bop))
assert Cop.source.dim == 8
assert Cop.range.dim == 6
def test_blk_diag_apply_inverse():
A = np.random.randn(2, 2)
B = np.random.randn(3, 3)
C = spla.block_diag(A, B)
Aop = NumpyMatrixOperator(A)
Bop = NumpyMatrixOperator(B)
Cop = BlockDiagonalOperator((Aop, Bop))
v1 = np.random.randn(2)
v2 = np.random.randn(3)
v = np.hstack((v1, v2))
v1va = NumpyVectorSpace.from_numpy(v1)
v2va = NumpyVectorSpace.from_numpy(v2)
vva = BlockVectorSpace.make_array((v1va, v2va))
wva = Cop.apply_inverse(vva)
w = np.hstack((wva.blocks[0].to_numpy(), wva.blocks[1].to_numpy()))
assert np.allclose(spla.solve(C, v), w)
def test_blk_diag_apply_inverse_adjoint():
A = np.random.randn(2, 2)
B = np.random.randn(3, 3)
C = spla.block_diag(A, B)
Aop = NumpyMatrixOperator(A)
Bop = NumpyMatrixOperator(B)
Cop = BlockDiagonalOperator((Aop, Bop))
v1 = np.random.randn(2)
v2 = np.random.randn(3)
v = np.hstack((v1, v2))
v1va = NumpyVectorSpace.from_numpy(v1)
v2va = NumpyVectorSpace.from_numpy(v2)
vva = BlockVectorSpace.make_array((v1va, v2va))
wva = Cop.apply_inverse_adjoint(vva)
w = np.hstack((wva.blocks[0].to_numpy(), wva.blocks[1].to_numpy()))
assert np.allclose(spla.solve(C.T, v), w)
def test_block_jacobian():
from pymor.operators.constructions import QuadraticFunctional
A = np.random.randn(2, 2)
B = np.random.randn(3, 3)
C = np.random.randn(4, 4)
Aop = QuadraticFunctional(NumpyMatrixOperator(A))
Bop = QuadraticFunctional(NumpyMatrixOperator(B))
Cop = NumpyMatrixOperator(C)
Dop = BlockDiagonalOperator((Aop, Bop, Cop))
Dop_single_block = BlockDiagonalOperator(np.array([[Aop]]))
assert not Dop.linear and not Dop_single_block.linear
v1 = np.random.randn(2)
v2 = np.random.randn(3)
v3 = np.random.randn(4)
v1va = NumpyVectorSpace.from_numpy(v1)
v2va = NumpyVectorSpace.from_numpy(v2)
v3va = NumpyVectorSpace.from_numpy(v3)
vva = BlockVectorSpace.make_array((v1va, v2va, v3va))
vva_single_block = BlockVectorSpace.make_array(v1va)
jac = Dop.jacobian(vva, mu=None)
jac_single_block = Dop_single_block.jacobian(vva_single_block, mu=None)
assert jac.linear and jac_single_block.linear
assert np.all(jac.blocks[0, 0].vector.to_numpy()[0] == np.dot(A.T, v1) + np.dot(A, v1))
assert np.all(jac.blocks[1, 1].vector.to_numpy()[0] == np.dot(B.T, v2) + np.dot(B, v2))
assert np.all(jac.blocks[2, 2].matrix == C)
|
2,206 |
fitheight
|
'''
Character width dictionary and convenience functions for column sizing
with xlwt when Arial 10 is the standard font. Widths were determined
experimentally using Excel 2000 on Windows XP. I have no idea how well
these will work on other setups. For example, I don't know if system
video settings will affect the results. I do know for sure that this
module won't be applicable to other fonts in general.
//John Yeung 2009-09-02
'''
charwidths = {
'0': 262.637,
'1': 262.637,
'2': 262.637,
'3': 262.637,
'4': 262.637,
'5': 262.637,
'6': 262.637,
'7': 262.637,
'8': 262.637,
'9': 262.637,
'a': 262.637,
'b': 262.637,
'c': 262.637,
'd': 262.637,
'e': 262.637,
'f': 146.015,
'g': 262.637,
'h': 262.637,
'i': 117.096,
'j': 88.178,
'k': 233.244,
'l': 88.178,
'm': 379.259,
'n': 262.637,
'o': 262.637,
'p': 262.637,
'q': 262.637,
'r': 175.407,
's': 233.244,
't': 117.096,
'u': 262.637,
'v': 203.852,
'w': 321.422,
'x': 203.852,
'y': 262.637,
'z': 233.244,
'A': 321.422,
'B': 321.422,
'C': 350.341,
'D': 350.341,
'E': 321.422,
'F': 291.556,
'G': 350.341,
'H': 321.422,
'I': 146.015,
'J': 262.637,
'K': 321.422,
'L': 262.637,
'M': 379.259,
'N': 321.422,
'O': 350.341,
'P': 321.422,
'Q': 350.341,
'R': 321.422,
'S': 321.422,
'T': 262.637,
'U': 321.422,
'V': 321.422,
'W': 496.356,
'X': 321.422,
'Y': 321.422,
'Z': 262.637,
' ': 146.015,
'!': 146.015,
'"': 175.407,
'#': 262.637,
'$': 262.637,
'%': 438.044,
'&': 321.422,
'\'': 88.178,
'(': 175.407,
')': 175.407,
'*': 203.852,
'+': 291.556,
',': 146.015,
'-': 175.407,
'.': 146.015,
'/': 146.015,
':': 146.015,
';': 146.015,
'<': 291.556,
'=': 291.556,
'>': 291.556,
'?': 262.637,
'@': 496.356,
'[': 146.015,
'\\': 146.015,
']': 146.015,
'^': 203.852,
'_': 262.637,
'`': 175.407,
'{': 175.407,
'|': 146.015,
'}': 175.407,
'~': 291.556}
# By default, Excel displays column widths in units equal to the width
# of '0' (the zero character) in the standard font. For me, this is
# Arial 10, but it can be changed by the user. The BIFF file format
# stores widths in units 1/256th that size.
#
# Within Excel, the smallest incrementable amount for column width
# is the pixel. However many pixels it takes to draw '0' is how many
# increments there are between a width of 1 and a width of 2. A
# request for a finer increment will be rounded to the nearest pixel.
# For Arial 10, this is 9 pixels, but different fonts will of course
# require different numbers of pixels, and thus have different column
# width granularity.
#
# So far so good, but there is a wrinkle. Excel pads the first unit
# of column width by 7 pixels. At least this is the padding when the
# standard font is Arial 10 or Courier New 10, the two fonts I've tried.
# I don't know if it's different for different fonts. For Arial 10,
# with a padding of 7 pixels and a 9-pixel-wide '0', this results in 16
# increments to get from width 0 (hidden) to width 1. Ten columns of
# width 1 are 160 pixels wide while five columns of width 2 are 125
# pixels wide. A single column of width 10 is only 97 pixels wide.
#
# The punch line is that pixels are the true measure of width, and
# what Excel reports as the column width is wonky between 0 and 1.
# The only way I know to find out the padding for a desired font is
# to set that font as the standard font in Excel and count pixels.
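# Worked example of the mapping implemented by colwidth() below, assuming the
# Arial 10 setup described above (9-pixel '0', 7 pixels of padding):
#   width 1  -> 7 + 1*9  = 16 px -> 16/9  units ~ 1.78  -> 456 BIFF units
#   width 2  -> 7 + 2*9  = 25 px -> 25/9  units ~ 2.78  -> 712  = 200 + 2*256
#   width 10 -> 7 + 10*9 = 97 px -> 97/9  units ~ 10.78 -> 2760 = 200 + 10*256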
def colwidth(n):
'''Translate human-readable units to BIFF column width units'''
if n <= 0:
return 0
if n <= 1:
return n * 456
return 200 + n * 256
def fitwidth(data, bold=False):
'''Try to autofit Arial 10'''
maxunits = 0
for ndata in data.split("\n"):
units = 220
for char in ndata:
if char in charwidths:
units += charwidths[char]
else:
units += charwidths['0']
if maxunits < units:
maxunits = units
if bold:
maxunits *= 1.1
return max(maxunits, 700) # Don't go smaller than a reported width of 2
def METHOD_NAME(data, bold=False):
'''Try to autofit Arial 10'''
rowlen = len(data.split("\n"))
if rowlen > 1:
units = 230 * rowlen
else:
units = 290
if bold:
units *= 1.1
return int(units)
|
2,207 |
test single channel pass default when max
|
from random import randrange
from pubnub.pubnub import PubNub
from tests.helper import pnconf_file_copy
pubnub = PubNub(pnconf_file_copy())
MAX_FOR_FETCH_MESSAGES = 100
MULTIPLE_CHANNELS_MAX_FOR_FETCH_MESSAGES = 25
MAX_FOR_FETCH_MESSAGES_WITH_ACTIONS = 25
EXPECTED_SINGLE_CHANNEL_DEFAULT_MESSAGES = 100
EXPECTED_MULTIPLE_CHANNEL_DEFAULT_MESSAGES = 25
EXPECTED_DEFAULT_MESSAGES_WITH_ACTIONS = 25
class TestFetchMessages:
def test_single_channel_always_pass_max_when_in_bounds(self):
# given
expected_max_value = randrange(1, MAX_FOR_FETCH_MESSAGES + 1)
fetch_messages_endpoint_under_test = pubnub.fetch_messages()
fetch_messages_endpoint_under_test.channels("channel1")\
.count(expected_max_value)
# when
fetch_messages_endpoint_under_test.validate_params()
# then
assert fetch_messages_endpoint_under_test._count == expected_max_value
def test_single_channel_always_pass_default_when_non_positive(self):
# given
expected_max_value = randrange(-100, 1)
fetch_messages_endpoint_under_test = pubnub.fetch_messages()
fetch_messages_endpoint_under_test.channels("channel1").count(expected_max_value)
# when
fetch_messages_endpoint_under_test.validate_params()
# then
assert fetch_messages_endpoint_under_test._count == EXPECTED_SINGLE_CHANNEL_DEFAULT_MESSAGES
def test_single_channel_always_pass_default_when_not_specified(self):
# given
fetch_messages_endpoint_under_test = pubnub.fetch_messages()
fetch_messages_endpoint_under_test.channels("channel1")
# when
fetch_messages_endpoint_under_test.validate_params()
# then
assert fetch_messages_endpoint_under_test._count == EXPECTED_SINGLE_CHANNEL_DEFAULT_MESSAGES
def METHOD_NAME(self):
# given
expected_max_value = randrange(MAX_FOR_FETCH_MESSAGES + 1, 1000)
fetch_messages_endpoint_under_test = pubnub.fetch_messages()
fetch_messages_endpoint_under_test.channels("channel1").count(expected_max_value)
# when
fetch_messages_endpoint_under_test.validate_params()
# then
assert fetch_messages_endpoint_under_test._count == EXPECTED_SINGLE_CHANNEL_DEFAULT_MESSAGES
def test_multiple_channels_always_pass_max_when_in_bounds(self):
# given
expected_max_value = randrange(1, MULTIPLE_CHANNELS_MAX_FOR_FETCH_MESSAGES + 1)
fetch_messages_endpoint_under_test = pubnub.fetch_messages()
fetch_messages_endpoint_under_test.channels(["channel1", "channel2"]).count(expected_max_value)
# when
fetch_messages_endpoint_under_test.validate_params()
# then
assert fetch_messages_endpoint_under_test._count == expected_max_value
def test_multiple_channels_always_pass_default_when_non_positive(self):
# given
expected_max_value = randrange(-100, 1)
fetch_messages_endpoint_under_test = pubnub.fetch_messages()
fetch_messages_endpoint_under_test.channels(["channel1", "channel2"]).count(expected_max_value)
# when
fetch_messages_endpoint_under_test.validate_params()
# then
assert fetch_messages_endpoint_under_test._count == EXPECTED_MULTIPLE_CHANNEL_DEFAULT_MESSAGES
def test_multiple_channels_always_pass_default_when_not_specified(self):
# given
fetch_messages_endpoint_under_test = pubnub.fetch_messages()
fetch_messages_endpoint_under_test.channels(["channel1", "channel2"])
# when
fetch_messages_endpoint_under_test.validate_params()
# then
assert fetch_messages_endpoint_under_test._count == EXPECTED_MULTIPLE_CHANNEL_DEFAULT_MESSAGES
def test_multiple_channels_pass_default_when_max_exceeds(self):
# given
expected_max_value = randrange(MULTIPLE_CHANNELS_MAX_FOR_FETCH_MESSAGES + 1, 1000)
fetch_messages_endpoint_under_test = pubnub.fetch_messages()
fetch_messages_endpoint_under_test.channels(["channel1", "channel2"]).count(expected_max_value)
# when
fetch_messages_endpoint_under_test.validate_params()
# then
assert fetch_messages_endpoint_under_test._count == EXPECTED_MULTIPLE_CHANNEL_DEFAULT_MESSAGES
def test_single_channel_with_actions_pass_when_in_bounds(self):
# given
expected_max_value = randrange(1, MAX_FOR_FETCH_MESSAGES_WITH_ACTIONS + 1)
fetch_messages_endpoint_under_test = pubnub.fetch_messages()
fetch_messages_endpoint_under_test.channels("channel1").count(expected_max_value).include_message_actions(True)
# when
fetch_messages_endpoint_under_test.validate_params()
# then
assert fetch_messages_endpoint_under_test._count == expected_max_value
def test_single_channel_with_actions_pass_default_when_not_specified(self):
# given
fetch_messages_endpoint_under_test = pubnub.fetch_messages()
fetch_messages_endpoint_under_test.channels("channel1").include_message_actions(True)
# when
fetch_messages_endpoint_under_test.validate_params()
# then
assert fetch_messages_endpoint_under_test._count == EXPECTED_DEFAULT_MESSAGES_WITH_ACTIONS
def test_single_channel_with_actions_pass_default_when_max_exceeds(self):
# given
expected_max_value = randrange(MAX_FOR_FETCH_MESSAGES_WITH_ACTIONS + 1, 1000)
fetch_messages_endpoint_under_test = pubnub.fetch_messages()
fetch_messages_endpoint_under_test.channels("channel1").count(expected_max_value).include_message_actions(True)
# when
fetch_messages_endpoint_under_test.validate_params()
# then
assert fetch_messages_endpoint_under_test._count == EXPECTED_DEFAULT_MESSAGES_WITH_ACTIONS
|
2,208 |
test dot stim
|
from psychopy import visual, event, info
import pytest
import numpy as np
import shutil, os
from tempfile import mkdtemp
from psychopy.tests import utils
# Testing for memory leaks in PsychoPy classes (experiment run-time, not Builder, Coder, etc)
# The tests are too unstable to include in travis-ci at this point.
# command-line usage:
# py.test tests/test_misc/memory_usage.py
# Define the "acceptable" leakage severity; some gc vagaries are possible.
THRESHOLD = 0.5
win = visual.Window(size=(200,200), allowStencil=True)
def leakage(Cls, *args, **kwargs):
"""make up to 100 instances of Cls(*args, **kwargs),
return the difference in memory used by this python process (in M) as a
severity measure, approx = 100 * mem leak per instance in M
"""
mem = []
for i in range(100):
Cls(*args, **kwargs) # anonymous instance, gets gc'd each iteration
mem.append(info.getMemoryUsage())
# don't keep going if we're leaking:
if mem[i] - mem[0] > THRESHOLD:
break
proportion = i / 99.
return round((mem[i] - mem[0]) / proportion, 1)
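# Rough example of the severity measure (the numbers are hypothetical): if each
# instance leaked ~0.01 M, the cumulative growth would pass THRESHOLD (0.5 M)
# around i = 50 and the loop would stop early; dividing by proportion = 50/99
# extrapolates that back to ~1.0, i.e. the approximate leak per 100 instances.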
@pytest.mark.needs_sound
@pytest.mark.memory
class TestMemorySound():
@classmethod
def setup_class(self):
global sound, pyo
from psychopy import sound
import pyo
self.tmp = mkdtemp(prefix='psychopy-tests-memory-usage')
@classmethod
def teardown_class(self):
if hasattr(self, 'tmp'):
shutil.rmtree(self.tmp, ignore_errors=True)
def test_soundpyo_array(self):
"""anything using a numpy.array uses pyo.DataTable
"""
if pyo.getVersion() < (0, 7, 7):
pytest.xfail() # pyo leak fixed Oct 2015
for stim in [440, np.zeros(88200)]: # np.zeros(8820000) passes, slow
assert leakage(sound.SoundPyo, stim, secs=2) < THRESHOLD, 'stim = ' + str(stim)
def test_soundpyo_file(self):
"""files are handled by pyo.SndFile
"""
if pyo.getVersion() < (0, 7, 7):
pytest.xfail()
from scipy.io import wavfile
tmp = os.path.join(self.tmp, 'zeros.wav')
wavfile.write(tmp, 44100, np.zeros(88200))
assert leakage(sound.SoundPyo, tmp) < THRESHOLD
@pytest.mark.needs_sound
@pytest.mark.memory
class TestMemoryMovie():
@classmethod
def setup_class(self):
self.mov = os.path.join(utils.TESTS_DATA_PATH, 'testMovie.mp4')
@classmethod
def teardown_class(self):
if hasattr(self, 'tmp'):
shutil.rmtree(self.tmp, ignore_errors=True)
def test_movie3_leakage(self):
assert leakage(visual.MovieStim3, win, self.mov) < THRESHOLD
@pytest.mark.skipif('True')
def test_movie_leakage(self):
assert leakage(visual.MovieStim, win, self.mov) < THRESHOLD
@pytest.mark.skipif('True')
def test_movie2_leakage(self):
assert leakage(visual.MovieStim2, win, self.mov) < THRESHOLD
@pytest.mark.memory
class TestMemory():
@classmethod
def setup_class(self):
self.imgs = [os.path.join(utils.TESTS_DATA_PATH, 'testimage.jpg'), # smaller
os.path.join(utils.TESTS_DATA_PATH, 'greyscale.jpg')] # larger
@classmethod
def teardown_class(self):
if hasattr(self, 'tmp'):
shutil.rmtree(self.tmp, ignore_errors=True)
def test_Mouse(self):
assert leakage(event.Mouse, win=win) < THRESHOLD
def test_VisualStim(self):
"""Visual stim that typically do not leak can all be tested together
"""
cleanStim = ['ShapeStim', 'Rect', 'Circle', 'Polygon', 'Line', 'CustomMouse', 'Aperture']
for StimName in cleanStim:
Stim = eval('visual.' + StimName)
assert leakage(Stim, win) < THRESHOLD, StimName
def test_ShapeStim(self):
v = [(-.2,-.05), (-.2,.05), (.2,.05), (.2,.15), (.35,0), (.2,-.15), (.2,-.05)]
assert leakage(visual.ShapeStim, win, vertices=v) < THRESHOLD
assert leakage(visual.ShapeStim, win, vertices=v * 100) < THRESHOLD
@pytest.mark.xfail
def test_Window(self):
msg = 'leakage probably not a problem for typical users with 1 Window() instance'
assert leakage(visual.Window, size=(100, 100)) < THRESHOLD, msg
assert leakage(visual.Window, size=(2000, 2000)) < THRESHOLD, msg
def test_TextStim(self):
msg = "Note: some TextStim leakage is pyglet's fault"
for txt in ['a', 'a'*1000]:
assert leakage(visual.TextStim, win, txt) < THRESHOLD, msg
def test_RatingScale(self):
msg = "RatingScale will probably leak if TextStim does"
# 'hover' has few visual items (no text, line, marker, accept box)
for kwargs in [{'marker': 'hover', 'choices': [1,2]}, {}]:
assert leakage(visual.RatingScale, win, **kwargs) < THRESHOLD, msg
def test_BufferImageStim(self):
msg = "Note: the size of the window and the rect to capture affects leak severity"
for rect in [(-.1,.1,.1,-.1), (-1,1,1,-1)]:
assert leakage(visual.BufferImageStim, win, rect=rect) < THRESHOLD, msg
def test_ImageStim(self):
msg = "Note: the image size affects leak severity"
for img in self.imgs:
assert leakage(visual.ImageStim, win, img) < THRESHOLD, msg
def test_SimpleImageStim(self):
for img in self.imgs:
assert leakage(visual.SimpleImageStim, win, img) < THRESHOLD
def test_GratingStim(self):
assert leakage(visual.GratingStim, win) < THRESHOLD
def METHOD_NAME(self):
assert leakage(visual.DotStim, win, nDots=2000) < THRESHOLD
def test_RadialStim(self):
for r in [4, 16]:
assert leakage(visual.RadialStim, win, radialCycles=r, angularCycles=r) < THRESHOLD
def test_ElementArrayStim(self):
for n in [100, 1000]:
assert leakage(visual.ElementArrayStim, win, nElements=n) < THRESHOLD
|
2,209 |
test download cache
|
import os
from pathlib import Path
import pytest
from sunpy.data.data_manager.tests.mocks import MOCK_HASH, write_to_test_file
from sunpy.util.exceptions import SunpyUserWarning
def test_basic(storage, downloader, data_function):
data_function()
assert downloader.times_called == 1
assert len(storage._store) == 1
assert Path(storage._store[0]['file_path']).name == ('sunpy.test_file')
def METHOD_NAME(manager, storage, downloader, data_function):
"""
Test calling function multiple times does not redownload.
"""
data_function()
data_function()
assert downloader.times_called == 1
assert len(storage._store) == 1
assert Path(storage._store[0]['file_path']).name == ('sunpy.test_file')
def test_file_tampered(manager, storage, downloader, data_function):
"""
Test that a locally tampered file is redownloaded and a warning is raised.
"""
data_function()
write_to_test_file(manager._tempdir + '/sunpy.test_file', 'b')
with pytest.warns(SunpyUserWarning):
data_function()
assert downloader.times_called == 2
assert len(storage._store) == 1
assert Path(storage._store[0]['file_path']).name == ('sunpy.test_file')
def test_wrong_hash_provided(manager):
@manager.require('test_file', ['url1'], 'wrong_hash')
def test_foo():
pass
with pytest.raises(RuntimeError):
test_foo()
def test_skip_all(manager, storage, downloader, data_function):
"""
Test skip_hash_check redownloads data.
"""
data_function()
with manager.skip_hash_check():
data_function()
assert downloader.times_called == 2
assert len(storage._store) == 1
assert Path(storage._store[0]['file_path']).name == ('sunpy.test_file')
def test_override_file(manager, storage, downloader, data_function, tmpdir):
"""
Test the override_file functionality.
"""
def default_tester(manager):
"""
Function to test whether the file name is test_file.
"""
assert manager.get('test_file').name == ('sunpy.test_file')
def override_file_tester(manager):
"""
Function to test whether the file is /tmp/another_file.
"""
assert manager.get('test_file') == Path(f'{folder}/another_file')
# Outside the context manager file is default
folder = tmpdir.strpath
data_function(default_tester)
write_to_test_file(str(Path(folder+'/another_file')), 'a')
with manager.override_file('test_file', f'file://{folder}/another_file'):
# Inside the file is replaced
data_function(override_file_tester)
# TODO: this combined with the check above fails on windows
# with manager.override_file('test_file', f'{folder}/another_file'):
# # Inside the file is replaced
# data_function(override_file_tester)
# check the function works with hash provided
with manager.override_file('test_file', f'file://{folder}/another_file', MOCK_HASH):
data_function(override_file_tester)
with pytest.raises(ValueError): # noqa: PT012
# check if functions errors with the wrong hash
with manager.override_file('test_file', f'file://{folder}/another_file', 'wrong_hash'):
# Inside the file is replaced
data_function(override_file_tester)
# Even after context manager call outside the file is default
data_function(default_tester)
def test_override_file_remote(manager, downloader, data_function):
replace_url = 'http://example.com/another_file'
data_function()
assert downloader.times_called == 1
with manager.override_file('test_file', replace_url):
data_function()
assert downloader.times_called == 2
assert downloader.last_called_url == replace_url
def test_wrong_hash_error(manager, storage):
storage._store.append({
'file_path': '/tmp/test_file',
'file_hash': 'aa',
'url': 'url1'
})
@manager.require('test_file', ['url1', 'url2'], 'asdf')
def foo():
pass
with pytest.raises(ValueError):
foo()
def test_file_changed(data_function, storage):
# Download the file first
data_function()
file = storage._store[0]['file_path']
# The file was then locally changed
write_to_test_file(file, "asd")
# Now it should error
with pytest.warns(SunpyUserWarning):
data_function()
def test_delete_db(sqlmanager, sqlstorage):
# Download the file
@sqlmanager.require('test_file', ['http://example.com/test_file'], MOCK_HASH)
def test_function():
pass
test_function()
# The DB file was then deleted
os.remove(str(sqlstorage._db_path))
# SQLite should not throw an error
test_function()
def test_same_file_id_different_module(downloader, storage,
data_function, data_function_from_fake_module):
# Uses name 'test_file' to refer to the file
data_function()
# Change hash of the above file to allow MockDownloader to download another file
# Otherwise it will skip the download because a file with the same hash already exists
storage._store[0]['file_hash'] = 'abc'
# This function from a different module uses same name 'test_file' to refer to a different file
data_function_from_fake_module()
assert len(storage._store) == 2
assert downloader.times_called == 2
# Check if the files are namespaced correctly
assert Path(storage._store[0]['file_path']).name == 'sunpy.test_file'
assert Path(storage._store[1]['file_path']).name == 'fake_module.test_file'
def test_namespacing_with_manager_override_file(module_patched_manager, downloader,
storage, data_function_from_fake_module):
# Download a file using manager.require()
data_function_from_fake_module()
assert len(storage._store) == 1
assert downloader.times_called == 1
assert Path(storage._store[0]['file_path']).name == 'fake_module.test_file'
# Override the file name with a different URI
with module_patched_manager.override_file(
'test_file', 'http://www.different_uri.com/new_file', MOCK_HASH):
data_function_from_fake_module()
assert downloader.times_called == 2
# New file entry is stored in manager._file_cache only
# It's not stored in InMemStorage or SqlStorage
assert len(storage._store) == 1
assert Path(
module_patched_manager._file_cache['test_file']['fake_module.']
).name == 'fake_module.new_file'
# Storage still contains original test_file
assert Path(storage._store[0]['file_path']).name == 'fake_module.test_file'
# Request the original file again
data_function_from_fake_module()
# File doesn't get redownloaded, instead it is retrieved using the file hash
assert downloader.times_called == 2
# new_file entry in manager._file_cache is replaced with the original test_file
assert Path(
module_patched_manager._file_cache['test_file']['fake_module.']
).name == 'fake_module.test_file'
# Storage still contains original test_file
assert Path(storage._store[0]['file_path']).name == 'fake_module.test_file'
|
2,210 |
test dynamic field
|
import logging
import os
from django.core import management
from django.test import TestCase, override_settings
from django.forms import ValidationError
from django.core.files import File
from django.urls import reverse
from biostar.recipes import models, views, auth, factory, forms, const, api
from biostar.recipes import util as engine_util
from django.conf import settings
from biostar.utils.helpers import fake_request, get_uuid
TEST_ROOT = os.path.abspath(os.path.join(settings.BASE_DIR, 'export', 'test'))
TOC_ROOT = os.path.join(TEST_ROOT, 'toc')
logger = logging.getLogger('engine')
# Ensure that the table of directory exists.
os.makedirs(TOC_ROOT, exist_ok=True)
class Bunch(object):
last_valid = template = ''
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
@override_settings(MEDIA_ROOT=TEST_ROOT, TOC_ROOT=TOC_ROOT)
class SiteAdminTest(TestCase):
def setUp(self):
logger.setLevel(logging.WARNING)
self.user = models.User.objects.create_superuser(username=f"tested{get_uuid(10)}", email="[email protected]",
password="tested")
self.user.save()
def test_site_admin(self):
"Test site admin page"
url = reverse("site_admin")
request = fake_request(url=url, data={}, method="GET",user=self.user)
response = views.site_admin(request=request)
# admin page specific to biostar-engine.
self.assertEqual(response.status_code, 200, "Can not load admin page")
def test_bin_view(self):
"Test recycle bin view"
url = reverse('recycle_bin')
request = fake_request(url=url, data={}, method="GET",user=self.user)
response = views.recycle_bin(request=request)
self.assertEqual(response.status_code, 200, "Can not load recyle bin")
@override_settings(MEDIA_ROOT=TEST_ROOT , MULTI_THREAD=False)
class FactoryTest(TestCase):
def setUp(self):
logger.setLevel(logging.WARNING)
self.owner = models.User.objects.filter(is_superuser=True).first()
self.project = auth.create_project(user=self.owner, name="tested",
text="Text", summary="summary", uid="tested")
def Xtest_api_change_obj(self):
"""
Change object image
"""
new_img = open(os.path.join(TEST_ROOT, "data", "image.png"))
api.change_image(obj=self.project, fname=new_img)
return
def test_factory_fields(self):
"Testing factory module that generates fields"
# All valid field types.
field_types = factory.get_field_types()
for display_type in field_types:
# Test that each field type can be rendered.
json_data = dict(display=display_type)
field = factory.dynamic_field(json_data)
if not field:
message = f"field generator for display={display_type} failed"
if display_type == const.SQL:
return
self.assertFalse(message)
def METHOD_NAME(self):
"Test data generator"
from biostar.recipes import const
data = auth.create_data(self.project, path=__file__)
display_type = const.DROPDOWN
json_data = dict(display=display_type, value=data.name,
source= 'PROJECT')
field = factory.dynamic_field(json_data, project=self.project)
if not field:
self.assertFalse(f"field generator for display={display_type} failed")
def test_data_generator(self):
"Test data generator"
field = factory.data_field_generator(field={}, type="DATA", project=self.project)
if not field:
self.assertFalse(f"data field generator failed")
def test_import_file(self):
"Test import files tab view"
url = reverse('root_list')
request = fake_request(url=url, data={}, user=self.owner)
response = views.import_files(request)
self.assertEqual(response.status_code, 200, f"Error with file listing in import tab.")
@override_settings(MEDIA_ROOT=TEST_ROOT)
class UtilTests(TestCase):
def setUp(self):
logger.setLevel(logging.WARNING)
def test_smart_preview(self):
collect = engine_util.findfiles("biostar/recipes/test/data", collect=[])
for fname in collect:
text = engine_util.smart_preview(fname)
self.assertTrue("error" not in text.split(), f"Preview error with {fname}")
with self.assertRaises(ValidationError):
forms.check_size(File(open(fname, "r")), maxsize=0.000001)
|
2,211 |
fixture connection settings service
|
"""Test Network Manager Connection object."""
from dbus_fast.aio.message_bus import MessageBus
from dbus_fast.signature import Variant
import pytest
from supervisor.dbus.network.interface import NetworkInterface
from supervisor.dbus.network.setting import NetworkSetting
from supervisor.dbus.network.setting.generate import get_connection_from_interface
from supervisor.host.const import InterfaceMethod
from supervisor.host.network import Interface
from tests.dbus_service_mocks.base import DBusServiceMock
from tests.dbus_service_mocks.network_connection_settings import (
ConnectionSettings as ConnectionSettingsService,
)
@pytest.fixture(name="connection_settings_service", autouse=True)
async def METHOD_NAME(
network_manager_services: dict[str, DBusServiceMock | dict[str, DBusServiceMock]]
) -> ConnectionSettingsService:
"""Mock Connection Settings service."""
yield network_manager_services["network_connection_settings"]
@pytest.fixture(name="dbus_interface")
async def fixture_dbus_interface(dbus_session_bus: MessageBus) -> NetworkInterface:
"""Get connected dbus interface."""
dbus_interface = NetworkInterface("/org/freedesktop/NetworkManager/Devices/1")
await dbus_interface.connect(dbus_session_bus)
yield dbus_interface
async def test_update(
dbus_interface: NetworkInterface,
connection_settings_service: ConnectionSettingsService,
):
"""Test network manager update."""
connection_settings_service.Update.calls.clear()
interface = Interface.from_dbus_interface(dbus_interface)
conn = get_connection_from_interface(
interface,
name=dbus_interface.settings.connection.id,
uuid=dbus_interface.settings.connection.uuid,
)
await dbus_interface.settings.update(conn)
assert len(connection_settings_service.Update.calls) == 1
settings = connection_settings_service.Update.calls[0][0]
assert settings["connection"]["id"] == Variant("s", "Supervisor eth0")
assert "interface-name" not in settings["connection"]
assert settings["connection"]["uuid"] == Variant(
"s", "0c23631e-2118-355c-bbb0-8943229cb0d6"
)
assert settings["connection"]["autoconnect"] == Variant("b", True)
assert settings["match"] == {"path": Variant("as", ["platform-ff3f0000.ethernet"])}
assert "ipv4" in settings
assert settings["ipv4"]["method"] == Variant("s", "auto")
assert "gateway" not in settings["ipv4"]
assert "dns" not in settings["ipv4"]
assert "address-data" not in settings["ipv4"]
assert "addresses" not in settings["ipv4"]
assert len(settings["ipv4"]["route-data"].value) == 1
assert settings["ipv4"]["route-data"].value[0]["dest"] == Variant(
"s", "192.168.122.0"
)
assert settings["ipv4"]["route-data"].value[0]["prefix"] == Variant("u", 24)
assert settings["ipv4"]["route-data"].value[0]["next-hop"] == Variant(
"s", "10.10.10.1"
)
assert settings["ipv4"]["routes"] == Variant("aau", [[8038592, 24, 17435146, 0]])
assert "ipv6" in settings
assert settings["ipv6"]["method"] == Variant("s", "auto")
assert "gateway" not in settings["ipv6"]
assert "dns" not in settings["ipv6"]
assert "address-data" not in settings["ipv6"]
assert "addresses" not in settings["ipv6"]
assert settings["ipv6"]["addr-gen-mode"] == Variant("i", 0)
assert "proxy" in settings
assert "802-3-ethernet" in settings
assert settings["802-3-ethernet"]["auto-negotiate"] == Variant("b", False)
assert "802-11-wireless" in settings
assert settings["802-11-wireless"]["ssid"] == Variant("ay", b"NETT")
assert "mode" not in settings["802-11-wireless"]
assert "powersave" not in settings["802-11-wireless"]
assert "802-11-wireless-security" not in settings
assert "vlan" not in settings
async def test_ipv6_disabled_is_link_local(dbus_interface: NetworkInterface):
"""Test disabled equals link local for ipv6."""
interface = Interface.from_dbus_interface(dbus_interface)
interface.ipv4.method = InterfaceMethod.DISABLED
interface.ipv6.method = InterfaceMethod.DISABLED
conn = get_connection_from_interface(
interface,
name=dbus_interface.settings.connection.id,
uuid=dbus_interface.settings.connection.uuid,
)
assert conn["ipv4"]["method"] == Variant("s", "disabled")
assert conn["ipv6"]["method"] == Variant("s", "link-local")
async def test_watching_updated_signal(
connection_settings_service: ConnectionSettingsService, dbus_session_bus: MessageBus
):
"""Test get settings called on update signal."""
connection_settings_service.GetSettings.calls.clear()
settings = NetworkSetting("/org/freedesktop/NetworkManager/Settings/1")
await settings.connect(dbus_session_bus)
assert connection_settings_service.GetSettings.calls == [tuple()]
connection_settings_service.Updated()
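# Two pings let the Updated signal propagate and the resulting GetSettings call complete.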
await connection_settings_service.ping()
await connection_settings_service.ping()
assert connection_settings_service.GetSettings.calls == [tuple(), tuple()]
|
2,212 |
perform mutation
|
from typing import Iterable
import graphene
from django.core.exceptions import ValidationError
from django.db import transaction
from ....core.tracing import traced_atomic_transaction
from ....core.utils.promo_code import generate_promo_code
from ....core.utils.validators import is_date_in_future
from ....giftcard import events, models
from ....giftcard.error_codes import GiftCardErrorCode
from ....permission.enums import GiftcardPermissions
from ....webhook.event_types import WebhookEventAsyncType
from ...app.dataloaders import get_app_promise
from ...core import ResolveInfo
from ...core.descriptions import ADDED_IN_31
from ...core.doc_category import DOC_CATEGORY_GIFT_CARDS
from ...core.mutations import BaseMutation
from ...core.scalars import Date
from ...core.types import BaseInputObjectType, GiftCardError, NonNullList, PriceInput
from ...core.utils import WebhookEventInfo
from ...core.validators import validate_price_precision
from ...plugins.dataloaders import get_plugin_manager_promise
from ..mutations import GiftCardCreate
from ..types import GiftCard
class GiftCardBulkCreateInput(BaseInputObjectType):
count = graphene.Int(required=True, description="The number of cards to issue.")
balance = graphene.Field(
PriceInput, description="Balance of the gift card.", required=True
)
tags = NonNullList(
graphene.String,
description="The gift card tags.",
)
expiry_date = Date(description="The gift card expiry date.")
is_active = graphene.Boolean(
required=True, description="Determine if gift card is active."
)
class Meta:
doc_category = DOC_CATEGORY_GIFT_CARDS
class GiftCardBulkCreate(BaseMutation):
count = graphene.Int(
required=True,
default_value=0,
description="Returns how many objects were created.",
)
gift_cards = NonNullList(
GiftCard,
required=True,
default_value=[],
description="List of created gift cards.",
)
class Arguments:
input = GiftCardBulkCreateInput(
required=True, description="Fields required to create gift cards."
)
class Meta:
description = "Create gift cards." + ADDED_IN_31
doc_category = DOC_CATEGORY_GIFT_CARDS
model = models.GiftCard
permissions = (GiftcardPermissions.MANAGE_GIFT_CARD,)
error_type_class = GiftCardError
webhook_events_info = [
WebhookEventInfo(
type=WebhookEventAsyncType.GIFT_CARD_CREATED,
description="A gift card was created.",
),
WebhookEventInfo(
type=WebhookEventAsyncType.NOTIFY_USER,
description="A notification for created gift card.",
),
]
@classmethod
@traced_atomic_transaction()
def METHOD_NAME( # type: ignore[override]
cls, _root, info: ResolveInfo, /, *, input
):
cls.clean_count_value(input)
cls.clean_expiry_date(input)
cls.clean_balance(input)
GiftCardCreate.set_created_by_user(input, info)
tags = input.pop("tags", None)
instances = cls.create_instances(input, info)
if tags:
cls.assign_gift_card_tags(instances, tags)
manager = get_plugin_manager_promise(info.context).get()
transaction.on_commit(
lambda: cls.call_gift_card_created_on_plugins(instances, manager)
)
return cls(count=len(instances), gift_cards=instances)
@staticmethod
def clean_count_value(input_data):
if not input_data["count"] > 0:
raise ValidationError(
{
"count": ValidationError(
"Count value must be greater than 0.",
code=GiftCardErrorCode.INVALID.value,
)
}
)
@staticmethod
def clean_expiry_date(input_data):
expiry_date = input_data.get("expiry_date")
if expiry_date and not is_date_in_future(expiry_date):
raise ValidationError(
{
"expiry_date": ValidationError(
"Expiry date cannot be in the past.",
code=GiftCardErrorCode.INVALID.value,
)
}
)
@staticmethod
def clean_balance(cleaned_input):
balance = cleaned_input["balance"]
amount = balance["amount"]
currency = balance["currency"]
try:
validate_price_precision(amount, currency)
except ValidationError as error:
error.code = GiftCardErrorCode.INVALID.value
raise ValidationError({"balance": error})
if not amount > 0:
raise ValidationError(
{
"balance": ValidationError(
"Balance amount have to be greater than 0.",
code=GiftCardErrorCode.INVALID.value,
)
}
)
cleaned_input["currency"] = currency
cleaned_input["current_balance_amount"] = amount
cleaned_input["initial_balance_amount"] = amount
@staticmethod
def create_instances(cleaned_input, info):
count = cleaned_input.pop("count")
balance = cleaned_input.pop("balance")
app = get_app_promise(info.context).get()
gift_cards = models.GiftCard.objects.bulk_create(
[
models.GiftCard(code=generate_promo_code(), **cleaned_input)
for _ in range(count)
]
)
events.gift_cards_issued_event(gift_cards, info.context.user, app, balance)
return gift_cards
@staticmethod
def assign_gift_card_tags(
instances: Iterable[models.GiftCard], tags_values: Iterable[str]
):
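# Normalize tag names to lowercase and create any tags that do not exist yet.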
tags = {tag.lower() for tag in tags_values}
tags_instances = models.GiftCardTag.objects.filter(name__in=tags)
tags_to_create = tags - set(tags_instances.values_list("name", flat=True))
models.GiftCardTag.objects.bulk_create(
[models.GiftCardTag(name=tag) for tag in tags_to_create]
)
for tag_instance in tags_instances.iterator():
tag_instance.gift_cards.set(instances)
@staticmethod
def call_gift_card_created_on_plugins(instances, manager):
for instance in instances:
manager.gift_card_created(instance)
|
2,213 |
test get data no data
|
# (C) Copyright 2005-2021 Enthought, Inc., Austin, TX
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only under
# the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
"""
Test of FunctionDataSource behavior.
"""
import unittest
from numpy import array, linspace, ones
from numpy.testing import assert_array_equal
from chaco.api import DataRange1D
from chaco.function_data_source import FunctionDataSource
from traits.testing.api import UnittestTools
class FunctionDataSourceTestCase(UnittestTools, unittest.TestCase):
def setUp(self):
self.myfunc = lambda low, high: linspace(low, high, 101) ** 2
self.data_source = FunctionDataSource(func=self.myfunc)
def test_init_defaults(self):
data_source = FunctionDataSource()
assert_array_equal(data_source._data, [])
self.assertEqual(data_source.value_dimension, "scalar")
self.assertEqual(data_source.sort_order, "ascending")
self.assertFalse(data_source.is_masked())
def test_basic_setup(self):
assert_array_equal(self.myfunc, self.data_source.func)
self.assertEqual(self.data_source.value_dimension, "scalar")
self.assertEqual(self.data_source.sort_order, "ascending")
self.assertFalse(self.data_source.is_masked())
def test_set_data(self):
with self.assertRaises(RuntimeError):
self.data_source.set_data(
lambda low, high: linspace(low, high, 101)
)
def test_range_high_changed(self):
self.data_source.data_range = DataRange1D(
low_setting=0.0, high_setting=1.0
)
with self.assertTraitChanges(
self.data_source, "data_changed", count=1
):
self.data_source.data_range.high_setting = 2.0
assert_array_equal(
linspace(0.0, 2.0, 101) ** 2, self.data_source.get_data()
)
def test_range_low_changed(self):
self.data_source.data_range = DataRange1D(
low_setting=0.0, high_setting=1.0
)
with self.assertTraitChanges(
self.data_source, "data_changed", count=1
):
self.data_source.data_range.low_setting = -1.0
assert_array_equal(
linspace(-1.0, 1.0, 101) ** 2, self.data_source.get_data()
)
def test_range_data_range_changed(self):
self.data_source.data_range = DataRange1D(
low_setting=0.0, high_setting=1.0
)
with self.assertTraitChanges(
self.data_source, "data_changed", count=1
):
self.data_source.data_range = DataRange1D(
low_setting=-2.0, high_setting=2.0
)
assert_array_equal(
linspace(-2.0, 2.0, 101) ** 2, self.data_source.get_data()
)
def test_set_mask(self):
mymask = array([i % 2 for i in range(101)], dtype=bool)
with self.assertRaises(NotImplementedError):
self.data_source.set_mask(mymask)
def test_remove_mask(self):
with self.assertRaises(NotImplementedError):
self.data_source.remove_mask()
def test_get_data(self):
self.data_source.data_range = DataRange1D(
low_setting=0.0, high_setting=1.0
)
assert_array_equal(
linspace(0.0, 1.0, 101) ** 2, self.data_source.get_data()
)
def METHOD_NAME(self):
self.data_source = FunctionDataSource()
assert_array_equal(self.data_source.get_data(), array([], dtype=float))
def test_get_data_mask(self):
self.data_source.data_range = DataRange1D(
low_setting=0.0, high_setting=1.0
)
data, mask = self.data_source.get_data_mask()
assert_array_equal(data, linspace(0.0, 1.0, 101) ** 2)
assert_array_equal(mask, ones(shape=101, dtype=bool))
def test_bounds(self):
self.data_source.data_range = DataRange1D(
low_setting=0.0, high_setting=2.0
)
bounds = self.data_source.get_bounds()
self.assertEqual(bounds, (0.0, 4.0))
@unittest.skip("default sort_order is ascending, which isn't right")
def test_bounds_non_monotone(self):
self.data_source.data_range = DataRange1D(
low_setting=-2.0, high_setting=2.0
)
bounds = self.data_source.get_bounds()
self.assertEqual(bounds, (0.0, 4.0))
def test_data_size(self):
self.data_source.data_range = DataRange1D(
low_setting=0.0, high_setting=2.0
)
self.assertEqual(101, self.data_source.get_size())
|
2,214 |
get tags
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Supports git repositories for the benchmarked project.
"""
import os
import re
import shlex
from ..console import log
from ..repo import Repo, NoSuchNameError
from .. import util
class Git(Repo):
dvcs = "git"
def __init__(self, url, mirror_path):
self._git = util.which("git")
self._path = os.path.abspath(mirror_path)
self._pulled = False
# default branch
self._default_branch = util.git_default_branch()
if self.is_local_repo(url):
# Local repository, no need for mirror
self._path = os.path.abspath(url)
self._pulled = True
elif not self.is_local_repo(self._path):
if os.path.exists(self._path):
self._raise_bad_mirror_error(self._path)
# Clone is missing
log.info("Cloning project")
self._run_git(['clone', '--mirror', url, self._path], cwd=None)
@classmethod
def is_local_repo(cls, path):
return os.path.isdir(path) and (
os.path.exists(os.path.join(path, '.git')) or
os.path.isdir(os.path.join(path, 'objects')))
@classmethod
def url_match(cls, url):
regexes = [
r'^https?://.*?\.git$',
r'^git@.*?\.git$']
for regex in regexes:
if re.match(regex, url):
return True
# Check for a local path
if cls.is_local_repo(url):
return True
return False
def _run_git(self, args, cwd=True, **kwargs):
if cwd is True:
cwd = self._path
kwargs['cwd'] = cwd
env = dict(kwargs.pop('env', os.environ))
if cwd is not None:
prev = env.get('GIT_CEILING_DIRECTORIES')
env['GIT_CEILING_DIRECTORIES'] = os.pathsep.join(
[os.path.join(os.path.abspath(cwd), os.pardir)] +
([prev] if prev is not None else []))
return util.check_output([self._git] + args, env=env, **kwargs)
def get_new_range_spec(self, latest_result, branch=None):
return f'{latest_result}..{self.get_branch_name(branch)}'
def get_range_spec(self, commit_a, commit_b):
return f'{commit_a}..{commit_b}'
def pull(self):
# We assume the remote isn't updated during the run of asv
# itself.
if self._pulled:
return
log.info("Fetching recent changes")
self._run_git(['fetch', 'origin'])
self._pulled = True
def checkout(self, path, commit_hash):
def checkout_existing(display_error):
# Deinit fails if no submodules, so ignore its failure
self._run_git(['-c', 'protocol.file.allow=always',
'submodule', 'deinit', '-f', '.'],
cwd=path, display_error=False, valid_return_codes=None)
self._run_git(['checkout', '-f', commit_hash],
cwd=path, display_error=display_error)
self._run_git(['clean', '-fdx'],
cwd=path, display_error=display_error)
self._run_git(['-c', 'protocol.file.allow=always',
'submodule', 'update', '--init', '--recursive'],
cwd=path, display_error=display_error)
if os.path.isdir(path):
try:
checkout_existing(display_error=False)
except util.ProcessError:
# Remove and try to re-clone
util.long_path_rmtree(path)
if not os.path.isdir(path):
self._run_git(['clone', '--shared', '--recursive', self._path, path],
cwd=None)
checkout_existing(display_error=True)
def get_date(self, hash):
return int(self._run_git(
['rev-list', '-n', '1', '--format=%at', hash],
valid_return_codes=(0, 1), dots=False).strip().split()[-1]) * 1000
def get_hashes_from_range(self, range_spec):
args = ['rev-list', '--first-parent']
if range_spec != "":
args += shlex.split(range_spec) + ["--"]
output = self._run_git(args, valid_return_codes=(0, 1), dots=False)
return output.strip().split()
def get_hash_from_name(self, name):
if name is None:
name = self.get_branch_name()
# In case of annotated tags, return the hash for the commit
lookup_name = name + '^{commit}'
try:
return self._run_git(['rev-parse', lookup_name],
display_error=False,
dots=False).strip().split()[0]
except util.ProcessError as err:
if err.stdout.strip() == lookup_name:
# Name does not exist
raise NoSuchNameError(name)
raise
def get_hash_from_parent(self, name):
return self.get_hash_from_name(name + '^')
def get_name_from_hash(self, commit):
try:
name = self._run_git(["name-rev", "--name-only",
"--exclude=remotes/*",
"--no-undefined", commit],
display_error=False).strip()
if not name:
return None
except util.ProcessError:
# Failed to obtain.
return None
# Return tags without prefix
for prefix in ['tags/']:
if name.startswith(prefix):
return name[len(prefix):]
return name
def METHOD_NAME(self):
tags = {}
for tag in self._run_git(["tag", "-l", "--sort=taggerdate"]).splitlines():
tags[tag] = self._run_git(["rev-list", "-n", "1", tag]).strip()
return tags
def get_date_from_name(self, name):
return self.get_date(name + "^{commit}")
def get_branch_commits(self, branch):
return self.get_hashes_from_range(self.get_branch_name(branch))
def get_revisions(self, commits):
revisions = {}
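# A commit's revision number is its index in date-ordered history across all refs.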
for i, commit in enumerate(self._run_git([
"rev-list", "--all", "--date-order", "--reverse",
]).splitlines()):
if commit in commits:
revisions[commit] = i
return revisions
|
2,215 |
uninstall addon
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import base64
import logging
import os
import warnings
import zipfile
from contextlib import contextmanager
from io import BytesIO
from selenium.webdriver.common.driver_finder import DriverFinder
from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver
from .options import Options
from .remote_connection import FirefoxRemoteConnection
from .service import Service
logger = logging.getLogger(__name__)
class WebDriver(RemoteWebDriver):
"""Controls the GeckoDriver and allows you to drive the browser."""
CONTEXT_CHROME = "chrome"
CONTEXT_CONTENT = "content"
def __init__(
self,
options: Options = None,
service: Service = None,
keep_alive=True,
) -> None:
"""Creates a new instance of the Firefox driver. Starts the service and
then creates new instance of Firefox driver.
:Args:
- options - Instance of ``options.Options``.
- service - (Optional) service instance for managing the starting and stopping of the driver.
- keep_alive - Whether to configure remote_connection.RemoteConnection to use HTTP keep-alive.
"""
self.service = service if service else Service()
options = options if options else Options()
self.service.path = DriverFinder.get_path(self.service, options)
self.service.start()
executor = FirefoxRemoteConnection(
remote_server_addr=self.service.service_url,
ignore_proxy=options._ignore_local_proxy,
keep_alive=keep_alive,
)
super().__init__(command_executor=executor, options=options)
self._is_remote = False
def quit(self) -> None:
"""Quits the driver and close every associated window."""
try:
super().quit()
except Exception:
# We don't care about the message because something probably has gone wrong
pass
self.service.stop()
def set_context(self, context) -> None:
self.execute("SET_CONTEXT", {"context": context})
@contextmanager
def context(self, context):
"""Sets the context that Selenium commands are running in using a
`with` statement. The state of the context on the server is saved
before entering the block, and restored upon exiting it.
:param context: Context, may be one of the class properties
`CONTEXT_CHROME` or `CONTEXT_CONTENT`.
Usage example::
with selenium.context(selenium.CONTEXT_CHROME):
# chrome scope
... do stuff ...
"""
initial_context = self.execute("GET_CONTEXT").pop("value")
self.set_context(context)
try:
yield
finally:
self.set_context(initial_context)
def install_addon(self, path, temporary=False) -> str:
"""Installs Firefox addon.
Returns identifier of installed addon. This identifier can later
be used to uninstall addon.
:param temporary: allows you to load browser extensions temporarily during a session
:param path: Absolute path to the addon that will be installed.
:Usage:
::
driver.install_addon('/path/to/firebug.xpi')
"""
if os.path.isdir(path):
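# Unpacked extension directory: zip it in memory before base64-encoding.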
fp = BytesIO()
path_root = len(path) + 1 # account for trailing slash
with zipfile.ZipFile(fp, "w", zipfile.ZIP_DEFLATED) as zipped:
for base, _, files in os.walk(path):
for fyle in files:
filename = os.path.join(base, fyle)
zipped.write(filename, filename[path_root:])
addon = base64.b64encode(fp.getvalue()).decode("UTF-8")
else:
with open(path, "rb") as file:
addon = base64.b64encode(file.read()).decode("UTF-8")
payload = {"addon": addon, "temporary": temporary}
return self.execute("INSTALL_ADDON", payload)["value"]
def METHOD_NAME(self, identifier) -> None:
"""Uninstalls Firefox addon using its identifier.
:Usage:
::
driver.uninstall_addon('[email protected]')
"""
self.execute("UNINSTALL_ADDON", {"id": identifier})
def get_full_page_screenshot_as_file(self, filename) -> bool:
"""Saves a full document screenshot of the current window to a PNG
image file. Returns False if there is any IOError, else returns True.
Use full paths in your filename.
:Args:
- filename: The full path you wish to save your screenshot to. This
should end with a `.png` extension.
:Usage:
::
driver.get_full_page_screenshot_as_file('/Screenshots/foo.png')
"""
if not filename.lower().endswith(".png"):
warnings.warn(
"name used for saved screenshot does not match file type. It should end with a `.png` extension",
UserWarning,
)
png = self.get_full_page_screenshot_as_png()
try:
with open(filename, "wb") as f:
f.write(png)
except OSError:
return False
finally:
del png
return True
def save_full_page_screenshot(self, filename) -> bool:
"""Saves a full document screenshot of the current window to a PNG
image file. Returns False if there is any IOError, else returns True.
Use full paths in your filename.
:Args:
- filename: The full path you wish to save your screenshot to. This
should end with a `.png` extension.
:Usage:
::
driver.save_full_page_screenshot('/Screenshots/foo.png')
"""
return self.get_full_page_screenshot_as_file(filename)
def get_full_page_screenshot_as_png(self) -> bytes:
"""Gets the full document screenshot of the current window as a binary
data.
:Usage:
::
driver.get_full_page_screenshot_as_png()
"""
return base64.b64decode(self.get_full_page_screenshot_as_base64().encode("ascii"))
def get_full_page_screenshot_as_base64(self) -> str:
"""Gets the full document screenshot of the current window as a base64
encoded string which is useful in embedded images in HTML.
:Usage:
::
driver.get_full_page_screenshot_as_base64()
"""
return self.execute("FULL_PAGE_SCREENSHOT")["value"]
|
2,216 |
get optimized locs
|
# Copyright 2022 NREL
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# See https://floris.readthedocs.io for documentation
import matplotlib.pyplot as plt
import numpy as np
from scipy.spatial.distance import cdist
from shapely.geometry import Point
from .layout_optimization_base import LayoutOptimization
class LayoutOptimizationPyOptSparse(LayoutOptimization):
def __init__(
self,
fi,
boundaries,
min_dist=None,
freq=None,
solver=None,
optOptions=None,
timeLimit=None,
storeHistory='hist.hist',
hotStart=None
):
super().__init__(fi, boundaries, min_dist=min_dist, freq=freq)
self.x0 = self._norm(self.fi.layout_x, self.xmin, self.xmax)
self.y0 = self._norm(self.fi.layout_y, self.ymin, self.ymax)
self.storeHistory = storeHistory
self.timeLimit = timeLimit
self.hotStart = hotStart
try:
import pyoptsparse
except ImportError:
err_msg = (
"It appears you do not have pyOptSparse installed. "
+ "Please refer to https://pyoptsparse.readthedocs.io/ for "
+ "guidance on how to properly install the module."
)
self.logger.error(err_msg, stack_info=True)
raise ImportError(err_msg)
# Instantiate pyOptSparse optimization object with name and objective function
self.optProb = pyoptsparse.Optimization('layout', self._obj_func)
self.optProb = self.add_var_group(self.optProb)
self.optProb = self.add_con_group(self.optProb)
self.optProb.addObj("obj")
if solver is not None:
self.solver = solver
print("Setting up optimization with user's choice of solver: ", self.solver)
else:
self.solver = "SLSQP"
print("Setting up optimization with default solver: SLSQP.")
if optOptions is not None:
self.optOptions = optOptions
else:
if self.solver == "SNOPT":
self.optOptions = {"Major optimality tolerance": 1e-7}
else:
self.optOptions = {}
exec("self.opt = pyoptsparse." + self.solver + "(options=self.optOptions)")
def _optimize(self):
if hasattr(self, "_sens"):
self.sol = self.opt(self.optProb, sens=self._sens)
else:
if self.timeLimit is not None:
self.sol = self.opt(
self.optProb,
sens="CDR",
storeHistory=self.storeHistory,
timeLimit=self.timeLimit,
hotStart=self.hotStart
)
else:
self.sol = self.opt(
self.optProb,
sens="CDR",
storeHistory=self.storeHistory,
hotStart=self.hotStart
)
return self.sol
def _obj_func(self, varDict):
# Parse the variable dictionary
self.parse_opt_vars(varDict)
# Update turbine map with turbine locations
self.fi.reinitialize(layout_x = self.x, layout_y = self.y)
# Compute the objective function
funcs = {}
funcs["obj"] = (
-1 * self.fi.get_farm_AEP(self.freq) / self.initial_AEP
)
# Compute constraints, if any are defined for the optimization
funcs = self.compute_cons(funcs, self.x, self.y)
fail = False
return funcs, fail
# Optionally, the user can supply the optimization with gradients
# def _sens(self, varDict, funcs):
# funcsSens = {}
# fail = False
# return funcsSens, fail
def parse_opt_vars(self, varDict):
self.x = self._unnorm(varDict["x"], self.xmin, self.xmax)
self.y = self._unnorm(varDict["y"], self.ymin, self.ymax)
def parse_sol_vars(self, sol):
self.x = list(self._unnorm(sol.getDVs()["x"], self.xmin, self.xmax))[0]
self.y = list(self._unnorm(sol.getDVs()["y"], self.ymin, self.ymax))[1]
def add_var_group(self, optProb):
optProb.addVarGroup(
"x", self.nturbs, varType="c", lower=0.0, upper=1.0, value=self.x0
)
optProb.addVarGroup(
"y", self.nturbs, varType="c", lower=0.0, upper=1.0, value=self.y0
)
return optProb
def add_con_group(self, optProb):
optProb.addConGroup("boundary_con", self.nturbs, upper=0.0)
optProb.addConGroup("spacing_con", 1, upper=0.0)
return optProb
def compute_cons(self, funcs, x, y):
funcs["boundary_con"] = self.distance_from_boundaries(x, y)
funcs["spacing_con"] = self.space_constraint(x, y)
return funcs
def space_constraint(self, x, y, rho=500):
# Calculate distances between turbines
locs = np.vstack((x, y)).T
distances = cdist(locs, locs)
arange = np.arange(distances.shape[0])
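# Mask the diagonal (self-distances) so each turbine's nearest neighbour is another turbine.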
distances[arange, arange] = 1e10
dist = np.min(distances, axis=0)
g = 1 - np.array(dist) / self.min_dist
# Following code copied from OpenMDAO KSComp().
# Constraint is satisfied when KS_constraint <= 0
g_max = np.max(np.atleast_2d(g), axis=-1)[:, np.newaxis]
g_diff = g - g_max
exponents = np.exp(rho * g_diff)
summation = np.sum(exponents, axis=-1)[:, np.newaxis]
KS_constraint = g_max + 1.0 / rho * np.log(summation)
return KS_constraint[0][0]
def distance_from_boundaries(self, x, y):
boundary_con = np.zeros(self.nturbs)
for i in range(self.nturbs):
loc = Point(x[i], y[i])
boundary_con[i] = loc.distance(self._boundary_line)
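# Flip the sign for turbines inside the boundary so the constraint value is non-positive.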
if self._boundary_polygon.contains(loc):
boundary_con[i] *= -1.0
return boundary_con
def _get_initial_and_final_locs(self):
x_initial = self._unnorm(self.x0, self.xmin, self.xmax)
y_initial = self._unnorm(self.y0, self.ymin, self.ymax)
x_opt, y_opt = self.METHOD_NAME()
return x_initial, y_initial, x_opt, y_opt
def METHOD_NAME(self):
x_opt = self._unnorm(self.sol.getDVs()["x"], self.xmin, self.xmax)
y_opt = self._unnorm(self.sol.getDVs()["y"], self.ymin, self.ymax)
return x_opt, y_opt
|
2,217 |
send
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import json
import threading
import time
from abc import ABC, abstractmethod
from queue import Empty, Queue
from .log_utils import LogType, nni_log
from .commands import CommandType
INTERVAL_SECONDS = 0.5
class BaseChannel(ABC):
def __init__(self, args):
self.is_keep_parsed = args.node_count > 1
self.args = args
self.node_id = self.args.node_id
@abstractmethod
def _inner_send(self, message):
pass
@abstractmethod
def _inner_receive(self):
return []
@abstractmethod
def _inner_open(self):
pass
@abstractmethod
def _inner_close(self):
pass
def open(self):
# initialize receive, send threads.
self.is_running = True
self.receive_queue = Queue()
self.receive_thread = threading.Thread(target=self._receive_loop)
self.receive_thread.start()
self.send_queue = Queue()
self.send_thread = threading.Thread(target=self._send_loop)
self.send_thread.start()
self._inner_open()
client_info = {
"isReady": True,
"runnerId": self.args.runner_id,
"expId": self.args.exp_id,
}
nni_log(LogType.Info, 'Channel: send ready information %s' % client_info)
self.METHOD_NAME(CommandType.Initialized, client_info)
def close(self):
self.is_running = False
try:
self._inner_close()
except Exception as err:
# ignore any error on closing
print("error on closing channel: %s" % err)
def METHOD_NAME(self, command, data):
"""Send command to Training Service.
command: CommandType object.
data: string payload.
the message is sent synchronized.
"""
data["node"] = self.node_id
data = json.dumps(data)
data = data.encode('utf8')
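# Wire format: 2-byte command type, 14-digit zero-padded payload length, then the payload.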
message = b'%b%014d%b' % (command.value, len(data), data)
self.send_queue.put(message)
def sent(self):
return self.send_queue.qsize() == 0
def received(self):
return self.receive_queue.qsize() > 0
def receive(self):
"""Receive a command from Training Service.
Returns a tuple of command (CommandType) and payload (str)
"""
command = None
data = None
try:
command_content = self.receive_queue.get(False)
if command_content is not None:
if (len(command_content) < 16):
# invalid header
nni_log(LogType.Error, 'incorrect command is found, command must be at least 16 bytes!')
return None, None
header = command_content[:16]
command = CommandType(header[:2])
length = int(header[2:])
if (len(command_content)-16 != length):
nni_log(LogType.Error, 'incorrect command length, length {}, actual data length is {}, header {}.'
.format(length, len(command_content)-16, header))
return None, None
data = command_content[16:16+length]
data = json.loads(data.decode('utf8'))
if self.node_id is None:
nni_log(LogType.Info, 'Received command, header: [%s], data: [%s]' % (header, data))
else:
nni_log(LogType.Info, 'Received command(%s), header: [%s], data: [%s]' % (self.node_id, header, data))
except Empty:
# do nothing, if no command received.
pass
except Exception as identifier:
nni_log(LogType.Error, 'meet unhandled exception in base_channel: %s' % identifier)
return command, data
def _fetch_message(self, buffer, has_new_line=False):
messages = []
while len(buffer) >= 16:
header = buffer[:16]
length = int(header[2:])
message_length = length+16
total_length = message_length
if has_new_line:
total_length += 1
# break, if buffer is too short.
if len(buffer) < total_length:
break
data = buffer[16:message_length]
if has_new_line and 10 != buffer[total_length-1]:
nni_log(LogType.Error, 'end of message should be \\n, but got {}'.format(buffer[total_length-1]))
buffer = buffer[total_length:]
messages.append(header + data)
return messages, buffer
def _receive_loop(self):
while (self.is_running):
messages = self._inner_receive()
if messages is not None:
for message in messages:
self.receive_queue.put(message)
time.sleep(INTERVAL_SECONDS)
def _send_loop(self):
while (self.is_running):
message = None
try:
# no sleep, since it's a block call with INTERVAL_SECONDS second timeout
message = self.send_queue.get(True, INTERVAL_SECONDS)
except Empty:
# do nothing, if no command received.
pass
if message is not None:
self._inner_send(message)
|
2,218 |
load seccomp
|
# This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2007-2020 Dirk Beyer <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0
"""Utility functions adding a seccomp filter."""
import ctypes
from ctypes import c_int, c_uint, c_uint32, c_char_p, c_void_p
import errno
import logging
import os
import threading
import benchexec.libc as libc
# /usr/include/seccomp.h
SCMP_ACT_ALLOW = 0x7FFF0000
SCMP_ACT_ENOSYS = 0x00050000 | errno.ENOSYS
_scmp_filter_ctx = c_void_p
_SECCOMP_MODE_FILTER = 2 # /usr/include/linux/seccomp.h
# By default, a seccomp filter only allows syscall that match the native architecture.
# In some cases, it is common to run binaries with a different architecture
# (e.g., x86 binaries on an x86_64 kernel) and we want to allow such binaries and filter
# their syscalls. Thus we need a list of architectures of which we expect binaries to
# occur even though the native architecture is different. This is commonly the case for
# 32-bit counterparts of 64-bit architectures.
_ALLOWED_COMPATIBILITY_ARCHITECTURES = [b"x86", b"x32", b"arm"]
_AVAILABLE = None
_LOAD_LOCK = threading.Lock()
_lib = None
_check_errno = libc._check_errno
def _check_null(result, func, arguments):
"""Check that a ctypes function returned something else than null."""
if result:
return result
func_name = getattr(func, "__name__", "__unknown__")
raise OSError(f"{func_name}({', '.join(map(str, arguments))}) returned null")
def METHOD_NAME():
# First check if seccomp has a chance of working.
try:
libc.prctl(libc.PR_GET_SECCOMP, 0, 0, 0, 0)
except OSError as e:
logging.warning(
"Seccomp is not available, container isolation is degraded (%s).",
os.strerror(e.errno),
)
return False
try:
libc.prctl(libc.PR_SET_SECCOMP, _SECCOMP_MODE_FILTER, 0, 0, 0)
except OSError as e:
# EFAULT is expected if passing null pointer as filter argument
if e.errno != errno.EFAULT:
logging.warning(
"Unexpected failure when enabling seccomp filter, "
"container isolation is degraded (%s).",
e,
)
return False
else:
logging.warning(
"Unexpected failure when enabling seccomp filter, "
"container isolation is degraded."
)
return False
# Load library with utility functions.
global _lib
try:
# Allow overriding library lookup for cases like NixOS
libseccomp = os.environ.get("LIBSECCOMP", "libseccomp.so.2")
_lib = ctypes.CDLL(libseccomp, use_errno=True)
except OSError as e:
logging.warning(
"Could not load libseccomp2, "
"please install it for improved container isolation (%s).",
e,
)
return False
_lib.seccomp_init.argtypes = [c_uint32]
_lib.seccomp_init.restype = _scmp_filter_ctx
_lib.seccomp_init.errcheck = _check_null
_lib.seccomp_release.argtypes = [_scmp_filter_ctx]
_lib.seccomp_release.restype = None
_lib.seccomp_export_pfc.argtypes = [_scmp_filter_ctx, c_int]
_lib.seccomp_export_pfc.errcheck = _check_errno
_lib.seccomp_load.argtypes = [_scmp_filter_ctx]
_lib.seccomp_load.errcheck = _check_errno
_lib.seccomp_arch_resolve_name.argtypes = [c_char_p]
_lib.seccomp_arch_resolve_name.restype = c_uint32
_lib.seccomp_arch_add.argtypes = [_scmp_filter_ctx, c_uint32]
_lib.seccomp_arch_add.errcheck = _check_errno
_lib.seccomp_syscall_resolve_name.argtypes = [c_char_p]
_lib.seccomp_syscall_resolve_name.restype = c_int
_lib.seccomp_rule_add.argtypes = [_scmp_filter_ctx, c_uint32, c_int, c_uint]
_lib.seccomp_rule_add.errcheck = _check_errno
return True
def is_available():
"""
Check if seccomp is available and expected to work on this system.
If seccomp is not available an appropriate warning is logged.
"""
global _AVAILABLE
# Make sure to call _load_seccomp() exactly once.
with _LOAD_LOCK:
if _AVAILABLE is None:
_AVAILABLE = METHOD_NAME()
return _AVAILABLE
class SeccompFilter(object):
"""
Encapsulates a seccomp filter that can be incrementally built and loaded.
This class is a single-use context manager,
it is recommended to use it in a with statement.
This class can only be used if is_available() returns True.
"""
def __init__(self, default_action=SCMP_ACT_ALLOW):
"""
Create instance and specify default action for all syscalls
that are not matched by any rule.
"""
assert is_available()
self.filter = _lib.seccomp_init(default_action)
# By default, a seccomp filter only allows syscall that match the native
# architecture, but we also want to allow some other architectures and add the
# appropriate filters for them (libseccomp does this automatically and correctly
# after seccomp_arch_add was used). We could allow other architectures in a more
# fine-granular way (e.g., 32bit ARM only on 64bit ARM), but this is not
# necessary, it would just reduce the size of the filter program slightly.
for arch in _ALLOWED_COMPATIBILITY_ARCHITECTURES:
_lib.seccomp_arch_add(self.filter, _lib.seccomp_arch_resolve_name(arch))
def __enter__(self):
return self
def __exit__(self, *exc_details):
self.free()
def add_rule(self, action, syscall):
"""
Add a rule for a specific syscall.
@param action: A number like SCMP_ACT_ALLOW or SCMP_ACT_ENOSYS
@param syscall: A syscall name or number (on the native architecture)
"""
if not isinstance(syscall, int):
syscall = _lib.seccomp_syscall_resolve_name(syscall)
_lib.seccomp_rule_add(self.filter, action, syscall, 0)
def activate(self):
"""Activate the given seccomp filter for the current process in the kernel."""
_lib.seccomp_load(self.filter)
def print_to(self, fd):
"""Print debug info about the current filter to the given file descriptor."""
_lib.seccomp_export_pfc(self.filter, fd)
def free(self):
_lib.seccomp_release(self.filter)
self.filter = None
|
2,219 |
random movie
|
import logging
import random
from discord import Embed
from discord.ext import commands
from bot.bot import Bot
from bot.constants import Colours, NEGATIVE_REPLIES, Tokens
log = logging.getLogger(__name__)
class ScaryMovie(commands.Cog):
"""Selects a random scary movie and embeds info into Discord chat."""
def __init__(self, bot: Bot):
self.bot = bot
@commands.command(name="scarymovie", alias=["smovie"])
async def METHOD_NAME(self, ctx: commands.Context) -> None:
"""Randomly select a scary movie and display information about it."""
async with ctx.typing():
selection = await self.select_movie()
if not selection:
await ctx.send(embed=Embed(
title=random.choice(NEGATIVE_REPLIES),
description=":warning: Failed to select a movie from the API",
color=Colours.soft_red
))
return
movie_details = await self.format_metadata(selection)
await ctx.send(embed=movie_details)
async def select_movie(self) -> dict:
"""Selects a random movie and returns a JSON of movie details from TMDb."""
url = "https://api.themoviedb.org/3/discover/movie"
params = {
"api_key": Tokens.tmdb.get_secret_value(),
"with_genres": "27",
"vote_count.gte": "5",
"include_adult": "false"
}
headers = {
"Content-Type": "application/json;charset=utf-8"
}
# Get total page count of horror movies
async with self.bot.http_session.get(url=url, params=params, headers=headers) as response:
data = await response.json()
total_pages = data.get("total_pages")
# Get movie details from one random result on a random page
params["page"] = random.randint(1, min(total_pages, 500))
async with self.bot.http_session.get(url=url, params=params, headers=headers) as response:
data = await response.json()
if (results := data.get("results")) is None:
log.warning("Failed to select a movie - data returned from API has no 'results' key")
return {}
selection_id = random.choice(results).get("id")
if selection_id is None:
log.warning("Failed to select a movie - selected film didn't have an id")
return {}
# Get full details and credits
async with self.bot.http_session.get(
url=f"https://api.themoviedb.org/3/movie/{selection_id}",
params={"api_key": Tokens.tmdb.get_secret_value(), "append_to_response": "credits"}
) as selection:
return await selection.json()
@staticmethod
async def format_metadata(movie: dict) -> Embed:
"""Formats raw TMDb data to be embedded in Discord chat."""
# Build the relevant URLs.
movie_id = movie.get("id")
poster_path = movie.get("poster_path")
tmdb_url = f"https://www.themoviedb.org/movie/{movie_id}" if movie_id else None
poster = f"https://image.tmdb.org/t/p/original{poster_path}" if poster_path else None
# Get cast names
cast = []
for actor in movie.get("credits", {}).get("cast", [])[:3]:
cast.append(actor.get("name"))
# Get director name
director = movie.get("credits", {}).get("crew", [])
if director:
director = director[0].get("name")
# Determine the spookiness rating
rating = ""
rating_count = movie.get("vote_average", 0) / 2
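# Halve the 0-10 vote average: one skull per full point, a bat for a remaining half point.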
for _ in range(int(rating_count)):
rating += ":skull:"
if (rating_count % 1) >= .5:
rating += ":bat:"
# Try to get year of release and runtime
year = movie.get("release_date", [])[:4]
runtime = movie.get("runtime")
runtime = f"{runtime} minutes" if runtime else None
# Not all these attributes will always be present
movie_attributes = {
"Directed by": director,
"Starring": ", ".join(cast),
"Running time": runtime,
"Release year": year,
"Spookiness rating": rating,
}
embed = Embed(
colour=0x01d277,
title=f"**{movie.get('title')}**",
url=tmdb_url,
description=movie.get("overview")
)
if poster:
embed.set_image(url=poster)
# Add the attributes that we actually have data for, but not the others.
for name, value in movie_attributes.items():
if value:
embed.add_field(name=name, value=value)
embed.set_footer(text="This product uses the TMDb API but is not endorsed or certified by TMDb.")
embed.set_thumbnail(url="https://i.imgur.com/LtFtC8H.png")
return embed
async def setup(bot: Bot) -> None:
"""Load the Scary Movie Cog."""
if not Tokens.tmdb:
log.warning("No TMDB Token. Not loading ScaryMovie Cog.")
return
await bot.add_cog(ScaryMovie(bot))
|
2,220 |
link
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import json
from unittest import mock
from twisted.internet import defer
from twisted.internet import protocol
from twisted.internet import reactor
from twisted.trial import unittest
from twisted.web import client
from buildbot.data import connector as dataconnector
from buildbot.db import connector as dbconnector
from buildbot.mq import connector as mqconnector
from buildbot.test import fakedb
from buildbot.test.fake import fakemaster
from buildbot.test.util import db
from buildbot.test.util import www
from buildbot.util import bytes2unicode
from buildbot.util import unicode2bytes
from buildbot.www import auth
from buildbot.www import authz
from buildbot.www import service as wwwservice
SOMETIME = 1348971992
OTHERTIME = 1008971992
class BodyReader(protocol.Protocol):
# an IProtocol that reads the entire HTTP body and then calls back
# with it
def __init__(self, finishedDeferred):
self.body = []
self.finishedDeferred = finishedDeferred
def dataReceived(self, data):
self.body.append(data)
def connectionLost(self, reason):
if reason.check(client.ResponseDone):
self.finishedDeferred.callback(b''.join(self.body))
else:
self.finishedDeferred.errback(reason)
class Www(db.RealDatabaseMixin, www.RequiresWwwMixin, unittest.TestCase):
master = None
@defer.inlineCallbacks
def setUp(self):
# set up a full master serving HTTP
yield self.setUpRealDatabase(table_names=['masters', 'objects', 'object_state'],
sqlite_memory=False)
master = fakemaster.FakeMaster(reactor)
master.config.db = {"db_url": self.db_url}
master.db = dbconnector.DBConnector('basedir')
yield master.db.setServiceParent(master)
yield master.db.setup(check_version=False)
master.config.mq = {"type": 'simple'}
master.mq = mqconnector.MQConnector()
yield master.mq.setServiceParent(master)
yield master.mq.setup()
master.data = dataconnector.DataConnector()
yield master.data.setServiceParent(master)
master.config.www = {
"port": 'tcp:0:interface=127.0.0.1',
"debug": True,
"auth": auth.NoAuth(),
"authz": authz.Authz(),
"avatar_methods": [],
"logfileName": 'http.log'
}
master.www = wwwservice.WWWService()
yield master.www.setServiceParent(master)
yield master.www.startService()
yield master.www.reconfigServiceWithBuildbotConfig(master.config)
session = mock.Mock()
session.uid = "0"
master.www.site.sessionFactory = mock.Mock(return_value=session)
# now that we have a port, construct the real URL and insert it into
# the config. The second reconfig isn't really required, but doesn't
# hurt.
self.url = f'http://127.0.0.1:{master.www.getPortnum()}/'
self.url = unicode2bytes(self.url)
master.config.buildbotURL = self.url
yield master.www.reconfigServiceWithBuildbotConfig(master.config)
self.master = master
# build an HTTP agent, using an explicit connection pool if Twisted
# supports it (Twisted 13.0.0 and up)
if hasattr(client, 'HTTPConnectionPool'):
self.pool = client.HTTPConnectionPool(reactor)
self.agent = client.Agent(reactor, pool=self.pool)
else:
self.pool = None
self.agent = client.Agent(reactor)
@defer.inlineCallbacks
def tearDown(self):
if self.pool:
yield self.pool.closeCachedConnections()
if self.master:
yield self.master.www.stopService()
yield self.tearDownRealDatabase()
@defer.inlineCallbacks
def apiGet(self, url, expect200=True):
pg = yield self.agent.request(b'GET', url)
# this is kind of obscene, but protocols are like that
d = defer.Deferred()
bodyReader = BodyReader(d)
pg.deliverBody(bodyReader)
body = yield d
# check this *after* reading the body, otherwise Trial will
# complain that the response is half-read
if expect200 and pg.code != 200:
self.fail(f"did not get 200 response for '{url}'")
return json.loads(bytes2unicode(body))
def METHOD_NAME(self, suffix):
return self.url + b'api/v2/' + suffix
# tests
# There's no need to be exhaustive here. The intent is to test that data
# can get all the way from the DB to a real HTTP client, and a few
# resources will be sufficient to demonstrate that.
@defer.inlineCallbacks
def test_masters(self):
yield self.insert_test_data([
fakedb.Master(id=7, name='some:master',
active=0, last_active=SOMETIME),
fakedb.Master(id=8, name='other:master',
active=1, last_active=OTHERTIME),
])
res = yield self.apiGet(self.METHOD_NAME(b'masters'))
self.assertEqual(res, {
'masters': [
{'active': False, 'masterid': 7, 'name': 'some:master',
'last_active': SOMETIME},
{'active': True, 'masterid': 8, 'name': 'other:master',
'last_active': OTHERTIME},
],
'meta': {
'total': 2,
}})
res = yield self.apiGet(self.METHOD_NAME(b'masters/7'))
self.assertEqual(res, {
'masters': [
{'active': False, 'masterid': 7, 'name': 'some:master',
'last_active': SOMETIME},
],
'meta': {
}})
|
2,221 |
norm
|
"""tensorflow.compat.v1 backend implementation"""
from packaging.version import Version
import tensorflow.compat.v1 as tf
if Version(tf.__version__) < Version("2.7.0"):
raise RuntimeError("DeepXDE requires TensorFlow>=2.7.0.")
# The major changes from TensorFlow 1.x to TensorFlow 2.x are:
# 1. Eager execution: enable_eager_execution(), disable_eager_execution()
# 2. Resource variables: enable_resource_variables(), disable_resource_variables()
# 3. Tensor shapes: enable_v2_tensorshape(), disable_v2_tensorshape()
# 4. Control flow: enable_control_flow_v2(), disable_control_flow_v2()
# 5. Tensors comparison: enable_tensor_equality(), disable_tensor_equality()
# 6. Some internal uses of tf.data symbols
# For more details, see
# - https://www.tensorflow.org/guide/migrate
# - the source code of disable_v2_behavior()
# We can simply disable all TensorFlow 2.x behaviors by disable_v2_behavior(), but some
# features in TensorFlow 2.x are useful such as `Tensor shapes`. Actually we use `Tensor
# shapes` in DeepXDE.
tf.disable_v2_behavior()
tf.enable_v2_tensorshape()
# In terms of functionality, we only need to disable eager mode.
# tf.disable_eager_execution()
# It hurts performance a lot (only in some cases?) if enabling tensor equality.
# tf.disable_tensor_equality()
# It hurts performance a little (only in some cases?) if enabling resource variables.
# tf.disable_resource_variables()
# It hurts performance a little (only in some cases?) if enabling control flow.
# tf.disable_control_flow_v2()
lib = tf
def data_type_dict():
return {
"float16": tf.float16,
"float32": tf.float32,
"float64": tf.float64,
"uint8": tf.uint8,
"int8": tf.int8,
"int16": tf.int16,
"int32": tf.int32,
"int64": tf.int64,
"bool": tf.bool,
}
def is_gpu_available():
return bool(tf.config.list_physical_devices("GPU"))
def is_tensor(obj):
return tf.is_tensor(obj)
def shape(input_tensor):
return input_tensor.shape.as_list()
def size(tensor):
return tf.get_static_value(tf.size(tensor)).item()
def ndim(input_tensor):
return len(input_tensor.shape)
def transpose(tensor, axes=None):
return tf.transpose(tensor, perm=axes)
def reshape(tensor, shape):
return tf.reshape(tensor, shape)
def Variable(initial_value, dtype=None):
return tf.Variable(initial_value=initial_value, trainable=True, dtype=dtype)
def as_tensor(data, dtype=None):
if tf.is_tensor(data):
if dtype is None or data.dtype == dtype:
return data
return tf.cast(data, dtype)
return tf.convert_to_tensor(data, dtype=dtype)
def sparse_tensor(indices, values, shape):
return tf.sparse.SparseTensor(indices, values, shape)
def from_numpy(np_array):
# Do memory copy:
# https://stackoverflow.com/questions/47519802/does-tensorflow-convert-to-tensor-do-memory-copy
# To avoid memory copy, use implicit conversion, but memory copy is still possible.
# https://www.tensorflow.org/tutorials/customization/basics#numpy_compatibility
return tf.convert_to_tensor(np_array)
def concat(values, axis):
return tf.concat(values, axis)
def stack(values, axis):
return tf.stack(values, axis)
def expand_dims(tensor, axis):
return tf.expand_dims(tensor, axis)
def reverse(tensor, axis):
return tf.reverse(tensor, axis)
def roll(tensor, shift, axis):
return tf.roll(tensor, shift, axis)
def lgamma(x):
return tf.math.lgamma(x)
def elu(x):
return tf.nn.elu(x)
def relu(x):
return tf.nn.relu(x)
def selu(x):
return tf.nn.selu(x)
def sigmoid(x):
return tf.math.sigmoid(x)
def silu(x):
return tf.keras.activations.swish(x)
def sin(x):
return tf.math.sin(x)
def cos(x):
return tf.math.cos(x)
def exp(x):
return tf.math.exp(x)
def square(x):
return tf.math.square(x)
# pylint: disable=redefined-builtin
def abs(x):
return tf.math.abs(x)
def minimum(x, y):
return tf.math.minimum(x, y)
def tanh(x):
return tf.math.tanh(x)
def pow(x, y):
return tf.math.pow(x, y)
def mean(input_tensor, dim, keepdims=False):
return tf.math.reduce_mean(input_tensor, axis=dim, keepdims=keepdims)
def reduce_mean(input_tensor):
return tf.math.reduce_mean(input_tensor)
def sum(input_tensor, dim, keepdims=False):
return tf.math.reduce_sum(input_tensor, axis=dim, keepdims=keepdims)
def reduce_sum(input_tensor):
return tf.math.reduce_sum(input_tensor)
def prod(input_tensor, dim, keepdims=False):
return tf.math.reduce_prod(input_tensor, axis=dim, keepdims=keepdims)
def reduce_prod(input_tensor):
return tf.math.reduce_prod(input_tensor)
# pylint: disable=redefined-builtin
def min(input_tensor, dim, keepdims=False):
return tf.math.reduce_min(input_tensor, axis=dim, keepdims=keepdims)
def reduce_min(input_tensor):
return tf.math.reduce_min(input_tensor)
# pylint: disable=redefined-builtin
def max(input_tensor, dim, keepdims=False):
return tf.math.reduce_max(input_tensor, axis=dim, keepdims=keepdims)
def reduce_max(input_tensor):
return tf.math.reduce_max(input_tensor)
def METHOD_NAME(tensor, ord=None, axis=None, keepdims=False):
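# Mirror tf.norm's default: ord=None means the "euclidean" norm.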
if ord is None:
ord = "euclidean"
return tf.METHOD_NAME(tensor, ord=ord, axis=axis, keepdims=keepdims)
def zeros(shape, dtype):
return tf.zeros(shape, dtype=dtype)
def zeros_like(input_tensor):
return tf.zeros_like(input_tensor)
def matmul(x, y):
return tf.linalg.matmul(x, y)
def sparse_dense_matmul(x, y):
return tf.sparse.sparse_dense_matmul(x, y)
|
2,222 |
is from local
|
import inspect
from typing import Dict, List
import torch
from .. import variables
from ..exc import unimplemented
from ..utils import istype
from .base import VariableTracker
from .constant import ConstantVariable
class DistributedVariable(VariableTracker):
def __init__(self, **kwargs):
super().__init__(**kwargs)
if not DistributedVariable.is_available():
unimplemented("torch.distributed package is not available!")
@staticmethod
def is_available():
# check if the distributed package is available or not
return torch.distributed.is_available()
def METHOD_NAME(value):
if not DistributedVariable.is_available():
return False
from torch.distributed._tensor import DTensor
return inspect.isfunction(value) and value is DTensor.from_local
def is_constant_pg_functions(value):
if not DistributedVariable.is_available():
return False
from torch.distributed.distributed_c10d import (
_get_group_tag,
get_process_group_ranks,
)
constant_processgroup_functions = [
get_process_group_ranks,
_get_group_tag,
]
return inspect.isfunction(value) and value in constant_processgroup_functions
class PlacementClassVariable(DistributedVariable):
def __init__(self, value, **kwargs):
super().__init__(**kwargs)
self.value = value
@staticmethod
def is_placement_type(value):
# we can't rely on importing/accessing torch distributed, it is not always built.
if not DistributedVariable.is_available():
return False
from torch.distributed._tensor.placement_types import Placement
return type(value) is type and issubclass(value, Placement)
def call_function(
self, tx, args: "List[VariableTracker]", kwargs: "Dict[str, VariableTracker]"
) -> "VariableTracker":
options = VariableTracker.propagate(self, args, kwargs.values())
if (
inspect.getattr_static(self.value, "__new__", None) in (object.__new__,)
and self.source
):
# NOTE: we don't need to track mutations to the placement class as they
# suppose to be immutable.
new_obj = object.__new__(self.value)
var = PlacementVariable(new_obj, **options)
if inspect.getattr_static(self.value, "__init__", None):
return var.add_options(var.call_method(tx, "__init__", args, kwargs))
return super().call_function(tx, args, kwargs)
class PlacementVariable(DistributedVariable):
def __init__(self, value, **kwargs):
super().__init__(**kwargs)
self.value = value
@staticmethod
def is_placement(value):
# we can't rely on importing/accessing torch distributed, it is not always built.
if not DistributedVariable.is_available():
return False
from torch.distributed._tensor.placement_types import Placement
return isinstance(value, Placement)
def as_python_constant(self):
return self.value
def call_method(
self,
tx,
name,
args: "List[VariableTracker]",
kwargs: "Dict[str, VariableTracker]",
) -> "VariableTracker":
from . import ConstantVariable
options = VariableTracker.propagate(self, args, kwargs.values())
allowed_methods = ["__init__", "__setattr__"]
# dynamo tracking of placement types allows only the __init__
# and __setattr__ methods; the latter covers cases like `Shard(dim)`
if name in allowed_methods:
try:
value_type = type(self.value)
assert (
inspect.getattr_static(value_type, "__getattr__", None) is None
), "no custom getattr allowed!"
method = inspect.getattr_static(value_type, name)
except AttributeError:
method = None
if method is object.__init__:
return ConstantVariable(None, **options)
args = [x.as_python_constant() for x in args]
kwargs = {k: v.as_python_constant() for k, v in kwargs.items()}
method(self.value, *args, **kwargs)
return self
return super().call_method(tx, name, args, kwargs)
class DeviceMeshVariable(DistributedVariable):
def __init__(self, value, **kwargs):
super().__init__(**kwargs)
self.value = value
@staticmethod
def is_device_mesh(value):
# we can't rely on importing/accessing torch distributed, it is not always built.
if not DistributedVariable.is_available():
return False
from torch.distributed._tensor.device_mesh import DeviceMesh
return istype(value, DeviceMesh)
def as_python_constant(self):
return self.value
def var_getattr(self, tx, name: str) -> VariableTracker:
if name == "ndim":
return ConstantVariable(self.value.ndim)
return super().var_getattr(tx, name)
class ProcessGroupVariable(DistributedVariable):
"""
We don't want a ProcessGroup object to end up in our output graph.
But it's common for dynamo to intercept a PG that is then used to get info like
rank() or world_size(), as well as passed to utility functions in distributed_c10d
which desugar it into plain types like a ranklist and tag.
For convenience and proper guarding, we construct a variable type.
TODO: make it possible to use ProcessGroupVariable as input to simple functions
like _expand_group without dynamo complaining about making a proxy for it.
It is not a tensor-like type, and we don't want a proxy- but dynamo assumes
torch library functions are dealing with tensor-like types and would have proxies
for their args.
TODO: should we make this inherit VT instead of UDOV? Do we want any of the default behaviors
or just graph-break whenever one of our special cases is not hit?
"""
def __init__(self, value, **kwargs):
super().__init__(**kwargs)
self.value = value
def as_python_constant(self):
return self.value
def python_type(self):
return type(self.value)
def call_method(
self,
tx,
name,
args: "List[VariableTracker]",
kwargs: "Dict[str, VariableTracker]",
) -> "VariableTracker":
if name == "rank":
return variables.ConstantVariable(self.value.rank())
if name == "size":
return variables.ConstantVariable(self.value.size())
return super().call_method(tx, name, args, kwargs)
def var_getattr(self, tx, name):
if name in ["rank", "size"]:
return variables.LambdaVariable(
lambda *args, **kwargs: self.call_method(tx, name, args, kwargs)
).add_options(self)
# TODO should this just raise unimplemented?
return super().var_getattr(tx, name)
@staticmethod
def is_process_group(value):
# we can't rely on importing/accessing torch distributed, it is not always built.
if not DistributedVariable.is_available():
return False
from torch._C._distributed_c10d import ProcessGroup
return istype(value, ProcessGroup)
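# Illustrative sketch (not part of the original module): the kind of user code these
# variable types are meant to trace; the function name and arguments are assumptions.
#
#     def example_traced_fn(x, pg):
#         # pg.rank() and pg.size() fold into ConstantVariables during dynamo tracing,
#         # so the captured graph only contains the tensor ops on x.
#         return x.sum() / pg.size() + pg.rank()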
|
2,223 |
str see also
|
import re, inspect, textwrap, pydoc
from docscrape import NumpyDocString, FunctionDoc, ClassDoc
class SphinxDocString(NumpyDocString):
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' '*indent + line]
return out
def _str_signature(self):
# NOTE: signature output is disabled; the early return below makes the rest of this method unreachable.
return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param,param_type,desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc,8)
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def METHOD_NAME(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self).METHOD_NAME(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default','')]
for section, references in idx.items():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
return out
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Attributes', 'Methods',
'Returns','Raises'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self.METHOD_NAME(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_section('Examples')
out = self._str_indent(out,indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
pass
class SphinxClassDoc(SphinxDocString, ClassDoc):
pass
def get_doc_object(obj, what=None, doc=None):
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, '', func_doc=SphinxFunctionDoc, doc=doc)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, '', doc=doc)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxDocString(doc)
|
2,224 |
wait for
|
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
import socket
import ssl
import struct
import time
import aioquic.quic.configuration # type: ignore
import aioquic.quic.connection # type: ignore
import aioquic.quic.events # type: ignore
import trio
import dns.exception
import dns.inet
from dns._asyncbackend import NullContext
from dns.quic._common import (
QUIC_MAX_DATAGRAM,
AsyncQuicConnection,
AsyncQuicManager,
BaseQuicStream,
UnexpectedEOF,
)
class TrioQuicStream(BaseQuicStream):
def __init__(self, connection, stream_id):
super().__init__(connection, stream_id)
self._wake_up = trio.Condition()
async def METHOD_NAME(self, amount):
while True:
if self._buffer.have(amount):
return
self._expecting = amount
async with self._wake_up:
await self._wake_up.wait()
self._expecting = 0
async def receive(self, timeout=None):
if timeout is None:
context = NullContext(None)
else:
context = trio.move_on_after(timeout)
with context:
await self.METHOD_NAME(2)
(size,) = struct.unpack("!H", self._buffer.get(2))
await self.METHOD_NAME(size)
return self._buffer.get(size)
raise dns.exception.Timeout
async def send(self, datagram, is_end=False):
data = self._encapsulate(datagram)
await self._connection.write(self._stream_id, data, is_end)
async def _add_input(self, data, is_end):
if self._common_add_input(data, is_end):
async with self._wake_up:
self._wake_up.notify()
async def close(self):
self._close()
# Streams are async context managers
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.close()
async with self._wake_up:
self._wake_up.notify()
return False
class TrioQuicConnection(AsyncQuicConnection):
def __init__(self, connection, address, port, source, source_port, manager=None):
super().__init__(connection, address, port, source, source_port, manager)
self._socket = trio.socket.socket(self._af, socket.SOCK_DGRAM, 0)
if self._source:
trio.socket.bind(dns.inet.low_level_address_tuple(self._source, self._af))
self._handshake_complete = trio.Event()
self._run_done = trio.Event()
self._worker_scope = None
async def _worker(self):
try:
await self._socket.connect(self._peer)
while not self._done:
(expiration, interval) = self._get_timer_values(False)
with trio.CancelScope(
deadline=trio.current_time() + interval
) as self._worker_scope:
datagram = await self._socket.recv(QUIC_MAX_DATAGRAM)
self._connection.receive_datagram(
datagram, self._peer[0], time.time()
)
self._worker_scope = None
self._handle_timer(expiration)
datagrams = self._connection.datagrams_to_send(time.time())
for datagram, _ in datagrams:
await self._socket.send(datagram)
await self._handle_events()
finally:
self._done = True
self._handshake_complete.set()
async def _handle_events(self):
count = 0
while True:
event = self._connection.next_event()
if event is None:
return
if isinstance(event, aioquic.quic.events.StreamDataReceived):
stream = self._streams.get(event.stream_id)
if stream:
await stream._add_input(event.data, event.end_stream)
elif isinstance(event, aioquic.quic.events.HandshakeCompleted):
self._handshake_complete.set()
elif isinstance(
event, aioquic.quic.events.ConnectionTerminated
) or isinstance(event, aioquic.quic.events.StreamReset):
self._done = True
self._socket.close()
count += 1
if count > 10:
# yield to the scheduler periodically so other tasks can run
count = 0
await trio.sleep(0)
async def write(self, stream, data, is_end=False):
self._connection.send_stream_data(stream, data, is_end)
if self._worker_scope is not None:
self._worker_scope.cancel()
async def run(self):
if self._closed:
return
async with trio.open_nursery() as nursery:
nursery.start_soon(self._worker)
self._run_done.set()
async def make_stream(self, timeout=None):
if timeout is None:
context = NullContext(None)
else:
context = trio.move_on_after(timeout)
with context:
await self._handshake_complete.wait()
if self._done:
raise UnexpectedEOF
stream_id = self._connection.get_next_available_stream_id(False)
stream = TrioQuicStream(self, stream_id)
self._streams[stream_id] = stream
return stream
raise dns.exception.Timeout
async def close(self):
if not self._closed:
self._manager.closed(self._peer[0], self._peer[1])
self._closed = True
self._connection.close()
if self._worker_scope is not None:
self._worker_scope.cancel()
await self._run_done.wait()
class TrioQuicManager(AsyncQuicManager):
def __init__(
self, nursery, conf=None, verify_mode=ssl.CERT_REQUIRED, server_name=None
):
super().__init__(conf, verify_mode, TrioQuicConnection, server_name)
self._nursery = nursery
def connect(self, address, port=853, source=None, source_port=0):
(connection, start) = self._connect(address, port, source, source_port)
if start:
self._nursery.start_soon(connection.run)
return connection
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
# Copy the iterator into a list as exiting things will mutate the connections
# table.
connections = list(self._connections.values())
for connection in connections:
await connection.close()
return False
|
2,225 |
api member name
|
# Copyright (C) 2022 CVAT.ai Corporation
#
# SPDX-License-Identifier: MIT
from __future__ import annotations
import json
from abc import ABC
from copy import deepcopy
from typing import (
TYPE_CHECKING,
Any,
Dict,
Generic,
List,
Literal,
Optional,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from typing_extensions import Self
from cvat_sdk.api_client.model_utils import IModelData, ModelNormal, to_json
from cvat_sdk.core.helpers import get_paginated_collection
if TYPE_CHECKING:
from cvat_sdk.core.client import Client
IModel = TypeVar("IModel", bound=IModelData)
ModelType = TypeVar("ModelType", bound=ModelNormal)
ApiType = TypeVar("ApiType")
class ModelProxy(ABC, Generic[ModelType, ApiType]):
_client: Client
@property
def METHOD_NAME(self) -> str:
...
def __init__(self, client: Client) -> None:
self.__dict__["_client"] = client
@classmethod
def get_api(cls, client: Client) -> ApiType:
return getattr(client.api_client, cls.METHOD_NAME)
@property
def api(self) -> ApiType:
return self.get_api(self._client)
class Entity(ModelProxy[ModelType, ApiType]):
"""
Represents a single object. Implements related operations and provides read access
to data members.
"""
_model: ModelType
def __init__(self, client: Client, model: ModelType) -> None:
super().__init__(client)
self.__dict__["_model"] = model
@property
def _model_id_field(self) -> str:
return "id"
def __getattr__(self, __name: str) -> Any:
# NOTE: be aware of potential problems with throwing AttributeError from @property
# in derived classes!
# https://medium.com/@ceshine/python-debugging-pitfall-mixed-use-of-property-and-getattr-f89e0ede13f1
return self._model[__name]
def __str__(self) -> str:
return str(self._model)
def __repr__(self) -> str:
return f"<{self.__class__.__name__}: id={getattr(self, self._model_id_field)}>"
class Repo(ModelProxy[ModelType, ApiType]):
"""
Represents a collection of corresponding Entity objects.
Implements group and management operations for entities.
"""
_entity_type: Type[Entity[ModelType, ApiType]]
### Utilities
def build_model_bases(
mt: Type[ModelType], at: Type[ApiType], *, api_member_name: Optional[str] = None
) -> Tuple[Type[Entity[ModelType, ApiType]], Type[Repo[ModelType, ApiType]]]:
"""
Helps to remove code duplication in declarations of derived classes
"""
class _EntityBase(Entity[ModelType, ApiType]):
if api_member_name:
METHOD_NAME = api_member_name
class _RepoBase(Repo[ModelType, ApiType]):
if api_member_name:
METHOD_NAME = api_member_name
return _EntityBase, _RepoBase
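# Hypothetical usage sketch (the model, API, and attribute names below are assumptions,
# not taken from the SDK):
#
#     _TaskEntityBase, _TaskRepoBase = build_model_bases(TaskRead, TasksApi, api_member_name="tasks_api")
#
#     class Task(_TaskEntityBase):
#         pass
#
#     class TasksRepo(_TaskRepoBase):
#         _entity_type = Task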
### CRUD mixins
_EntityT = TypeVar("_EntityT", bound=Entity)
#### Repo mixins
class ModelCreateMixin(Generic[_EntityT, IModel]):
def create(self: Repo, spec: Union[Dict[str, Any], IModel]) -> _EntityT:
"""
Creates a new object on the server and returns the corresponding local object
"""
(model, _) = self.api.create(spec)
return self._entity_type(self._client, model)
class ModelRetrieveMixin(Generic[_EntityT]):
def retrieve(self: Repo, obj_id: int) -> _EntityT:
"""
Retrieves an object from the server by ID
"""
(model, _) = self.api.retrieve(id=obj_id)
return self._entity_type(self._client, model)
class ModelListMixin(Generic[_EntityT]):
@overload
def list(self: Repo, *, return_json: Literal[False] = False) -> List[_EntityT]:
...
@overload
def list(self: Repo, *, return_json: Literal[True] = False) -> List[Any]:
...
def list(self: Repo, *, return_json: bool = False) -> List[Union[_EntityT, Any]]:
"""
Retrieves all objects from the server and returns them in basic or JSON format.
"""
results = get_paginated_collection(endpoint=self.api.list_endpoint, return_json=return_json)
if return_json:
return json.dumps(results)
return [self._entity_type(self._client, model) for model in results]
#### Entity mixins
class ModelUpdateMixin(ABC, Generic[IModel]):
@property
def _model_partial_update_arg(self: Entity) -> str:
...
def _export_update_fields(
self: Entity, overrides: Optional[Union[Dict[str, Any], IModel]] = None
) -> Dict[str, Any]:
# TODO: support field conversion and assignment updating
# fields = to_json(self._model)
if isinstance(overrides, ModelNormal):
overrides = to_json(overrides)
fields = deepcopy(overrides)
return fields
def fetch(self: Entity) -> Self:
"""
Updates the current object from the server
"""
# TODO: implement revision checking
(self._model, _) = self.api.retrieve(id=getattr(self, self._model_id_field))
return self
def update(self: Entity, values: Union[Dict[str, Any], IModel]) -> Self:
"""
Commits model changes to the server.
The local object is updated from the server after this operation.
"""
# TODO: implement revision checking
self.api.partial_update(
id=getattr(self, self._model_id_field),
**{self._model_partial_update_arg: self._export_update_fields(values)},
)
# TODO: use the response model, once input and output models are same
return self.fetch()
class ModelDeleteMixin:
def remove(self: Entity) -> None:
"""
Removes current object on the server
"""
self.api.destroy(id=getattr(self, self._model_id_field))
|
2,226 |
test group mirror
|
#!/usr/bin/env python
#
#
from qautils.gppylib.testold.testDriver import TestDriver
from qautils.gppylib.programs.clsAddMirrors import *
from qautils.gppylib.operations.buildMirrorSegments import *
from qautils.gppylib.mainUtils import *
from qautils.gppylib.testold.testUtils import *
from qautils.gppylib.system import fileSystemImplTest, fileSystemInterface
from qautils.gppylib.gplog import get_default_logger
programName = sys.argv[0]
parserFn = GpAddMirrorsProgram.createParser
commandFn = GpAddMirrorsProgram.createProgram
driver = TestDriver()
logger = get_default_logger()
###############
#
# Now the tests:
#
####################################
#
#
#
# Test that spread mirror assigns ports and directories correctly
#
def testSpreadMirror(args):
driver.initThreeHostMultiHomeNoMirrors()
confProvider = configInterface.getConfigurationProvider().initializeProvider(5432)
gpArray = confProvider.loadSystemConfig(useUtilityMode=False)
calc = GpMirrorBuildCalculator(gpArray, 1000, [ "/data/m1", "/data/m2"], [{},{}])
calc.getSpreadMirrors()
GpMirrorListToBuild([], None, False, 1).checkForPortAndDirectoryConflicts(gpArray)
testOutput("")
testOutputGpArray(gpArray)
testOutput("")
simple_test("testSpreadMirror", testSpreadMirror, [],
"""
dbid | content | role | preferred_role | mode | status | hostname | address | port | datadir | replication_port
1 | -1 | p | p | s | u | master-host | primary-host | 5432 | /datadirpathdbmaster/gp-1 | None
2 | 0 | p | p | r | u | first-host | first-host-1 | 50001 | /first/datadirpathdbfast1/gp0 | 53001
3 | 1 | p | p | r | u | first-host | first-host-2 | 50002 | /first/datadirpathdbfast2/gp1 | 53002
4 | 2 | p | p | r | u | second-host | second-host-1 | 50001 | /second/datadirpathdbfast1/gp2 | 53001
5 | 3 | p | p | r | u | second-host | second-host-2 | 50002 | /second/datadirpathdbfast2/gp3 | 53002
6 | 4 | p | p | r | u | third-host | third-host-1 | 50001 | /third/datadirpathdbfast2/gp4 | 53001
7 | 5 | p | p | r | u | third-host | third-host-2 | 50002 | /third/datadirpathdbfast2/gp5 | 53002
8 | 0 | m | m | r | u | second-host | second-host-1 | 51001 | /data/m1 | 52001
9 | 1 | m | m | r | u | third-host | third-host-1 | 51001 | /data/m1 | 52001
10 | 2 | m | m | r | u | third-host | third-host-2 | 51002 | /data/m2 | 52002
11 | 3 | m | m | r | u | first-host | first-host-1 | 51001 | /data/m1 | 52001
12 | 4 | m | m | r | u | first-host | first-host-2 | 51002 | /data/m2 | 52002
13 | 5 | m | m | r | u | second-host | second-host-2 | 51002 | /data/m2 | 52002
"""
)
#
# Test group mirroring!
#
def METHOD_NAME(args):
driver.initThreeHostMultiHomeNoMirrors()
confProvider = configInterface.getConfigurationProvider().initializeProvider(5432)
gpArray = confProvider.loadSystemConfig(useUtilityMode=False)
calc = GpMirrorBuildCalculator(gpArray, 1000, [ "/data/m1", "/data/m2"], [{},{}])
calc.getGroupMirrors()
GpMirrorListToBuild([], None, False, 1).checkForPortAndDirectoryConflicts(gpArray)
testOutput("")
testOutputGpArray(gpArray)
testOutput("")
simple_test("testGroupMirror", METHOD_NAME, [],
"""
dbid | content | role | preferred_role | mode | status | hostname | address | port | datadir | replication_port
1 | -1 | p | p | s | u | master-host | primary-host | 5432 | /datadirpathdbmaster/gp-1 | None
2 | 0 | p | p | r | u | first-host | first-host-1 | 50001 | /first/datadirpathdbfast1/gp0 | 53001
3 | 1 | p | p | r | u | first-host | first-host-2 | 50002 | /first/datadirpathdbfast2/gp1 | 53002
4 | 2 | p | p | r | u | second-host | second-host-1 | 50001 | /second/datadirpathdbfast1/gp2 | 53001
5 | 3 | p | p | r | u | second-host | second-host-2 | 50002 | /second/datadirpathdbfast2/gp3 | 53002
6 | 4 | p | p | r | u | third-host | third-host-1 | 50001 | /third/datadirpathdbfast2/gp4 | 53001
7 | 5 | p | p | r | u | third-host | third-host-2 | 50002 | /third/datadirpathdbfast2/gp5 | 53002
8 | 0 | m | m | r | u | second-host | second-host-1 | 51001 | /data/m1 | 52001
9 | 1 | m | m | r | u | second-host | second-host-2 | 51002 | /data/m2 | 52002
10 | 2 | m | m | r | u | third-host | third-host-1 | 51001 | /data/m1 | 52001
11 | 3 | m | m | r | u | third-host | third-host-2 | 51002 | /data/m2 | 52002
12 | 4 | m | m | r | u | first-host | first-host-1 | 51001 | /data/m1 | 52001
13 | 5 | m | m | r | u | first-host | first-host-2 | 51002 | /data/m2 | 52002
"""
)
#
# Test segment copy and comparison
#
def testCopyAndComparison(args):
driver.initThreeHostMultiHomeNoMirrors()
gpArray = configInterface.getConfigurationProvider().initializeProvider(5432).loadSystemConfig(useUtilityMode=False)
testOutput("")
for seg in gpArray.getSegDbList():
seg.getSegmentFilespaces()[12334] = "/data/foo1"
seg.getSegmentFilespaces()[42334] = "/data/foo2"
seg.getSegmentFilespaces()[32334] = "/data/foo3"
seg.getSegmentFilespaces()[72334] = "/data/foo4"
segCopy = seg.copy()
testOutput("equalsCopy: %s" % (segCopy == seg))
if segCopy != seg:
testOutput("%s" % repr(seg))
testOutput("%s" % repr(segCopy))
testOutput("")
simple_test("testCopyAndComparison", testCopyAndComparison, [],
"""
equalsCopy: True
equalsCopy: True
equalsCopy: True
equalsCopy: True
equalsCopy: True
equalsCopy: True
""")
# All done tests
printTestResults()
|
2,227 |
parse edge key
|
import time
from signal import pause
import logging
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
logger = logging.getLogger(__name__)
map_edge_parse = {'falling':GPIO.FALLING, 'rising':GPIO.RISING, 'both':GPIO.BOTH}
map_pull_parse = {'pull_up':GPIO.PUD_UP, 'pull_down':GPIO.PUD_DOWN, 'pull_off':GPIO.PUD_OFF}
map_edge_print = {GPIO.FALLING: 'falling', GPIO.RISING: 'rising', GPIO.BOTH: 'both'}
map_pull_print = {GPIO.PUD_UP:'pull_up', GPIO.PUD_DOWN: 'pull_down', GPIO.PUD_OFF: 'pull_off'}
def METHOD_NAME(edge):
if edge in [GPIO.FALLING, GPIO.RISING, GPIO.BOTH]:
return edge
try:
result = map_edge_parse[edge.lower()]
except KeyError:
result = edge
raise KeyError('Unknown Edge type {edge}'.format(edge=edge))
return result
def parse_pull_up_down(pull_up_down):
if pull_up_down in [GPIO.PUD_UP, GPIO.PUD_DOWN, GPIO.PUD_OFF]:
return pull_up_down
try:
result = map_pull_parse[pull_up_down]
except KeyError:
result = pull_up_down
raise KeyError('Unknown Pull Up/Down type {pull_up_down}'.format(pull_up_down=pull_up_down))
return result
def print_edge_key(edge):
try:
result = map_edge_print[edge]
except KeyError:
result = edge
return result
def print_pull_up_down(pull_up_down):
try:
result = map_pull_print[pull_up_down]
except KeyError:
result = pull_up_down
return result
# This function takes a holding time (fractional seconds), a GPIO channel and a GPIO state.
# It checks whether the GPIO stays in that state from the moment the function is called.
# If the state changes before the holding time is over, it returns False; otherwise it returns True.
def checkGpioStaysInState(holdingTime, gpioChannel, gpioHoldingState):
# Get a reference start time (https://docs.python.org/3/library/time.html#time.perf_counter)
startTime = time.perf_counter()
# Continuously check whether the holding time has elapsed
while True:
time.sleep(0.1)
currentState = GPIO.input(gpioChannel)
if holdingTime < (time.perf_counter() - startTime):
break
# Return if state does not match holding state
if (gpioHoldingState != currentState):
return False
# Else: Wait
if (gpioHoldingState != currentState):
return False
return True
class SimpleButton:
def __init__(self, pin, action=lambda *args: None, action2=lambda *args: None, name=None,
bouncetime=500, antibouncehack=False, edge='falling', hold_time=.3, hold_mode=None, pull_up_down='pull_up'):
self.edge = METHOD_NAME(edge)
self.hold_time = hold_time
self.hold_mode = hold_mode
self.pull_up = True
self.pull_up_down = parse_pull_up_down(pull_up_down)
self.pin = pin
self.name = name
self.bouncetime = bouncetime
self.antibouncehack = antibouncehack
GPIO.setup(self.pin, GPIO.IN, pull_up_down=self.pull_up_down)
self._action = action
self._action2 = action2
GPIO.add_event_detect(self.pin, edge=self.edge, callback=self.callbackFunctionHandler,
bouncetime=self.bouncetime)
self.callback_with_pin_argument = False
def callbackFunctionHandler(self, *args):
if len(args) > 0 and args[0] == self.pin and not self.callback_with_pin_argument:
logger.debug('Remove pin argument by callbackFunctionHandler - args before: {}'.format(args))
args = args[1:]
logger.debug('args after: {}'.format(args))
if self.antibouncehack:
time.sleep(0.1)
inval = GPIO.input(self.pin)
if inval != GPIO.LOW:
return None
if self.hold_mode in ('Repeat', 'Postpone', 'SecondFunc', 'SecondFuncRepeat'):
return self.longPressHandler(*args)
else:
logger.info('{}: execute callback'.format(self.name))
return self.when_pressed(*args)
@property
def when_pressed(self):
logger.info('{}: action'.format(self.name))
return self._action
@property
def when_held(self):
logger.info('{}: action2'.format(self.name))
return self._action2
@when_pressed.setter
def when_pressed(self, func):
logger.info('{}: set when_pressed'.format(self.name))
self._action = func
GPIO.remove_event_detect(self.pin)
logger.info('add new action')
GPIO.add_event_detect(self.pin, edge=self.edge, callback=self.callbackFunctionHandler, bouncetime=self.bouncetime)
def set_callbackFunction(self, callbackFunction):
self.when_pressed = callbackFunction
def longPressHandler(self, *args):
logger.info('{}: longPressHandler, mode: {}'.format(self.name, self.hold_mode))
# instant action (except Postpone mode)
if self.hold_mode != "Postpone":
self.when_pressed(*args)
# action(s) after hold_time
if self.hold_mode == "Repeat":
# Repeated call of main action (multiple times if button is held long enough)
while checkGpioStaysInState(self.hold_time, self.pin, GPIO.LOW):
self.when_pressed(*args)
elif self.hold_mode == "Postpone":
# Postponed call of main action (once)
if checkGpioStaysInState(self.hold_time, self.pin, GPIO.LOW):
self.when_pressed(*args)
while checkGpioStaysInState(self.hold_time, self.pin, GPIO.LOW):
pass
elif self.hold_mode == "SecondFunc":
# Call of secondary action (once)
if checkGpioStaysInState(self.hold_time, self.pin, GPIO.LOW):
self.when_held(*args)
while checkGpioStaysInState(self.hold_time, self.pin, GPIO.LOW):
pass
elif self.hold_mode == "SecondFuncRepeat":
# Repeated call of secondary action (multiple times if button is held long enough)
while checkGpioStaysInState(self.hold_time, self.pin, GPIO.LOW):
self.when_held(*args)
def __del__(self):
logger.debug('remove event detection')
GPIO.remove_event_detect(self.pin)
@property
def is_pressed(self):
if self.pull_up:
return not GPIO.input(self.pin)
return GPIO.input(self.pin)
def __repr__(self):
return '<SimpleButton-{}(pin={},edge={},hold_mode={},hold_time={},bouncetime={},antibouncehack={},pull_up_down={})>'.format(
self.name, self.pin, print_edge_key(self.edge), self.hold_mode, self.hold_time, self.bouncetime,self.antibouncehack,print_pull_up_down(self.pull_up_down)
)
if __name__ == "__main__":
print('please enter pin no to test')
pin = int(input())
func = lambda *args: print('FunctionCall with {}'.format(args))
btn = SimpleButton(pin=pin, action=func, hold_mode='Repeat')
pause()
|
2,228 |
process
|
# This file is part of Indico.
# Copyright (C) 2002 - 2023 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from contextlib import ExitStack
from flask import flash, redirect, request, session
from werkzeug.exceptions import Forbidden, NotFound
from indico.modules.events import Event
from indico.modules.events.registration.util import get_event_regforms_registrations
from indico.modules.events.views import WPAccessKey
from indico.modules.logs.models.entries import EventLogRealm, LogKind
from indico.modules.logs.util import make_diff_log
from indico.util.i18n import _
from indico.web.flask.util import url_for
from indico.web.forms.base import FormDefaults
from indico.web.rh import RH
from indico.web.util import jsonify_data, jsonify_form
class RHEventBase(RH):
def _process_args(self):
self.event = Event.get(request.view_args['event_id'])
if self.event is None:
raise NotFound(_('An event with this ID does not exist.'))
elif self.event.is_deleted:
raise NotFound(_('This event has been deleted.'))
class RHProtectedEventBase(RHEventBase):
"""
Base class for events that checks if the user can access it.
This includes unauthenticated users who have access to the event
for any other reason (e.g. due to a whitelisted IP address), but does not require them to be logged in.
"""
def _check_access(self):
if not self.event.can_access(session.user):
raise Forbidden
class RHAuthenticatedEventBase(RHProtectedEventBase):
"""
Base class for events that checks if the user is authenticated and
can access the event.
"""
def _check_access(self):
if session.user is None:
raise Forbidden
RHProtectedEventBase._check_access(self)
class RHDisplayEventBase(RHProtectedEventBase):
def __init__(self):
RHProtectedEventBase.__init__(self)
self.event_rh_contexts = ExitStack()
def _forbidden_if_not_admin(self):
if not request.is_xhr and session.user and session.user.is_admin:
flash(_('This page is currently not visible by non-admin users (menu entry disabled)!'), 'warning')
else:
raise Forbidden
def _check_access(self):
try:
RHProtectedEventBase._check_access(self)
except Forbidden:
if self.event.access_key:
raise AccessKeyRequired
elif self.event.has_regform_in_acl and self.event.public_regform_access:
raise RegistrationRequired
msg = [_('You are not authorized to access this event.')]
if self.event.no_access_contact:
msg.append(_('If you believe you should have access, please contact {}')
.format(self.event.no_access_contact))
raise Forbidden(' '.join(msg))
def _show_access_key_form(self):
return WPAccessKey.render_template('display/access_key.html', event=self.event)
def _show_registration_form(self):
displayed_regforms, user_registrations = get_event_regforms_registrations(self.event, session.user)
if displayed_regforms and all(r.require_login for r in displayed_regforms) and not session.user:
# Force the user to log in first. This way they are sent back to the page they
# originally tried to access in case they are already registered for the event.
raise Forbidden
if len(displayed_regforms) == 1:
return redirect(url_for('event_registration.display_regform', displayed_regforms[0]))
return redirect(url_for('event_registration.display_regform_list', self.event))
def _process_args(self):
RHProtectedEventBase._process_args(self)
if not getattr(self, 'management', False) and self.event.default_locale:
self.event_rh_contexts.enter_context(self.event.force_event_locale(session.user, allow_session=True))
def _do_process(self):
with self.event_rh_contexts:
try:
return RHEventBase._do_process(self)
except AccessKeyRequired:
return self._show_access_key_form()
except RegistrationRequired:
if request.is_xhr:
raise
return self._show_registration_form()
class EditEventSettingsMixin:
settings_proxy = None
form_cls = None
success_message = None
log_module = None
log_message = None
log_fields = None
def METHOD_NAME(self):
current_settings = self.settings_proxy.get_all(self.event)
form = self.form_cls(obj=FormDefaults(**current_settings))
if form.validate_on_submit():
self.settings_proxy.set_multi(self.event, form.data)
new_settings = self.settings_proxy.get_all(self.event)
flash(self.success_message, 'success')
changes = {k: (v, new_settings[k]) for k, v in current_settings.items() if v != new_settings[k]}
if changes:
self.event.log(EventLogRealm.management, LogKind.change, self.log_module, self.log_message,
session.user, data={'Changes': make_diff_log(changes, self.log_fields)})
return jsonify_data()
return jsonify_form(form)
class AccessKeyRequired(Forbidden):
pass
class RegistrationRequired(Forbidden):
pass
|
2,229 |
add trigger
|
import inspect
from ..hdl import *
from ..hdl.ast import Statement, SignalSet
from .core import Tick, Settle, Delay, Passive, Active
from ._base import BaseProcess
from ._pyrtl import _ValueCompiler, _RHSValueCompiler, _StatementCompiler
__all__ = ["PyCoroProcess"]
class PyCoroProcess(BaseProcess):
def __init__(self, state, domains, constructor, *, default_cmd=None):
self.state = state
self.domains = domains
self.constructor = constructor
self.default_cmd = default_cmd
self.reset()
def reset(self):
self.runnable = True
self.passive = False
self.coroutine = self.constructor()
self.exec_locals = {
"slots": self.state.slots,
"result": None,
**_ValueCompiler.helpers
}
self.waits_on = SignalSet()
def src_loc(self):
coroutine = self.coroutine
if coroutine is None:
return None
while coroutine.gi_yieldfrom is not None and inspect.isgenerator(coroutine.gi_yieldfrom):
coroutine = coroutine.gi_yieldfrom
if inspect.isgenerator(coroutine):
frame = coroutine.gi_frame
if inspect.iscoroutine(coroutine):
frame = coroutine.cr_frame
return "{}:{}".format(inspect.getfile(frame), inspect.getlineno(frame))
def METHOD_NAME(self, signal, trigger=None):
self.state.METHOD_NAME(self, signal, trigger=trigger)
self.waits_on.add(signal)
def clear_triggers(self):
for signal in self.waits_on:
self.state.remove_trigger(self, signal)
self.waits_on.clear()
def run(self):
if self.coroutine is None:
return
self.clear_triggers()
response = None
while True:
try:
command = self.coroutine.send(response)
if command is None:
command = self.default_cmd
response = None
if isinstance(command, Value):
exec(_RHSValueCompiler.compile(self.state, command, mode="curr"),
self.exec_locals)
response = Const(self.exec_locals["result"], command.shape()).value
elif isinstance(command, Statement):
exec(_StatementCompiler.compile(self.state, command),
self.exec_locals)
elif type(command) is Tick:
domain = command.domain
if isinstance(domain, ClockDomain):
pass
elif domain in self.domains:
domain = self.domains[domain]
else:
raise NameError("Received command {!r} that refers to a nonexistent "
"domain {!r} from process {!r}"
.format(command, command.domain, self.src_loc()))
self.METHOD_NAME(domain.clk, trigger=1 if domain.clk_edge == "pos" else 0)
if domain.rst is not None and domain.async_reset:
self.METHOD_NAME(domain.rst, trigger=1)
return
elif type(command) is Settle:
self.state.wait_interval(self, None)
return
elif type(command) is Delay:
# The internal timeline is in integral 1 ps units; intervals are public API and given as floating point seconds.
interval = int(command.interval * 1e12) if command.interval is not None else None
self.state.wait_interval(self, interval)
return
elif type(command) is Passive:
self.passive = True
elif type(command) is Active:
self.passive = False
elif command is None: # only possible if self.default_cmd is None
raise TypeError("Received default command from process {!r} that was added "
"with add_process(); did you mean to add this process with "
"add_sync_process() instead?"
.format(self.src_loc()))
else:
raise TypeError("Received unsupported command {!r} from process {!r}"
.format(command, self.src_loc()))
except StopIteration:
self.passive = True
self.coroutine = None
return
except Exception as exn:
self.coroutine.throw(exn)
|
2,230 |
write table
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import cmd
import json
import sys
from nvflare.fuel.hci.cmd_arg_utils import split_to_args
from nvflare.fuel.hci.table import Table
from nvflare.fuel.sec.authz import AuthzContext, Person, Policy, parse_policy_config
from nvflare.security.security import COMMAND_CATEGORIES
class Commander(cmd.Cmd):
def __init__(self, policy: Policy):
"""Command line prompt helper tool for getting information for authorization configurations.
Args:
policy: authorization policy object
"""
cmd.Cmd.__init__(self)
self.policy = policy
self.intro = "Type help or ? to list commands.\n"
self.prompt = "> "
def do_bye(self, arg):
"""Exits from the client."""
return True
def emptyline(self):
return
def _split_to_args(self, arg):
if len(arg) <= 0:
return []
else:
return split_to_args(arg)
def do_show_rights(self, arg):
rights = self.policy.rights
table = Table(["right"])
for r in rights:
table.add_row([r])
self.METHOD_NAME(table)
def do_show_roles(self, arg):
roles = self.policy.roles
table = Table(["role"])
for r in roles:
table.add_row([r])
self.METHOD_NAME(table)
def do_show_config(self, arg):
config = self.policy.config
self.write_string(json.dumps(config, indent=1))
def do_show_role_rights(self, arg):
role_rights = self.policy.role_rights
table = Table(["role", "right", "conditions"])
for role_name in sorted(role_rights):
right_conds = role_rights[role_name]
for right_name in sorted(right_conds):
conds = right_conds[right_name]
table.add_row([role_name, right_name, str(conds)])
self.METHOD_NAME(table)
def _parse_person(self, spec: str):
parts = spec.split(":")
if len(parts) != 3:
return "must be like name:org:role"
return Person(parts[0], parts[1], parts[2])
def do_eval_right(self, arg):
args = ["eval_right"] + self._split_to_args(arg)
if len(args) < 4:
self.write_string(
"Usage: {} site_org right_name user_name:org:role [submitter_name:org:role]".format(args[0])
)
return
site_org = args[1]
right_name = args[2]
user_spec = args[3]
submitter_spec = None
if len(args) > 4:
submitter_spec = args[4]
parsed = self._parse_person(user_spec)
if isinstance(parsed, str):
# error
return self.write_error("bad user spec: " + parsed)
user = parsed
submitter = None
if submitter_spec:
parsed = self._parse_person(submitter_spec)
if isinstance(parsed, str):
# error
return self.write_error("bad submitter spec: " + parsed)
submitter = parsed
result, err = self.policy.evaluate(
site_org=site_org, ctx=AuthzContext(right=right_name, user=user, submitter=submitter)
)
if err:
self.write_error(err)
elif result is None:
self.write_string("undetermined")
else:
self.write_string(str(result))
def write_string(self, data: str):
content = data + "\n"
self.stdout.write(content)
def METHOD_NAME(self, table: Table):
table.write(self.stdout)
def write_error(self, err: str):
content = "Error: " + err + "\n"
self.stdout.write(content)
def define_authz_preview_parser(parser):
parser.add_argument("--policy", "-p", type=str, help="authz policy file", required=True)
def load_policy(policy_file_path):
with open(policy_file_path) as file:
config = json.load(file)
policy, err = parse_policy_config(config, COMMAND_CATEGORIES)
if err:
print("Policy config error: {}".format(err))
sys.exit(1)
return policy
def run_command(args):
policy = load_policy(args.policy)
commander = Commander(policy)
commander.cmdloop(intro="Type help or ? to list commands.")
def main():
"""Tool to help preview and see the details of an authorization policy with command line commands."""
parser = argparse.ArgumentParser()
define_authz_preview_parser(parser)
args = parser.parse_args()
run_command(args)
if __name__ == "__main__":
main()
|
2,231 |
connect indicators dimension names
|
from typing import Iterable
from akvo.rsr.models import (
Project, DefaultPeriod, Result, IndicatorDimensionName, IndicatorDimensionValue,
Indicator, IndicatorReference, IndicatorPeriod
)
def add_new_project_to_program(project: Project, program: Project):
"""
This function assumes that the project is an empty newly created project.
Program's validation set and results framework will be applied/inherited to the project.
"""
project.set_reporting_org(program.reporting_org)
for validation_set in program.validations.all():
project.add_validation_set(validation_set)
project.set_parent(program).save()
inherit_results_framework(project, program)
project.refresh_from_db()
def inherit_results_framework(child: Project, parent: Project):
inherit_dimension_names(child, parent)
inherit_results(child, parent)
# Copy the default periods after copying the results, so that we don't create new
# periods from the parent that may already be present via the copied results.
inherit_default_periods(child, parent)
def inherit_dimension_names(child: Project, parent: Project):
inherited_dimension_names = child.dimension_names.exclude(parent_dimension_name__isnull=True).values_list('parent_dimension_name', flat=True)
parent_names = parent.dimension_names.exclude(id__in=inherited_dimension_names)
names = IndicatorDimensionName.objects.bulk_create([
IndicatorDimensionName(project=child, parent_dimension_name=p, name=p.name)
for p in parent_names
])
name_parent_map = {n.parent_dimension_name.id: n for n in names}
parent_values = IndicatorDimensionValue.objects.filter(name__in=(n.id for n in parent_names))
IndicatorDimensionValue.objects.bulk_create([
IndicatorDimensionValue(name=name_parent_map[dv.name.id], parent_dimension_value=dv, value=dv.value)
for dv in parent_values
])
def inherit_results(child: Project, parent: Project):
inherited_results = child.results.exclude(parent_result__isnull=True).values_list('parent_result', flat=True)
parent_results = parent.results.exclude(id__in=inherited_results)
results = Result.objects.bulk_create([
Result(
project=child,
parent_result=r,
title=r.title,
type=r.type,
aggregation_status=r.aggregation_status,
description=r.description,
order=r.order,
)
for r in parent_results
])
inherit_indicators(child, results)
def inherit_indicators(child: Project, results: Iterable[Result]):
result_parent_map = {r.parent_result.id: r for r in results}
parent_indicators = Indicator.objects.filter(result__in=result_parent_map.keys())
indicators = Indicator.objects.bulk_create([
Indicator(
result=result_parent_map[i.result.id],
parent_indicator=i,
title=i.title,
description=i.description,
measure=i.measure,
ascending=i.ascending,
cumulative=i.cumulative,
type=i.type,
export_to_iati=i.export_to_iati,
scores=i.scores,
order=i.order,
baseline_comment=i.baseline_comment,
baseline_year=i.baseline_year,
baseline_value=i.baseline_value,
)
for i in parent_indicators
])
inherit_periods(indicators)
copy_references(indicators)
METHOD_NAME(child, indicators)
def inherit_periods(indicators: Iterable[Indicator]):
indicator_parent_map = {i.parent_indicator.id: i for i in indicators}
parent_periods = IndicatorPeriod.objects.filter(indicator__in=indicator_parent_map.keys())
IndicatorPeriod.objects.bulk_create([
IndicatorPeriod(
indicator=indicator_parent_map[p.indicator.id],
parent_period=p,
period_start=p.period_start,
period_end=p.period_end
)
for p in parent_periods
])
def copy_references(indicators: Iterable[Indicator]):
bulk = []
for indicator in indicators:
parent_references = indicator.parent_indicator.references.all()
for reference in parent_references:
bulk.append(IndicatorReference(
indicator=indicator,
reference=reference.reference,
vocabulary=reference.vocabulary,
vocabulary_uri=reference.vocabulary_uri,
))
IndicatorReference.objects.bulk_create(bulk)
def METHOD_NAME(child: Project, indicators: Iterable[Indicator]):
ThroughModel = Indicator.dimension_names.through
bulk = []
for indicator in indicators:
parent_dimension_names = indicator.parent_indicator.dimension_names.all()
candidates = IndicatorDimensionName.objects.filter(project=child, parent_dimension_name__in=parent_dimension_names)
for dimension_name in candidates:
bulk.append(ThroughModel(indicator_id=indicator.id, indicatordimensionname_id=dimension_name.id))
ThroughModel.objects.bulk_create(bulk)
def inherit_default_periods(child: Project, parent: Project):
inherited_parent_periods = child.default_periods.exclude(parent__isnull=True).values_list('parent', flat=True)
parent_periods = parent.default_periods.exclude(id__in=inherited_parent_periods)
DefaultPeriod.objects.bulk_create([
DefaultPeriod(project=child, parent=p, period_start=p.period_start, period_end=p.period_end)
for p in parent_periods
])
|
2,232 |
custom manylinux platforms
|
"""Generate and work with PEP 425 Compatibility Tags.
"""
import re
from typing import List, Optional, Tuple
from pip._vendor.packaging.tags import (
PythonVersion,
Tag,
compatible_tags,
cpython_tags,
generic_tags,
interpreter_name,
interpreter_version,
mac_platforms,
)
_osx_arch_pat = re.compile(r"(.+)_(\d+)_(\d+)_(.+)")
def version_info_to_nodot(version_info: Tuple[int, ...]) -> str:
# Only use up to the first two numbers.
return "".join(map(str, version_info[:2]))
def _mac_platforms(arch: str) -> List[str]:
match = _osx_arch_pat.match(arch)
if match:
name, major, minor, actual_arch = match.groups()
mac_version = (int(major), int(minor))
arches = [
# Since we have always only checked that the platform starts
# with "macosx", for backwards-compatibility we extract the
# actual prefix provided by the user in case they provided
# something like "macosxcustom_". It may be good to remove
# this as undocumented or deprecate it in the future.
"{}_{}".format(name, arch[len("macosx_") :])
for arch in mac_platforms(mac_version, actual_arch)
]
else:
# arch pattern didn't match (?!)
arches = [arch]
return arches
def METHOD_NAME(arch: str) -> List[str]:
arches = [arch]
arch_prefix, arch_sep, arch_suffix = arch.partition("_")
if arch_prefix == "manylinux2014":
# manylinux1/manylinux2010 wheels run on most manylinux2014 systems
# with the exception of wheels depending on ncurses. PEP 599 states
# manylinux1/manylinux2010 wheels should be considered
# manylinux2014 wheels:
# https://www.python.org/dev/peps/pep-0599/#backwards-compatibility-with-manylinux2010-wheels
if arch_suffix in {"i686", "x86_64"}:
arches.append("manylinux2010" + arch_sep + arch_suffix)
arches.append("manylinux1" + arch_sep + arch_suffix)
elif arch_prefix == "manylinux2010":
# manylinux1 wheels run on most manylinux2010 systems with the
# exception of wheels depending on ncurses. PEP 571 states
# manylinux1 wheels should be considered manylinux2010 wheels:
# https://www.python.org/dev/peps/pep-0571/#backwards-compatibility-with-manylinux1-wheels
arches.append("manylinux1" + arch_sep + arch_suffix)
return arches
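# Worked example (follows directly from the branches above): an input of
# "manylinux2014_x86_64" expands to ["manylinux2014_x86_64", "manylinux2010_x86_64",
# "manylinux1_x86_64"], while "manylinux2014_aarch64" stays as ["manylinux2014_aarch64"]
# because the legacy fallback only applies to i686/x86_64.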
def _get_custom_platforms(arch: str) -> List[str]:
arch_prefix, arch_sep, arch_suffix = arch.partition("_")
if arch.startswith("macosx"):
arches = _mac_platforms(arch)
elif arch_prefix in ["manylinux2014", "manylinux2010"]:
arches = METHOD_NAME(arch)
else:
arches = [arch]
return arches
def _expand_allowed_platforms(platforms: Optional[List[str]]) -> Optional[List[str]]:
if not platforms:
return None
seen = set()
result = []
for p in platforms:
if p in seen:
continue
additions = [c for c in _get_custom_platforms(p) if c not in seen]
seen.update(additions)
result.extend(additions)
return result
def _get_python_version(version: str) -> PythonVersion:
if len(version) > 1:
return int(version[0]), int(version[1:])
else:
return (int(version[0]),)
def _get_custom_interpreter(
implementation: Optional[str] = None, version: Optional[str] = None
) -> str:
if implementation is None:
implementation = interpreter_name()
if version is None:
version = interpreter_version()
return f"{implementation}{version}"
def get_supported(
version: Optional[str] = None,
platforms: Optional[List[str]] = None,
impl: Optional[str] = None,
abis: Optional[List[str]] = None,
) -> List[Tag]:
"""Return a list of supported tags for each version specified in
`versions`.
:param version: a string version, of the form "33" or "32",
or None. The version will be assumed to support our ABI.
:param platform: specify a list of platforms you want valid
tags for, or None. If None, use the local system platform.
:param impl: specify the exact implementation you want valid
tags for, or None. If None, use the local interpreter impl.
:param abis: specify a list of abis you want valid
tags for, or None. If None, use the local interpreter abi.
"""
supported: List[Tag] = []
python_version: Optional[PythonVersion] = None
if version is not None:
python_version = _get_python_version(version)
interpreter = _get_custom_interpreter(impl, version)
platforms = _expand_allowed_platforms(platforms)
is_cpython = (impl or interpreter_name()) == "cp"
if is_cpython:
supported.extend(
cpython_tags(
python_version=python_version,
abis=abis,
platforms=platforms,
)
)
else:
supported.extend(
generic_tags(
interpreter=interpreter,
abis=abis,
platforms=platforms,
)
)
supported.extend(
compatible_tags(
python_version=python_version,
interpreter=interpreter,
platforms=platforms,
)
)
return supported
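# Hypothetical usage sketch (the argument values are illustrative assumptions):
#
#     tags = get_supported(version="310", platforms=["manylinux2014_x86_64"], impl="cp")
#
# The most specific tags come first, e.g. Tag("cp310", "cp310", "manylinux2014_x86_64"),
# followed by tags for the expanded manylinux2010/manylinux1 platforms and the
# generic compatible fallbacks.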
|
2,233 |
container count
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetStorageAccountResult',
'AwaitableGetStorageAccountResult',
'get_storage_account',
'get_storage_account_output',
]
@pulumi.output_type
class GetStorageAccountResult:
"""
Represents a Storage Account on the Data Box Edge/Gateway device.
"""
def __init__(__self__, blob_endpoint=None, METHOD_NAME=None, data_policy=None, description=None, id=None, name=None, storage_account_credential_id=None, storage_account_status=None, system_data=None, type=None):
if blob_endpoint and not isinstance(blob_endpoint, str):
raise TypeError("Expected argument 'blob_endpoint' to be a str")
pulumi.set(__self__, "blob_endpoint", blob_endpoint)
if METHOD_NAME and not isinstance(METHOD_NAME, int):
raise TypeError("Expected argument 'container_count' to be a int")
pulumi.set(__self__, "container_count", METHOD_NAME)
if data_policy and not isinstance(data_policy, str):
raise TypeError("Expected argument 'data_policy' to be a str")
pulumi.set(__self__, "data_policy", data_policy)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if storage_account_credential_id and not isinstance(storage_account_credential_id, str):
raise TypeError("Expected argument 'storage_account_credential_id' to be a str")
pulumi.set(__self__, "storage_account_credential_id", storage_account_credential_id)
if storage_account_status and not isinstance(storage_account_status, str):
raise TypeError("Expected argument 'storage_account_status' to be a str")
pulumi.set(__self__, "storage_account_status", storage_account_status)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="blobEndpoint")
def blob_endpoint(self) -> str:
"""
BlobEndpoint of Storage Account
"""
return pulumi.get(self, "blob_endpoint")
@property
@pulumi.getter(name="containerCount")
def METHOD_NAME(self) -> int:
"""
The Container Count. Present only for Storage Accounts with DataPolicy set to Cloud.
"""
return pulumi.get(self, "container_count")
@property
@pulumi.getter(name="dataPolicy")
def data_policy(self) -> str:
"""
Data policy of the storage Account.
"""
return pulumi.get(self, "data_policy")
@property
@pulumi.getter
def description(self) -> Optional[str]:
"""
Description for the storage Account.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def id(self) -> str:
"""
The path ID that uniquely identifies the object.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
The object name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="storageAccountCredentialId")
def storage_account_credential_id(self) -> Optional[str]:
"""
Storage Account Credential Id
"""
return pulumi.get(self, "storage_account_credential_id")
@property
@pulumi.getter(name="storageAccountStatus")
def storage_account_status(self) -> Optional[str]:
"""
Current status of the storage account
"""
return pulumi.get(self, "storage_account_status")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Metadata pertaining to creation and last modification of StorageAccount
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> str:
"""
The hierarchical type of the object.
"""
return pulumi.get(self, "type")
class AwaitableGetStorageAccountResult(GetStorageAccountResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetStorageAccountResult(
blob_endpoint=self.blob_endpoint,
METHOD_NAME=self.METHOD_NAME,
data_policy=self.data_policy,
description=self.description,
id=self.id,
name=self.name,
storage_account_credential_id=self.storage_account_credential_id,
storage_account_status=self.storage_account_status,
system_data=self.system_data,
type=self.type)
def get_storage_account(device_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
storage_account_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetStorageAccountResult:
"""
Represents a Storage Account on the Data Box Edge/Gateway device.
:param str device_name: The device name.
:param str resource_group_name: The resource group name.
:param str storage_account_name: The storage account name.
"""
__args__ = dict()
__args__['deviceName'] = device_name
__args__['resourceGroupName'] = resource_group_name
__args__['storageAccountName'] = storage_account_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:databoxedge/v20220301:getStorageAccount', __args__, opts=opts, typ=GetStorageAccountResult).value
return AwaitableGetStorageAccountResult(
blob_endpoint=pulumi.get(__ret__, 'blob_endpoint'),
METHOD_NAME=pulumi.get(__ret__, 'container_count'),
data_policy=pulumi.get(__ret__, 'data_policy'),
description=pulumi.get(__ret__, 'description'),
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
storage_account_credential_id=pulumi.get(__ret__, 'storage_account_credential_id'),
storage_account_status=pulumi.get(__ret__, 'storage_account_status'),
system_data=pulumi.get(__ret__, 'system_data'),
type=pulumi.get(__ret__, 'type'))
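# Hypothetical usage sketch (resource names are assumptions for illustration):
#
#     account = get_storage_account(device_name="edge-device-1",
#                                   resource_group_name="example-rg",
#                                   storage_account_name="edgestorage1")
#     pulumi.export("blobEndpoint", account.blob_endpoint)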
@_utilities.lift_output_func(get_storage_account)
def get_storage_account_output(device_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
storage_account_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetStorageAccountResult]:
"""
Represents a Storage Account on the Data Box Edge/Gateway device.
:param str device_name: The device name.
:param str resource_group_name: The resource group name.
:param str storage_account_name: The storage account name.
"""
...
|
2,234 |
normalize prefix or 404
|
# -*- coding: utf-8 -*-
"""Utility functions for the Bioregistry :mod:`flask` app."""
import json
from functools import partial
from typing import Any, Callable, Dict, List, Mapping, Optional, Sequence, Tuple
import yaml
from flask import (
Response,
abort,
current_app,
redirect,
render_template,
request,
url_for,
)
from pydantic import BaseModel
from bioregistry.resource_manager import Manager
from .proxies import manager
from ..utils import _norm
def _get_resource_providers(
prefix: str, identifier: Optional[str]
) -> Optional[List[Dict[str, Any]]]:
if identifier is None:
return None
rv = []
for metaprefix, uri in manager.get_providers_list(prefix, identifier):
if metaprefix == "default":
metaprefix = prefix
name = manager.get_name(prefix)
homepage = manager.get_homepage(prefix)
elif metaprefix == "rdf":
name = f"{manager.get_name(prefix)} (RDF)"
homepage = manager.get_homepage(prefix)
else:
name = manager.get_registry_name(metaprefix)
homepage = manager.get_registry_homepage(metaprefix)
rv.append(
dict(
metaprefix=metaprefix,
homepage=homepage,
name=name,
uri=uri,
)
)
return rv
def METHOD_NAME(prefix: str, endpoint: Optional[str] = None):
try:
norm_prefix = manager.normalize_prefix(prefix)
except ValueError:
norm_prefix = None
if norm_prefix is None:
return render_template("resolve_errors/missing_prefix.html", prefix=prefix), 404
elif endpoint is not None and norm_prefix != prefix:
return redirect(url_for(endpoint, prefix=norm_prefix))
return norm_prefix
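# Illustrative sketch of the helper above (concrete prefixes are examples only):
# a request using an alias such as "CHEBI" with an `endpoint` argument given
# would redirect to the canonical "chebi" route, while a prefix the manager
# cannot normalize renders the missing_prefix.html template with a 404.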
def _search(manager_: Manager, q: str) -> List[Tuple[str, str]]:
q_norm = _norm(q)
results = [
(prefix, lookup if _norm(prefix) != lookup else "")
for lookup, prefix in manager_.synonyms.items()
if q_norm in lookup
]
return sorted(results)
def _autocomplete(manager_: Manager, q: str, url_prefix: Optional[str] = None) -> Mapping[str, Any]:
r"""Run the autocomplete algorithm.
:param manager_: A manager
:param q: The query string
:param url_prefix:
The explicit URL prefix. If not used, relative paths are generated. Introduced to
solve https://github.com/biopragmatics/bioregistry/issues/596.
:return: A dictionary with the autocomplete results.
Before the prefix is complete:
>>> from bioregistry import manager
>>> _autocomplete(manager, 'cheb')
{'query': 'cheb', 'results': [('chebi', ''), ('chebi', 'chebiid'), ('goche', 'gochebi')], 'success': True, 'reason': 'searched prefix', 'url': None}
If only prefix is complete:
>>> _autocomplete(manager, 'chebi')
{'query': 'chebi', 'results': [('chebi', ''), ('chebi', 'chebiid'), ('goche', 'gochebi')], 'success': True, 'reason': 'matched prefix', 'url': '/chebi'}
Not matching the pattern:
>>> _autocomplete(manager, 'chebi:NOPE')
{'query': 'chebi:NOPE', 'prefix': 'chebi', 'pattern': '^\\d+$', 'identifier': 'NOPE', 'success': False, 'reason': 'failed validation', 'url': None}
Matching the pattern:
>>> _autocomplete(manager, 'chebi:1234')
{'query': 'chebi:1234', 'prefix': 'chebi', 'pattern': '^\\d+$', 'identifier': '1234', 'success': True, 'reason': 'passed validation', 'url': '/chebi:1234'}
""" # noqa: E501
if url_prefix is None:
url_prefix = ""
url_prefix = url_prefix.rstrip().rstrip("/")
if ":" not in q:
url: Optional[str]
if q in manager_.registry:
reason = "matched prefix"
url = f"{url_prefix}/{q}"
else:
reason = "searched prefix"
url = None
return dict(
query=q,
results=_search(manager_, q),
success=True,
reason=reason,
url=url,
)
prefix, identifier = q.split(":", 1)
resource = manager_.get_resource(prefix)
if resource is None:
return dict(
query=q,
prefix=prefix,
identifier=identifier,
success=False,
reason="bad prefix",
)
pattern = manager_.get_pattern(prefix)
if pattern is None:
success = True
reason = "no pattern"
norm_id = resource.standardize_identifier(identifier)
url = f"{url_prefix}/{resource.get_curie(norm_id)}"
elif resource.is_standardizable_identifier(identifier):
success = True
reason = "passed validation"
norm_id = resource.standardize_identifier(identifier)
url = f"{url_prefix}/{resource.get_curie(norm_id)}"
else:
success = False
reason = "failed validation"
url = None
return dict(
query=q,
prefix=prefix,
pattern=pattern,
identifier=identifier,
success=success,
reason=reason,
url=url,
)
def serialize(
data: BaseModel,
serializers: Optional[Sequence[Tuple[str, str, Callable]]] = None,
negotiate: bool = False,
) -> Response:
"""Serialize either as JSON or YAML."""
if negotiate:
accept = get_accept_media_type()
else:
arg = request.args.get("format", "json")
if arg not in FORMAT_MAP:
return abort(
400, f"unhandled value for `format`: {arg}. Use one of: {sorted(FORMAT_MAP)}"
)
accept = FORMAT_MAP[arg]
if accept == "application/json":
return current_app.response_class(
json.dumps(data.dict(exclude_unset=True, exclude_none=True), ensure_ascii=False),
mimetype="application/json",
)
elif accept == "application/yaml":
return current_app.response_class(
yaml.safe_dump(data.dict(exclude_unset=True, exclude_none=True), allow_unicode=True),
mimetype="text/plain",
)
for _name, mimetype, func in serializers or []:
if accept == mimetype:
return current_app.response_class(func(data), mimetype=mimetype)
return abort(404, f"unhandled media type: {accept}")
def serialize_model(entry: BaseModel, func, negotiate: bool = False) -> Response:
"""Serialize a model."""
return serialize(
entry,
negotiate=negotiate,
serializers=[
("turtle", "text/turtle", partial(func, manager=manager, fmt="turtle")),
("n3", "text/n3", partial(func, manager=manager, fmt="n3")),
("rdf", "application/rdf+xml", partial(func, manager=manager, fmt="xml")),
(
"jsonld",
"application/ld+json",
partial(func, manager=manager, fmt="json-ld"),
),
],
)
def get_accept_media_type() -> str:
"""Get accept type."""
fmt = request.args.get("format")
if fmt is not None:
rv = FORMAT_MAP.get(fmt)
if rv:
return rv
return abort(400, f"bad query parameter format={fmt}. Should be one of {list(FORMAT_MAP)}")
# If accept is specifically set to one of the supported media types, then use it.
accept = str(request.accept_mimetypes)
if accept in FORMAT_MAP.values():
return accept
# Otherwise, return HTML
return "text/html"
FORMAT_MAP = {
"json": "application/json",
"yml": "application/yaml",
"yaml": "application/yaml",
"turtle": "text/turtle",
"jsonld": "application/ld+json",
"json-ld": "application/ld+json",
"rdf": "application/rdf+xml",
"n3": "text/n3",
}
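# Rough sketch of how the pieces above fit together (illustrative request, not
# an exhaustive contract): a call like GET /registry/chebi?format=yaml maps
# "yaml" through FORMAT_MAP to "application/yaml", so serialize() takes the
# YAML branch; an unknown ?format= value is rejected with a 400 before any
# serializer is consulted.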
|
2,235 |
fcn
|
from iminuit._core import (
FCN,
MnUserParameterState,
MnUserTransformation,
MnMigrad,
MnStrategy,
MnScan,
FunctionMinimum,
MnSimplex,
MnPrint,
MnUserCovariance,
MinimumState,
)
from pytest import approx
import pytest
import pickle
import numpy as np
@pytest.fixture
def debug():
prev = MnPrint.global_level
MnPrint.global_level = 3
MnPrint.show_prefix_stack(True)
yield
MnPrint.global_level = prev
MnPrint.show_prefix_stack(False)
def test_MnStrategy():
assert MnStrategy() == 1
assert MnStrategy(0) == 0
assert MnStrategy(2) == 2
s = MnStrategy()
s.strategy = 2
assert s.strategy == 2
assert s != 1
assert not (s != 2)
def test_MnUserCovariance():
c = MnUserCovariance((1, 2, 3), 2)
assert c.nrow == 2
assert c[(0, 0)] == 1
assert c[(1, 0)] == 2
assert c[(0, 1)] == 2
assert c[(1, 1)] == 3
pkl = pickle.dumps(c)
c2 = pickle.loads(pkl)
assert c2.nrow == 2
assert c2[(0, 0)] == 1
assert c2[(1, 1)] == 3
assert c2 == c
def fn(x, y):
return 10 + x**2 + ((y - 1) / 2) ** 2
def fn_grad(x, y):
return (2 * x, y - 1)
def test_MnUserParameterState():
st = MnUserParameterState()
st.add("x", 1, 0.2)
st.add("😁", 3, 0.3, 1, 4)
assert len(st) == 2
assert st[0].number == 0
assert st[0].name == "x"
assert st[0].value == 1
assert st[0].error == 0.2
assert st[1].number == 1
assert st[1].name == "😁"
assert st[1].value == 3
assert st[1].error == 0.3
assert st[1].lower_limit == 1
assert st[1].upper_limit == 4
st2 = MnUserParameterState(st)
assert st2 == st
st2.set_value(0, 1.1)
assert st2 != st
def test_MnMigrad():
METHOD_NAME = FCN(fn, None, False, 1)
state = MnUserParameterState()
state.add("x", 5, 0.1)
state.add("y", 3, 0.2, -5, 5)
migrad = MnMigrad(METHOD_NAME, state, 1)
fmin = migrad(0, 0.1)
assert fmin.is_valid
state = fmin.state
assert state[0].value == approx(0, abs=5e-3)
assert state[0].error == approx(1, abs=5e-3)
assert state[1].value == approx(1, abs=5e-3)
assert state[1].error == approx(2, abs=6e-2)
assert METHOD_NAME._nfcn > 0
assert METHOD_NAME._ngrad == 0
def test_MnMigrad_grad():
METHOD_NAME = FCN(lambda x: 10 + x**2, lambda x: [2 * x], False, 1)
state = MnUserParameterState()
state.add("x", 5, 0.1)
migrad = MnMigrad(METHOD_NAME, state, 1)
fmin = migrad(0, 0.1)
state = fmin.state
assert len(state) == 1
assert state[0].number == 0
assert state[0].name == "x"
assert state[0].value == approx(0, abs=1e-3)
assert state[0].error == approx(1, abs=1e-3)
assert METHOD_NAME._nfcn > 0
assert METHOD_NAME._ngrad > 0
def test_MnMigrad_cfunc():
nb = pytest.importorskip("numba")
c_sig = nb.types.double(nb.types.uintc, nb.types.CPointer(nb.types.double))
y = np.arange(5)
@nb.cfunc(c_sig)
def METHOD_NAME(n, x):
x = nb.carray(x, (n,))
r = 0.0
for i in range(n):
r += (y[i] - x[i]) ** 2
return r
METHOD_NAME = FCN(METHOD_NAME, None, True, 1)
state = MnUserParameterState()
for i in range(len(y)):
state.add(f"x{i}", 5, 0.1)
migrad = MnMigrad(METHOD_NAME, state, 1)
fmin = migrad(0, 0.1)
state = fmin.state
assert len(state) == len(y)
for i, p in enumerate(state):
assert p.number == i
assert p.value == approx(i, abs=1e-3)
assert p.error == approx(1, abs=1e-3)
def test_MnMigrad_np():
METHOD_NAME = FCN(
lambda xy: 10 + xy[0] ** 2 + ((xy[1] - 1) / 2) ** 2,
lambda xy: [2 * xy[0], (xy[1] - 1)],
True,
1,
)
state = MnUserParameterState()
state.add("x", 5, 0.1)
state.add("😁", 3, 0.2, -5, 5)
assert len(state) == 2
str = MnStrategy(2)
migrad = MnMigrad(METHOD_NAME, state, str)
fmin = migrad(0, 0.1)
state = fmin.state
assert len(state) == 2
assert state[0].number == 0
assert state[0].name == "x"
assert state[0].value == approx(0, abs=1e-2)
assert state[0].error == approx(1, abs=1e-2)
assert state[1].number == 1
assert state[1].name == "😁"
assert state[1].value == approx(1, abs=1e-2)
assert state[1].error == approx(2, abs=6e-2)
assert METHOD_NAME._nfcn > 0
assert METHOD_NAME._ngrad > 0
def test_MnScan():
METHOD_NAME = FCN(lambda x: 10 + x**2, None, False, 1)
state = MnUserParameterState()
state.add("x", 2, 5)
scan = MnScan(METHOD_NAME, state, 1)
fmin = scan(0, 0.1)
assert fmin.is_valid
state = fmin.state
assert len(state) == 1
assert state[0].value == approx(0, abs=1e-2)
def test_MnSimplex():
METHOD_NAME = FCN(lambda x: 10 + x**2, None, False, 1)
state = MnUserParameterState()
state.add("x", 2, 5)
simplex = MnSimplex(METHOD_NAME, state, 1)
fmin = simplex(0, 0.1)
assert fmin.is_valid
state = fmin.state
assert len(state) == 1
assert state[0].value == approx(0, abs=5e-2)
def test_FunctionMinimum():
METHOD_NAME = FCN(lambda x: 10 + x**2, None, False, 1)
st = MnUserParameterState()
st.add("x", 0.01, 5)
str = MnStrategy(1)
fm1 = FunctionMinimum(METHOD_NAME, st, str, 0.2)
assert fm1.is_valid
assert len(fm1.state) == 1
assert fm1.fval == 10.0001
fm2 = FunctionMinimum(METHOD_NAME, st, str, 0)
assert not fm2.is_valid
def test_FunctionMinimum_pickle():
st = MnUserParameterState()
st.add("x", 1, 0.1)
st.add("y", 2, 0.1, 1, 3)
fm = FunctionMinimum(FCN(fn, None, False, 1), st, 1, 0.1)
pkl = pickle.dumps(fm)
fm2 = pickle.loads(pkl)
assert len(fm.state) == len(fm2.state)
assert fm.state == fm2.state
assert fm.edm == fm2.edm
assert fm.fval == fm2.fval
assert fm.is_valid == fm2.is_valid
assert fm.has_accurate_covar == fm2.has_accurate_covar
assert fm.has_posdef_covar == fm2.has_posdef_covar
assert fm.has_made_posdef_covar == fm2.has_made_posdef_covar
assert fm.hesse_failed == fm2.hesse_failed
assert fm.has_covariance == fm2.has_covariance
assert fm.is_above_max_edm == fm2.is_above_max_edm
assert fm.has_reached_call_limit == fm2.has_reached_call_limit
assert fm.errordef == fm2.errordef
def test_MnUserTransformation_pickle():
tr = MnUserTransformation()
pkl = pickle.dumps(tr)
tr2 = pickle.loads(pkl)
assert len(tr2) == len(tr)
def test_MinimumState_pickle():
st = MinimumState(3)
pkl = pickle.dumps(st)
st2 = pickle.loads(pkl)
assert st.vec == st2.vec
assert st.fval == st2.fval
assert st.edm == st2.edm
assert st.nfcn == st2.nfcn
assert st.is_valid == st2.is_valid
assert st.has_parameters == st2.has_parameters
assert st.has_covariance == st2.has_covariance
|
2,236 |
test parallel optimization without grad
|
import logging
import unittest
from monty.tempfile import ScratchDir
import numpy as np
try:
from mpi4py import MPI
except ImportError:
MPI = None
from simsopt._core.optimizable import Optimizable
from simsopt.objectives.functions import Beale
from simsopt.objectives.least_squares import LeastSquaresProblem
if MPI is not None:
from simsopt.util.mpi import MpiPartition
from simsopt.solve.mpi import least_squares_mpi_solve
#logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
class TestFunction1(Optimizable):
def __init__(self):
x = np.array([1.2, 0.9, -0.4])
fixed = np.full(3, False)
super().__init__(x0=x, fixed=fixed)
def J(self):
return np.exp(self.full_x[0] ** 2 - np.exp(self.full_x[1]) \
+ np.sin(self.full_x[2]))
return_fn_map = {'J': J}
class TestFunction2(Optimizable):
def __init__(self):
x = np.array([1.2, 0.9])
fixed = np.full(2, False)
super().__init__(x0=x, fixed=fixed)
def f0(self):
return np.exp(0 + self.full_x[0] ** 2 - np.exp(self.full_x[1]))
def f1(self):
return np.exp(1 + self.full_x[0] ** 2 - np.exp(self.full_x[1]))
def f2(self):
return np.exp(2 + self.full_x[0] ** 2 - np.exp(self.full_x[1]))
def f3(self):
return np.exp(3 + self.full_x[0] ** 2 - np.exp(self.full_x[1]))
return_fn_map = {'f0': f0, 'f1': f1, 'f2': f2, 'f3': f3}
class TestFunction3(Optimizable):
"""
This is the Rosenbrock function again, but with some unnecessary
MPI communication added in order to test optimization with MPI.
"""
def __init__(self, comm, x=[0, 0]):
self.comm = comm
self.dummy = 42
self.f0_call_cnt = 0
self.f1_call_cnt = 0
logger.debug("inside test function 3 init")
super().__init__(x0=x)
def f0(self):
# Do some random MPI stuff just for the sake of testing.
self.comm.barrier()
self.comm.bcast(self.local_full_x)
self.f0_call_cnt += 1
print(f"x is {self.local_full_x}")
print(f"TestFunction3.f0 called {self.f0_call_cnt} times")
return self.local_full_x[0] - 1
def f1(self):
# Do some random MPI stuff just for the sake of testing.
self.comm.bcast(self.dummy)
self.comm.barrier()
self.f1_call_cnt += 1
print(f"x is {self.local_full_x}")
print(f"TestFunction3.f1 called {self.f1_call_cnt} times")
return self.local_full_x[0] ** 2 - self.local_full_x[1]
return_fn_map = {'f0': f0, 'f1': f1}
@unittest.skipIf(MPI is None, "Requires mpi4py")
class MPISolveTests(unittest.TestCase):
def METHOD_NAME(self):
"""
Test a full least-squares optimization.
"""
with ScratchDir("."):
for ngroups in range(1, 4):
mpi = MpiPartition(ngroups=ngroups)
o = TestFunction3(mpi.comm_groups)
term1 = (o.f0, 0, 1)
term2 = (o.f1, 0, 1)
prob = LeastSquaresProblem.from_tuples([term1, term2])
least_squares_mpi_solve(prob, mpi, grad=False)
self.assertAlmostEqual(prob.x[0], 1)
self.assertAlmostEqual(prob.x[1], 1)
def test_parallel_optimization_with_grad(self):
"""
Test a full least-squares optimization.
"""
with ScratchDir("."):
for ngroups in range(1, 4):
for abs_step in [0, 1.0e-7]:
# Only try rel_step=0 if abs_step is positive:
rel_steps = [0, 1.0e-7]
if abs_step == 0:
rel_steps = [1.0e-7]
for rel_step in rel_steps:
for diff_method in ["forward", "centered"]:
logger.debug(f'ngroups={ngroups} abs_step={abs_step} ' \
f'rel_step={rel_step} diff_method={diff_method}')
mpi = MpiPartition(ngroups=ngroups)
o = TestFunction3(mpi.comm_groups)
term1 = (o.f0, 0, 1)
term2 = (o.f1, 0, 1)
prob = LeastSquaresProblem.from_tuples([term1, term2])
# Set initial condition different from 0,
# because otherwise abs_step=0 causes step
# size to be 0.
prob.x = [-0.1, 0.2]
least_squares_mpi_solve(prob, mpi, grad=True,
diff_method=diff_method,
abs_step=abs_step,
rel_step=rel_step)
self.assertAlmostEqual(prob.x[0], 1)
self.assertAlmostEqual(prob.x[1], 1)
|
2,237 |
register repo kind
|
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This package contains various utilities which make it easier to test plugins.
"""
import os
from collections import OrderedDict
from typing import Tuple
from buildstream.exceptions import ErrorDomain, LoadErrorReason
from ._yaml import generate_project, generate_element, load_yaml
from .repo import Repo
from .runcli import cli, cli_integration, cli_remote_execution, Cli
from .integration import integration_cache
from ._cachekeys import check_cache_key_stability
__all__ = [
"check_cache_key_stability",
"create_repo",
"register_repo_kind",
"sourcetests_collection_hook",
]
# To make use of these test utilities it is necessary to have pytest
# available. However, we don't want to have a hard dependency on
# pytest.
try:
import pytest
except ImportError:
module_name = globals()["__name__"]
msg = "Could not import pytest:\n" "To use the {} module, you must have pytest installed.".format(module_name)
raise ImportError(msg)
# Of the form plugin_name -> (repo_class, plugin_package)
ALL_REPO_KINDS = OrderedDict() # type: OrderedDict[str, Tuple[Repo, str]]
def create_repo(kind, directory, subdir="repo"):
"""Convenience method for creating a Repo
Args:
kind (str): The kind of repo to create (a source plugin basename). This
must have previously been registered using
`register_repo_kind`
directory (str): The path where the repo will keep a cache
Returns:
(Repo): A new Repo object
"""
try:
constructor = ALL_REPO_KINDS[kind]
except KeyError as e:
raise AssertionError("Unsupported repo kind {}".format(kind)) from e
return constructor[0](directory, subdir=subdir)
def METHOD_NAME(kind, cls, plugin_package):
"""Register a new repo kind.
Registering a repo kind will allow the use of the `create_repo`
method for that kind and include that repo kind in ALL_REPO_KINDS
In addition, repo_kinds registered prior to
`sourcetests_collection_hook` being called will be automatically
used to test the basic behaviour of their associated source
plugins using the tests in `testing._sourcetests`.
Args:
kind (str): The kind of repo to create (a source plugin basename)
cls (cls) : A class derived from Repo.
plugin_package (str): The name of the python package containing the plugin
"""
ALL_REPO_KINDS[kind] = (cls, plugin_package)
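# A minimal usage sketch for the two helpers above (the "git" kind and GitRepo
# class are hypothetical placeholders, not part of this module):
#
#   from myplugin.testutils import GitRepo
#   register_repo_kind("git", GitRepo, "myplugin")
#   repo = create_repo("git", "/tmp/scratch")   # looked up via ALL_REPO_KINDS
#
# Registration must happen before `sourcetests_collection_hook` runs if the
# templated source tests should pick the kind up automatically.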
def sourcetests_collection_hook(session):
"""Used to hook the templated source plugin tests into a pyest test suite.
This should be called via the `pytest_sessionstart
hook <https://docs.pytest.org/en/latest/reference.html#collection-hooks>`_.
The tests in the _sourcetests package will be collected as part of
whichever test package this hook is called from.
Args:
session (pytest.Session): The current pytest session
"""
def should_collect_tests(config):
args = config.args
rootdir = config.rootdir
# When no args are supplied, pytest defaults the arg list to
# just include the session's root_dir. We want to collect
# tests as part of the default collection
if args == [str(rootdir)]:
return True
# If specific tests are passed, don't collect
# everything. Pytest will handle this correctly without
# modification.
if len(args) > 1 or rootdir not in args:
return False
# If in doubt, collect them; this will be an easier bug to
# spot and is less likely to result in a bug going unnoticed.
return True
from . import _sourcetests
source_test_path = os.path.dirname(_sourcetests.__file__)
# Add the location of the source tests to the session's
# python_files config. Without this, pytest may filter out these
# tests during collection.
session.config.addinivalue_line("python_files", os.path.join(source_test_path, "*.py"))
# If test invocation has specified specific tests, don't
# automatically collect templated tests.
if should_collect_tests(session.config):
session.config.args.append(source_test_path)
|
2,238 |
normalize sample format
|
from datetime import datetime
from typing import Any, Mapping, Optional
from uuid import UUID
from snuba import environment
from snuba.consumers.types import KafkaMessageMetadata
from snuba.datasets.events_format import EventTooOld, enforce_retention
from snuba.datasets.processors import DatasetMessageProcessor
from snuba.processor import InsertBatch, ProcessedMessage
from snuba.utils.metrics.wrapper import MetricsWrapper
metrics = MetricsWrapper(environment.metrics, "profiles.processor")
class ProfilesMessageProcessor(DatasetMessageProcessor):
def process_message(
self, message: Mapping[str, Any], metadata: KafkaMessageMetadata
) -> Optional[ProcessedMessage]:
try:
received = datetime.utcfromtimestamp(message["received"])
retention_days = enforce_retention(message["retention_days"], received)
if "version" in message:
processed = METHOD_NAME(
message, metadata, retention_days, received
)
else:
processed = _normalize_legacy_format(
message, metadata, retention_days, received
)
except EventTooOld:
metrics.increment("event_too_old")
return None
except IndexError:
metrics.increment("invalid_transaction")
return None
except ValueError:
metrics.increment("invalid_uuid")
return None
except KeyError:
metrics.increment("missing_field")
return None
return InsertBatch([processed], received)
def _normalize_legacy_format(
message: Mapping[str, Any],
metadata: KafkaMessageMetadata,
retention_days: int,
received: datetime,
) -> Mapping[str, Any]:
return {
"android_api_level": message.get("android_api_level"),
"architecture": message.get("architecture", "unknown"),
"device_classification": message.get("device_classification", ""),
"device_locale": message["device_locale"],
"device_manufacturer": message["device_manufacturer"],
"device_model": message["device_model"],
"device_os_build_number": message.get("device_os_build_number"),
"device_os_name": message["device_os_name"],
"device_os_version": message["device_os_version"],
"duration_ns": message["duration_ns"],
"environment": message.get("environment"),
"offset": metadata.offset,
"organization_id": message["organization_id"],
"partition": metadata.partition,
"platform": message["platform"],
"profile_id": str(UUID(message["profile_id"])),
"project_id": message["project_id"],
"received": received,
"retention_days": retention_days,
"trace_id": str(UUID(message["trace_id"])),
"transaction_id": str(UUID(message["transaction_id"])),
"transaction_name": message["transaction_name"],
"version_code": message["version_code"],
"version_name": message["version_name"],
}
def METHOD_NAME(
message: Mapping[str, Any],
metadata: KafkaMessageMetadata,
retention_days: int,
received: datetime,
) -> Mapping[str, Any]:
transaction = message["transactions"][0]
device = message["device"]
os = message["os"]
return {
"android_api_level": message.get("android_api_level"),
"architecture": device.get("architecture", "unknown"),
"device_classification": device.get("classification", ""),
"device_locale": device.get("locale", ""),
"device_manufacturer": device.get("manufacturer", ""),
"device_model": device.get("model", ""),
"device_os_build_number": os.get("build_number"),
"device_os_name": os.get("name", ""),
"device_os_version": os.get("version", ""),
"duration_ns": int(
transaction["relative_end_ns"] - transaction["relative_start_ns"]
),
"environment": message.get("environment"),
"offset": metadata.offset,
"organization_id": message["organization_id"],
"partition": metadata.partition,
"platform": message["platform"],
"profile_id": str(UUID(message["event_id"])),
"project_id": message["project_id"],
"received": received,
"retention_days": retention_days,
"trace_id": str(UUID(transaction["trace_id"])),
"transaction_id": str(UUID(transaction["id"])),
"transaction_name": transaction["name"],
"version_code": message.get("version_code", ""),
"version_name": message["release"],
}
|
2,239 |
mutate params
|
"""Quantile Random Forest Regression model from skgarden"""
import datatable as dt
import numpy as np
from h2oaicore.models import CustomModel
from h2oaicore.systemutils import physical_cores_count
class RandomForestQuantileModel(CustomModel):
_regression = True
_binary = False
_multiclass = False
_alpha = 0.8 # PLEASE CONFIGURE
_display_name = "QuantileRandomForest alpha=%g" % _alpha
_description = "Quantile Random Forest Regression"
_testing_can_skip_failure = False # ensure tested as if shouldn't fail
_modules_needed_by_name = ['scikit-garden==0.1.3']
# pre-built:
# _modules_needed_by_name = ['https://s3.amazonaws.com/artifacts.h2o.ai/deps/dai/recipes/scikit_garden-0.1.3-cp38-cp38-linux_x86_64.whl']
@staticmethod
def do_acceptance_test():
return False
@staticmethod
def is_enabled():
return False # scikit-garden is from 2017 and no longer compatible with new sklearn despite attempts to make it work
def set_default_params(
self,
accuracy=None,
time_tolerance=None,
interpretability=None,
**kwargs
):
# fill up parameters we care about
self.params = dict(
random_state=kwargs.get("random_state", 1234),
n_estimators=min(kwargs.get("n_estimators", 100), 2000),
criterion="mse",
max_depth=10,
min_samples_leaf=10,
n_jobs=self.params_base.get("n_jobs", max(1, physical_cores_count)),
)
def METHOD_NAME(
self,
accuracy=10,
**kwargs
):
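# Rough summary of the branches below: higher `accuracy` widens the random
# search space (more estimators, deeper trees, smaller leaves); a value from
# each list is then sampled at random for this tuning iteration.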
if accuracy > 8:
estimators_list = [300, 500, 1000, 2000, ]
depth_list = [10, 20, 30, 50, 100, ]
samples_leaf_list = [10, 20, 30, ]
elif accuracy >= 5:
estimators_list = [50, 100, 200, 300, ]
depth_list = [5, 10, 15, 25, 50, ]
samples_leaf_list = [20, 40, 60, ]
else:
estimators_list = [10, 20, 40, 60, ]
depth_list = [1, 2, 3, 5, 10, ]
samples_leaf_list = [30, 60, 90, ]
criterion_list = ["mse", "mae", ]
# modify certain parameters for tuning
self.params["n_estimators"] = int(np.random.choice(estimators_list))
self.params["criterion"] = np.random.choice(criterion_list)
self.params["max_depth"] = int(np.random.choice(depth_list))
self.params["min_samples_leaf"] = int(np.random.choice(samples_leaf_list))
def fit(
self,
X,
y,
sample_weight=None,
eval_set=None,
sample_weight_eval_set=None,
**kwargs
):
X = dt.Frame(X)
orig_cols = list(X.names)
self.pre_get_model()
from skgarden import RandomForestQuantileRegressor
model = RandomForestQuantileRegressor(**self.params)
X = self.basic_impute(X)
X = X.to_numpy()
model.fit(X, y)
importances = np.array(model.feature_importances_)
self.set_model_properties(
model=model,
features=orig_cols,
importances=importances.tolist(),
iterations=self.params["n_estimators"],
)
def basic_impute(
self,
X
):
# scikit extra trees internally converts to np.float32 during all operations,
# so if the datatable is float64, cast first in case values become NaN in float32
from h2oaicore.systemutils import update_precision
X = update_precision(X, data_type=np.float32)
# replace missing values with a value smaller than all observed values
if not hasattr(self, "min"):
self.min = dict()
for col in X.names:
XX = X[:, col]
if col not in self.min:
self.min[col] = XX.min1()
if (
self.min[col] is None
or np.isnan(self.min[col])
or np.isinf(self.min[col])
):
self.min[col] = -1e10
else:
self.min[col] -= 1
XX.replace([None, np.inf, -np.inf], self.min[col])
X[:, col] = XX
assert X[dt.isna(dt.f[col]), col].nrows == 0
return X
def predict(
self,
X,
**kwargs
):
X = dt.Frame(X)
X = self.basic_impute(X)
X = X.to_numpy()
model, _, _, _ = self.get_model_properties()
preds = model.predict(X, quantile=RandomForestQuantileModel._alpha)
return preds
def pre_get_model(self, X_shape=(1, 1), **kwargs):
# work-around for old code that only works with scikit-learn <= 0.22 and does "from sklearn.externals import six"
import six
import sys
sys.modules['sklearn.externals.six'] = six
from sklearn import ensemble
sys.modules['sklearn.ensemble.forest'] = ensemble._forest
from sklearn import tree
sys.modules['sklearn.tree.tree'] = tree._tree
|
2,240 |
signextend
|
"""
Ethereum Virtual Machine (EVM) Arithmetic Instructions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. contents:: Table of Contents
:backlinks: none
:local:
Introduction
------------
Implementations of the EVM Arithmetic instructions.
"""
from ethereum.base_types import U255_CEIL_VALUE, U256, U256_CEIL_VALUE, Uint
from ethereum.utils.numeric import get_sign
from .. import Evm
from ..gas import (
GAS_EXPONENTIATION,
GAS_EXPONENTIATION_PER_BYTE,
GAS_LOW,
GAS_MID,
GAS_VERY_LOW,
charge_gas,
)
from ..stack import pop, push
def add(evm: Evm) -> None:
"""
Adds the top two elements of the stack together, and pushes the result back
on the stack.
Parameters
----------
evm :
The current EVM frame.
"""
# STACK
x = pop(evm.stack)
y = pop(evm.stack)
# GAS
charge_gas(evm, GAS_VERY_LOW)
# OPERATION
result = x.wrapping_add(y)
push(evm.stack, result)
# PROGRAM COUNTER
evm.pc += 1
def sub(evm: Evm) -> None:
"""
Subtracts the top two elements of the stack, and pushes the result back
on the stack.
Parameters
----------
evm :
The current EVM frame.
"""
# STACK
x = pop(evm.stack)
y = pop(evm.stack)
# GAS
charge_gas(evm, GAS_VERY_LOW)
# OPERATION
result = x.wrapping_sub(y)
push(evm.stack, result)
# PROGRAM COUNTER
evm.pc += 1
def mul(evm: Evm) -> None:
"""
Multiply the top two elements of the stack, and pushes the result back
on the stack.
Parameters
----------
evm :
The current EVM frame.
"""
# STACK
x = pop(evm.stack)
y = pop(evm.stack)
# GAS
charge_gas(evm, GAS_LOW)
# OPERATION
result = x.wrapping_mul(y)
push(evm.stack, result)
# PROGRAM COUNTER
evm.pc += 1
def div(evm: Evm) -> None:
"""
Integer division of the top two elements of the stack. Pushes the result
back on the stack.
Parameters
----------
evm :
The current EVM frame.
"""
# STACK
dividend = pop(evm.stack)
divisor = pop(evm.stack)
# GAS
charge_gas(evm, GAS_LOW)
# OPERATION
if divisor == 0:
quotient = U256(0)
else:
quotient = dividend // divisor
push(evm.stack, quotient)
# PROGRAM COUNTER
evm.pc += 1
def sdiv(evm: Evm) -> None:
"""
Signed integer division of the top two elements of the stack. Pushes the
result back on the stack.
Parameters
----------
evm :
The current EVM frame.
"""
# STACK
dividend = pop(evm.stack).to_signed()
divisor = pop(evm.stack).to_signed()
# GAS
charge_gas(evm, GAS_LOW)
# OPERATION
if divisor == 0:
quotient = 0
elif dividend == -U255_CEIL_VALUE and divisor == -1:
quotient = -U255_CEIL_VALUE
else:
sign = get_sign(dividend * divisor)
quotient = sign * (abs(dividend) // abs(divisor))
push(evm.stack, U256.from_signed(quotient))
# PROGRAM COUNTER
evm.pc += 1
def mod(evm: Evm) -> None:
"""
Modulo remainder of the top two elements of the stack. Pushes the result
back on the stack.
Parameters
----------
evm :
The current EVM frame.
"""
# STACK
x = pop(evm.stack)
y = pop(evm.stack)
# GAS
charge_gas(evm, GAS_LOW)
# OPERATION
if y == 0:
remainder = U256(0)
else:
remainder = x % y
push(evm.stack, remainder)
# PROGRAM COUNTER
evm.pc += 1
def smod(evm: Evm) -> None:
"""
Signed modulo remainder of the top two elements of the stack. Pushes the
result back on the stack.
Parameters
----------
evm :
The current EVM frame.
"""
# STACK
x = pop(evm.stack).to_signed()
y = pop(evm.stack).to_signed()
# GAS
charge_gas(evm, GAS_LOW)
# OPERATION
if y == 0:
remainder = 0
else:
remainder = get_sign(x) * (abs(x) % abs(y))
push(evm.stack, U256.from_signed(remainder))
# PROGRAM COUNTER
evm.pc += 1
def addmod(evm: Evm) -> None:
"""
Modulo addition of the top 2 elements with the 3rd element. Pushes the
result back on the stack.
Parameters
----------
evm :
The current EVM frame.
"""
# STACK
x = Uint(pop(evm.stack))
y = Uint(pop(evm.stack))
z = Uint(pop(evm.stack))
# GAS
charge_gas(evm, GAS_MID)
# OPERATION
if z == 0:
result = U256(0)
else:
result = U256((x + y) % z)
push(evm.stack, result)
# PROGRAM COUNTER
evm.pc += 1
def mulmod(evm: Evm) -> None:
"""
Modulo multiplication of the top 2 elements with the 3rd element. Pushes
the result back on the stack.
Parameters
----------
evm :
The current EVM frame.
"""
# STACK
x = Uint(pop(evm.stack))
y = Uint(pop(evm.stack))
z = Uint(pop(evm.stack))
# GAS
charge_gas(evm, GAS_MID)
# OPERATION
if z == 0:
result = U256(0)
else:
result = U256((x * y) % z)
push(evm.stack, result)
# PROGRAM COUNTER
evm.pc += 1
def exp(evm: Evm) -> None:
"""
Exponential operation of the top 2 elements. Pushes the result back on
the stack.
Parameters
----------
evm :
The current EVM frame.
"""
# STACK
base = Uint(pop(evm.stack))
exponent = Uint(pop(evm.stack))
# GAS
# This is equivalent to 1 + floor(log(exponent, 256)). But in Python the log
# function is inaccurate, leading to wrong results.
exponent_bits = exponent.bit_length()
exponent_bytes = (exponent_bits + 7) // 8
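# Worked example (illustrative): exponent = 0 -> 0 bits -> 0 bytes, so only
# GAS_EXPONENTIATION is charged; exponent = 256 (bit_length 9) -> 2 bytes, so
# 2 * GAS_EXPONENTIATION_PER_BYTE is added on top.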
charge_gas(
evm, GAS_EXPONENTIATION + GAS_EXPONENTIATION_PER_BYTE * exponent_bytes
)
# OPERATION
result = U256(pow(base, exponent, U256_CEIL_VALUE))
push(evm.stack, result)
# PROGRAM COUNTER
evm.pc += 1
def METHOD_NAME(evm: Evm) -> None:
"""
Sign extend operation. In other words, extend a signed number which
fits in N bytes to 32 bytes.
Parameters
----------
evm :
The current EVM frame.
"""
# STACK
byte_num = pop(evm.stack)
value = pop(evm.stack)
# GAS
charge_gas(evm, GAS_LOW)
# OPERATION
if byte_num > 31:
# Can't extend any further
result = value
else:
# U256(0).to_be_bytes() gives b'' instead of b'\x00'.
value_bytes = bytes(value.to_be_bytes32())
# Now among the obtained value bytes, consider only
# N `least significant bytes`, where N is `byte_num + 1`.
value_bytes = value_bytes[31 - int(byte_num) :]
sign_bit = value_bytes[0] >> 7
if sign_bit == 0:
result = U256.from_be_bytes(value_bytes)
else:
num_bytes_prepend = 32 - (byte_num + 1)
result = U256.from_be_bytes(
bytearray([0xFF] * num_bytes_prepend) + value_bytes
)
push(evm.stack, result)
# PROGRAM COUNTER
evm.pc += 1
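# Worked example for the operation above (values are illustrative): with
# byte_num = 0 and value = 0xFF, the kept least significant byte has its sign
# bit set, so the result is 0xFF...FF (the U256 encoding of -1); with
# byte_num = 0 and value = 0x7F the sign bit is clear and the result stays 0x7F.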
|
2,241 |
download
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import click
import os
import sys
from flow_client.flow_cli.utils import cli_args
from flow_client.flow_cli.utils.cli_utils import preprocess, access_server, check_abs_path, prettify
from requests_toolbelt import MultipartEncoder, MultipartEncoderMonitor
import json
@click.group(short_help="Data Operations")
@click.pass_context
def data(ctx):
"""
\b
Provides a number of data operational commands, including upload, download, etc.
For more details, please check out the help text.
"""
pass
@data.command("upload", short_help="Upload Table Command")
@cli_args.CONF_PATH
@click.option('--verbose', is_flag=True, default=False,
help="If specified, verbose mode will be turn on. "
"Users can have feedback on upload task in progress. (default: False)")
@click.option('--drop', is_flag=True, default=False,
help="If specified, data of old version would be replaced by the current version. "
"Otherwise, current upload task would be rejected. (default: False)")
@click.pass_context
def upload(ctx, **kwargs):
"""
\b
- DESCRIPTION:
Upload Data Table.
\b
- Usage:
flow data upload -c fateflow/examples/upload/upload_guest.json
flow data upload -c fateflow/examples/upload/upload_host.json --verbose --drop
"""
kwargs['drop'] = 1 if kwargs['drop'] else 0
kwargs['verbose'] = int(kwargs['verbose'])
config_data, dsl_data = preprocess(**kwargs)
if config_data.get('use_local_data', 1):
file_name = check_abs_path(config_data.get('file'))
if os.path.exists(file_name):
with open(file_name, 'rb') as fp:
data = MultipartEncoder(
fields={'file': (os.path.basename(file_name), fp, 'application/octet-stream')}
)
tag = [0]
def read_callback(monitor):
if config_data.get('verbose') == 1:
sys.stdout.write("\r UPLOADING:{0}{1}".format(
"|" * (monitor.bytes_read * 100 // monitor.len), '%.2f%%' % (monitor.bytes_read * 100 // monitor.len)))
sys.stdout.flush()
if monitor.bytes_read / monitor.len == 1:
tag[0] += 1
if tag[0] == 2:
sys.stdout.write('\n')
data = MultipartEncoderMonitor(data, read_callback)
access_server('post', ctx, 'data/upload', json_data=None, data=data,
params=json.dumps(config_data), headers={'Content-Type': data.content_type})
else:
prettify(
{
"retcode": 100,
"retmsg": "The file is obtained from the fate flow client machine, but it does not exist, "
"please check the path: {}".format(file_name)
}
)
else:
access_server('post', ctx, 'data/upload', config_data)
@data.command("download", short_help="Download Table Command")
@cli_args.CONF_PATH
@click.pass_context
def METHOD_NAME(ctx, **kwargs):
"""
\b
- DESCRIPTION:
Download Data Table.
\b
- Usage:
flow data download -c fateflow/examples/download/download_table.json
"""
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, "data/download", config_data)
@data.command("writer", short_help="write Table Command")
@cli_args.CONF_PATH
@click.pass_context
def writer(ctx, **kwargs):
"""
\b
- DESCRIPTION:
Write Data Table.
\b
- Usage:
flow data writer -c fateflow/examples/writer/external_storage.json
"""
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, "data/writer", config_data)
@data.command("upload-history", short_help="Upload History Command")
@cli_args.LIMIT
@cli_args.JOBID
@click.pass_context
def upload_history(ctx, **kwargs):
"""
\b
- DESCRIPTION:
Query Upload Table History.
\b
- USAGE:
flow data upload-history -l 20
flow data upload-history --job-id $JOB_ID
"""
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, "data/upload/history", config_data)
# @data.command(short_help="")
@click.pass_context
def download_history(ctx):
"""
"""
pass
|
2,242 |
target overloaded calls target overloaded
|
from numba import cuda, njit
from numba.core.extending import overload
from numba.cuda.testing import CUDATestCase, skip_on_cudasim, unittest
import numpy as np
# Dummy function definitions to overload
def generic_func_1():
pass
def cuda_func_1():
pass
def generic_func_2():
pass
def cuda_func_2():
pass
def generic_calls_generic():
pass
def generic_calls_cuda():
pass
def cuda_calls_generic():
pass
def cuda_calls_cuda():
pass
def target_overloaded():
pass
def generic_calls_target_overloaded():
pass
def cuda_calls_target_overloaded():
pass
def METHOD_NAME():
pass
# To recognise which functions are resolved for a call, we identify each with a
# prime number. Each function called multiplies a value by its prime (starting
# with the value 1), and we can check that the result is as expected based on
# the final value after all multiplications.
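# For example, a kernel that calls generic_calls_generic (11), which in turn
# calls generic_func_1 (2), should leave x[0] == 1 * 11 * 2 == 22.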
GENERIC_FUNCTION_1 = 2
CUDA_FUNCTION_1 = 3
GENERIC_FUNCTION_2 = 5
CUDA_FUNCTION_2 = 7
GENERIC_CALLS_GENERIC = 11
GENERIC_CALLS_CUDA = 13
CUDA_CALLS_GENERIC = 17
CUDA_CALLS_CUDA = 19
GENERIC_TARGET_OL = 23
CUDA_TARGET_OL = 29
GENERIC_CALLS_TARGET_OL = 31
CUDA_CALLS_TARGET_OL = 37
GENERIC_TARGET_OL_CALLS_TARGET_OL = 41
CUDA_TARGET_OL_CALLS_TARGET_OL = 43
# Overload implementations
@overload(generic_func_1, target='generic')
def ol_generic_func_1(x):
def impl(x):
x[0] *= GENERIC_FUNCTION_1
return impl
@overload(cuda_func_1, target='cuda')
def ol_cuda_func_1(x):
def impl(x):
x[0] *= CUDA_FUNCTION_1
return impl
@overload(generic_func_2, target='generic')
def ol_generic_func_2(x):
def impl(x):
x[0] *= GENERIC_FUNCTION_2
return impl
@overload(cuda_func_2, target='cuda')
def ol_cuda_func(x):
def impl(x):
x[0] *= CUDA_FUNCTION_2
return impl
@overload(generic_calls_generic, target='generic')
def ol_generic_calls_generic(x):
def impl(x):
x[0] *= GENERIC_CALLS_GENERIC
generic_func_1(x)
return impl
@overload(generic_calls_cuda, target='generic')
def ol_generic_calls_cuda(x):
def impl(x):
x[0] *= GENERIC_CALLS_CUDA
cuda_func_1(x)
return impl
@overload(cuda_calls_generic, target='cuda')
def ol_cuda_calls_generic(x):
def impl(x):
x[0] *= CUDA_CALLS_GENERIC
generic_func_1(x)
return impl
@overload(cuda_calls_cuda, target='cuda')
def ol_cuda_calls_cuda(x):
def impl(x):
x[0] *= CUDA_CALLS_CUDA
cuda_func_1(x)
return impl
@overload(target_overloaded, target='generic')
def ol_target_overloaded_generic(x):
def impl(x):
x[0] *= GENERIC_TARGET_OL
return impl
@overload(target_overloaded, target='cuda')
def ol_target_overloaded_cuda(x):
def impl(x):
x[0] *= CUDA_TARGET_OL
return impl
@overload(generic_calls_target_overloaded, target='generic')
def ol_generic_calls_target_overloaded(x):
def impl(x):
x[0] *= GENERIC_CALLS_TARGET_OL
target_overloaded(x)
return impl
@overload(cuda_calls_target_overloaded, target='cuda')
def ol_cuda_calls_target_overloaded(x):
def impl(x):
x[0] *= CUDA_CALLS_TARGET_OL
target_overloaded(x)
return impl
@overload(METHOD_NAME, target='generic')
def ol_generic_calls_target_overloaded_generic(x):
def impl(x):
x[0] *= GENERIC_TARGET_OL_CALLS_TARGET_OL
target_overloaded(x)
return impl
@overload(METHOD_NAME, target='cuda')
def ol_generic_calls_target_overloaded_cuda(x):
def impl(x):
x[0] *= CUDA_TARGET_OL_CALLS_TARGET_OL
target_overloaded(x)
return impl
@skip_on_cudasim('Overloading not supported in cudasim')
class TestOverload(CUDATestCase):
def check_overload(self, kernel, expected):
x = np.ones(1, dtype=np.int32)
cuda.jit(kernel)[1, 1](x)
self.assertEqual(x[0], expected)
def check_overload_cpu(self, kernel, expected):
x = np.ones(1, dtype=np.int32)
njit(kernel)(x)
self.assertEqual(x[0], expected)
def test_generic(self):
def kernel(x):
generic_func_1(x)
expected = GENERIC_FUNCTION_1
self.check_overload(kernel, expected)
def test_cuda(self):
def kernel(x):
cuda_func_1(x)
expected = CUDA_FUNCTION_1
self.check_overload(kernel, expected)
def test_generic_and_cuda(self):
def kernel(x):
generic_func_1(x)
cuda_func_1(x)
expected = GENERIC_FUNCTION_1 * CUDA_FUNCTION_1
self.check_overload(kernel, expected)
def test_call_two_generic_calls(self):
def kernel(x):
generic_func_1(x)
generic_func_2(x)
expected = GENERIC_FUNCTION_1 * GENERIC_FUNCTION_2
self.check_overload(kernel, expected)
def test_call_two_cuda_calls(self):
def kernel(x):
cuda_func_1(x)
cuda_func_2(x)
expected = CUDA_FUNCTION_1 * CUDA_FUNCTION_2
self.check_overload(kernel, expected)
def test_generic_calls_generic(self):
def kernel(x):
generic_calls_generic(x)
expected = GENERIC_CALLS_GENERIC * GENERIC_FUNCTION_1
self.check_overload(kernel, expected)
def test_generic_calls_cuda(self):
def kernel(x):
generic_calls_cuda(x)
expected = GENERIC_CALLS_CUDA * CUDA_FUNCTION_1
self.check_overload(kernel, expected)
def test_cuda_calls_generic(self):
def kernel(x):
cuda_calls_generic(x)
expected = CUDA_CALLS_GENERIC * GENERIC_FUNCTION_1
self.check_overload(kernel, expected)
def test_cuda_calls_cuda(self):
def kernel(x):
cuda_calls_cuda(x)
expected = CUDA_CALLS_CUDA * CUDA_FUNCTION_1
self.check_overload(kernel, expected)
def test_call_target_overloaded(self):
def kernel(x):
target_overloaded(x)
expected = CUDA_TARGET_OL
self.check_overload(kernel, expected)
def test_generic_calls_target_overloaded(self):
def kernel(x):
generic_calls_target_overloaded(x)
expected = GENERIC_CALLS_TARGET_OL * CUDA_TARGET_OL
self.check_overload(kernel, expected)
def test_cuda_calls_target_overloaded(self):
def kernel(x):
cuda_calls_target_overloaded(x)
expected = CUDA_CALLS_TARGET_OL * CUDA_TARGET_OL
self.check_overload(kernel, expected)
def test_target_overloaded_calls_target_overloaded(self):
def kernel(x):
METHOD_NAME(x)
# Check the CUDA overloads are used on CUDA
expected = CUDA_TARGET_OL_CALLS_TARGET_OL * CUDA_TARGET_OL
self.check_overload(kernel, expected)
# Also check that the CPU overloads are used on the CPU
expected = GENERIC_TARGET_OL_CALLS_TARGET_OL * GENERIC_TARGET_OL
self.check_overload_cpu(kernel, expected)
if __name__ == '__main__':
unittest.main()
|
2,243 |
load events
|
import click.testing
import numpy as np
import os
import tempfile
import unittest
from caffe2.python import brew, core, model_helper
import caffe2.contrib.tensorboard.tensorboard as tb
import caffe2.contrib.tensorboard.tensorboard_exporter as tb_exporter
try:
# tensorboard>=1.14.0
from tensorboard.compat.proto.graph_pb2 import GraphDef
except ImportError:
from tensorflow import GraphDef
def METHOD_NAME(filename):
try:
# tensorboard>=1.14.0
from tensorboard.backend.event_processing import event_file_loader
loader = event_file_loader.EventFileLoader(filename)
return list(loader.Load())
except ImportError:
import tensorflow as tf
return list(tf.train.summary_iterator(filename))
class TensorboardTest(unittest.TestCase):
def test_events(self):
runner = click.testing.CliRunner()
c2_dir = tempfile.mkdtemp()
np.random.seed(1701)
n_iters = 2
blobs = ["w", "b"]
data = np.random.randn(len(blobs), n_iters, 10)
for i, blob in enumerate(blobs):
with open(os.path.join(c2_dir, blob), "w") as f:
for row in data[i]:
stats = [row.min(), row.max(), row.mean(), row.std()]
f.write(" ".join(str(s) for s in stats) + "\n")
# Test error handling path
with open(os.path.join(c2_dir, "not-a-summary"), "w") as f:
f.write("not-a-summary")
tf_dir = tempfile.mkdtemp()
result = runner.invoke(
tb.cli,
["tensorboard-events", "--c2-dir", c2_dir, "--tf-dir", tf_dir])
self.assertEqual(result.exit_code, 0)
entries = list(os.walk(tf_dir))
self.assertEqual(len(entries), 1)
((d, _, (fname,)),) = entries
self.assertEqual(tf_dir, d)
events = METHOD_NAME(os.path.join(tf_dir, fname))
self.assertEqual(len(events), n_iters + 1)
events = events[1:]
self.maxDiff = None
self.assertEqual(len(events), 2)
def test_tensorboard_graphs(self):
model = model_helper.ModelHelper(name="overfeat")
data, label = brew.image_input(
model, ["db"], ["data", "label"], is_test=0
)
with core.NameScope("conv1"):
conv1 = brew.conv(model, data, "conv1", 3, 96, 11, stride=4)
relu1 = brew.relu(model, conv1, conv1)
pool1 = brew.max_pool(model, relu1, "pool1", kernel=2, stride=2)
with core.NameScope("classifier"):
fc = brew.fc(model, pool1, "fc", 4096, 1000)
pred = brew.softmax(model, fc, "pred")
xent = model.LabelCrossEntropy([pred, label], "xent")
loss = model.AveragedLoss(xent, "loss")
model.AddGradientOperators([loss], skip=1)
c2_dir = tempfile.mkdtemp()
tf_dir = tempfile.mkdtemp()
with open(os.path.join(c2_dir, "init"), "w") as f:
f.write(str(model.param_init_net.Proto()))
with open(os.path.join(c2_dir, "net"), "w") as f:
f.write(str(model.net.Proto()))
runner = click.testing.CliRunner()
result = runner.invoke(
tb.cli,
["tensorboard-graphs",
"--c2-netdef", os.path.join(c2_dir, "init"),
"--c2-netdef", os.path.join(c2_dir, "net"),
"--tf-dir", tf_dir])
self.assertEqual(result.exit_code, 0)
entries = list(os.walk(tf_dir))
self.assertEqual(len(entries), 1)
((d, _, (fname,)),) = entries
self.assertEqual(tf_dir, d)
events = METHOD_NAME(os.path.join(tf_dir, fname))
self.assertEqual(len(events), 3)
events = events[1:]
nets = [model.param_init_net, model.net]
for i, (event, net) in enumerate(zip(events, nets), start=1):
self.assertEqual(event.step, i)
self.assertEqual(event.wall_time, i)
g = GraphDef()
g.ParseFromString(event.graph_def)
self.assertMultiLineEqual(
str(g),
str(tb_exporter.nets_to_graph_def([net])))
if __name__ == "__main__":
unittest.main()
|
2,244 |
bacenpix get form action url
|
# Copyright 2023 KMEE
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
import json
import logging
import requests
from werkzeug.urls import url_join
from odoo import fields, models
_logger = logging.getLogger(__name__)
BACENPIX_PROVIDER = "bacenpix"
SANDBOX_GET_TOKEN_URL = "https://oauth.sandbox.bb.com.br/"
PROD_GET_TOKEN_URL = "https://oauth.bb.com.br/"
BACENPIX_GET_TOKEN = {"enabled": PROD_GET_TOKEN_URL, "test": SANDBOX_GET_TOKEN_URL}
SANDBOX_URL = "https://api.sandbox.bb.com.br/"
PROD_URL = "https://api-pix.bb.com.br/"
AUTH_ENDPOINT = "oauth/token"
PIX_ENDPOINT_V1 = "pix/v1/cob/"
TRANSACTION_STATUS_V1 = "v1/transactions/?id={}"
BACENPIX = {
"enabled": PROD_URL,
"test": SANDBOX_URL,
}
class PaymentAcquirer(models.Model):
_inherit = "payment.acquirer"
provider = fields.Selection(
selection_add=[(BACENPIX_PROVIDER, "Bacen (pix)")],
ondelete={BACENPIX_PROVIDER: "set default"},
)
bacenpix_email_account = fields.Char("Email", groups="base.group_user")
bacenpix_client_id = fields.Char("Client ID", groups="base.group_user")
bacenpix_client_secret = fields.Char("Client Secret", groups="base.group_user")
bacenpix_api_key = fields.Char(string="API KEY", groups="base.group_user")
bacenpix_dev_app_key = fields.Char(string="Dev APP KEY", groups="base.group_user")
bacen_pix_basic = fields.Char(string="Basic", groups="base.group_user")
bacen_pix_key = fields.Char(string="PIX Key", groups="base.group_user")
bacen_pix_expiration = fields.Integer(
string="Bacen PIX Expiration", default=3600, groups="base.group_user"
)
def bacenpix_compute_fees(self, amount, currency_id, country_id):
"""Compute fees
:param float amount: the amount to pay
:param integer country_id: an ID of a res.country, or None. This is
the customer's country, to be compared to
the acquirer company country.
:return float fees: computed fees
"""
fees = 0.0
if self.fees_active:
country = self.env["res.country"].browse(country_id)
if country and self.company_id.sudo().country_id.id == country.id:
percentage = self.fees_dom_var
fixed = self.fees_dom_fixed
else:
percentage = self.fees_int_var
fixed = self.fees_int_fixed
fees = (percentage / 100.0 * amount + fixed) / (1 - percentage / 100.0)
return fees
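# Worked example with made-up numbers: for a domestic payment of 100.0 with
# fees_dom_var = 2.9 and fees_dom_fixed = 0.30, the formula above gives
# (2.9 + 0.30) / (1 - 0.029) ~= 3.30 in fees.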
def bacen_pix_get_token(self):
querystring = {
"client_id": self.bacenpix_client_id,
"client_secret": self.bacenpix_client_secret,
}
payload = {
"grant_type": "client_credentials",
"scope": "cob.write cob.read pix.read pix.write",
}
headers = {
"Content-Type": "application/json",
"Authorization": self.bacen_pix_basic,
}
response = requests.request(
"POST",
url_join(BACENPIX_GET_TOKEN[self.state], AUTH_ENDPOINT),
params=querystring,
headers=headers,
json=payload,
verify=False,
)
if response.status_code != 200 and response.status_code != 201:
self.bacenpix_api_key = "Error"
else:
response_data = json.loads(json.dumps(response.json()))
self.bacenpix_api_key = (
response_data["token_type"] + " " + response_data["access_token"]
)
def _bacenpix_header(self):
self.bacen_pix_get_token()
return {
"Authorization": self.bacenpix_api_key,
"Content-Type": "application/json",
}
def _bacenpix_new_transaction(self, tx_id, payload):
if self.state == "test":
params = {"txid": tx_id, "gw-dev-app-key": self.bacenpix_dev_app_key}
else:
params = {"txid": tx_id}
response = requests.request(
"PUT",
url_join(BACENPIX[self.state], PIX_ENDPOINT_V1),
params=params,
headers=self._bacenpix_header(),
data=payload,
verify=False,
)
return response
def _bacenpix_status_transaction(self, tx_bacen_id):
response = requests.request(
"GET",
url_join(BACENPIX[self.state], TRANSACTION_STATUS_V1.format(tx_bacen_id)),
headers=self._bacenpix_header(),
data={},
verify=False,
)
return response
def METHOD_NAME(self):
# 3. Feedback callback URL
return "/payment/bacenpix/feedback"
def _handle_bacenpix_webhook(self, tx_reference, jsonrequest):
"""Webhook para processamento da transação"""
transaction_id = self.env["payment.transaction"].search(
[
("callback_hash", "=", tx_reference),
("acquirer_id.provider", "=", BACENPIX_PROVIDER),
]
)
if not transaction_id:
return False
return transaction_id._bacenpix_validate_webhook(tx_reference, jsonrequest)
|
2,245 |
test returns list from previous configuration
|
# -----------------------------------------------------------------------------
# Getting Things GNOME! - a personal organizer for the GNOME desktop
# Copyright (c) 2008-2015 - Lionel Dricot & Bertrand Rousseau
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
from unittest import TestCase
import configparser
from mock import patch, mock_open, Mock
from GTG.core.config import open_config_file, SectionConfig
class TestOpenConfigFile(TestCase):
def setUp(self):
self.mock_parser = patch(
'GTG.core.config.configparser.ConfigParser.read').start()
self.mock_os = patch('GTG.core.config.os').start()
self.mock_path = patch('GTG.core.config.os.path').start()
self.mock_open = patch(
'GTG.core.config.open', mock_open(), create=True).start()
def tearDown(self):
patch.stopall()
def test_reads_configuration(self):
open_config_file('gtg.conf')
self.mock_parser.assert_called_once_with('gtg.conf')
@patch('GTG.core.config.log')
def test_falls_back_when_there_is_config_error(self, mock_log):
self.mock_parser.side_effect = configparser.Error()
open_config_file('gtg.conf')
self.mock_parser.assert_called_once_with('gtg.conf')
self.assertTrue(mock_log.warning.called)
def test_creates_config_folder_when_missing(self):
self.mock_path.exists.return_value = False
self.mock_path.dirname.return_value = 'config'
open_config_file('config/gtg.conf')
self.mock_os.makedirs.assert_called_once_with('config')
def test_creates_config_file_when_missing(self):
self.mock_path.exists.side_effect = lambda name: name != 'gtg.conf'
open_config_file('gtg.conf')
self.mock_open.assert_called_with('gtg.conf', 'w')
def test_raises_error_when_config_is_not_accessible(self):
self.mock_os.access.return_value = False
with self.assertRaises(Exception):
open_config_file('gtg.conf')
class TestSectionConfig(TestCase):
def make_section_config(self, config_dict):
""" Creates a section from a dictionary """
config = configparser.ConfigParser()
config.read_dict({'section': config_dict})
return config['section']
@patch('GTG.core.config.log')
def test_warns_when_no_default_value_is_provided(self, mock_log):
config = self.make_section_config({'option': '1'})
section = SectionConfig('Name', config, {}, Mock())
value = section.get('option')
self.assertEqual('1', value)
@patch('GTG.core.config.log')
def test_warns_when_value_is_wrong_type(self, mock_log):
config = self.make_section_config({'option': 'text'})
section = SectionConfig('Name', config, {'option': 42}, Mock())
value = section.get('option')
self.assertTrue(mock_log.warning.called)
# It should fall back to default value as 'text' is not an int
self.assertEqual(42, value)
def test_returns_int_when_expected_int(self):
config = self.make_section_config({'option': '42'})
section = SectionConfig('Name', config, {'option': 42}, Mock())
value = section.get('option')
self.assertEqual(int, type(value))
self.assertEqual(42, value)
def test_returns_bool_when_expected_bool(self):
config = self.make_section_config({'option': 'False'})
section = SectionConfig('Name', config, {'option': False}, Mock())
value = section.get('option')
self.assertEqual(bool, type(value))
self.assertEqual(False, value)
def test_returns_string_when_expected_string(self):
config = self.make_section_config({'option': 'Hello'})
section = SectionConfig('Name', config, {'option': 'World'}, Mock())
value = section.get('option')
self.assertEqual(str, type(value))
self.assertEqual('Hello', value)
def test_returns_empty_list_for_non_existing_value(self):
config = self.make_section_config({})
section = SectionConfig('Name', config, {'option': []}, Mock())
value = section.get('option')
self.assertEqual([], value)
def test_returns_empty_list_for_empty_value(self):
config = self.make_section_config({'option': ''})
section = SectionConfig('Name', config, {'option': []}, Mock())
value = section.get('option')
self.assertEqual([], value)
def METHOD_NAME(self):
# Config from GTG 0.2.4
config = self.make_section_config({
'opened_tasks': '8@1, 6@1, 4@1'})
section = SectionConfig('Name', config, {'opened_tasks': []}, Mock())
value = section.get('opened_tasks')
self.assertEqual(['8@1', '6@1', '4@1'], value)
def test_returns_empty_list_from_previous_empty_configuration(self):
# Config from GTG 0.2.4
config = self.make_section_config({
'opened_tasks': ','})
section = SectionConfig('Name', config, {'opened_tasks': []}, Mock())
value = section.get('opened_tasks')
self.assertEqual([], value)
def test_returns_list_of_tuples(self):
# Splitting only by ',' caused bugs
# - https://bugs.launchpad.net/gtg/+bug/1218093
# - https://bugs.launchpad.net/gtg/+bug/1216807
config = self.make_section_config({
'collapsed_tasks': "('0@1', '6@1'),('0@1', '8@1', '3@1', '5@1')"})
section = SectionConfig(
'Name', config, {'collapsed_tasks': []}, Mock())
value = section.get('collapsed_tasks')
self.assertEqual(
["('0@1', '6@1')", "('0@1', '8@1', '3@1', '5@1')"],
value)
@patch('GTG.core.config.log')
def test_raises_an_error_when_no_value_and_no_default_value(
self, mock_log):
config = self.make_section_config({})
section = SectionConfig('Name', config, {}, Mock())
with self.assertRaises(ValueError):
section.get('option')
def test_can_set_value(self):
config = self.make_section_config({})
save_mock = Mock()
section = SectionConfig('Name', config, {}, save_mock)
section.set('option', 42)
self.assertEqual('42', config['option'])
# Automatically saved value
save_mock.assert_any_call()
def test_can_set_list(self):
config = self.make_section_config({})
save_mock = Mock()
section = SectionConfig('Name', config, {}, save_mock)
section.set('list', [1, True, 'Hello'])
self.assertEqual('1,True,Hello', config['list'])
# Automatically saved value
save_mock.assert_any_call()
def test_can_set_tuple(self):
config = self.make_section_config({})
save_mock = Mock()
section = SectionConfig('Name', config, {}, save_mock)
section.set('list', (1, 2))
self.assertEqual('1,2', config['list'])
# Automatically saved value
save_mock.assert_any_call()
|
2,246 |
write vertices ply
|
import numpy as np
import h5py
from scipy import spatial
from sklearn import decomposition
import plyfile
import os
import networkx as nx
import cloudvolume
from multiwrapper import multiprocessing_utils as mu
def read_mesh_h5():
pass
def write_mesh_h5():
pass
def read_obj(path):
return Mesh(path)
def _download_meshes_thread(args):
""" Downloads meshes into target directory
:param args: list
"""
seg_ids, cv_path, target_dir = args
cv = cloudvolume.CloudVolume(cv_path)
os.chdir(target_dir)
for seg_id in seg_ids:
cv.mesh.save(seg_id)
def download_meshes(seg_ids, target_dir, cv_path, n_threads=1):
""" Downloads meshes in target directory (parallel)
:param seg_ids: list of ints
:param target_dir: str
:param cv_path: str
:param n_threads: int
"""
n_jobs = n_threads * 3
if len(seg_ids) < n_jobs:
n_jobs = len(seg_ids)
seg_id_blocks = np.array_split(seg_ids, n_jobs)
multi_args = []
for seg_id_block in seg_id_blocks:
multi_args.append([seg_id_block, cv_path, target_dir])
if n_jobs == 1:
mu.multiprocess_func(_download_meshes_thread,
multi_args, debug=True,
verbose=True, n_threads=1)
else:
mu.multisubprocess_func(_download_meshes_thread,
multi_args, n_threads=n_threads)
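# Illustrative usage sketch (not part of the original module): the segment IDs,
# CloudVolume path and output directory below are hypothetical placeholders.
def _example_download_usage():
    download_meshes(seg_ids=[123456789, 987654321],
                    target_dir="/tmp/meshes",
                    cv_path="gs://example-bucket/segmentation",
                    n_threads=4)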
def refine_mesh():
pass
class MeshMeta(object):
def __init__(self):
self.filename_dict = {}
def mesh(self, filename):
        if filename not in self.filename_dict:
            try:
                self.filename_dict[filename] = Mesh(filename)
            except Exception:
                self.filename_dict[filename] = None
return self.filename_dict[filename]
class Mesh(object):
def __init__(self, filename):
self._vertices = []
self._normals = []
self._faces = []
self._filename = filename
self._kdtree = None
self._graph = None
self._edges = None
if not os.path.exists(filename):
raise Exception("File does not exist")
if filename.endswith(".obj"):
self.load_obj()
elif filename.endswith(".h5"):
self.load_h5()
else:
raise Exception("Unknown filetype")
@property
def filename(self):
return self._filename
@property
def vertices(self):
return self._vertices
@property
def faces(self):
return self._faces
@property
def normals(self):
return self._normals
@property
def edges(self):
if self._edges is None:
self._edges = np.concatenate([self.faces[:, :2],
self.faces[:, 1:3]], axis=0)
return self._edges
@property
def kdtree(self):
if self._kdtree is None:
self._kdtree = spatial.cKDTree(self.vertices)
return self._kdtree
@property
def graph(self):
if self._graph is None:
self._graph = self.create_nx_graph()
return self._graph
def load_obj(self):
# adapted from https://www.pygame.org/wiki/OBJFileLoader
vertices = []
faces = []
normals = []
for line in open(self.filename, "r"):
if line.startswith('#'): continue
values = line.split()
if not values: continue
if values[0] == 'v':
v = values[1:4]
vertices.append(v)
elif values[0] == 'vn':
                v = list(map(float, values[1:4]))
normals.append(v)
elif values[0] == 'f':
face = []
texcoords = []
norms = []
for v in values[1:]:
w = v.split('/')
face.append(int(w[0]))
if len(w) >= 2 and len(w[1]) > 0:
texcoords.append(int(w[1]))
else:
texcoords.append(0)
if len(w) >= 3 and len(w[2]) > 0:
norms.append(int(w[2]))
else:
norms.append(0)
faces.append(face)
        self._faces = np.array(faces, dtype=int) - 1
        self._vertices = np.array(vertices, dtype=float)
        self._normals = np.array(normals, dtype=float)
def load_h5(self):
with h5py.File(self.filename, "r") as f:
            self._vertices = f["vertices"][()]
            self._normals = f["normals"][()]
            self._faces = f["faces"][()]
def write_h5(self):
with h5py.File(self.filename, "w") as f:
f.create_dataset("vertices", self.vertices, compression="gzip")
f.create_dataset("faces", self.faces, compression="gzip")
f.create_dataset("normals", self.normals, compression="gzip")
def METHOD_NAME(self, out_fname, coords=None):
"""Writing vertex coordinates as a .ply file using plyfile"""
if coords is None:
coords = self.vertices
tweaked_array = np.array(
list(zip(coords[:, 0], coords[:, 1], coords[:, 2])),
dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
vertex_element = plyfile.PlyElement.describe(tweaked_array, "vertex")
if not os.path.exists(os.path.dirname(out_fname)):
os.makedirs(os.path.dirname(out_fname))
plyfile.PlyData([vertex_element]).write(out_fname)
def get_local_view(self, n_points, pc_align=False, center_node_id=None,
center_coord=None, method="kdtree", verbose=False):
if center_node_id is None and center_coord is None:
center_node_id = np.random.randint(len(self.vertices))
if center_coord is None:
center_coord = self.vertices[center_node_id]
n_samples = np.min([n_points, len(self.vertices)])
if method == "kdtree":
dists, node_ids = self.kdtree.query(center_coord, n_samples)
if verbose:
print(np.mean(dists), np.max(dists), np.min(dists))
elif method == "graph":
dist_dict = nx.single_source_dijkstra_path_length(self.graph,
center_node_id,
weight="weight")
sorting = np.argsort(np.array(list(dist_dict.values())))
node_ids = np.array(list(dist_dict.keys()))[sorting[:n_points]]
else:
raise Exception("unknow method")
local_vertices = self.vertices[node_ids]
if pc_align:
local_vertices = self.calc_pc_align(local_vertices)
return local_vertices, center_node_id
def calc_pc_align(self, vertices):
pca = decomposition.PCA(n_components=3)
pca.fit(vertices)
return pca.transform(vertices)
def create_nx_graph(self):
weights = np.linalg.norm(self.vertices[self.edges[:, 0]] - self.vertices[self.edges[:, 1]], axis=1)
print(weights.shape)
weighted_graph = nx.Graph()
weighted_graph.add_edges_from(self.edges)
for i_edge, edge in enumerate(self.edges):
weighted_graph[edge[0]][edge[1]]['weight'] = weights[i_edge]
return weighted_graph
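# Illustrative usage sketch (not part of the original module): the .obj path and
# the output .ply path are hypothetical placeholders; METHOD_NAME refers to the
# vertex-writing method defined on Mesh above.
def _example_mesh_usage():
    mesh = Mesh("/tmp/example_mesh.obj")
    # Extract a local neighbourhood of up to 500 vertices around a random vertex
    local_vertices, center_id = mesh.get_local_view(500, pc_align=True)
    # Write the full vertex cloud to a .ply file for inspection
    mesh.METHOD_NAME("/tmp/example_mesh_vertices.ply")
    return local_vertices, center_id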
|
2,247 |
shape parameter as tuple
|
import pytensor.tensor as at
from pytensor.compile import optdb
from pytensor.graph.rewriting.basic import in2out, node_rewriter
from pytensor.tensor.basic import MakeVector
from pytensor.tensor.elemwise import DimShuffle
from pytensor.tensor.math import Sum
from pytensor.tensor.shape import Reshape
from pytensor.tensor.subtensor import AdvancedIncSubtensor, AdvancedSubtensor
from pytensor.tensor.variable import TensorVariable
@node_rewriter([AdvancedIncSubtensor])
def boolean_indexing_set_or_inc(fgraph, node):
"""Replace `AdvancedIncSubtensor` when using boolean indexing using `Switch`.
JAX cannot JIT-compile functions that use boolean indexing to set values in
an array. A workaround is to re-express this logic using `jax.numpy.where`.
    This rewrite allows us to improve upon JAX's API.
"""
# Get out if there is more than one indexing group
if len(node.inputs) != 3:
return None
op = node.op
[x, y, cond] = node.inputs
# This rewrite only works when `y` is a scalar, so it can broadcast to the shape of x[cond]
if y.type.ndim > 0:
return
if not isinstance(cond, TensorVariable):
return
if not cond.type.dtype == "bool":
return
if op.set_instead_of_inc:
out = at.where(cond, y, x)
return out.owner.outputs
else:
out = at.where(cond, x + y, x)
return out.owner.outputs
optdb.register(
"jax_boolean_indexing_set_or_inc",
in2out(boolean_indexing_set_or_inc),
"jax",
position=100,
)
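# Illustrative sketch (not part of the original rewrite module): with hypothetical
# tensors, this shows the boolean set/increment pattern targeted above and the
# `where` expression it is rewritten into.
def _example_boolean_set_inc_equivalence():
    x = at.vector("x")
    cond = at.vector("cond", dtype="bool")
    y = at.scalar("y")
    set_graph = at.set_subtensor(x[cond], y)   # rewritten into ...
    set_equiv = at.where(cond, y, x)
    inc_graph = at.inc_subtensor(x[cond], y)   # rewritten into ...
    inc_equiv = at.where(cond, x + y, x)
    return set_graph, set_equiv, inc_graph, inc_equiv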
@node_rewriter([Sum])
def boolean_indexing_sum(fgraph, node):
"""Replace the sum of `AdvancedSubtensor` with exclusively boolean indexing.
JAX cannot JIT-compile functions that use boolean indexing, but can compile
those expressions that can be re-expressed using `jax.numpy.where`. This
    rewrite re-expresses the model on behalf of the user and thus allows us to
improve upon JAX's API.
"""
operand = node.inputs[0]
if not isinstance(operand, TensorVariable):
return
# If it's not a scalar reduction, it couldn't have been a pure boolean mask
if node.outputs[0].ndim != 0:
return
if operand.owner is None:
return
if not isinstance(operand.owner.op, AdvancedSubtensor):
return
# Get out if AdvancedSubtensor has more than a single indexing operation
if len(operand.owner.inputs) > 2:
return
[x, cond] = operand.owner.inputs
if not isinstance(cond, TensorVariable):
return
if not cond.type.dtype == "bool":
return
# Output must be a scalar, since pure boolean indexing returns a vector
# No need to worry about axis
out = at.sum(at.where(cond, x, 0))
return out.owner.outputs
optdb.register(
"jax_boolean_indexing_sum", in2out(boolean_indexing_sum), "jax", position=100
)
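# Illustrative sketch (not part of the original rewrite module): the boolean-masked
# sum targeted above and the `where`-based expression it is rewritten into, using
# hypothetical tensors.
def _example_boolean_sum_equivalence():
    x = at.vector("x")
    cond = at.vector("cond", dtype="bool")
    masked_sum = x[cond].sum()   # rewritten into ...
    equivalent = at.sum(at.where(cond, x, 0))
    return masked_sum, equivalent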
@node_rewriter([Reshape])
def METHOD_NAME(fgraph, node):
"""Replace `MakeVector` and `DimShuffle` (when used to transform a scalar
into a 1d vector) when they are found as the input of a `shape`
parameter by `JAXShapeTuple` during transpilation.
The JAX implementations of `MakeVector` and `DimShuffle` always return JAX
`TracedArrays`, but JAX only accepts concrete values as inputs for the `size`
or `shape` parameter. When these `Op`s are used to convert scalar or tuple
inputs, however, we can avoid tracing by making them return a tuple of their
inputs instead.
Note that JAX does not accept scalar inputs for the `size` or `shape`
parameters, and this rewrite also ensures that scalar inputs are turned into
tuples during transpilation.
"""
from pytensor.link.jax.dispatch.shape import JAXShapeTuple
shape_arg = node.inputs[1]
shape_node = shape_arg.owner
if shape_node is None:
return
if isinstance(shape_node.op, JAXShapeTuple):
return
if isinstance(shape_node.op, MakeVector) or (
isinstance(shape_node.op, DimShuffle)
and shape_node.op.input_broadcastable == ()
and shape_node.op.new_order == ("x",)
):
# Here PyTensor converted a tuple or list to a tensor
new_shape_args = JAXShapeTuple()(*shape_node.inputs)
new_inputs = list(node.inputs)
new_inputs[1] = new_shape_args
new_node = node.clone_with_new_inputs(new_inputs)
return new_node.outputs
optdb.register(
"jax_shape_parameter_as_tuple",
in2out(METHOD_NAME),
"jax",
position=100,
)
|
2,248 |
test license headers
|
# Copyright iris-grib contributors
#
# This file is part of iris-grib and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
Unit tests for iris-grib license header conformance.
"""
from datetime import datetime
from fnmatch import fnmatch
import os
import subprocess
import unittest
import iris_grib
LICENSE_TEMPLATE = """# Copyright iris-grib contributors
#
# This file is part of iris-grib and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details."""
# Guess the repository directory of iris-grib - realpath is used to mitigate
# against Python finding the iris-grib package via a symlink.
IRIS_GRIB_DIR = os.path.realpath(os.path.dirname(iris_grib.__file__))
REPO_DIR = os.path.dirname(IRIS_GRIB_DIR)
class TestLicenseHeaders(unittest.TestCase):
@staticmethod
def whatchanged_parse(whatchanged_output):
"""
Returns a generator of tuples of data parsed from
"git whatchanged --pretty='TIME:%at". The tuples are of the form
``(filename, last_commit_datetime)``
Sample input::
['TIME:1366884020', '',
':000000 100644 0000000... 5862ced... A\tlib/iris/cube.py']
"""
dt = None
for line in whatchanged_output:
if not line.strip():
continue
elif line.startswith('TIME:'):
dt = datetime.fromtimestamp(int(line[5:]))
else:
                # Non-blank, non-date line -> must be a line
                # containing the file info.
fname = ' '.join(line.split('\t')[1:])
yield fname, dt
@staticmethod
def last_change_by_fname():
"""
Return a dictionary of all the files under git which maps to
the datetime of their last modification in the git history.
.. note::
This function raises a ValueError if the repo root does
not have a ".git" folder. If git is not installed on the system,
or cannot be found by subprocess, an IOError may also be raised.
"""
# Check the ".git" folder exists at the repo dir.
if not os.path.isdir(os.path.join(REPO_DIR, '.git')):
raise ValueError('{} is not a git repository.'.format(REPO_DIR))
# Call "git whatchanged" to get the details of all the files and when
# they were last changed.
output = subprocess.check_output(['git', 'whatchanged',
"--pretty=TIME:%ct"],
cwd=REPO_DIR)
output = output.decode().split('\n')
res = {}
for fname, dt in TestLicenseHeaders.whatchanged_parse(output):
if fname not in res or dt > res[fname]:
res[fname] = dt
return res
def METHOD_NAME(self):
exclude_patterns = ('setup.py',
'build/*',
'dist/*',
'docs/*',
'iris_grib/tests/unit/results/*',
'iris_grib.egg-info/*')
try:
last_change_by_fname = self.last_change_by_fname()
except ValueError:
# Caught the case where this is not a git repo.
return self.skipTest('Iris-grib installation did not look like a '
'git repo.')
failed = False
for fname, last_change in sorted(last_change_by_fname.items()):
full_fname = os.path.join(REPO_DIR, fname)
if full_fname.endswith('.py') and os.path.isfile(full_fname) and \
not any(fnmatch(fname, pat) for pat in exclude_patterns):
with open(full_fname) as fh:
content = fh.read()
if not content.startswith(LICENSE_TEMPLATE):
print('The file {} does not start with the required '
'license header.'.format(fname))
failed = True
if failed:
raise ValueError('There were license header failures. See stdout.')
if __name__ == '__main__':
unittest.main()
|
2,249 |
pmids from ncbi email
|
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str, bytes
import re
import os
import sys
import imaplib
import email
import email.header
import datetime
import getpass
import base64
import shutil
def get_mailboxes(M):
res, mailboxes = M.list()
if res == 'OK':
return mailboxes
else:
return None
def select_mailbox(M, mailbox):
res, data = M.select(mailbox)
if res == 'OK':
return data
else:
return None
def fetch_email(M, msg_id):
"""Returns the given email message as a unicode string."""
res, data = M.fetch(msg_id, '(RFC822)')
if res == 'OK':
# Data here is a list with 1 element containing a tuple
# whose 2nd element is a long string containing the email
# The content is a bytes that must be decoded
raw_msg_txt = data[0][1]
# In Python3, we call message_from_bytes, but this function doesn't
# exist in Python 2.
try:
msg = email.message_from_bytes(raw_msg_txt)
except AttributeError:
msg = email.message_from_string(raw_msg_txt)
# At this point, we have a message containing bytes (not unicode)
# fields that will still need to be decoded, ideally according to the
# character set specified in the message.
return msg
else:
return None
def get_headers(msg):
"""Takes email.message.Message object initialized from unicode string,
returns dict with header fields."""
headers = {}
for k in msg.keys():
# decode_header decodes header but does not convert charset, so these
# may still be bytes, even in Python 3. However, if it's ASCII
# only (hence unambiguous encoding), the header fields come back
# as str (unicode) in Python 3.
(header_txt, charset) = email.header.decode_header(msg[k])[0]
if charset is not None:
header_txt = header_txt.decode(charset)
headers[k] = header_txt
return headers
def get_text(msg):
# Decode=True argument handles quoted-printable and Base64 encoding.
# parts variable may be a string or a list (in the case of a multi-part
# message).
parts = msg.get_payload(decode=True)
content_type = msg.get_content_type()
msg_txt = None
if content_type == 'text/html':
if isinstance(parts, list) or isinstance(parts, tuple):
pass
# If this is a bytes, we need to decode it
elif isinstance(parts, bytes):
charset = msg.get_charset()
if charset is None:
msg_txt = parts.decode('utf-8')
else:
msg_txt = parts.decode(charset)
# If it's already a str, we're good to go
elif isinstance(parts, str):
msg_txt = parts
else:
raise Exception("Message payload was neither string nor list.")
else:
print('Can\'t handle content type %s' % content_type)
return msg_txt
def print_msg(msg):
headers = get_headers(msg)
text = get_text(msg)
print('-----------')
print('Subject: %s' % headers['Subject'])
print('From: %s' % headers['From'])
print('To: %s' % headers['To'])
print('Message:')
print(text)
def get_message_pmids(M, day_limit=10):
if day_limit is not None:
date_now = datetime.datetime.now()
        date_rel = date_now - datetime.timedelta(days=day_limit)
date_str = date_rel.strftime('%d-%b-%Y')
res, data = M.search(None, '(SINCE "%s")' % date_str)
else:
res, data = M.search(None, 'ALL')
# Data here is a space-separated list of message IDs
# like ['1 2 3']
msg_ids_str = data[0].decode('utf-8')
if not msg_ids_str:
return []
msg_ids = msg_ids_str.split(' ')
pmids = []
for mid in msg_ids:
# msg is returned as object containing bytes
msg = fetch_email(M, mid)
# get_headers converts fields to unicode
headers = get_headers(msg)
subject = headers['Subject']
subject_pmids = pmids_from_subject(subject)
pmids += subject_pmids
if headers['From'] == 'Sent by NCBI <[email protected]>' or\
headers['From'] == 'My NCBI <[email protected]>':
# Returns unicode
text = get_text(msg)
ncbi_pmids = METHOD_NAME(text)
pmids += ncbi_pmids
return pmids
def METHOD_NAME(msg_text):
res = re.findall('PMID: [^.;]+', msg_text.replace('\n',''))
pmids = [r[6:].strip() for r in res]
return pmids
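# Illustrative sketch (not part of the original module): a hypothetical NCBI alert
# snippet showing the PMIDs the regular expression above extracts.
def _example_pmid_extraction():
    sample_text = "1. An article title.\nPMID: 12345678; free full text.\nPMID: 87654321."
    return METHOD_NAME(sample_text)  # -> ['12345678', '87654321']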
def pmids_from_subject(subject):
pmids = []
# TODO: this only works if the subject has PMIDxxx as a word
# separated by spaces from other text.
# We should use regexp to isolate the PMID
subject_words = subject.split(' ')
for w in subject_words:
if w.startswith('PMID'):
pmids.append(w[4:])
return pmids
def gmail_login(email_addr, passwd):
M = imaplib.IMAP4_SSL('imap.gmail.com')
try:
M.login(email_addr, passwd)
except imaplib.IMAP4.error:
print('Login failed')
return None
return M
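# Illustrative usage sketch (not part of the original module): the address and
# password below are hypothetical placeholders; in practice the password would be
# read with getpass.getpass() rather than written as a literal.
def _example_fetch_pmids():
    M = gmail_login('someone@example.com', 'app-password-placeholder')
    if M is None:
        return []
    select_mailbox(M, 'INBOX')
    pmids = get_message_pmids(M, day_limit=10)
    M.logout()
    return pmids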
|
2,250 |
test mcol mgga ab ks
|
#!/usr/bin/env python
# Copyright 2022 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <[email protected]>
#
import unittest
import tempfile
import numpy
import copy
from pyscf import lib, gto, scf
from pyscf.x2c import x2c, dft, tdscf
try:
import mcfun
except ImportError:
mcfun = None
def setUpModule():
global mol, mf_lda
mol = gto.Mole()
mol.verbose = 5
mol.output = '/dev/null'
mol.atom = '''
H 0. 0. 0.
H 0. -0.7 0.7
H 0. 0.7 0.7'''
mol.basis = '6-31g'
mol.spin = 1
mol.build()
mf_lda = dft.UKS(mol).set(xc='lda,', conv_tol=1e-12,
chkfile=tempfile.NamedTemporaryFile().name).newton().run()
def tearDownModule():
global mol, mf_lda
mol.stdout.close()
del mol, mf_lda
def diagonalize(a, b, nroots=4):
nocc, nvir = a.shape[:2]
nov = nocc * nvir
a = a.reshape(nov, nov)
b = b.reshape(nov, nov)
h = numpy.block([[a , b ],
[-b.conj(),-a.conj()]])
e = numpy.linalg.eig(numpy.asarray(h))[0]
lowest_e = numpy.sort(e[e.real > 0].real)[:nroots]
lowest_e = lowest_e[lowest_e > 1e-3]
return lowest_e
class KnownValues(unittest.TestCase):
def test_tddft_lda(self):
td = mf_lda.TDDFT()
es = td.kernel(nstates=4)[0]
a,b = td.get_ab()
e_ref = diagonalize(a, b, 8)
self.assertAlmostEqual(abs(es[:3]-e_ref[:3]).max(), 0, 5)
self.assertAlmostEqual(lib.fp(es[:3]*27.2114), 3.119041718921026, 5)
def test_tda_lda(self):
td = mf_lda.TDA()
es = td.kernel(nstates=5)[0]
a,b = td.get_ab()
nocc, nvir = a.shape[:2]
nov = nocc * nvir
e_ref = numpy.linalg.eigh(a.reshape(nov,nov))[0]
es = td.kernel(nstates=5)[0]
self.assertAlmostEqual(abs(es[:3]-e_ref[:3]).max(), 0, 5)
self.assertAlmostEqual(lib.fp(es[:3] * 27.2114), 3.1825211067032253, 5)
def test_ab_hf(self):
mf = x2c.UHF(mol).newton().run(conv_tol=1e-12)
self._check_against_ab_ks(mf.TDHF(), -0.2404548371794495, 0.6508765417771681, 4)
def test_col_lda_ab_ks(self):
self._check_against_ab_ks(mf_lda.TDDFT(), -0.5231134770778959, 0.07879428138412828)
def test_col_gga_ab_ks(self):
mf_b3lyp = dft.UKS(mol).set(xc='b3lyp5')
mf_b3lyp.__dict__.update(scf.chkfile.load(mf_lda.chkfile, 'scf'))
self._check_against_ab_ks(mf_b3lyp.TDDFT(), -0.4758219953792988, 0.17715631269859033)
def test_col_mgga_ab_ks(self):
mf_m06l = dft.UKS(mol).run(xc='m06l', conv_tol=1e-12)
mf_m06l.__dict__.update(scf.chkfile.load(mf_lda.chkfile, 'scf'))
self._check_against_ab_ks(mf_m06l.TDDFT(), -0.4919270127924622, 0.14597029880651433, places=5)
@unittest.skipIf(mcfun is None, "mcfun library not found.")
def test_mcol_lda_ab_ks(self):
mcol_lda = dft.UKS(mol).set(xc='lda,', collinear='mcol')
mcol_lda._numint.spin_samples = 6
mcol_lda.__dict__.update(scf.chkfile.load(mf_lda.chkfile, 'scf'))
self._check_against_ab_ks(mcol_lda.TDDFT(), -0.6154532929747091, 0.49991930461632084, places=5)
@unittest.skipIf(mcfun is None, "mcfun library not found.")
def test_mcol_gga_ab_ks(self):
mcol_b3lyp = dft.UKS(mol).set(xc='b3lyp5', collinear='mcol')
mcol_b3lyp._numint.spin_samples = 6
mcol_b3lyp.__dict__.update(scf.chkfile.load(mf_lda.chkfile, 'scf'))
self._check_against_ab_ks(mcol_b3lyp.TDDFT(), -0.4954910129906521, 0.4808365159189027)
@unittest.skipIf(mcfun is None, "mcfun library not found.")
def METHOD_NAME(self):
mcol_m06l = dft.UKS(mol).set(xc='m06l', collinear='mcol')
mcol_m06l._numint.spin_samples = 6
mcol_m06l.__dict__.update(scf.chkfile.load(mf_lda.chkfile, 'scf'))
self._check_against_ab_ks(mcol_m06l.TDDFT(), -0.6984240332038076, 2.0192987108288794)
def _check_against_ab_ks(self, td, refa, refb, places=6):
mf = td._scf
a, b = td.get_ab()
self.assertAlmostEqual(lib.fp(abs(a)), refa, places)
self.assertAlmostEqual(lib.fp(abs(b)), refb, places)
ftda = mf.TDA().gen_vind()[0]
ftdhf = td.gen_vind()[0]
nocc = numpy.count_nonzero(mf.mo_occ == 1)
nvir = numpy.count_nonzero(mf.mo_occ == 0)
numpy.random.seed(2)
x, y = xy = (numpy.random.random((2,nocc,nvir)) +
numpy.random.random((2,nocc,nvir)) * 1j)
ax = numpy.einsum('iajb,jb->ia', a, x)
self.assertAlmostEqual(abs(ax - ftda([x]).reshape(nocc,nvir)).max(), 0, 12)
ab1 = ax + numpy.einsum('iajb,jb->ia', b, y)
ab2 =-numpy.einsum('iajb,jb->ia', b.conj(), x)
ab2-= numpy.einsum('iajb,jb->ia', a.conj(), y)
abxy_ref = ftdhf([xy]).reshape(2,nocc,nvir)
self.assertAlmostEqual(abs(ab1 - abxy_ref[0]).max(), 0, 12)
self.assertAlmostEqual(abs(ab2 - abxy_ref[1]).max(), 0, 12)
@unittest.skipIf(mcfun is None, "mcfun library not found.")
def test_mcol_vs_gks(self):
with lib.temporary_env(lib.param, LIGHT_SPEED=20):
mol = gto.M(atom='C', basis='6-31g')
ref = dft.RKS(mol)
ref.xc = 'pbe'
ref.collinear = 'mcol'
ref._numint.spin_samples = 6
ref.run()
td = ref.TDA()
td.positive_eig_threshold = -10
eref = td.kernel(nstates=5)[0]
c = numpy.vstack(mol.sph2spinor_coeff())
mo1 = c.dot(ref.mo_coeff)
dm = ref.make_rdm1(mo1, ref.mo_occ)
mf = mol.GKS().x2c1e()
mf.xc = 'pbe'
mf.collinear = 'mcol'
mf._numint.spin_samples = 6
mf.max_cycle = 1
mf.kernel(dm0=dm)
td = mf.TDA()
td.positive_eig_threshold = -10
es = td.kernel(nstates=5)[0]
self.assertAlmostEqual(abs(es - eref).max(), 0, 7)
if __name__ == "__main__":
print("Full Tests for TD-X2C-KS")
unittest.main()
|
2,251 |
test case list lookup wo image
|
from django.test import SimpleTestCase
from corehq.apps.app_manager.models import Application, Module
from corehq.apps.app_manager.tests.app_factory import AppFactory
from corehq.apps.app_manager.tests.util import TestXmlMixin, patch_get_xform_resource_overrides
@patch_get_xform_resource_overrides()
class CaseListLookupTest(SimpleTestCase, TestXmlMixin):
def METHOD_NAME(self, *args):
callout_action = "callout.commcarehq.org.dummycallout.LAUNCH"
app = Application.new_app('domain', 'Untitled Application')
module = app.add_module(Module.new_module('Untitled Module', None))
module.case_type = 'patient'
module.case_details.short.lookup_enabled = True
module.case_details.short.lookup_action = callout_action
expected = """
<partial>
<lookup action="{}"/>
</partial>
""".format(callout_action)
self.assertXmlPartialEqual(
expected,
app.create_suite(),
"./detail/lookup"
)
def test_case_list_lookup_w_image(self, *args):
action = "callout.commcarehq.org.dummycallout.LAUNCH"
image = "jr://file/commcare/image/callout"
app = Application.new_app('domain', 'Untitled Application')
module = app.add_module(Module.new_module('Untitled Module', None))
module.case_type = 'patient'
module.case_details.short.lookup_enabled = True
module.case_details.short.lookup_action = action
module.case_details.short.lookup_image = image
expected = """
<partial>
<lookup action="{}" image="{}"/>
</partial>
""".format(action, image)
self.assertXmlPartialEqual(
expected,
app.create_suite(),
"./detail/lookup"
)
def test_case_list_lookup_autolaunch(self, *args):
action = "callout.commcarehq.org.dummycallout.LAUNCH"
app = Application.new_app('domain', 'Untitled Application')
module = app.add_module(Module.new_module('Untitled Module', None))
module.case_type = 'patient'
module.case_details.short.lookup_enabled = True
module.case_details.short.lookup_autolaunch = True
module.case_details.short.lookup_action = action
expected = """
<partial>
<lookup action="{action}" auto_launch="true"/>
</partial>
""".format(action=action)
self.assertXmlPartialEqual(
expected,
app.create_suite(),
"./detail/lookup"
)
def test_case_list_lookup_w_name(self, *args):
action = "callout.commcarehq.org.dummycallout.LAUNCH"
image = "jr://file/commcare/image/callout"
name = "ιтѕ α тяαρ ʕ •ᴥ•ʔ"
app = Application.new_app('domain', 'Untitled Application')
module = app.add_module(Module.new_module('Untitled Module', None))
module.case_type = 'patient'
module.case_details.short.lookup_enabled = True
module.case_details.short.lookup_action = action
module.case_details.short.lookup_image = image
module.case_details.short.lookup_name = name
expected = """
<partial>
<lookup name="{}" action="{}" image="{}"/>
</partial>
""".format(name, action, image)
self.assertXmlPartialEqual(
expected,
app.create_suite(),
"./detail/lookup"
)
def test_case_list_lookup_w_extras_and_responses(self, *args):
app = Application.new_app('domain', 'Untitled Application')
module = app.add_module(Module.new_module('Untitled Module', None))
module.case_type = 'patient'
module.case_details.short.lookup_enabled = True
module.case_details.short.lookup_action = "callout.commcarehq.org.dummycallout.LAUNCH"
module.case_details.short.lookup_extras = [
{'key': 'action_0', 'value': 'com.biometrac.core.SCAN'},
{'key': "action_1", 'value': "com.biometrac.core.IDENTIFY"},
]
module.case_details.short.lookup_responses = [
{"key": "match_id_0"},
{"key": "match_id_1"},
]
expected = """
<partial>
<lookup action="callout.commcarehq.org.dummycallout.LAUNCH">
<extra key="action_0" value="com.biometrac.core.SCAN"/>
<extra key="action_1" value="com.biometrac.core.IDENTIFY"/>
<response key="match_id_0"/>
<response key="match_id_1"/>
</lookup>
</partial>
"""
self.assertXmlPartialEqual(
expected,
app.create_suite(),
"./detail/lookup"
)
def test_case_list_lookup_disabled(self, *args):
action = "callout.commcarehq.org.dummycallout.LAUNCH"
app = Application.new_app('domain', 'Untitled Application')
module = app.add_module(Module.new_module('Untitled Module', None))
module.case_type = 'patient'
module.case_details.short.lookup_enabled = False
module.case_details.short.lookup_action = action
module.case_details.short.lookup_responses = ["match_id_0", "left_index"]
expected = "<partial></partial>"
self.assertXmlPartialEqual(
expected,
app.create_suite(),
"./detail/lookup"
)
def test_case_list_lookup_display_results(self, *args):
factory = AppFactory(build_version='2.11.0')
module, form = factory.new_basic_module('follow_up', 'case')
case_list = module.case_details.short
case_list.lookup_enabled = True
case_list.lookup_action = "callout.commcarehq.org.dummycallout.LAUNCH"
case_list.lookup_name = 'Scan fingerprint'
case_list.lookup_extras = [
{'key': 'deviceId', 'value': '123'},
{'key': 'apiKey', 'value': '0000'},
{'key': 'packageName', 'value': 'foo'},
]
case_list.lookup_responses = [
{'key': 'fake'}
]
case_list.lookup_display_results = True
case_list.lookup_field_header['en'] = 'Accuracy'
case_list.lookup_field_template = '@case_id'
expected = """
<partial>
<lookup name="Scan fingerprint"
action="callout.commcarehq.org.dummycallout.LAUNCH">
<extra key="deviceId" value="123"/>
<extra key="apiKey" value="0000"/>
<extra key="packageName" value="foo"/>
<response key="fake"/>
<field>
<header>
<text>
<locale id="case_lists.m0.callout.header"/>
</text>
</header>
<template>
<text>
<xpath function="@case_id"/>
</text>
</template>
</field>
</lookup>
</partial>
"""
self.assertXmlPartialEqual(
expected,
factory.app.create_suite(),
"./detail[@id='m0_case_short']/lookup"
)
|
2,252 |
open
|
"""Generic interface to all dbm clones.
Use
import dbm
d = dbm.open(file, 'w', 0o666)
The returned object is a dbm.gnu, dbm.ndbm or dbm.dumb object, dependent on the
type of database being opened (determined by the whichdb function) in the case
of an existing dbm. If the dbm does not exist and the create or new flag ('c'
or 'n') was specified, the dbm type will be determined by the availability of
the modules (tested in the above order).
It has the following interface (key and data are strings):
d[key] = data # store data at key (may override data at
# existing key)
data = d[key] # retrieve data at key (raise KeyError if no
# such key)
del d[key] # delete data stored at key (raises KeyError
# if no such key)
flag = key in d # true if the key exists
list = d.keys() # return a list of all existing keys (slow!)
Future versions may change the order in which implementations are
tested for existence, and add interfaces to other dbm-like
implementations.
"""
__all__ = ['open', 'whichdb', 'error']
import io
import os
import struct
import sys
class error(Exception):
pass
_names = ['dbm.gnu', 'dbm.ndbm', 'dbm.dumb']
_defaultmod = None
_modules = {}
error = (error, OSError)
try:
from dbm import ndbm
except ImportError:
ndbm = None
def METHOD_NAME(file, flag='r', mode=0o666):
"""Open or create database at path given by *file*.
Optional argument *flag* can be 'r' (default) for read-only access, 'w'
for read-write access of an existing database, 'c' for read-write access
to a new or existing database, and 'n' for read-write access to a new
database.
Note: 'r' and 'w' fail if the database doesn't exist; 'c' creates it
only if it doesn't exist; and 'n' always creates a new database.
"""
global _defaultmod
if _defaultmod is None:
for name in _names:
try:
mod = __import__(name, fromlist=['open'])
except ImportError:
continue
if not _defaultmod:
_defaultmod = mod
_modules[name] = mod
if not _defaultmod:
raise ImportError("no dbm clone found; tried %s" % _names)
# guess the type of an existing database, if not creating a new one
result = whichdb(file) if 'n' not in flag else None
if result is None:
# db doesn't exist or 'n' flag was specified to create a new db
if 'c' in flag or 'n' in flag:
# file doesn't exist and the new flag was used so use default type
mod = _defaultmod
else:
raise error[0]("need 'c' or 'n' flag to open new db")
elif result == "":
# db type cannot be determined
raise error[0]("db type could not be determined")
elif result not in _modules:
raise error[0]("db type is {0}, but the module is not "
"available".format(result))
else:
mod = _modules[result]
return mod.METHOD_NAME(file, flag, mode)
def whichdb(filename):
"""Guess which db package to use to open a db file.
Return values:
- None if the database file can't be read;
- empty string if the file can be read but can't be recognized
- the name of the dbm submodule (e.g. "ndbm" or "gnu") if recognized.
Importing the given module may still fail, and opening the
database using that module may still fail.
"""
# Check for ndbm first -- this has a .pag and a .dir file
try:
f = io.METHOD_NAME(filename + ".pag", "rb")
f.close()
f = io.METHOD_NAME(filename + ".dir", "rb")
f.close()
return "dbm.ndbm"
except OSError:
# some dbm emulations based on Berkeley DB generate a .db file
# some do not, but they should be caught by the bsd checks
try:
f = io.METHOD_NAME(filename + ".db", "rb")
f.close()
# guarantee we can actually open the file using dbm
# kind of overkill, but since we are dealing with emulations
# it seems like a prudent step
if ndbm is not None:
d = ndbm.METHOD_NAME(filename)
d.close()
return "dbm.ndbm"
except OSError:
pass
# Check for dumbdbm next -- this has a .dir and a .dat file
try:
# First check for presence of files
os.stat(filename + ".dat")
size = os.stat(filename + ".dir").st_size
# dumbdbm files with no keys are empty
if size == 0:
return "dbm.dumb"
f = io.METHOD_NAME(filename + ".dir", "rb")
try:
if f.read(1) in (b"'", b'"'):
return "dbm.dumb"
finally:
f.close()
except OSError:
pass
# See if the file exists, return None if not
try:
f = io.METHOD_NAME(filename, "rb")
except OSError:
return None
# Read the start of the file -- the magic number
s16 = f.read(16)
f.close()
s = s16[0:4]
# Return "" if not at least 4 bytes
if len(s) != 4:
return ""
# Convert to 4-byte int in native byte order -- return "" if impossible
try:
(magic,) = struct.unpack("=l", s)
except struct.error:
return ""
# Check for GNU dbm
if magic in (0x13579ace, 0x13579acd, 0x13579acf):
return "dbm.gnu"
# Later versions of Berkeley db hash file have a 12-byte pad in
# front of the file type
try:
(magic,) = struct.unpack("=l", s16[-4:])
except struct.error:
return ""
# Unknown
return ""
if __name__ == "__main__":
for filename in sys.argv[1:]:
print(whichdb(filename) or "UNKNOWN", filename)
|
2,253 |
is gossipsub
|
import anyio
from async_exit_stack import AsyncExitStack
from p2pclient.datastructures import StreamInfo
from p2pclient.utils import get_unused_tcp_port
import pytest
import trio
from libp2p.io.abc import ReadWriteCloser
from libp2p.security.noise.transport import PROTOCOL_ID as NOISE_PROTOCOL_ID
from libp2p.security.secio.transport import ID as SECIO_PROTOCOL_ID
from libp2p.tools.factories import HostFactory, PubsubFactory
from libp2p.tools.interop.daemon import make_p2pd
from libp2p.tools.interop.utils import connect
@pytest.fixture(params=[NOISE_PROTOCOL_ID, SECIO_PROTOCOL_ID])
def security_protocol(request):
return request.param
@pytest.fixture
def num_p2pds():
return 1
@pytest.fixture
def METHOD_NAME():
return True
@pytest.fixture
def is_pubsub_signing():
return True
@pytest.fixture
def is_pubsub_signing_strict():
return True
@pytest.fixture
async def p2pds(
num_p2pds,
security_protocol,
METHOD_NAME,
is_pubsub_signing,
is_pubsub_signing_strict,
):
async with AsyncExitStack() as stack:
p2pds = [
await stack.enter_async_context(
make_p2pd(
get_unused_tcp_port(),
get_unused_tcp_port(),
security_protocol,
METHOD_NAME=METHOD_NAME,
is_pubsub_signing=is_pubsub_signing,
is_pubsub_signing_strict=is_pubsub_signing_strict,
)
)
for _ in range(num_p2pds)
]
try:
yield p2pds
finally:
for p2pd in p2pds:
await p2pd.close()
@pytest.fixture
async def pubsubs(num_hosts, security_protocol, METHOD_NAME, is_pubsub_signing_strict):
if METHOD_NAME:
yield PubsubFactory.create_batch_with_gossipsub(
num_hosts,
security_protocol=security_protocol,
strict_signing=is_pubsub_signing_strict,
)
else:
yield PubsubFactory.create_batch_with_floodsub(
num_hosts, security_protocol, strict_signing=is_pubsub_signing_strict
)
class DaemonStream(ReadWriteCloser):
stream_info: StreamInfo
stream: anyio.abc.SocketStream
def __init__(self, stream_info: StreamInfo, stream: anyio.abc.SocketStream) -> None:
self.stream_info = stream_info
self.stream = stream
async def close(self) -> None:
await self.stream.close()
async def read(self, n: int = None) -> bytes:
return await self.stream.receive_some(n)
async def write(self, data: bytes) -> None:
return await self.stream.send_all(data)
@pytest.fixture
async def is_to_fail_daemon_stream():
return False
@pytest.fixture
async def py_to_daemon_stream_pair(p2pds, security_protocol, is_to_fail_daemon_stream):
async with HostFactory.create_batch_and_listen(
1, security_protocol=security_protocol
) as hosts:
assert len(p2pds) >= 1
host = hosts[0]
p2pd = p2pds[0]
protocol_id = "/protocol/id/123"
stream_py = None
stream_daemon = None
event_stream_handled = trio.Event()
await connect(host, p2pd)
async def daemon_stream_handler(stream_info, stream):
nonlocal stream_daemon
stream_daemon = DaemonStream(stream_info, stream)
event_stream_handled.set()
await trio.lowlevel.checkpoint()
await p2pd.control.stream_handler(protocol_id, daemon_stream_handler)
# Sleep for a while to wait for the handler being registered.
await trio.sleep(0.01)
if is_to_fail_daemon_stream:
# FIXME: This is a workaround to make daemon reset the stream.
            # We intentionally close the listener on the python side, which makes the connection
            # from the daemon to us fail, and therefore the daemon resets the opened stream on its side.
# Reference: https://github.com/libp2p/go-libp2p-daemon/blob/b95e77dbfcd186ccf817f51e95f73f9fd5982600/stream.go#L47-L50 # noqa: E501
            # We need it because we want to test against `stream_py` after the remote side (daemon)
            # is reset. This should be removed once the `stream.reset` API is exposed in the
            # daemon some day.
await p2pds[0].control.control.close()
stream_py = await host.new_stream(p2pd.peer_id, [protocol_id])
if not is_to_fail_daemon_stream:
await event_stream_handled.wait()
# NOTE: If `is_to_fail_daemon_stream == True`, then `stream_daemon == None`.
yield stream_py, stream_daemon
|
2,254 |
use ai pipeline params
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# generate default secret name
import os
import kfp
from kfp import components
from kfp import dsl
secret_name = 'kfp-creds'
configuration_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/master/components/ibm-components/commons/config/component.yaml')
train_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/master/components/ibm-components/watson/train/component.yaml')
store_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/master/components/ibm-components/watson/store/component.yaml')
deploy_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/master/components/ibm-components/watson/deploy/component.yaml')
# Helper function for secret mount and image pull policy
def METHOD_NAME(secret_name, secret_volume_mount_path='/app/secrets', image_pull_policy='IfNotPresent'):
def _use_ai_pipeline_params(task):
from kubernetes import client as k8s_client
task = task.add_volume(k8s_client.V1Volume(name=secret_name, # secret_name as volume name
secret=k8s_client.V1SecretVolumeSource(secret_name=secret_name)))
task.container.add_volume_mount(k8s_client.V1VolumeMount(mount_path=secret_volume_mount_path,
name=secret_name))
task.container.set_image_pull_policy(image_pull_policy)
return task
return _use_ai_pipeline_params
# create pipelines
@dsl.pipeline(
name='KFP on WML training',
description='Kubeflow pipelines running on WML performing tensorflow image recognition.'
)
def kfp_wml_pipeline(
GITHUB_TOKEN='',
CONFIG_FILE_URL='https://raw.githubusercontent.com/user/repository/branch/creds.ini',
train_code='tf-model.zip',
execution_command='\'python3 convolutional_network.py --trainImagesFile ${DATA_DIR}/train-images-idx3-ubyte.gz --trainLabelsFile ${DATA_DIR}/train-labels-idx1-ubyte.gz --testImagesFile ${DATA_DIR}/t10k-images-idx3-ubyte.gz --testLabelsFile ${DATA_DIR}/t10k-labels-idx1-ubyte.gz --learningRate 0.001 --trainingIters 20000\'',
framework='tensorflow',
framework_version='1.15',
runtime = 'python',
runtime_version='3.6',
run_definition = 'wml-tensorflow-definition',
run_name = 'wml-tensorflow-run',
model_name='wml-tensorflow-mnist',
scoring_payload='tf-mnist-test-payload.json',
compute_name='k80',
compute_nodes='1'
):
# op1 - this operation will create the credentials as secrets to be used by other operations
get_configuration = configuration_op(
token=GITHUB_TOKEN,
url=CONFIG_FILE_URL,
name=secret_name
)
# op2 - this operation trains the model with the model codes and data saved in the cloud object store
wml_train = train_op(
config=get_configuration.output,
train_code=train_code,
execution_command=execution_command,
framework=framework,
framework_version=framework_version,
runtime=runtime,
runtime_version=runtime_version,
run_definition=run_definition,
run_name=run_name,
compute_name=compute_name,
compute_nodes=compute_nodes
).apply(METHOD_NAME(secret_name, image_pull_policy='Always'))
# op3 - this operation stores the model trained above
wml_store = store_op(
wml_train.outputs['run_uid'],
model_name,
framework=framework,
framework_version=framework_version,
runtime_version=runtime_version
).apply(METHOD_NAME(secret_name, image_pull_policy='Always'))
# op4 - this operation deploys the model to a web service and run scoring with the payload in the cloud object store
wml_deploy = deploy_op(
wml_store.output,
model_name,
scoring_payload
).apply(METHOD_NAME(secret_name, image_pull_policy='Always'))
if __name__ == '__main__':
# compile the pipeline
import kfp.compiler as compiler
pipeline_filename = kfp_wml_pipeline.__name__ + '.zip'
compiler.Compiler().compile(kfp_wml_pipeline, pipeline_filename)
|
2,255 |
update gui
|
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import time
from gi.repository import GLib, Gtk, Pango
import gpodder
from gpodder import util
from gpodder.gtkui.widgets import SpinningProgressIndicator
_ = gpodder.gettext
class ProgressIndicator(object):
    # Delay before the window is shown (for short operations)
DELAY = 500
# Time between GUI updates after window creation
INTERVAL = 250
def __init__(self, title, subtitle=None, cancellable=False, parent=None, max_ticks=None):
self.title = title
self.subtitle = subtitle
self.cancellable = True if cancellable else False
self.cancel_callback = cancellable
self.cancel_id = 0
self.cancelled = False
self.next_update = time.time() + (self.DELAY / 1000)
self.parent = parent
self.dialog = None
self.progressbar = None
self.indicator = None
self._initial_message = None
self._initial_progress = None
self._progress_set = False
# use timeout_add, not util.idle_timeout_add, so it updates before Gtk+ redraws the dialog
self.source_id = GLib.timeout_add(self.DELAY, self._create_progress)
self.set_max_ticks(max_ticks)
def set_max_ticks(self, max_ticks):
self.max_ticks = max_ticks
self.tick_counter = 0
if max_ticks is not None:
self.on_message('0 / %d' % max_ticks)
def _on_delete_event(self, window, event):
if self.cancellable:
self.dialog.response(Gtk.ResponseType.CANCEL)
self.cancellable = False
self.cancelled = True
return True
def _create_progress(self):
self.dialog = Gtk.MessageDialog(self.parent,
0, 0, Gtk.ButtonsType.CANCEL, self.subtitle or self.title)
self.dialog.set_modal(True)
self.dialog.connect('delete-event', self._on_delete_event)
if self.cancellable:
def cancel_callback(dialog, response):
self.cancellable = False
self.cancelled = True
self.dialog.set_deletable(False)
self.dialog.set_response_sensitive(Gtk.ResponseType.CANCEL, False)
if callable(self.cancel_callback):
self.cancel_callback(dialog, response)
self.cancel_id = self.dialog.connect('response', cancel_callback)
self.dialog.set_title(self.title)
self.dialog.set_deletable(self.cancellable)
# Avoid selectable text (requires PyGTK >= 2.22)
if hasattr(self.dialog, 'get_message_area'):
for label in self.dialog.get_message_area():
if isinstance(label, Gtk.Label):
label.set_selectable(False)
self.dialog.set_response_sensitive(Gtk.ResponseType.CANCEL, self.cancellable)
self.progressbar = Gtk.ProgressBar()
self.progressbar.set_show_text(True)
self.progressbar.set_ellipsize(Pango.EllipsizeMode.END)
# If the window is shown after the first update, set the progress
# info so that when the window appears, data is there already
if self._initial_progress is not None:
self.progressbar.set_fraction(self._initial_progress)
if self._initial_message is not None:
self.progressbar.set_text(self._initial_message)
self.dialog.vbox.add(self.progressbar)
self.indicator = SpinningProgressIndicator()
self.dialog.set_image(self.indicator)
self.dialog.show_all()
self.METHOD_NAME()
# previous self.source_id timeout is removed when this returns False
# use timeout_add, not util.idle_timeout_add, so it updates before Gtk+ redraws the dialog
self.source_id = GLib.timeout_add(self.INTERVAL, self.METHOD_NAME)
return False
def METHOD_NAME(self):
if self.indicator:
self.indicator.step_animation()
if not self._progress_set and self.progressbar:
self.progressbar.pulse()
self.next_update = time.time() + (self.INTERVAL / 1000)
return True
def on_message(self, message):
if self.progressbar:
self.progressbar.set_text(message)
else:
self._initial_message = message
def on_progress(self, progress):
self._progress_set = True
if self.progressbar:
self.progressbar.set_fraction(progress)
else:
self._initial_progress = progress
def on_tick(self, final=False):
if final:
# Dialog is no longer cancellable
self.cancellable = False
if self.dialog is not None:
self.dialog.set_response_sensitive(Gtk.ResponseType.CANCEL, False)
self.dialog.set_deletable(False)
elif 2 * (time.time() - (self.next_update - (self.DELAY / 1000))) > (self.DELAY / 1000):
# Assume final operation will take as long as all ticks and open dialog
if self.source_id:
GLib.source_remove(self.source_id)
self._create_progress()
if self.max_ticks is not None and not final:
self.tick_counter += 1
if time.time() >= self.next_update or (final and self.dialog):
if isinstance(final, str):
self.on_message(final)
self.on_progress(1.0)
elif self.max_ticks is not None:
self.on_message('%d / %d' % (self.tick_counter, self.max_ticks))
self.on_progress(self.tick_counter / self.max_ticks)
# Allow UI to redraw.
util.idle_add(Gtk.main_quit)
# self._create_progress() or self._update_gui() is called by a timer to update the dialog
Gtk.main()
if self.cancelled:
return False
return True
def on_finished(self):
if self.dialog is not None:
if self.cancel_id:
self.dialog.disconnect(self.cancel_id)
self.dialog.destroy()
if self.source_id:
GLib.source_remove(self.source_id)
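# Illustrative usage sketch (not part of gPodder): a minimal driver for
# ProgressIndicator, assumed to run on the Gtk main thread. The tick count and
# the per-tick work are hypothetical.
def _example_progress_usage(parent_window=None):
    indicator = ProgressIndicator('Updating feeds', 'Please wait...',
                                  cancellable=True, parent=parent_window,
                                  max_ticks=10)
    for _ in range(10):
        # ... perform one unit of work here ...
        if not indicator.on_tick():
            break  # the user pressed Cancel
    indicator.on_tick(final='Finished')
    indicator.on_finished()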
|
2,256 |
critic fn
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SAC networks definition."""
import dataclasses
from typing import Optional, Tuple
from acme import core
from acme import specs
from acme.agents.jax import actor_core as actor_core_lib
from acme.jax import networks as networks_lib
from acme.jax import types
from acme.jax import utils
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
@dataclasses.dataclass
class SACNetworks:
"""Network and pure functions for the SAC agent.."""
policy_network: networks_lib.FeedForwardNetwork
q_network: networks_lib.FeedForwardNetwork
log_prob: networks_lib.LogProbFn
sample: networks_lib.SampleFn
sample_eval: Optional[networks_lib.SampleFn] = None
def default_models_to_snapshot(
networks: SACNetworks,
spec: specs.EnvironmentSpec):
"""Defines default models to be snapshotted."""
dummy_obs = utils.zeros_like(spec.observations)
dummy_action = utils.zeros_like(spec.actions)
dummy_key = jax.random.PRNGKey(0)
def q_network(
source: core.VariableSource) -> types.ModelToSnapshot:
params = source.get_variables(['critic'])[0]
return types.ModelToSnapshot(
networks.q_network.apply, params,
{'obs': dummy_obs, 'action': dummy_action})
def default_training_actor(
source: core.VariableSource) -> types.ModelToSnapshot:
params = source.get_variables(['policy'])[0]
return types.ModelToSnapshot(apply_policy_and_sample(networks, False),
params,
{'key': dummy_key, 'obs': dummy_obs})
def default_eval_actor(
source: core.VariableSource) -> types.ModelToSnapshot:
params = source.get_variables(['policy'])[0]
return types.ModelToSnapshot(
apply_policy_and_sample(networks, True), params,
{'key': dummy_key, 'obs': dummy_obs})
return {
'q_network': q_network,
'default_training_actor': default_training_actor,
'default_eval_actor': default_eval_actor,
}
def apply_policy_and_sample(
networks: SACNetworks,
eval_mode: bool = False) -> actor_core_lib.FeedForwardPolicy:
"""Returns a function that computes actions."""
sample_fn = networks.sample if not eval_mode else networks.sample_eval
if not sample_fn:
raise ValueError('sample function is not provided')
def apply_and_sample(params, key, obs):
return sample_fn(networks.policy_network.apply(params, obs), key)
return apply_and_sample
def make_networks(
spec: specs.EnvironmentSpec,
hidden_layer_sizes: Tuple[int, ...] = (256, 256)) -> SACNetworks:
"""Creates networks used by the agent."""
num_dimensions = np.prod(spec.actions.shape, dtype=int)
def _actor_fn(obs):
network = hk.Sequential([
hk.nets.MLP(
list(hidden_layer_sizes),
w_init=hk.initializers.VarianceScaling(1.0, 'fan_in', 'uniform'),
activation=jax.nn.relu,
activate_final=True),
networks_lib.NormalTanhDistribution(num_dimensions),
])
return network(obs)
def METHOD_NAME(obs, action):
network1 = hk.Sequential([
hk.nets.MLP(
list(hidden_layer_sizes) + [1],
w_init=hk.initializers.VarianceScaling(1.0, 'fan_in', 'uniform'),
activation=jax.nn.relu),
])
network2 = hk.Sequential([
hk.nets.MLP(
list(hidden_layer_sizes) + [1],
w_init=hk.initializers.VarianceScaling(1.0, 'fan_in', 'uniform'),
activation=jax.nn.relu),
])
input_ = jnp.concatenate([obs, action], axis=-1)
value1 = network1(input_)
value2 = network2(input_)
return jnp.concatenate([value1, value2], axis=-1)
policy = hk.without_apply_rng(hk.transform(_actor_fn))
critic = hk.without_apply_rng(hk.transform(METHOD_NAME))
# Create dummy observations and actions to create network parameters.
dummy_action = utils.zeros_like(spec.actions)
dummy_obs = utils.zeros_like(spec.observations)
dummy_action = utils.add_batch_dim(dummy_action)
dummy_obs = utils.add_batch_dim(dummy_obs)
return SACNetworks(
policy_network=networks_lib.FeedForwardNetwork(
lambda key: policy.init(key, dummy_obs), policy.apply),
q_network=networks_lib.FeedForwardNetwork(
lambda key: critic.init(key, dummy_obs, dummy_action), critic.apply),
log_prob=lambda params, actions: params.log_prob(actions),
sample=lambda params, key: params.sample(seed=key),
sample_eval=lambda params, key: params.mode())
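# Illustrative usage sketch (not part of the original module): building the SAC
# networks from a hypothetical continuous-control environment spec and sampling a
# single action; all shapes below are placeholders.
def _example_make_networks_usage():
  dummy_spec = specs.EnvironmentSpec(
      observations=specs.Array(shape=(8,), dtype=np.float32),
      actions=specs.BoundedArray(shape=(2,), dtype=np.float32,
                                 minimum=-1.0, maximum=1.0),
      rewards=specs.Array(shape=(), dtype=np.float32),
      discounts=specs.BoundedArray(shape=(), dtype=np.float32,
                                   minimum=0.0, maximum=1.0),
  )
  networks = make_networks(dummy_spec, hidden_layer_sizes=(64, 64))
  key = jax.random.PRNGKey(0)
  policy_params = networks.policy_network.init(key)
  policy = apply_policy_and_sample(networks)
  return policy(policy_params, key, jnp.zeros((8,), dtype=jnp.float32))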
|
2,257 |
test edit with all parameters
|
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <[email protected]> #
# Copyright 2012 Zearin <[email protected]> #
# Copyright 2013 Vincent Jacques <[email protected]> #
# Copyright 2014 Vincent Jacques <[email protected]> #
# Copyright 2016 Jannis Gebauer <[email protected]> #
# Copyright 2016 Peter Buckley <[email protected]> #
# Copyright 2018 sfdye <[email protected]> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from datetime import date, datetime, timezone
from . import Framework
class Milestone(Framework.TestCase):
def setUp(self):
super().setUp()
self.milestone = self.g.get_user().get_repo("PyGithub").get_milestone(1)
def testAttributes(self):
self.assertEqual(self.milestone.closed_issues, 2)
self.assertEqual(
self.milestone.created_at,
datetime(2012, 3, 8, 12, 22, 10, tzinfo=timezone.utc),
)
self.assertEqual(self.milestone.description, "")
self.assertEqual(
self.milestone.due_on,
datetime(2012, 3, 13, 7, 0, 0, tzinfo=timezone.utc),
)
self.assertEqual(self.milestone.id, 93546)
self.assertEqual(self.milestone.number, 1)
self.assertEqual(self.milestone.open_issues, 0)
self.assertEqual(self.milestone.state, "closed")
self.assertEqual(self.milestone.title, "Version 0.4")
self.assertEqual(
self.milestone.url,
"https://api.github.com/repos/jacquev6/PyGithub/milestones/1",
)
self.assertEqual(self.milestone.creator.login, "jacquev6")
self.assertEqual(repr(self.milestone), 'Milestone(title="Version 0.4", number=1)')
def testEditWithMinimalParameters(self):
self.milestone.edit("Title edited by PyGithub")
self.assertEqual(self.milestone.title, "Title edited by PyGithub")
def METHOD_NAME(self):
self.milestone.edit(
"Title edited twice by PyGithub",
"closed",
"Description edited by PyGithub",
due_on=date(2012, 6, 16),
)
self.assertEqual(self.milestone.title, "Title edited twice by PyGithub")
self.assertEqual(self.milestone.state, "closed")
self.assertEqual(self.milestone.description, "Description edited by PyGithub")
self.assertEqual(
self.milestone.due_on,
datetime(2012, 6, 16, 7, 0, 0, tzinfo=timezone.utc),
)
def testGetLabels(self):
self.assertListKeyEqual(
self.milestone.get_labels(),
lambda l: l.name,
["Public interface", "Project management"],
)
def testDelete(self):
self.milestone.delete()
|
2,258 |
test create annotation ajax
|
from json import loads
from django.http.response import Http404
from django.test.client import RequestFactory
from django.test.testcases import TestCase
from mediathread.djangosherd.models import SherdNote
from mediathread.djangosherd.views import delete_annotation, edit_annotation, \
create_annotation
from mediathread.factories import MediathreadTestMixin, AssetFactory, \
SherdNoteFactory, ProjectFactory
from mediathread.projects.models import ProjectNote
class SherdNoteViewTest(MediathreadTestMixin, TestCase):
def setUp(self):
self.setup_sample_course()
self.asset = AssetFactory(course=self.sample_course)
self.data = {'annotation-title': 'Annotation Test',
'annotation-body': 'notes go here',
'annotation-annotation_data': '',
'annotation-context_pk': self.asset.id,
'annotation-range1': -4.5, 'annotation-range2': 23,
'annotation-tags': 'foo,bar', 'next': 'foo'}
def test_create_annotation(self):
request = RequestFactory().post('/', self.data)
request.user = self.student_one
response = create_annotation(request)
self.assertEquals(response.status_code, 302)
note = SherdNote.objects.get(title='Annotation Test')
self.assertEquals(note.range1, -4.5)
self.assertEquals(note.range2, 23)
self.assertEquals(note.tags, 'foo,bar')
def test_create_annotation_with_project(self):
project = ProjectFactory()
self.data['project'] = project.id
request = RequestFactory().post('/', self.data)
request.user = self.student_one
response = create_annotation(request)
self.assertEquals(response.status_code, 302)
note = SherdNote.objects.get(title='Annotation Test')
ProjectNote.objects.get(annotation=note, project=project)
def METHOD_NAME(self):
request = RequestFactory().post('/', self.data,
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
request.user = self.student_one
response = create_annotation(request)
self.assertEquals(response.status_code, 200)
note = SherdNote.objects.get(title='Annotation Test')
the_json = loads(response.content)
self.assertEquals(the_json['asset']['id'], self.asset.id)
self.assertEquals(the_json['annotation']['id'], note.id)
def test_delete_annotation(self):
note = SherdNoteFactory(
asset=self.asset, author=self.student_one,
title='Selection', range1=116.25, range2=6.75)
request = RequestFactory().post('/', {'next': 'foo'})
request.user = self.student_two
response = delete_annotation(request, note.id)
self.assertEquals(response.status_code, 403)
request.user = self.student_one
response = delete_annotation(request, note.id)
self.assertEquals(response.status_code, 302)
with self.assertRaises(SherdNote.DoesNotExist):
SherdNote.objects.get(title='Selection')
def test_edit_annotation(self):
note = SherdNoteFactory(
asset=self.asset, author=self.student_one,
title='Selection', range1=116.25, range2=6.75)
data = {'annotation-range1': -4.5,
'annotation-tags': 'foo,bar', 'next': 'foo'}
request = RequestFactory().post('/', data)
request.user = self.student_two
with self.assertRaises(Http404):
edit_annotation(request, 123)
response = edit_annotation(request, note.id)
self.assertEquals(response.status_code, 403)
# via post
request.user = self.student_one
response = edit_annotation(request, note.id)
self.assertEquals(response.status_code, 302)
note.refresh_from_db()
self.assertEquals(note.range1, -4.5)
self.assertEquals(note.tags, 'foo,bar')
# via ajax
data = {'annotation-range2': 7}
request = RequestFactory().post('/', data,
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
request.user = self.student_one
response = edit_annotation(request, note.id)
self.assertEquals(response.status_code, 200)
the_json = loads(response.content)
self.assertEquals(the_json['asset']['id'], self.asset.id)
self.assertEquals(the_json['annotation']['id'], note.id)
note.refresh_from_db()
self.assertEquals(note.range2, 7)
|
2,259 |
is before
|
#! /usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2011 (ita)
"""
Common mistakes highlighting.
There is a performance impact, so this tool is only loaded when running ``waf -v``
"""
typos = {
'feature':'features',
'sources':'source',
'targets':'target',
'include':'includes',
'export_include':'export_includes',
'define':'defines',
'importpath':'includes',
'installpath':'install_path',
'iscopy':'is_copy',
'uses':'use',
}
meths_typos = ['__call__', 'program', 'shlib', 'stlib', 'objects']
import sys
from waflib import Logs, Build, Node, Task, TaskGen, ConfigSet, Errors, Utils
from waflib.Tools import ccroot
def check_same_targets(self):
mp = Utils.defaultdict(list)
uids = {}
def check_task(tsk):
if not isinstance(tsk, Task.Task):
return
if hasattr(tsk, 'no_errcheck_out'):
return
for node in tsk.outputs:
mp[node].append(tsk)
try:
uids[tsk.uid()].append(tsk)
except KeyError:
uids[tsk.uid()] = [tsk]
for g in self.groups:
for tg in g:
try:
for tsk in tg.tasks:
check_task(tsk)
except AttributeError:
# raised if not a task generator, which should be uncommon
check_task(tg)
dupe = False
for (k, v) in mp.items():
if len(v) > 1:
dupe = True
msg = '* Node %r is created more than once%s. The task generators are:' % (k, Logs.verbose == 1 and " (full message on 'waf -v -v')" or "")
Logs.error(msg)
for x in v:
if Logs.verbose > 1:
Logs.error(' %d. %r', 1 + v.index(x), x.generator)
else:
Logs.error(' %d. %r in %r', 1 + v.index(x), x.generator.name, getattr(x.generator, 'path', None))
Logs.error('If you think that this is an error, set no_errcheck_out on the task instance')
if not dupe:
for (k, v) in uids.items():
if len(v) > 1:
Logs.error('* Several tasks use the same identifier. Please check the information on\n https://waf.io/apidocs/Task.html?highlight=uid#waflib.Task.Task.uid')
tg_details = tsk.generator.name
if Logs.verbose > 2:
tg_details = tsk.generator
for tsk in v:
Logs.error(' - object %r (%r) defined in %r', tsk.__class__.__name__, tsk, tg_details)
def check_invalid_constraints(self):
feat = set()
for x in list(TaskGen.feats.values()):
feat.union(set(x))
for (x, y) in TaskGen.task_gen.prec.items():
feat.add(x)
feat.union(set(y))
ext = set()
for x in TaskGen.task_gen.mappings.values():
ext.add(x.__name__)
invalid = ext & feat
if invalid:
Logs.error('The methods %r have invalid annotations: @extension <-> @feature/@before_method/@after_method', list(invalid))
# the build scripts have been read, so we can check for invalid after/before attributes on task classes
for cls in list(Task.classes.values()):
if sys.hexversion > 0x3000000 and issubclass(cls, Task.Task) and isinstance(cls.hcode, str):
raise Errors.WafError('Class %r has hcode value %r of type <str>, expecting <bytes> (use Utils.h_cmd() ?)' % (cls, cls.hcode))
for x in ('before', 'after'):
for y in Utils.to_list(getattr(cls, x, [])):
if not Task.classes.get(y):
Logs.error('Erroneous order constraint %r=%r on task class %r', x, y, cls.__name__)
if getattr(cls, 'rule', None):
Logs.error('Erroneous attribute "rule" on task class %r (rename to "run_str")', cls.__name__)
def replace(m):
"""
Replaces existing BuildContext methods to verify parameter names,
for example ``bld(source=)`` has no ending *s*
"""
oldcall = getattr(Build.BuildContext, m)
def call(self, *k, **kw):
ret = oldcall(self, *k, **kw)
for x in typos:
if x in kw:
if x == 'iscopy' and 'subst' in getattr(self, 'features', ''):
continue
Logs.error('Fix the typo %r -> %r on %r', x, typos[x], ret)
return ret
setattr(Build.BuildContext, m, call)
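# Hedged illustration (the wscript call below is hypothetical): once replace('__call__')
# has wrapped the build context, a typo'd keyword such as
#   bld(features='c cprogram', sources='main.c', target='app')
# is reported as "Fix the typo 'sources' -> 'source'" on the resulting task
# generator, because 'sources' appears in the typos map above.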
def enhance_lib():
"""
Modifies existing classes and methods to enable error verification
"""
for m in meths_typos:
replace(m)
# catch '..' in ant_glob patterns
def ant_glob(self, *k, **kw):
if k:
lst = Utils.to_list(k[0])
for pat in lst:
sp = pat.split('/')
if '..' in sp:
Logs.error("In ant_glob pattern %r: '..' means 'two dots', not 'parent directory'", k[0])
if '.' in sp:
Logs.error("In ant_glob pattern %r: '.' means 'one dot', not 'current directory'", k[0])
return self.old_ant_glob(*k, **kw)
Node.Node.old_ant_glob = Node.Node.ant_glob
Node.Node.ant_glob = ant_glob
# catch ant_glob on build folders
def ant_iter(self, accept=None, maxdepth=25, pats=[], dir=False, src=True, remove=True, quiet=False):
if remove:
try:
if self.is_child_of(self.ctx.bldnode) and not quiet:
quiet = True
Logs.error('Calling ant_glob on build folders (%r) is dangerous: add quiet=True / remove=False', self)
except AttributeError:
pass
return self.old_ant_iter(accept, maxdepth, pats, dir, src, remove, quiet)
Node.Node.old_ant_iter = Node.Node.ant_iter
Node.Node.ant_iter = ant_iter
# catch conflicting ext_in/ext_out/before/after declarations
old = Task.METHOD_NAME
def METHOD_NAME(t1, t2):
ret = old(t1, t2)
if ret and old(t2, t1):
Logs.error('Contradictory order constraints in classes %r %r', t1, t2)
return ret
Task.METHOD_NAME = METHOD_NAME
# check for bld(feature='cshlib') where no 'c' is given - this can be either a mistake or on purpose
# so we only issue a warning
def check_err_features(self):
lst = self.to_list(self.features)
if 'shlib' in lst:
Logs.error('feature shlib -> cshlib, dshlib or cxxshlib')
for x in ('c', 'cxx', 'd', 'fc'):
if not x in lst and lst and lst[0] in [x+y for y in ('program', 'shlib', 'stlib')]:
Logs.error('%r features is probably missing %r', self, x)
TaskGen.feature('*')(check_err_features)
# check for erroneous order constraints
def check_err_order(self):
if not hasattr(self, 'rule') and not 'subst' in Utils.to_list(self.features):
for x in ('before', 'after', 'ext_in', 'ext_out'):
if hasattr(self, x):
Logs.warn('Erroneous order constraint %r on non-rule based task generator %r', x, self)
else:
for x in ('before', 'after'):
for y in self.to_list(getattr(self, x, [])):
if not Task.classes.get(y):
Logs.error('Erroneous order constraint %s=%r on %r (no such class)', x, y, self)
TaskGen.feature('*')(check_err_order)
# check for @extension used with @feature/@before_method/@after_method
def check_compile(self):
check_invalid_constraints(self)
try:
ret = self.orig_compile()
finally:
check_same_targets(self)
return ret
Build.BuildContext.orig_compile = Build.BuildContext.compile
Build.BuildContext.compile = check_compile
# check for invalid build groups #914
def use_rec(self, name, **kw):
try:
y = self.bld.get_tgen_by_name(name)
except Errors.WafError:
pass
else:
idx = self.bld.get_group_idx(self)
odx = self.bld.get_group_idx(y)
if odx > idx:
msg = "Invalid 'use' across build groups:"
if Logs.verbose > 1:
msg += '\n target %r\n uses:\n %r' % (self, y)
else:
msg += " %r uses %r (try 'waf -v -v' for the full error)" % (self.name, name)
raise Errors.WafError(msg)
self.orig_use_rec(name, **kw)
TaskGen.task_gen.orig_use_rec = TaskGen.task_gen.use_rec
TaskGen.task_gen.use_rec = use_rec
# check for env.append
def _getattr(self, name, default=None):
if name == 'append' or name == 'add':
raise Errors.WafError('env.append and env.add do not exist: use env.append_value/env.append_unique')
elif name == 'prepend':
raise Errors.WafError('env.prepend does not exist: use env.prepend_value')
if name in self.__slots__:
return super(ConfigSet.ConfigSet, self).__getattr__(name, default)
else:
return self[name]
ConfigSet.ConfigSet.__getattr__ = _getattr
def options(opt):
"""
Error verification can be enabled by default (not just on ``waf -v``) by loading this tool from the ``options`` function of the user script
"""
enhance_lib()
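# Hedged usage sketch (the tool name 'errcheck' is an assumption about how this
# file is installed): a project that wants these checks on every run, not only
# under 'waf -v', could load the tool from its own wscript, e.g.
#   def options(opt):
#       opt.load('errcheck')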
|
2,260 |
pre operations
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"redisenterprise operation-status show",
)
class Show(AAZCommand):
"""Get the status of operation.
:example: Get the status of an operation
az redisenterprise operation-status show --operation-id "testoperationid" --location "West US"
"""
_aaz_info = {
"version": "2023-03-01-preview",
"resources": [
["mgmt-plane", "/subscriptions/{}/providers/microsoft.cache/locations/{}/operationsstatus/{}", "2023-03-01-preview"],
]
}
def _handler(self, command_args):
super()._handler(command_args)
self._execute_operations()
return self._output()
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.location = AAZResourceLocationArg(
required=True,
id_part="name",
)
_args_schema.operation_id = AAZStrArg(
options=["-n", "--name", "--operation-id"],
help="The ID of an ongoing async operation.",
required=True,
id_part="child_name_1",
fmt=AAZStrArgFormat(
min_length=1,
),
)
return cls._args_schema
def _execute_operations(self):
self.METHOD_NAME()
self.OperationsStatusGet(ctx=self.ctx)()
self.post_operations()
@register_callback
def METHOD_NAME(self):
pass
@register_callback
def post_operations(self):
pass
def _output(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
return result
class OperationsStatusGet(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [200]:
return self.on_200(session)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/providers/Microsoft.Cache/locations/{location}/operationsStatus/{operationId}",
**self.url_parameters
)
@property
def method(self):
return "GET"
@property
def error_format(self):
return "MgmtErrorFormat"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"location", self.ctx.args.location,
required=True,
),
**self.serialize_url_param(
"operationId", self.ctx.args.operation_id,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2023-03-01-preview",
required=True,
),
}
return parameters
@property
def header_parameters(self):
parameters = {
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
def on_200(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
)
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZObjectType()
_schema_on_200 = cls._schema_on_200
_schema_on_200.end_time = AAZStrType(
serialized_name="endTime",
)
_schema_on_200.error = AAZObjectType()
_ShowHelper._build_schema_error_response_read(_schema_on_200.error)
_schema_on_200.id = AAZStrType()
_schema_on_200.name = AAZStrType()
_schema_on_200.start_time = AAZStrType(
serialized_name="startTime",
)
_schema_on_200.status = AAZStrType()
return cls._schema_on_200
class _ShowHelper:
"""Helper class for Show"""
_schema_error_detail_read = None
@classmethod
def _build_schema_error_detail_read(cls, _schema):
if cls._schema_error_detail_read is not None:
_schema.additional_info = cls._schema_error_detail_read.additional_info
_schema.code = cls._schema_error_detail_read.code
_schema.details = cls._schema_error_detail_read.details
_schema.message = cls._schema_error_detail_read.message
_schema.target = cls._schema_error_detail_read.target
return
cls._schema_error_detail_read = _schema_error_detail_read = AAZObjectType()
error_detail_read = _schema_error_detail_read
error_detail_read.additional_info = AAZListType(
serialized_name="additionalInfo",
flags={"read_only": True},
)
error_detail_read.code = AAZStrType(
flags={"read_only": True},
)
error_detail_read.details = AAZListType(
flags={"read_only": True},
)
error_detail_read.message = AAZStrType(
flags={"read_only": True},
)
error_detail_read.target = AAZStrType(
flags={"read_only": True},
)
additional_info = _schema_error_detail_read.additional_info
additional_info.Element = AAZObjectType()
_element = _schema_error_detail_read.additional_info.Element
_element.type = AAZStrType(
flags={"read_only": True},
)
details = _schema_error_detail_read.details
details.Element = AAZObjectType()
cls._build_schema_error_detail_read(details.Element)
_schema.additional_info = cls._schema_error_detail_read.additional_info
_schema.code = cls._schema_error_detail_read.code
_schema.details = cls._schema_error_detail_read.details
_schema.message = cls._schema_error_detail_read.message
_schema.target = cls._schema_error_detail_read.target
_schema_error_response_read = None
@classmethod
def _build_schema_error_response_read(cls, _schema):
if cls._schema_error_response_read is not None:
_schema.error = cls._schema_error_response_read.error
return
cls._schema_error_response_read = _schema_error_response_read = AAZObjectType()
error_response_read = _schema_error_response_read
error_response_read.error = AAZObjectType()
cls._build_schema_error_detail_read(error_response_read.error)
_schema.error = cls._schema_error_response_read.error
__all__ = ["Show"]
|
2,261 |
test conv3d ndhwc tensorcore
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-locals, too-many-arguments
"""Example code to do convolution."""
import numpy as np
import tvm
from tvm import topi
import tvm.topi.testing
from tvm import te
from tvm.contrib.pickle_memoize import memoize
from tvm.contrib import nvcc
from tvm.topi.nn.utils import get_pad_tuple3d
from tvm.topi.utils import get_const_tuple
import tvm.testing
_conv3d_ndhwc_tensorcore_implement = {
"cuda": (topi.cuda.conv3d_ndhwc_tensorcore, topi.cuda.schedule_conv3d_ndhwc_tensorcore)
}
def verify_conv3d_ndhwc(
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation=1,
add_bias=False,
add_relu=False,
devices="cuda",
):
"""Test the conv3d with tensorcore for ndhwc layout"""
pad_front, pad_top, pad_left, pad_back, pad_bottom, pad_right = get_pad_tuple3d(
padding, (kernel, kernel, kernel)
)
padding_sum = pad_front + pad_top + pad_left + pad_back + pad_bottom + pad_right
print(
"Workload: (%d, %d, %d, %d, %d, %d, %d, %d)"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation)
)
in_depth = in_height = in_width = in_size
dtype = "float16"
A = te.placeholder((batch, in_depth, in_height, in_width, in_channel), dtype, name="A")
W = te.placeholder((kernel, kernel, kernel, in_channel, num_filter), dtype, name="W")
bias = te.placeholder((1, 1, 1, 1, num_filter), dtype, name="bias")
a_shape = get_const_tuple(A.shape)
w_shape = get_const_tuple(W.shape)
bias_shape = get_const_tuple(bias.shape)
@memoize("topi.tests.test_topi_conv3d_ndhwc.verify_conv3d_ndhwc")
def get_ref_data():
a_np = np.random.uniform(size=a_shape).astype(dtype)
w_np = np.random.uniform(size=w_shape).astype(dtype)
b_np = np.random.uniform(size=bias_shape).astype(dtype)
dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, 1, dilation, dilation))
c_np = tvm.topi.testing.conv3d_ndhwc_python(a_np, dw_np, stride, padding)
if add_bias:
b_np = np.random.uniform(size=bias_shape).astype(dtype)
c_np += b_np
if add_relu:
c_np = np.maximum(c_np, 0)
return a_np, w_np, b_np, c_np
a_np, w_np, b_np, c_np = get_ref_data()
def check_device(device):
dev = tvm.device(device, 0)
print("Running on target: %s" % device)
with tvm.target.Target(device):
fcompute, fschedule = tvm.topi.testing.dispatch(
device, _conv3d_ndhwc_tensorcore_implement
)
C = fcompute(A, W, stride, padding, dilation, 1, "float16")
if add_bias:
C = topi.add(C, bias)
if add_relu:
C = topi.nn.relu(C)
s = fschedule([C])
a = tvm.nd.array(a_np, dev)
w = tvm.nd.array(w_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
if add_bias:
func = tvm.build(
s,
[A, W, bias, C],
device,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation),
)
func(a, w, b, c)
else:
func = tvm.build(
s,
[A, W, C],
device,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation),
)
func(a, w, c)
# Tensor cores are quite inaccurate: with large shapes the accumulation
# error grows, especially for values far from 1. We effectively disable
# atol (by setting it extremely large), since absolute error is not
# meaningful for these values, and rely on rtol instead.
tvm.testing.assert_allclose(c.numpy(), c_np, atol=1e200, rtol=0.01)
check_device(devices)
@tvm.testing.requires_tensorcore
@tvm.testing.requires_cuda
def METHOD_NAME():
"""Test the conv3d with tensorcore for ndhwc layout"""
verify_conv3d_ndhwc(16, 16, 14, 16, 3, 1, 1)
verify_conv3d_ndhwc(16, 64, 7, 64, 7, 1, 3)
verify_conv3d_ndhwc(16, 32, 7, 32, 7, 1, 3)
verify_conv3d_ndhwc(32, 16, 14, 16, 3, 1, 1, add_bias=True)
verify_conv3d_ndhwc(32, 16, 14, 16, 3, 1, 1, add_relu=True)
verify_conv3d_ndhwc(32, 16, 14, 16, 3, 1, 1, add_relu=True, add_bias=True)
verify_conv3d_ndhwc(16, 16, 17, 16, 7, 1, (3, 3, 3, 2, 2, 2))
verify_conv3d_ndhwc(16, 16, 17, 16, 7, 1, "SAME")
verify_conv3d_ndhwc(8, 16, 35, 32, 5, 1, "VALID")
verify_conv3d_ndhwc(16, 32, 16, 32, 3, 1, (1, 1, 1, 1, 1, 1))
verify_conv3d_ndhwc(16, 16, 12, 16, 3, 1, (1, 1, 1, 1, 1, 1))
if __name__ == "__main__":
METHOD_NAME()
|
2,262 |
aci any targetattr ne
|
# --- BEGIN COPYRIGHT BLOCK ---
# Copyright (C) 2015 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
"""Aci class to help parse and create ACIs.
You will access this via the Entry Class.
"""
import ldap
# Helpers to detect common patterns in aci
def METHOD_NAME(aci):
"""Returns True if any of the targetattr types is a != type"""
potential = False
if 'targetattr' in aci.acidata:
for ta in aci.acidata['targetattr']:
if ta['equal'] is False:
# Got, it lets do this
potential = True
return potential
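# Hedged illustration (the parsed structure shown is an assumption inferred from
# the lookups above): an EntryAci whose acidata holds something like
#   {'targetattr': [{'equal': False, 'values': ['userPassword']}], ...}
# -- roughly the parsed form of '(targetattr != "userPassword")' -- makes this
# helper return True, while an '=' style targetattr leaves it False.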
class Aci(object):
"""An object that helps to work with agreement entry
:param conn: An instance
:type conn: lib389.DirSrv
"""
def __init__(self, conn):
self.conn = conn
self.log = conn.log
def list(self, basedn, scope=ldap.SCOPE_SUBTREE):
"""List all acis in the directory server below the basedn confined by
scope.
:param basedn: Base DN
:type basedn: str
:param scope: ldap.SCOPE_SUBTREE, ldap.SCOPE_BASE,
ldap.SCOPE_ONELEVEL, ldap.SCOPE_SUBORDINATE
:type scope: int
:returns: A list of EntryAci objects
"""
acis = []
rawacientries = self.conn.search_s(basedn, scope, 'aci=*', ['aci'])
for rawacientry in rawacientries:
acis += rawacientry.getAcis()
return acis
def lint(self, basedn, scope=ldap.SCOPE_SUBTREE):
"""Validate and check for potential aci issues.
Given a scope and basedn, this will retrieve all the aci's below.
A number of checks are then run on the aci in isolation, and
in groups.
:param basedn: Base DN
:type basedn: str
:param scope: ldap.SCOPE_SUBTREE, ldap.SCOPE_BASE,
ldap.SCOPE_ONELEVEL, ldap.SCOPE_SUBORDINATE
:type scope: int
:returns: A tuple of (bool, list( dict ))
- Bool represents if the acis pass or fail as a whole.
- The list contains a list of warnings about your acis.
- The dict is structured as::
{
name: "" # DSALEXXXX
severity: "" # LOW MEDIUM HIGH
detail: "" # explanation
}
"""
result = True
# Not thread safe!!!
self.warnings = []
acis = self.list(basedn, scope)
# Checks against "all acis" go here.
self._lint_dsale_0001_ne_internal(acis)
self._lint_dsale_0002_ne_mult_subtree(acis)
# checks against individual acis go here
if len(self.warnings) > 0:
result = False
return (result, self.warnings)
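# Hedged usage sketch (the suffix shown is hypothetical): with a connected
# lib389 DirSrv instance conn, a lint cycle looks roughly like
#   aci = Aci(conn)
#   healthy, warnings = aci.lint("dc=example,dc=com")
#   if not healthy:
#       print(aci.format_lint(warnings))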
def format_lint(self, warnings):
"""Takes the array of warnings and returns a formatted string.
:param warnings: The array of warnings
:type warnings: dict
:returns: Formatted string of warnings
"""
buf = "-------------------------------------------------------------------------------"
for warning in warnings:
buf += """
Directory Server Aci Lint Error: {DSALE}
Severity: {SEVERITY}
Affected Acis:
{ACIS}
Details: {DETAIL}
Advice: {FIX}
-------------------------------------------------------------------------------""".format(
DSALE=warning['dsale'],
SEVERITY=warning['severity'],
ACIS=warning['acis'],
DETAIL=warning['detail'],
FIX=warning['fix'],
)
return buf
# These are the aci lint checks.
def _lint_dsale_0001_ne_internal(self, acis):
"""Check for the presence of "not equals" attributes that will inadvertantly
allow the return / modification of internal attributes.
"""
affected = []
for aci in acis:
if 'targetattr' in aci.acidata:
for ta in aci.acidata['targetattr']:
if ta['equal'] is False:
affected.append(aci.acidata['rawaci'])
if len(affected) > 0:
self.warnings.append(
{
'dsale': 'DSALE0001',
'severity': 'HIGH',
'acis': "\n".join(affected),
'detail': """
An aci of the form "(targetAttr!="attr")" exists on your system. This aci
will internally be expanded to mean "all possible attributes including system,
excluding the listed attributes".
This may allow a bound or anonymous user to read more data about
directory internals, including aci state or user limits. In the case of write
acis it may allow a dn to set their own resource limits, unlock passwords or
their own aci.
The ability to change the aci on the object may lead to privilege escalation in
some cases.
""",
'fix': """
Convert the aci to the form "(targetAttr="x || y || z")".
"""
}
)
def _lint_dsale_0002_ne_mult_subtree(self, acis):
"""This check will show pairs or more of aci that match the same subtree
with a != rute. These can cause the other rule to be invalidated!
"""
affected = []
for aci in acis:
# The aci has to be a NE, else don't bother checking
if not METHOD_NAME(aci):
continue
affect = False
buf = "%s %s\n" % (aci.entry.dn, aci.acidata['rawaci'])
for aci_inner in acis:
if aci_inner == aci:
# Don't compare to self!
continue
# Check the inner is a not equal also
if not METHOD_NAME(aci_inner):
continue
# Check if the dn is a substring, ie child, or equal.
if aci.entry.dn.endswith(aci_inner.entry.dn):
# alias the allow rules
allow_inner = set(aci_inner.acidata['allow'][0]['values'])
allow_outer = set(aci.acidata['allow'][0]['values'])
if len(allow_inner & allow_outer):
buf += "|- %s %s\n" % (aci_inner.entry.dn, aci_inner.acidata['rawaci'])
affect = True
if affect:
affected.append(buf)
if len(affected) > 0:
self.warnings.append(
{
'dsale': 'DSALE0002',
'severity': 'HIGH',
'acis': "\n".join(affected),
'detail': """
Acis exist on your system which both use a not-equals targetattr and overlap in
scope.
The way that directory server processes these is to invert them to white
lists, then union the results.
As a result, these acis *may* allow access to the attributes you want them to
exclude.
Consider:
aci: (targetattr !="cn")(version 3.0;acl "Self write all but cn";allow (write)
(userdn = "ldap:///self");)
aci: (targetattr !="sn")(version 3.0;acl "Self write all but sn";allow (write)
(userdn = "ldap:///self");)
This combination allows self write to *all* attributes within the subtree.
In cases where the target is members of a group, it may allow a member who is
within two groups to have elevated privilege.
""",
'fix': """
Convert the aci to the form "(targetAttr="x || y || z")".
Prevent the acis from overlapping, and have them on unique subtrees.
"""
}
)
|
2,263 |
to ranges
|
from datetime import date, timedelta
from typing import (
Callable,
Optional,
Sequence,
Tuple,
Union
)
from epiweeks import Week, Year
from typing_extensions import TypeAlias
from delphi.epidata.common.logger import get_structured_logger
# Alias for a sequence of date ranges (int, int) or date integers
IntRange: TypeAlias = Union[Tuple[int, int], int]
TimeValues: TypeAlias = Sequence[IntRange]
def time_value_to_day(value: int) -> date:
year, month, day = value // 10000, (value % 10000) // 100, value % 100
if year < date.min.year:
return date.min
if year > date.max.year:
return date.max
return date(year=year, month=month, day=day)
def time_value_to_week(value: int) -> Week:
year, week = value // 100, value % 100
if year < date.min.year:
return Week(date.min.year, 1)
if year > date.max.year - 1:
return Week(date.max.year - 1, 1) # minus 1 since internally it does some checks with a year + 1
return Week(year=year, week=week)
def guess_time_value_is_day(value: int) -> bool:
# YYYYMMDD type and not YYYYMM
return len(str(value)) == 8
def guess_time_value_is_week(value: int) -> bool:
# YYYYWW type and not YYYYMMDD
return len(str(value)) == 6
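# Worked example of the digit-length heuristic above (values are illustrative):
#   guess_time_value_is_day(20200101)  -> True   (8 digits, YYYYMMDD)
#   guess_time_value_is_week(202001)   -> True   (6 digits, YYYYWW)
#   guess_time_value_is_week(20200101) -> False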
def day_to_time_value(d: date) -> int:
return int(d.strftime("%Y%m%d"))
def week_to_time_value(w: Week) -> int:
return w.year * 100 + w.week
def time_value_to_iso(value: int) -> str:
return time_value_to_day(value).strftime("%Y-%m-%d")
def shift_day_value(time_value: int, days: int) -> int:
if days == 0:
return time_value
d = time_value_to_day(time_value)
shifted = d + timedelta(days=days)
return day_to_time_value(shifted)
def shift_week_value(week_value: int, weeks: int) -> int:
if weeks == 0:
return week_value
week = time_value_to_week(week_value)
shifted = week + weeks
return week_to_time_value(shifted)
def days_in_range(range: Tuple[int, int]) -> int:
"""
Returns the number of days within this time range, inclusive of both endpoints.
"""
start = time_value_to_day(range[0])
end = time_value_to_day(range[1])
delta = end - start
return delta.days + 1 # same date should lead to 1 day that will be queried
def weeks_in_range(week_range: Tuple[int, int]) -> int:
start = time_value_to_week(week_range[0])
end = time_value_to_week(week_range[1])
acc = end.week - start.week
# accumulate the number of weeks in the years between
for y in range(start.year, end.year):
year = Year(y)
acc += year.totalweeks()
return acc + 1 # same week should lead to 1 week that will be queried
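# Worked examples of the inclusive counting above (values are illustrative):
#   days_in_range((20200101, 20200103))  -> 3   (Jan 1, 2 and 3)
#   weeks_in_range((202001, 202003))     -> 3   (weeks 1, 2 and 3 of 2020)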
def time_values_to_ranges(values: Optional[TimeValues]) -> Optional[TimeValues]:
"""
Converts a mixed list of dates and date ranges to an optimized list where dates are merged into ranges where possible.
e.g. [20200101, 20200102, (20200101, 20200104), 20200106] -> [(20200101, 20200104), 20200106]
(the first two values of the original list are merged into a single range)
"""
logger = get_structured_logger('server_utils')
if not values or len(values) <= 1:
logger.debug("List of dates looks like 0-1 elements, nothing to optimize", time_values=values)
return values
# determine whether the list is of days (YYYYMMDD) or weeks (YYYYWW) based on first element
first_element = values[0][0] if isinstance(values[0], tuple) else values[0]
if guess_time_value_is_day(first_element):
logger.debug("Treating time value as day", time_value=first_element)
return days_to_ranges(values)
elif guess_time_value_is_week(first_element):
logger.debug("Treating time value as week", time_value=first_element)
return weeks_to_ranges(values)
else:
logger.debug("Time value unclear, not optimizing", time_value=first_element)
return values
def days_to_ranges(values: TimeValues) -> TimeValues:
return METHOD_NAME(values, time_value_to_day, day_to_time_value, timedelta(days=1))
def weeks_to_ranges(values: TimeValues) -> TimeValues:
return METHOD_NAME(values, time_value_to_week, week_to_time_value, 1)
def METHOD_NAME(values: TimeValues, value_to_date: Callable, date_to_value: Callable, time_unit: Union[int, timedelta]) -> TimeValues:
try:
intervals = []
# populate list of intervals based on original date/week values
for v in values:
if isinstance(v, int):
# 20200101 -> [20200101, 20200101]
intervals.append([value_to_date(v), value_to_date(v)])
else: # tuple
# (20200101, 20200102) -> [20200101, 20200102]
intervals.append([value_to_date(v[0]), value_to_date(v[1])])
intervals.sort()
# merge overlapping intervals https://leetcode.com/problems/merge-intervals/
merged = []
for interval in intervals:
# no overlap, append the interval
# caveat: we subtract 1 from interval[0] so that contiguous intervals are considered overlapping. i.e. [1, 1], [2, 2] -> [1, 2]
if not merged or merged[-1][1] < interval[0] - time_unit:
merged.append(interval)
# overlap, merge the current and previous intervals
else:
merged[-1][1] = max(merged[-1][1], interval[1])
# convert intervals from dates/weeks back to integers
ranges = []
for m in merged:
if m[0] == m[1]:
ranges.append(date_to_value(m[0]))
else:
ranges.append((date_to_value(m[0]), date_to_value(m[1])))
get_structured_logger('server_utils').debug("Optimized list of date values", original=values, optimized=ranges, original_length=len(values), optimized_length=len(ranges))
return ranges
except Exception as e:
get_structured_logger('server_utils').debug('bad input to date ranges', time_values=values, exception=e)
return values
|
2,264 |
test update individual data
|
from django.test import TestCase, override_settings
from mock import Mock, PropertyMock, call, patch
from people.tests.factories import PersonFactory
from twitterbot.management.twitter import TwitterAPIData
def fake_twitter_api_post(*args, **kwargs):
data = kwargs["data"]
mock_result = Mock()
if "screen_name" in data:
if data["screen_name"] == "mhl20,struan":
mock_result.json.return_value = [
{"id": 1234, "screen_name": "mhl20"},
{"id": 5678, "screen_name": "struan"},
]
return mock_result
if data["screen_name"] == "symroe":
mock_result.json.return_value = [
{"id": 9012, "screen_name": "symroe"}
]
return mock_result
if data["screen_name"] == "onlynonexistent":
mock_result.json.return_value = {
"errors": [
{
"code": 17,
"message": "No user matches for specified terms.",
}
]
}
return mock_result
elif "user_id" in data:
if data["user_id"] == "42":
mock_result.json.return_value = [
{"id": 42, "screen_name": "FooBarBazQuux"}
]
return mock_result
if data["user_id"] == "13984716923847632":
mock_result.json.return_value = {
"errors": [
{
"code": 17,
"message": "No user matches for specified terms.",
}
]
}
return mock_result
raise Exception("No Twitter API stub for {} {}".format(args, kwargs))
class TestTwitterData(TestCase):
@override_settings(TWITTER_APP_ONLY_BEARER_TOKEN=None)
def test_error_on_missing_token(self):
with self.assertRaisesRegex(
Exception, r"TWITTER_APP_ONLY_BEARER_TOKEN was not set"
):
TwitterAPIData()
@override_settings(TWITTER_APP_ONLY_BEARER_TOKEN="madeuptoken")
@patch("twitterbot.management.twitter.requests")
def test_makes_requests(self, mock_requests):
TwitterAPIData.MAX_IN_A_REQUEST = 2
twitter_data = TwitterAPIData()
mock_requests.post.side_effect = fake_twitter_api_post
twitter_results = list(
twitter_data.twitter_results(
"screen_name", ["mhl20", "struan", "symroe"]
)
)
self.assertEqual(
mock_requests.post.mock_calls,
[
call(
"https://api.twitter.com/1.1/users/lookup.json",
headers={"Authorization": "Bearer madeuptoken"},
data={"screen_name": "mhl20,struan"},
),
call(
"https://api.twitter.com/1.1/users/lookup.json",
headers={"Authorization": "Bearer madeuptoken"},
data={"screen_name": "symroe"},
),
],
)
self.assertEqual(
twitter_results,
[
{"id": 1234, "screen_name": "mhl20"},
{"id": 5678, "screen_name": "struan"},
{"id": 9012, "screen_name": "symroe"},
],
)
@override_settings(TWITTER_APP_ONLY_BEARER_TOKEN="madeuptoken")
@patch("twitterbot.management.twitter.requests")
def test_zero_results_for_screen_name_lookup(self, mock_requests):
twitter_data = TwitterAPIData()
mock_requests.post.side_effect = fake_twitter_api_post
twitter_results = list(
twitter_data.twitter_results("screen_name", ["onlynonexistent"])
)
self.assertEqual(twitter_results, [])
@override_settings(TWITTER_APP_ONLY_BEARER_TOKEN="madeuptoken")
@patch("twitterbot.management.twitter.requests")
def test_zero_results_for_user_id_lookup(self, mock_requests):
twitter_data = TwitterAPIData()
mock_requests.post.side_effect = fake_twitter_api_post
twitter_results = list(
twitter_data.twitter_results("user_id", ["13984716923847632"])
)
self.assertEqual(twitter_results, [])
@override_settings(TWITTER_APP_ONLY_BEARER_TOKEN="madeuptoken")
def test_all_screen_names(self):
joe = PersonFactory.create(id=1, name="Joe Bloggs")
joe.tmp_person_identifiers.create(
value="joenotreallyatwitteraccount", value_type="twitter_username"
)
jane = PersonFactory.create(id=2, name="Jane Bloggs")
jane.tmp_person_identifiers.create(
value="janenotreallyatwitteraccount", value_type="twitter_username"
)
twitter_data = TwitterAPIData()
self.assertEqual(
["janenotreallyatwitteraccount", "joenotreallyatwitteraccount"],
sorted(twitter_data.all_screen_names),
)
@override_settings(TWITTER_APP_ONLY_BEARER_TOKEN="madeuptoken")
def tests_all_user_ids(self):
joe = PersonFactory.create(id=1, name="Joe Bloggs")
joe.tmp_person_identifiers.create(
internal_identifier="246", value_type="twitter_username"
)
jane = PersonFactory.create(id=2, name="Jane Bloggs")
jane.tmp_person_identifiers.create(
internal_identifier="357", value_type="twitter_username"
)
twitter_data = TwitterAPIData()
self.assertEqual(["246", "357"], sorted(twitter_data.all_user_ids))
@override_settings(TWITTER_APP_ONLY_BEARER_TOKEN="madeuptoken")
def METHOD_NAME(self):
twitter_data = TwitterAPIData()
twitter_data.update_id_mapping(
{
"id": 42,
"screen_name": "FooBarBazQuux",
"profile_image_url_https": "https://example.com/foo.jpg",
}
)
self.assertEqual(
twitter_data.screen_name_to_user_id, {"foobarbazquux": "42"}
)
self.assertEqual(
twitter_data.user_id_to_screen_name, {"42": "FooBarBazQuux"}
)
self.assertEqual(
twitter_data.user_id_to_photo_url,
{"42": "https://example.com/foo.jpg"},
)
@override_settings(TWITTER_APP_ONLY_BEARER_TOKEN="madeuptoken")
@patch("twitterbot.management.twitter.requests")
@patch("twitterbot.management.twitter.TwitterAPIData.update_id_mapping")
@patch(
"twitterbot.management.twitter.TwitterAPIData.all_user_ids",
new_callable=PropertyMock,
)
@patch(
"twitterbot.management.twitter.TwitterAPIData.all_screen_names",
new_callable=PropertyMock,
)
def test_update_from_api(
self,
mock_all_screen_names,
mock_all_user_ids,
mock_update_id_mapping,
mock_requests,
):
mock_requests.post.side_effect = fake_twitter_api_post
twitter_data = TwitterAPIData()
mock_all_user_ids.return_value = ["1234", "42"]
mock_all_screen_names.return_value = ["mhl20", "struan"]
twitter_data.user_id_to_screen_name = {"1234": "mhl20"}
twitter_data.update_from_api()
self.assertEqual(
mock_update_id_mapping.mock_calls,
[
call({"id": 1234, "screen_name": "mhl20"}),
call({"id": 5678, "screen_name": "struan"}),
call({"id": 42, "screen_name": "FooBarBazQuux"}),
],
)
@override_settings(TWITTER_APP_ONLY_BEARER_TOKEN="madeuptoken")
@patch("twitterbot.management.twitter.requests")
def test_unfaked_urls_raise_exception(self, mock_requests):
TwitterAPIData.MAX_IN_A_REQUEST = 2
twitter_data = TwitterAPIData()
mock_requests.post.side_effect = fake_twitter_api_post
with self.assertRaises(Exception):
list(twitter_data.twitter_results("screen_name", ["foo", "bar"]))
|
2,265 |
test fuse double array
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020-2023 Alibaba Group Holding Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from enum import Enum
import numpy as np
import pandas as pd
import pyarrow as pa
class Type(Enum):
STRING = 1
INT64 = 2
DOUBLE = 3
def generate_dataframe(size=(3, 4)):
height, width = size
ldf = pd.DataFrame(
np.random.randint(0, 100, size=(height, width)) * 2.3,
columns=[''.join(['a'] * i) for i in range(1, width + 1)],
)
rdf = pd.DataFrame(
np.random.randint(0, 100, size=(height, width)),
columns=[''.join(['b'] * i) for i in range(1, width + 1)],
)
return pd.concat([ldf, rdf], axis=1, join="inner")
def generate_string_array(length=20):
res = []
alphabet = [
'a',
'b',
'c',
'd',
'e',
'f',
'g',
'h',
'i',
'j',
'k',
'l',
'm',
'n',
'o',
'p',
'q',
'r',
's',
't',
'u',
'v',
'w',
'x',
'y',
'z',
' ',
]
for _ in range(1, length):
s_length = np.random.randint(1, length)
res.append(''.join(np.random.choice(alphabet, s_length)))
return res
def generate_array(type: Type, length=20):
f = {
Type.INT64: lambda x: np.random.randint(0, 1000, x),
Type.DOUBLE: lambda x: np.random.uniform(low=0, high=1000, size=x),
Type.STRING: generate_string_array,
}
return pa.array(f[type](length))
def assert_dataframe(stored_df: pd.DataFrame, extracted_df: pa.Table):
pdf = pa.Table.from_pandas(stored_df)
assert extracted_df.equals(pdf), "data frames do not match"
def assert_array(stored_arr: pa.Array, extracted_array: pa.Array):
assert stored_arr.equals(extracted_array), "arrays do not match"
def read_data_from_fuse(vid, test_mount_dir):
with open(os.path.join(test_mount_dir, vid), 'rb') as source:
with pa.ipc.open_file(source) as reader:
data = reader.read_all()
return data
def compare_two_string_array(arr_str_1, arr_str_2):
a = arr_str_1
b = arr_str_2
if len(a) != len(b):
return False
else:
for i, j in zip(a, b):
if str(i) != str(j):
return False
return True
def test_fuse_int64_array(vineyard_client, vineyard_fuse_mount_dir):
data = generate_array(Type.INT64)
id = vineyard_client.put(data)
extracted_data = read_data_from_fuse(
str(id)[11:28] + ".arrow", vineyard_fuse_mount_dir
)
extracted_data = extracted_data.column("a").chunk(0)
assert_array(data, extracted_data)
def METHOD_NAME(vineyard_client, vineyard_fuse_mount_dir):
data = generate_array(Type.DOUBLE)
id = vineyard_client.put(data)
extracted_data = read_data_from_fuse(
str(id)[11:28] + ".arrow", vineyard_fuse_mount_dir
)
extracted_data = extracted_data.column("a").chunk(0)
assert_array(data, extracted_data)
def test_fuse_string_array(vineyard_client, vineyard_fuse_mount_dir):
data = generate_array(Type.STRING)
id = vineyard_client.put(data)
extracted_data = read_data_from_fuse(
str(id)[11:28] + ".arrow", vineyard_fuse_mount_dir
)
extracted_data = extracted_data.column("a").chunk(0)
assert compare_two_string_array(data, extracted_data), "string array not the same"
def test_fuse_df(vineyard_client, vineyard_fuse_mount_dir):
data = generate_dataframe()
id = vineyard_client.put(data)
extracted_data = read_data_from_fuse(
str(id)[11:28] + ".arrow", vineyard_fuse_mount_dir
)
assert_dataframe(data, extracted_data)
|
2,266 |
license
|
#!/bin/env python3
# SPDX-License-Identifier: Apache-2.0
# Based on: https://github.com/enarx/spdx
import os
import re
SLUG = re.compile('[a-zA-Z0-9.-]+')
SPDX = re.compile(f'SPDX-License-Identifier:\s+({SLUG.pattern})')
class Language:
def __init__(self, *comments, shebang=False):
assert(isinstance(shebang, bool))
self.__shebang = shebang
self.__match = []
for comment in comments:
(init, fini) = (comment, '')
if isinstance(comment, tuple):
(init, fini) = comment
pattern = f"^{init}\s*{SPDX.pattern}\s*{fini}\s*$"
self.__match.append(re.compile(pattern))
def METHOD_NAME(self, path):
"Find the license from the SPDX header."
with open(path) as f:
line = f.readline()
if self.__shebang and line.startswith('#!'):
line = f.readline()
for matcher in self.__match:
match = matcher.match(line)
if match:
return match.group(1)
return None
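# Hedged illustration (file contents are hypothetical): for a Python file whose
# first non-shebang line is
#   # SPDX-License-Identifier: Apache-2.0
# Language('#+', shebang=True) returns 'Apache-2.0', and None when no line
# matching the SPDX pattern is found at that position.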
class Index:
INTERPRETERS = {
'python3': 'python',
'python2': 'python',
'python': 'python',
'ruby': 'ruby',
'lua': 'lua',
}
EXTENSIONS = {
'.py': 'python',
'.proto': 'protobuf',
'.rs': 'rust',
'.yml': 'yaml',
'.yaml': 'yaml',
'.json': 'json',
'.toml': 'toml',
'.md': 'md',
'.rb': 'ruby',
'.c': 'c',
'.h': 'c',
'.cpp': 'c++',
'.hpp': 'c++',
'.cc': 'c++',
'.hh': 'c++',
'.lua': 'lua',
}
def __init__(self):
self.__languages = {
'python': Language('#+', shebang=True),
'ruby': Language('#+', shebang=True),
'c': Language('//+', ('/\\*', '\\*/')),
'c++': Language('//+', ('/\\*', '\\*/')),
'rust': Language('//+', '//!', ('/\\*', '\\*/')),
'protobuf': Language('//+', '//!', ('/\\*', '\\*/')),
'lua': Language('--+', ('--\[\[', '--\]\]')),
}
def language(self, path):
name = self.EXTENSIONS.get(os.path.splitext(path)[1])
if name is None:
interpreter = None
with open(path, "rb") as f:
if f.read(2) == bytearray('#!'.encode('ascii')):
# assume a text file and retry as text file
try:
with open(path, "r") as t:
interpreter = t.readline().rstrip().rsplit(
os.path.sep)[-1]
except:
pass
name = self.INTERPRETERS.get(interpreter)
return self.__languages.get(name)
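# Hedged illustration: 'foo.rs' resolves to the 'rust' Language through
# EXTENSIONS, while an extensionless script whose first line is
# '#!/usr/bin/python3' resolves through the shebang path and INTERPRETERS to
# 'python'. Note that an 'env'-style shebang such as '#!/usr/bin/env python3'
# rsplits to 'env python3' here (on POSIX paths), which INTERPRETERS does not
# map, so such files are skipped.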
def scan(self, root):
IGNORE_DIRS = {".git", "target"}
for root, dirs, files in os.walk(root):
# Ignore the specified directories.
for dir in IGNORE_DIRS.intersection(dirs):
dirs.remove(dir)
for file in files:
path = os.path.join(root, file)
# Find the language of the file.
language = self.language(path)
if language is None:
continue
# Parse the SPDX header for the language.
yield (path, language.METHOD_NAME(path))
if __name__ == '__main__':
import sys
import json
# Validate the arguments
licenses = os.getenv('INPUT_LICENSES')
if licenses is None:
licenses = sys.argv[1:]
else:
licenses = json.loads(licenses)
for METHOD_NAME in licenses:
if not SLUG.match(METHOD_NAME):
print("Invalid license '%s'!" % METHOD_NAME)
raise SystemExit(1)
rv = 0
index = Index()
for (path, METHOD_NAME) in index.scan("."):
if METHOD_NAME not in licenses:
if METHOD_NAME == None:
print(f"NO SPDX HEADER\t {path}")
else:
print(f"{METHOD_NAME:16} {path}")
rv = 1
raise SystemExit(rv)
|
2,267 |
test out file
|
#!/usr/bin/env python
# Copyright 2018, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_json_output module."""
import json
import os
from googletest.test import gtest_json_test_utils
from googletest.test import gtest_test_utils
GTEST_OUTPUT_SUBDIR = 'json_outfiles'
GTEST_OUTPUT_1_TEST = 'gtest_xml_outfile1_test_'
GTEST_OUTPUT_2_TEST = 'gtest_xml_outfile2_test_'
EXPECTED_1 = {
u'tests':
1,
u'failures':
0,
u'disabled':
0,
u'errors':
0,
u'time':
u'*',
u'timestamp':
u'*',
u'name':
u'AllTests',
u'testsuites': [{
u'name':
u'PropertyOne',
u'tests':
1,
u'failures':
0,
u'disabled':
0,
u'errors':
0,
u'time':
u'*',
u'timestamp':
u'*',
u'testsuite': [{
u'name': u'TestSomeProperties',
u'file': u'gtest_xml_outfile1_test_.cc',
u'line': 41,
u'status': u'RUN',
u'result': u'COMPLETED',
u'time': u'*',
u'timestamp': u'*',
u'classname': u'PropertyOne',
u'SetUpProp': u'1',
u'TestSomeProperty': u'1',
u'TearDownProp': u'1',
}],
}],
}
EXPECTED_2 = {
u'tests':
1,
u'failures':
0,
u'disabled':
0,
u'errors':
0,
u'time':
u'*',
u'timestamp':
u'*',
u'name':
u'AllTests',
u'testsuites': [{
u'name':
u'PropertyTwo',
u'tests':
1,
u'failures':
0,
u'disabled':
0,
u'errors':
0,
u'time':
u'*',
u'timestamp':
u'*',
u'testsuite': [{
u'name': u'TestSomeProperties',
u'file': u'gtest_xml_outfile2_test_.cc',
u'line': 41,
u'status': u'RUN',
u'result': u'COMPLETED',
u'timestamp': u'*',
u'time': u'*',
u'classname': u'PropertyTwo',
u'SetUpProp': u'2',
u'TestSomeProperty': u'2',
u'TearDownProp': u'2',
}],
}],
}
class GTestJsonOutFilesTest(gtest_test_utils.TestCase):
"""Unit test for Google Test's JSON output functionality."""
def setUp(self):
# We want the trailing '/' that the last "" provides in os.path.join, for
# telling Google Test to create an output directory instead of a single file
# for JSON output.
self.output_dir_ = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_OUTPUT_SUBDIR, '')
self.DeleteFilesAndDir()
def tearDown(self):
self.DeleteFilesAndDir()
def DeleteFilesAndDir(self):
try:
os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_1_TEST + '.json'))
except os.error:
pass
try:
os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_2_TEST + '.json'))
except os.error:
pass
try:
os.rmdir(self.output_dir_)
except os.error:
pass
def testOutfile1(self):
self.METHOD_NAME(GTEST_OUTPUT_1_TEST, EXPECTED_1)
def testOutfile2(self):
self.METHOD_NAME(GTEST_OUTPUT_2_TEST, EXPECTED_2)
def METHOD_NAME(self, test_name, expected):
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(test_name)
command = [gtest_prog_path, '--gtest_output=json:%s' % self.output_dir_]
p = gtest_test_utils.Subprocess(command,
working_dir=gtest_test_utils.GetTempDir())
self.assert_(p.exited)
self.assertEquals(0, p.exit_code)
output_file_name1 = test_name + '.json'
output_file1 = os.path.join(self.output_dir_, output_file_name1)
output_file_name2 = 'lt-' + output_file_name1
output_file2 = os.path.join(self.output_dir_, output_file_name2)
self.assert_(os.path.isfile(output_file1) or os.path.isfile(output_file2),
output_file1)
if os.path.isfile(output_file1):
with open(output_file1) as f:
actual = json.load(f)
else:
with open(output_file2) as f:
actual = json.load(f)
self.assertEqual(expected, gtest_json_test_utils.normalize(actual))
if __name__ == '__main__':
os.environ['GTEST_STACK_TRACE_DEPTH'] = '0'
gtest_test_utils.Main()
|
2,268 |
test wrong exception order
|
"""Tests for distutils.command.upload."""
import os
import unittest
import unittest.mock as mock
from urllib.request import HTTPError
from test.support import run_unittest
from distutils.command import upload as upload_mod
from distutils.command.upload import upload
from distutils.core import Distribution
from distutils.errors import DistutilsError
from distutils.log import ERROR, INFO
from distutils.tests.test_config import PYPIRC, BasePyPIRCCommandTestCase
PYPIRC_LONG_PASSWORD = """\
[distutils]
index-servers =
server1
server2
[server1]
username:me
password:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
[server2]
username:meagain
password: secret
realm:acme
repository:http://another.pypi/
"""
PYPIRC_NOPASSWORD = """\
[distutils]
index-servers =
server1
[server1]
username:me
"""
class FakeOpen(object):
def __init__(self, url, msg=None, code=None):
self.url = url
if not isinstance(url, str):
self.req = url
else:
self.req = None
self.msg = msg or 'OK'
self.code = code or 200
def getheader(self, name, default=None):
return {
'content-type': 'text/plain; charset=utf-8',
}.get(name.lower(), default)
def read(self):
return b'xyzzy'
def getcode(self):
return self.code
class uploadTestCase(BasePyPIRCCommandTestCase):
def setUp(self):
super(uploadTestCase, self).setUp()
self.old_open = upload_mod.urlopen
upload_mod.urlopen = self._urlopen
self.last_open = None
self.next_msg = None
self.next_code = None
def tearDown(self):
upload_mod.urlopen = self.old_open
super(uploadTestCase, self).tearDown()
def _urlopen(self, url):
self.last_open = FakeOpen(url, msg=self.next_msg, code=self.next_code)
return self.last_open
def test_finalize_options(self):
# new format
self.write_file(self.rc, PYPIRC)
dist = Distribution()
cmd = upload(dist)
cmd.finalize_options()
for attr, waited in (('username', 'me'), ('password', 'secret'),
('realm', 'pypi'),
('repository', 'https://upload.pypi.org/legacy/')):
self.assertEqual(getattr(cmd, attr), waited)
def test_saved_password(self):
# file with no password
self.write_file(self.rc, PYPIRC_NOPASSWORD)
# make sure it passes
dist = Distribution()
cmd = upload(dist)
cmd.finalize_options()
self.assertEqual(cmd.password, None)
# make sure we get it as well, if another command
# initialized it at the dist level
dist.password = 'xxx'
cmd = upload(dist)
cmd.finalize_options()
self.assertEqual(cmd.password, 'xxx')
def test_upload(self):
tmp = self.mkdtemp()
path = os.path.join(tmp, 'xxx')
self.write_file(path)
command, pyversion, filename = 'xxx', '2.6', path
dist_files = [(command, pyversion, filename)]
self.write_file(self.rc, PYPIRC_LONG_PASSWORD)
# lets run it
pkg_dir, dist = self.create_dist(dist_files=dist_files)
cmd = upload(dist)
cmd.show_response = 1
cmd.ensure_finalized()
cmd.run()
# what did we send ?
headers = dict(self.last_open.req.headers)
self.assertEqual(headers['Content-length'], '2162')
content_type = headers['Content-type']
self.assertTrue(content_type.startswith('multipart/form-data'))
self.assertEqual(self.last_open.req.get_method(), 'POST')
expected_url = 'https://upload.pypi.org/legacy/'
self.assertEqual(self.last_open.req.get_full_url(), expected_url)
self.assertTrue(b'xxx' in self.last_open.req.data)
self.assertIn(b'protocol_version', self.last_open.req.data)
# The PyPI response body was echoed
results = self.get_logs(INFO)
self.assertEqual(results[-1], 75 * '-' + '\nxyzzy\n' + 75 * '-')
# bpo-32304: archives whose last byte was b'\r' were corrupted due to
# normalization intended for Mac OS 9.
def test_upload_correct_cr(self):
# content that ends with \r should not be modified.
tmp = self.mkdtemp()
path = os.path.join(tmp, 'xxx')
self.write_file(path, content='yy\r')
command, pyversion, filename = 'xxx', '2.6', path
dist_files = [(command, pyversion, filename)]
self.write_file(self.rc, PYPIRC_LONG_PASSWORD)
# other fields that ended with \r used to be modified, now are
# preserved.
pkg_dir, dist = self.create_dist(
dist_files=dist_files,
description='long description\r'
)
cmd = upload(dist)
cmd.show_response = 1
cmd.ensure_finalized()
cmd.run()
headers = dict(self.last_open.req.headers)
self.assertEqual(headers['Content-length'], '2172')
self.assertIn(b'long description\r', self.last_open.req.data)
def test_upload_fails(self):
self.next_msg = "Not Found"
self.next_code = 404
self.assertRaises(DistutilsError, self.test_upload)
def METHOD_NAME(self):
tmp = self.mkdtemp()
path = os.path.join(tmp, 'xxx')
self.write_file(path)
dist_files = [('xxx', '2.6', path)] # command, pyversion, filename
self.write_file(self.rc, PYPIRC_LONG_PASSWORD)
pkg_dir, dist = self.create_dist(dist_files=dist_files)
tests = [
(OSError('oserror'), 'oserror', OSError),
(HTTPError('url', 400, 'httperror', {}, None),
'Upload failed (400): httperror', DistutilsError),
]
for exception, expected, raised_exception in tests:
with self.subTest(exception=type(exception).__name__):
with mock.patch('distutils.command.upload.urlopen',
new=mock.Mock(side_effect=exception)):
with self.assertRaises(raised_exception):
cmd = upload(dist)
cmd.ensure_finalized()
cmd.run()
results = self.get_logs(ERROR)
self.assertIn(expected, results[-1])
self.clear_logs()
def test_suite():
return unittest.makeSuite(uploadTestCase)
if __name__ == "__main__":
run_unittest(test_suite())
|
2,269 |
stub context
|
import json
from pathlib import Path
import pylint.lint
import pytest
from micropy.project.template import Template, TemplateProvider
@pytest.fixture
def METHOD_NAME(mock_mp_stubs):
stubs = list(mock_mp_stubs.stubs)[:3]
stub_paths = [stub.stubs for stub in stubs]
frozen_paths = [stub.frozen for stub in stubs]
fware_paths = [stub.firmware.frozen for stub in stubs]
ctx_paths = [*stub_paths, *frozen_paths, *fware_paths]
return (stubs, (stub_paths, frozen_paths, fware_paths), ctx_paths)
def test_vscode_template(METHOD_NAME, shared_datadir, tmp_path, mock_checks):
stubs, paths, ctx_paths = METHOD_NAME
prov = TemplateProvider(["vscode"])
ctx_datadir = tmp_path / "ctx_cata"
ctx_datadir.mkdir(exist_ok=True)
# Add test local path
ctx_local = ctx_datadir / "src" / "lib" / "somelib"
ctx_local.mkdir(parents=True)
ctx_absolute = Path("/fakedir/notinprojectdir/somelib")
ctx_local_paths = [ctx_local, ctx_absolute]
prov.render_to(
"vscode",
tmp_path,
stubs=stubs,
paths=ctx_paths,
datadir=ctx_datadir,
local_paths=ctx_local_paths,
)
expected_path = tmp_path / ".vscode" / "settings.json"
out_content = expected_path.read_text()
print(out_content)
# Get rid of comments
with expected_path.open() as f:
lines = [line.strip() for line in f.readlines() if line]
valid = [line for line in lines if "//" not in line[:2]]
# Valid JSON?
expect_paths = [str(p.relative_to(tmp_path)) for p in ctx_paths]
expect_paths.append(str(ctx_local.relative_to(tmp_path))) # add local path (should be relative)
# local path outside of project dir (must be absolute)
expect_paths.append(str(ctx_absolute.absolute()))
content = json.loads("\n".join(valid))
assert sorted(expect_paths) == sorted(content["python.autoComplete.extraPaths"])
assert expected_path.exists()
# Test Update
ctx_paths.append(tmp_path / "foobar" / "foo.py")
prov.update(
"vscode",
tmp_path,
stubs=stubs,
paths=ctx_paths,
datadir=ctx_datadir,
local_paths=ctx_local_paths,
)
content = json.loads(expected_path.read_text())
expect_paths.append(str((tmp_path / "foobar" / "foo.py").relative_to(tmp_path)))
assert sorted(expect_paths) == sorted(content["python.autoComplete.extraPaths"])
# Test update with missing file
expected_path.unlink() # delete file
prov.update("vscode", tmp_path, stubs=stubs, paths=ctx_paths, datadir=ctx_datadir)
assert expected_path.exists()
def test_pylint_template(METHOD_NAME, tmp_path):
def test_pylint_load():
try:
lint_args = ["--rcfile", str(expected_path.absolute())]
pylint.lint.Run(lint_args)
except SyntaxError:
pytest.fail(str(SyntaxError))
except: # noqa
pass
stubs, paths, ctx_paths = METHOD_NAME
ctx_datadir = tmp_path / "ctx_cata"
ctx_datadir.mkdir(exist_ok=True)
prov = TemplateProvider(["pylint"])
prov.render_to("pylint", tmp_path, stubs=stubs, paths=ctx_paths, datadir=ctx_datadir)
expected_path = tmp_path / ".pylintrc"
assert expected_path.exists()
# Will Pylint load it?
test_pylint_load()
# Test Update
new_path = tmp_path / ".micropy" / "foobar" / "foo"
ctx_paths.append(new_path)
prov.update("pylint", tmp_path, stubs=stubs, paths=ctx_paths, datadir=ctx_datadir)
init_hook = expected_path.read_text().splitlines(True)[2]
hook_imports = init_hook.split(",")
hook_path = str(Path(".micropy/foobar/foo")).replace(
"\\", "/"
    )  # pylint on Windows accepts forward slashes, so backslashes are not needed
assert f' "{hook_path}"' in hook_imports
test_pylint_load()
def test_generic_template(mock_mp_stubs, tmp_path):
prov = TemplateProvider(["bootstrap", "pymakr"])
prov.render_to("boot", tmp_path)
expected_path = tmp_path / "src" / "boot.py"
assert expected_path.exists()
expected_content = (prov.TEMPLATE_DIR / "src" / "boot.py").read_text()
out_content = expected_path.read_text()
print(out_content)
assert expected_content.strip() == out_content.strip()
templ = prov.get("boot")
assert templ.update(tmp_path) is None
def test_no_context():
class BadTemplate(Template):
def __init__(self, template, **kwargs):
return super().__init__(template, **kwargs)
with pytest.raises(NotImplementedError):
x = BadTemplate("abc")
print(x.context)
|
2,270 |
to json
|
# coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\
The version of the OpenAPI document: 1.0.0
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
""" # noqa: E501
from __future__ import annotations
import pprint
import re # noqa: F401
import json
from typing import Any, Dict, Optional
from pydantic import BaseModel, Field, StrictFloat, StrictInt, StrictStr, validator
from petstore_api.models.outer_enum import OuterEnum
from petstore_api.models.outer_enum_default_value import OuterEnumDefaultValue
from petstore_api.models.outer_enum_integer import OuterEnumInteger
from petstore_api.models.outer_enum_integer_default_value import OuterEnumIntegerDefaultValue
class EnumTest(BaseModel):
"""
EnumTest
"""
enum_string: Optional[StrictStr] = None
enum_string_required: StrictStr = Field(...)
enum_integer_default: Optional[StrictInt] = 5
enum_integer: Optional[StrictInt] = None
enum_number: Optional[StrictFloat] = None
outer_enum: Optional[OuterEnum] = Field(None, alias="outerEnum")
outer_enum_integer: Optional[OuterEnumInteger] = Field(None, alias="outerEnumInteger")
outer_enum_default_value: Optional[OuterEnumDefaultValue] = Field(None, alias="outerEnumDefaultValue")
outer_enum_integer_default_value: Optional[OuterEnumIntegerDefaultValue] = Field(None, alias="outerEnumIntegerDefaultValue")
additional_properties: Dict[str, Any] = {}
__properties = ["enum_string", "enum_string_required", "enum_integer_default", "enum_integer", "enum_number", "outerEnum", "outerEnumInteger", "outerEnumDefaultValue", "outerEnumIntegerDefaultValue"]
@validator('enum_string')
def enum_string_validate_enum(cls, value):
"""Validates the enum"""
if value is None:
return value
if value not in ('UPPER', 'lower', ''):
raise ValueError("must be one of enum values ('UPPER', 'lower', '')")
return value
@validator('enum_string_required')
def enum_string_required_validate_enum(cls, value):
"""Validates the enum"""
if value not in ('UPPER', 'lower', ''):
raise ValueError("must be one of enum values ('UPPER', 'lower', '')")
return value
@validator('enum_integer_default')
def enum_integer_default_validate_enum(cls, value):
"""Validates the enum"""
if value is None:
return value
if value not in (1, 5, 14):
raise ValueError("must be one of enum values (1, 5, 14)")
return value
@validator('enum_integer')
def enum_integer_validate_enum(cls, value):
"""Validates the enum"""
if value is None:
return value
if value not in (1, -1):
raise ValueError("must be one of enum values (1, -1)")
return value
@validator('enum_number')
def enum_number_validate_enum(cls, value):
"""Validates the enum"""
if value is None:
return value
if value not in (1.1, -1.2):
raise ValueError("must be one of enum values (1.1, -1.2)")
return value
class Config:
"""Pydantic configuration"""
allow_population_by_field_name = True
validate_assignment = True
def to_str(self) -> str:
"""Returns the string representation of the model using alias"""
return pprint.pformat(self.dict(by_alias=True))
def METHOD_NAME(self) -> str:
"""Returns the JSON representation of the model using alias"""
return json.dumps(self.to_dict())
@classmethod
def from_json(cls, json_str: str) -> EnumTest:
"""Create an instance of EnumTest from a JSON string"""
return cls.from_dict(json.loads(json_str))
def to_dict(self):
"""Returns the dictionary representation of the model using alias"""
_dict = self.dict(by_alias=True,
exclude={
"additional_properties"
},
exclude_none=True)
        # put key-value pairs from additional_properties into the top level
if self.additional_properties is not None:
for _key, _value in self.additional_properties.items():
_dict[_key] = _value
# set to None if outer_enum (nullable) is None
# and __fields_set__ contains the field
if self.outer_enum is None and "outer_enum" in self.__fields_set__:
_dict['outerEnum'] = None
return _dict
@classmethod
def from_dict(cls, obj: dict) -> EnumTest:
"""Create an instance of EnumTest from a dict"""
if obj is None:
return None
if not isinstance(obj, dict):
return EnumTest.parse_obj(obj)
_obj = EnumTest.parse_obj({
"enum_string": obj.get("enum_string"),
"enum_string_required": obj.get("enum_string_required"),
"enum_integer_default": obj.get("enum_integer_default") if obj.get("enum_integer_default") is not None else 5,
"enum_integer": obj.get("enum_integer"),
"enum_number": obj.get("enum_number"),
"outer_enum": obj.get("outerEnum"),
"outer_enum_integer": obj.get("outerEnumInteger"),
"outer_enum_default_value": obj.get("outerEnumDefaultValue"),
"outer_enum_integer_default_value": obj.get("outerEnumIntegerDefaultValue")
})
# store additional fields in additional_properties
for _key in obj.keys():
if _key not in cls.__properties:
_obj.additional_properties[_key] = obj.get(_key)
return _obj
|
2,271 |
sv init
|
# This file is part of project Sverchok. It's copyrighted by the contributors
# recorded in the version control history of the file, available from
# its original location https://github.com/nortikin/sverchok/commit/master
#
# SPDX-License-Identifier: GPL3
# License-Filename: LICENSE
import math
from random import random
import bpy
from bpy.props import StringProperty, BoolProperty, EnumProperty
import bmesh
from mathutils import Vector
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import updateNode
from sverchok.utils.sv_viewer_utils import matrix_sanitizer
def wipe_object(ob):
''' this removes all geometry '''
# this can be done with the new `ob.data.clear_geometry()` i think..
bm = bmesh.new()
bm.to_mesh(ob.data)
bm.free()
class SvDupliInstancesMK4(SverchCustomTreeNode, bpy.types.Node):
'''Copy by Dupli Faces'''
bl_idname = 'SvDupliInstancesMK4'
bl_label = 'Dupli instancer mk4'
bl_icon = 'OUTLINER_OB_EMPTY'
sv_icon = 'SV_DUPLI_INSTANCER'
    replacement_nodes = [('SvDupliInstancesMK5', None, None)]
def set_child_quota(self, context):
# was used for string child property
updateNode(self, context)
# post update check
if self.auto_release:
parent = self.name_node_generated_parent
if parent:
for obj in bpy.data.objects[parent].children:
if not obj.name == self.name_child:
obj.parent = None
name_node_generated_parent: StringProperty(
description="name of the parent that this node generates",
update=updateNode)
scale: BoolProperty(default=False,
description="scale children", update=updateNode)
auto_release: BoolProperty(update=set_child_quota)
modes = [
("VERTS", "Verts", "On vertices", "", 1),
("FACES", "Polys", "On polygons", "", 2)]
mode: EnumProperty(items=modes, default='VERTS', update=updateNode)
name_child: StringProperty(description="named child")
def METHOD_NAME(self, context):
#self.inputs.new("SvObjectSocket", "parent")
self.inputs.new("SvObjectSocket", "child")
self.inputs.new("SvMatrixSocket", "matr/vert")
self.name_node_generated_parent = 'parent'
def draw_buttons(self, context, layout):
layout.prop(self, "mode", expand=True)
col = layout.column(align=True)
col.prop(self, 'name_node_generated_parent', text='', icon='EDITMODE_HLT')
col.prop(self, 'scale', text='Scale children', toggle=True)
col.prop(self, 'auto_release', text='One Object only', toggle=True)
def draw_buttons_ext(self, context, layout):
col = layout.column()
try:
ob = bpy.data.objects.get(self.name_node_generated_parent)
if ob.instance_type == "FACES":
row = col.row()
row.prop(ob, "show_instancer_for_viewport", text="Display Instancer") # bool
row2 = col.row()
row2.prop(ob, "show_instancer_for_render", text="Render Instancer") # bool
row3 = col.row()
row3.prop(self, "scale", text="Scale by Face Size") # bool
row4 = col.row()
row4.enabled = ob.use_instance_faces_scale
row4.prop(ob, "instance_faces_scale", text="Factor") #float
finally:
pass
def process(self):
#objectsP = self.inputs['parent'].sv_get(default=None)
objectsC = self.inputs['child'].sv_get()
transforms = self.inputs['matr/vert'].sv_get()
objects = bpy.data.objects
#if any([x.name == self.name_node_generated_parent for x in objects]):
ob = objects.get(self.name_node_generated_parent)
#self.name_node_generated_parent = ob.name
if ob:
wipe_object(ob)
# minimum requirements.
if (not transforms) and (not objectsC):
if ob:
ob.instance_type = 'NONE'
return
if not ob:
name = self.name_node_generated_parent
mesh = bpy.data.meshes.new(name + '_mesh')
ob = bpy.data.objects.new(name, mesh)
bpy.context.scene.collection.objects.link(ob)
# at this point there's a reference to an ob, and the mesh is empty.
child = self.inputs['child'].sv_get()[0]
#print('checking',child)
if transforms and transforms[0]:
sin, cos = math.sin, math.cos
theta = 2 * math.pi / 3
thetb = theta * 2
ofs = 0.5 * math.pi + theta
A = Vector((cos(0 + ofs), sin(0 + ofs), 0))
B = Vector((cos(theta + ofs), sin(theta + ofs), 0))
C = Vector((cos(thetb + ofs), sin(thetb + ofs), 0))
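            # A, B and C are the corners of a unit equilateral triangle centred
            # on the origin; in "FACES" mode one such triangle is emitted per
            # incoming matrix, so the instancer places a copy of the child on
            # every generated face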
if self.mode == "FACES":
verts = []
add_verts = verts.extend
for M in transforms:
add_verts([(M @ A), (M @ B), (M @ C)])
faces = [[i, i + 1, i + 2] for i in range(0, len(transforms) * 3, 3)]
elif self.mode == "VERTS":
verts = [M.to_translation() for M in transforms]
faces = []
ob.data.from_pydata(verts, [], faces)
ob.instance_type = self.mode
ob.use_instance_faces_scale = self.scale
child.parent = ob
def register():
bpy.utils.register_class(SvDupliInstancesMK4)
def unregister():
bpy.utils.unregister_class(SvDupliInstancesMK4)
|
2,272 |
test get secret in file suffixes
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import os
import stat
from twisted.internet import defer
from twisted.python.filepath import FilePath
from twisted.trial import unittest
from buildbot.secrets.providers.file import SecretInAFile
from buildbot.test.util.config import ConfigErrorsMixin
from buildbot.util.misc import writeLocalFile
class TestSecretInFile(ConfigErrorsMixin, unittest.TestCase):
def createTempDir(self, dirname):
tempdir = FilePath(self.mktemp())
tempdir.createDirectory()
return tempdir.path
def createFileTemp(self, tempdir, filename, text="", chmodRights=0o700):
file_path = os.path.join(tempdir, filename)
writeLocalFile(file_path, text, chmodRights)
return file_path
@defer.inlineCallbacks
def setUp(self):
self.tmp_dir = self.createTempDir("temp")
self.filepath = self.createFileTemp(self.tmp_dir, "tempfile.txt",
text="key value\n")
self.srvfile = SecretInAFile(self.tmp_dir)
yield self.srvfile.startService()
@defer.inlineCallbacks
def tearDown(self):
yield self.srvfile.stopService()
def testCheckConfigSecretInAFileService(self):
self.assertEqual(self.srvfile.name, "SecretInAFile")
self.assertEqual(self.srvfile._dirname, self.tmp_dir)
def testCheckConfigErrorSecretInAFileService(self):
if os.name != "posix":
            self.skipTest("Permission checks only work on posix systems")
filepath = self.createFileTemp(self.tmp_dir, "tempfile2.txt",
chmodRights=stat.S_IRGRP)
expctd_msg_error = " on file tempfile2.txt are too " \
"open. It is required that your secret files are" \
" NOT accessible by others!"
with self.assertRaisesConfigError(expctd_msg_error):
self.srvfile.checkConfig(self.tmp_dir)
os.remove(filepath)
@defer.inlineCallbacks
def testCheckConfigfileExtension(self):
filepath = self.createFileTemp(self.tmp_dir, "tempfile2.ini",
text="test suffix",
chmodRights=stat.S_IRWXU)
filepath2 = self.createFileTemp(self.tmp_dir, "tempfile2.txt",
text="some text",
chmodRights=stat.S_IRWXU)
yield self.srvfile.reconfigService(self.tmp_dir, suffixes=[".ini"])
self.assertEqual(self.srvfile.get("tempfile2"), "test suffix")
self.assertEqual(self.srvfile.get("tempfile3"), None)
os.remove(filepath)
os.remove(filepath2)
@defer.inlineCallbacks
def testReconfigSecretInAFileService(self):
otherdir = self.createTempDir("temp2")
yield self.srvfile.reconfigService(otherdir)
self.assertEqual(self.srvfile.name, "SecretInAFile")
self.assertEqual(self.srvfile._dirname, otherdir)
def testGetSecretInFile(self):
value = self.srvfile.get("tempfile.txt")
self.assertEqual(value, "key value")
@defer.inlineCallbacks
def METHOD_NAME(self):
yield self.srvfile.reconfigService(self.tmp_dir, suffixes=[".txt"])
value = self.srvfile.get("tempfile")
self.assertEqual(value, "key value")
def testGetSecretInFileNotFound(self):
value = self.srvfile.get("tempfile2.txt")
self.assertEqual(value, None)
@defer.inlineCallbacks
def testGetSecretInFileNoStrip(self):
yield self.srvfile.reconfigService(self.tmp_dir, strip=False)
value = self.srvfile.get("tempfile.txt")
self.assertEqual(value, "key value\n")
|
2,273 |
set str
|
class AttributeParser:
"""
:type string: str
:type current_pos: int
"""
def __init__(self, string=""):
self.str = string
self.current_pos = 0
def METHOD_NAME(self, string):
self.str = string
return self
def current_char(self):
return self.str[self.current_pos]
def get_char(self):
result = self.current_char()
self.current_pos += 1
return result
def has_chars(self):
return self.current_pos < len(self.str)
def parse_code_type(self):
if not self.has_chars() or self.current_char() != "`":
return {}
while self.has_chars() and self.current_char() == "`":
self.current_pos += 1
self.eat_whitespace()
start = self.current_pos
while self.has_chars() and self.current_char() not in (" ", "{"):
self.current_pos += 1
end = self.current_pos
self.current_pos = 0
if start != end:
return {"code_lang": self.str[start:end]}
return {}
def get_attributes(self):
code_info = self.parse_code_type()
potential_start_indices = [
i
for i, x in enumerate(self.str)
if x == AttributeParser.attr_list_start_char()
]
for i in potential_start_indices:
tokens = self.try_get_attributes(i + 1)
if tokens is not None:
return tokens | code_info, i
return code_info, None
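    # Illustrative sketch (the input string below is hypothetical): for
    # '```python {#task1 .hidden}', parse_code_type() yields
    # {"code_lang": "python"}, and get_attributes() then merges in the parsed
    # attribute list, returning a dict with "code_lang", "taskId" and
    # "classes" together with 10, the index of the opening "{".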
def try_get_attributes(self, pos):
self.current_pos = pos
tokens = {}
end_found = False
while self.has_chars():
self.eat_whitespace()
if not self.has_chars():
break
if self.try_parse_attr_list_end_char():
end_found = True
self.get_char()
break
token = self.try_parse_hash()
if token:
if "taskId" in tokens:
break
tokens["taskId"] = token
continue
token = self.try_parse_class()
if token:
if not tokens.get("classes"):
tokens["classes"] = []
tokens["classes"].append(token)
continue
key, val = self.try_parse_keyvaluepair()
if key:
tokens[key] = val
continue
break
if end_found:
self.eat_whitespace()
return None if self.has_chars() else tokens
else:
return None
@staticmethod
def attr_list_start_char():
return "{"
def try_parse_attr_list_end_char(self):
if self.current_char() == self.attr_list_end_char():
return True
return False
def try_parse_helper(self, char):
if self.current_char() != char:
return None
find_pos = self.current_pos + 1
i = self.str.find(" ", find_pos)
i2 = self.str.find(self.attr_list_end_char(), find_pos)
if 0 <= i2 < i or i < 0:
i = i2
if i < 0:
return None
value = self.str[find_pos:i]
self.current_pos = i
return value
def try_parse_hash(self):
return self.try_parse_helper("#")
def try_parse_class(self):
return self.try_parse_helper(".")
def try_parse_keyvaluepair(self):
key_name = ""
saved_pos = self.current_pos
while True:
if not self.has_chars():
self.current_pos = saved_pos
return None, None
curr = self.get_char()
if curr == " ":
self.current_pos = saved_pos
return None, None
if curr == "=":
break
key_name += curr
if key_name == "":
self.current_pos = saved_pos
return None, None
value = ""
quote_enabled = False
if self.has_chars() and self.current_char() == '"':
quote_enabled = True
self.get_char()
while self.has_chars():
curr = self.get_char()
if not quote_enabled:
if curr == " ":
break
elif curr == self.attr_list_end_char():
self.current_pos -= 1
break
if quote_enabled and curr == "\\":
curr = self.get_char()
if curr != '"' and curr != "\\":
self.current_pos = saved_pos
return None, None
elif quote_enabled and curr == '"':
return key_name, value
value += curr
if not quote_enabled:
return key_name, value
else:
self.current_pos = saved_pos
return None, None
def eat_whitespace(self):
while self.has_chars() and self.current_char() == " ":
self.current_pos += 1
@staticmethod
def attr_list_end_char():
return "}"
class AttributeParserException(Exception):
pass
|
2,274 |
path
|
#
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
from datetime import datetime, timedelta
from typing import Any, Iterable, List, Mapping, MutableMapping, Optional, Tuple
import requests
from airbyte_cdk.sources import AbstractSource
from airbyte_cdk.sources.streams import Stream
from airbyte_cdk.sources.streams.http import HttpStream
from airbyte_cdk.sources.streams.http.auth import NoAuth
class ExchangeRates(HttpStream):
url_base = "https://api.apilayer.com/exchangerates_data/"
cursor_field = "date"
primary_key = "date"
def __init__(self, config: Mapping[str, Any], start_date: datetime, **kwargs):
super().__init__()
self.base = config["base"]
self.access_key = config["access_key"]
self.start_date = start_date
self._cursor_value = None
def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
# The API does not offer pagination, so we return None to indicate there are no more pages in the response
return None
def METHOD_NAME(
self, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
) -> str:
return stream_slice["date"]
def request_headers(
self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
) -> Mapping[str, Any]:
# The api requires that we include apikey as a header so we do that in this method
return {"apikey": self.apikey}
def request_params(
self,
stream_state: Mapping[str, Any],
stream_slice: Mapping[str, Any] = None,
next_page_token: Mapping[str, Any] = None,
) -> MutableMapping[str, Any]:
# The api requires that we include the base currency as a query param so we do that in this method
return {"base": self.base}
def parse_response(
self,
response: requests.Response,
stream_state: Mapping[str, Any],
stream_slice: Mapping[str, Any] = None,
next_page_token: Mapping[str, Any] = None,
) -> Iterable[Mapping]:
# The response is a simple JSON whose schema matches our stream's schema exactly,
# so we just return a list containing the response
return [response.json()]
def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]) -> Mapping[str, any]:
# This method is called once for each record returned from the API to compare the cursor field value in that record with the current state
# we then return an updated state object. If this is the first time we run a sync or no state was passed, current_stream_state will be None.
if current_stream_state is not None and "date" in current_stream_state:
current_parsed_date = datetime.strptime(current_stream_state["date"], "%Y-%m-%d")
latest_record_date = datetime.strptime(latest_record["date"], "%Y-%m-%d")
return {"date": max(current_parsed_date, latest_record_date).strftime("%Y-%m-%d")}
else:
return {"date": self.start_date.strftime("%Y-%m-%d")}
def _chunk_date_range(self, start_date: datetime) -> List[Mapping[str, any]]:
"""
Returns a list of each day between the start date and now.
The return value is a list of dicts {'date': date_string}.
"""
dates = []
while start_date < datetime.now():
self.logger.info(start_date.strftime("%Y-%m-%d"))
dates.append({"date": start_date.strftime("%Y-%m-%d")})
start_date += timedelta(days=1)
return dates
def stream_slices(
self, sync_mode, cursor_field: List[str] = None, stream_state: Mapping[str, Any] = None
) -> Iterable[Optional[Mapping[str, any]]]:
start_date = datetime.strptime(stream_state["date"], "%Y-%m-%d") if stream_state and "date" in stream_state else self.start_date
return self._chunk_date_range(start_date)
class SourcePythonHttpTutorial(AbstractSource):
def check_connection(self, logger, config) -> Tuple[bool, any]:
accepted_currencies = {
"USD",
"JPY",
"BGN",
"CZK",
"DKK",
} # there are more currencies but let's assume these are the only allowed ones
input_currency = config["base"]
if input_currency not in accepted_currencies:
return False, f"Input currency {input_currency} is invalid. Please input one of the following currencies: {accepted_currencies}"
else:
return True, None
def streams(self, config: Mapping[str, Any]) -> List[Stream]:
# NoAuth just means there is no authentication required for this API. It's only included for completeness
# of the example, but if you don't need authentication, you don't need to pass an authenticator at all.
# Other authenticators are available for API token-based auth and Oauth2.
auth = NoAuth()
# Parse the date from a string into a datetime object
start_date = datetime.strptime(config["start_date"], "%Y-%m-%d")
return [ExchangeRates(authenticator=auth, config=config, start_date=start_date)]
|
2,275 |
try update access time
|
import argparse
import filecmp
import logging
import os
import pprint
import shutil
import subprocess
import sys
import time
from pathlib import PosixPath, WindowsPath
from common import OperatingSystem
from common.utils.argparse_types import positive_int
from common.utils.environment import get_os
from infection_monkey.utils.commands import (
build_monkey_commandline_parameters,
get_monkey_commandline_linux,
get_monkey_commandline_windows,
)
from infection_monkey.utils.file_utils import mark_file_for_deletion_on_windows
# Linux doesn't have WindowsError
try:
WindowsError
except NameError:
# noinspection PyShadowingBuiltins
WindowsError = IOError
logger = logging.getLogger(__name__)
MOVEFILE_DELAY_UNTIL_REBOOT = 4
def file_exists_at_destination(source_path, destination_path) -> bool:
try:
return filecmp.cmp(source_path, destination_path)
except OSError:
return False
def get_date_reference_path():
if get_os() == OperatingSystem.WINDOWS:
return os.path.expandvars(WindowsPath(r"%windir%\system32\kernel32.dll"))
else:
return PosixPath("/bin/sh")
class MonkeyDrops(object):
def __init__(self, args):
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument("-p", "--parent")
arg_parser.add_argument("-s", "--servers", type=lambda arg: arg.strip().split(","))
arg_parser.add_argument("-d", "--depth", type=positive_int, default=0)
arg_parser.add_argument("-l", "--location")
arg_parser.add_argument("-vp", "--vulnerable-port")
self.opts = arg_parser.parse_args(args)
self._config = {
"source_path": os.path.abspath(sys.argv[0]),
"destination_path": self.opts.location,
}
logger.debug("Dropper is running with config:\n%s", pprint.pformat(self._config))
def start(self):
if self._config["destination_path"] is None:
logger.error("No destination path specified")
return False
source_path = self._config["source_path"]
destination_path = self._config["destination_path"]
# we copy/move only in case path is different
file_exists = file_exists_at_destination(source_path, destination_path)
if not file_exists and os.path.exists(destination_path):
os.remove(destination_path)
if (
not file_exists
and not self._move_file(source_path, destination_path)
and not self._copy_file(source_path, destination_path)
):
return False
MonkeyDrops.METHOD_NAME(destination_path)
monkey_process = self._run_monkey(destination_path)
time.sleep(3)
if monkey_process.poll() is not None:
logger.warning("Seems like monkey died too soon")
def _move_file(self, source_path, destination_path) -> bool:
try:
shutil.move(source_path, destination_path)
logger.info(f"Moved source file '{source_path}' into '{destination_path}'")
except (WindowsError, IOError, OSError) as exc:
logger.debug(
f"Error moving source file '{source_path}' into '{destination_path}': {exc}"
)
return False
return True
def _copy_file(self, source_path, destination_path) -> bool:
try:
shutil.copy(source_path, destination_path)
logger.info(f"Copied source file '{source_path}' into '{destination_path}'")
except (WindowsError, IOError, OSError) as exc:
logger.debug(
f"Error copying source file '{source_path}' into '{destination_path}': {exc}"
)
return False
return True
@staticmethod
def METHOD_NAME(destination_path):
dropper_date_reference_path = get_date_reference_path()
try:
ref_stat = os.stat(dropper_date_reference_path)
except OSError:
logger.warning(
f"Cannot set reference date using '{dropper_date_reference_path}', file not found"
)
else:
try:
os.utime(destination_path, (ref_stat.st_atime, ref_stat.st_mtime))
except OSError:
logger.warning("Cannot set reference date to destination file")
def _run_monkey(self, destination_path) -> subprocess.Popen:
monkey_options = build_monkey_commandline_parameters(
parent=self.opts.parent,
servers=self.opts.servers,
depth=self.opts.depth,
location=None,
)
if get_os() == OperatingSystem.WINDOWS:
from win32process import DETACHED_PROCESS
monkey_commandline = get_monkey_commandline_windows(destination_path, monkey_options)
monkey_process = subprocess.Popen(
monkey_commandline,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
creationflags=DETACHED_PROCESS,
)
else:
            # On Linux, we need to change the working directory first, which is
            # done using the `cwd` argument in `subprocess.Popen` below
monkey_commandline = get_monkey_commandline_linux(destination_path, monkey_options)
monkey_process = subprocess.Popen(
monkey_commandline,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
cwd="/".join(destination_path.split("/")[0:-1]),
)
logger.info(
f"Executed monkey process (PID={monkey_process.pid}) "
f"with command line: {' '.join(monkey_commandline)}"
)
return monkey_process
def cleanup(self):
logger.info("Cleaning up the dropper")
source_path = self._config["source_path"]
try:
if source_path.lower() != self._config["destination_path"].lower() and os.path.exists(
source_path
):
self._remove_file(source_path)
logger.info("Dropper cleanup complete")
except AttributeError:
logger.error("Invalid configuration options. Failing")
def _remove_file(self, path):
try:
os.remove(path)
except Exception as exc:
logger.debug(f"Error removing source file '{path}': {exc}")
# mark the file for removal on next boot
mark_file_for_deletion_on_windows(WindowsPath(path))
|
2,276 |
get lookup
|
#
# This file is part of pretix (Community Edition).
#
# Copyright (C) 2014-2020 Raphael Michel and contributors
# Copyright (C) 2020-2021 rami.io GmbH and contributors
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation in version 3 of the License.
#
# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are
# applicable granting you additional permissions and placing additional restrictions on your usage of this software.
# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive
# this file, see <https://pretix.eu/about/en/license>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <https://www.gnu.org/licenses/>.
#
from django.core import exceptions
from django.db.models import TextField, lookups as builtin_lookups
from django.utils.translation import gettext_lazy as _
from rest_framework import serializers
from rest_framework.exceptions import ValidationError
DELIMITER = "\x1F"
class MultiStringField(TextField):
default_error_messages = {
'delimiter_found': _('No value can contain the delimiter character.')
}
def __init__(self, verbose_name=None, name=None, delimiter=DELIMITER, **kwargs):
self.delimiter = delimiter
super().__init__(verbose_name, name, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
return name, path, args, kwargs
def to_python(self, value):
if isinstance(value, (list, tuple)):
return value
elif value:
return [v for v in value.split(self.delimiter) if v]
else:
return []
def get_prep_value(self, value):
if isinstance(value, (list, tuple)):
return self.delimiter + self.delimiter.join(value) + self.delimiter
elif value is None:
if self.null:
return None
else:
return ""
raise TypeError("Invalid data type passed.")
def get_prep_lookup(self, lookup_type, value): # NOQA
raise TypeError('Lookups on multi strings are currently not supported.')
def from_db_value(self, value, expression, connection):
if value:
return [v for v in value.split(self.delimiter) if v]
else:
return []
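    # Illustrative round-trip (assuming the default "\x1F" delimiter):
    # get_prep_value(["a", "b"]) is stored as "\x1Fa\x1Fb\x1F", and
    # from_db_value splits that back into ["a", "b"], dropping the empty
    # fragments produced by the leading and trailing delimiters.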
def validate(self, value, model_instance):
super().validate(value, model_instance)
for l in value:
if self.delimiter in l:
raise exceptions.ValidationError(
self.error_messages['delimiter_found'],
code='delimiter_found',
)
def METHOD_NAME(self, lookup_name):
if lookup_name == 'contains':
return make_multistring_contains_lookup(self.delimiter)
elif lookup_name == 'icontains':
return make_multistring_icontains_lookup(self.delimiter)
elif lookup_name == 'isnull':
return builtin_lookups.IsNull
raise NotImplementedError(
"Lookup '{}' doesn't work with MultiStringField".format(lookup_name),
)
def make_multistring_contains_lookup(delimiter):
class Cls(builtin_lookups.Contains):
def process_rhs(self, qn, connection):
sql, params = super().process_rhs(qn, connection)
params[0] = "%" + delimiter + params[0][1:-1] + delimiter + "%"
return sql, params
return Cls
def make_multistring_icontains_lookup(delimiter):
class Cls(builtin_lookups.IContains):
def process_rhs(self, qn, connection):
sql, params = super().process_rhs(qn, connection)
params[0] = "%" + delimiter + params[0][1:-1] + delimiter + "%"
return sql, params
return Cls
class MultiStringSerializer(serializers.Field):
def __init__(self, **kwargs):
self.allow_blank = kwargs.pop('allow_blank', False)
super().__init__(**kwargs)
def to_representation(self, value):
return value
def to_internal_value(self, data):
if isinstance(data, list):
return data
else:
raise ValidationError('Invalid data type.')
serializers.ModelSerializer.serializer_field_mapping[MultiStringField] = MultiStringSerializer
|
2,277 |
get command result
|
########################################################################
# NOKIA IXS7215
#
# Module contains an implementation of SONiC Platform Base API and
# provides the Components' (e.g., BIOS, CPLD, FPGA, etc.) available in
# the platform
#
########################################################################
try:
import sys
import os
import time
import subprocess
import ntpath
from sonic_platform_base.component_base import ComponentBase
from sonic_py_common.general import getstatusoutput_noshell, getstatusoutput_noshell_pipe
except ImportError as e:
raise ImportError(str(e) + "- required module not found")
if sys.version_info[0] < 3:
import commands as cmd
else:
import subprocess as cmd
CPLD_DIR = "/sys/bus/i2c/devices/0-0041/"
class Component(ComponentBase):
"""Nokia platform-specific Component class"""
CHASSIS_COMPONENTS = [
["System-CPLD", "Used for managing SFPs, LEDs, PSUs and FANs "],
["U-Boot", "Performs initialization during booting"],
]
CPLD_UPDATE_COMMAND = ['./cpldupd_A1', '']
def __init__(self, component_index):
self.index = component_index
self.name = self.CHASSIS_COMPONENTS[self.index][0]
self.description = self.CHASSIS_COMPONENTS[self.index][1]
def METHOD_NAME(self, cmdline):
try:
            proc = subprocess.Popen(cmdline.split(), stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT,
                                    universal_newlines=True)
stdout = proc.communicate()[0]
proc.wait()
result = stdout.rstrip('\n')
except OSError:
result = None
return result
def _read_sysfs_file(self, sysfs_file):
        # On a successful read, returns the value read from the given
        # sysfs_file; on failure returns 'ERR'
rv = 'ERR'
if (not os.path.isfile(sysfs_file)):
return rv
try:
with open(sysfs_file, 'r') as fd:
rv = fd.read()
except Exception as e:
rv = 'ERR'
rv = rv.rstrip('\r\n')
rv = rv.lstrip(" ")
return rv
def _write_sysfs_file(self, sysfs_file, value):
        # On a successful write, writes the given value to the given
        # sysfs_file; on failure returns 'ERR'
rv = 'ERR'
if (not os.path.isfile(sysfs_file)):
return rv
try:
with open(sysfs_file, 'w') as fd:
rv = fd.write(str(value))
except Exception as e:
rv = 'ERR'
# Ensure that the write operation has succeeded
if (int(self._read_sysfs_file(sysfs_file)) != value ):
time.sleep(3)
if (int(self._read_sysfs_file(sysfs_file)) != value ):
rv = 'ERR'
return rv
def _get_cpld_version(self, cpld_number):
cpld_version = self._read_sysfs_file(CPLD_DIR+"cpldversion")
return str(int(cpld_version, 16))
def get_name(self):
"""
Retrieves the name of the component
Returns:
A string containing the name of the component
"""
return self.name
def get_model(self):
"""
Retrieves the part number of the component
Returns:
string: Part number of component
"""
return 'NA'
def get_serial(self):
"""
Retrieves the serial number of the component
Returns:
string: Serial number of component
"""
return 'NA'
def get_presence(self):
"""
Retrieves the presence of the component
Returns:
bool: True if present, False if not
"""
return True
def get_status(self):
"""
Retrieves the operational status of the component
Returns:
bool: True if component is operating properly, False if not
"""
return True
def get_position_in_parent(self):
"""
Retrieves 1-based relative physical position in parent device.
Returns:
integer: The 1-based relative physical position in parent
device or -1 if cannot determine the position
"""
return -1
def is_replaceable(self):
"""
Indicate whether component is replaceable.
Returns:
bool: True if it is replaceable.
"""
return False
def get_description(self):
"""
Retrieves the description of the component
Returns:
A string containing the description of the component
"""
return self.description
def get_firmware_version(self):
"""
Retrieves the firmware version of the component
Returns:
A string containing the firmware version of the component
"""
if self.index == 0:
return self._get_cpld_version(self.index)
if self.index == 1:
cmdstatus, uboot_version = cmd.getstatusoutput('grep --null-data ^U-Boot /dev/mtd0ro | cut -d" " -f2')
return uboot_version
def install_firmware(self, image_path):
"""
Installs firmware to the component
Args:
image_path: A string, path to firmware image
Returns:
A boolean, True if install was successful, False if not
"""
image_name = ntpath.basename(image_path)
print(" ixs-7215-A1 - install cpld {}".format(image_name))
# check whether the image file exists
if not os.path.isfile(image_path):
print("ERROR: the cpld image {} doesn't exist ".format(image_path))
return False
# check whether the cpld exe exists
if not os.path.isfile('/tmp/cpldupd_A1'):
print("ERROR: the cpld exe {} doesn't exist ".format('/tmp/cpldupd_A1'))
return False
self.CPLD_UPDATE_COMMAND[1] = image_name
success_flag = False
try:
subprocess.check_call(self.CPLD_UPDATE_COMMAND, stderr=subprocess.STDOUT)
success_flag = True
except subprocess.CalledProcessError as e:
print("ERROR: Failed to upgrade CPLD: rc={}".format(e.returncode))
if success_flag:
print("INFO: Refresh or power cycle is required to finish CPLD installation")
return success_flag
|
2,278 |
get ballot paper id
|
import urllib
from addressbase.models import Address
from data_finder.helpers import (
EveryElectionWrapper,
PostcodeError,
geocode_point_only,
)
from data_finder.views import LogLookUpMixin
from django.core.exceptions import ObjectDoesNotExist
from pollingstations.models import AdvanceVotingStation
from rest_framework import serializers
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from rest_framework.response import Response
from rest_framework.viewsets import ViewSet
from uk_geo_utils.helpers import Postcode
from .councils import CouncilDataSerializer
from .fields import PointField
from .mixins import parse_qs_to_python
from .pollingstations import PollingStationGeoSerializer
def get_bug_report_url(request, station_known):
if not station_known:
return None
return request.build_absolute_uri(
"/report_problem/?"
+ urllib.parse.urlencode({"source": "api", "source_url": request.path})
)
class AddressSerializer(serializers.HyperlinkedModelSerializer):
council = serializers.CharField(source="council_name")
polling_station_id = serializers.CharField()
class Meta:
model = Address
extra_kwargs = {"url": {"view_name": "address-detail", "lookup_field": "uprn"}}
fields = ("url", "address", "postcode", "council", "polling_station_id", "uprn")
class AdvanceVotingStationSerializer(serializers.ModelSerializer):
class Meta:
model = AdvanceVotingStation
fields = ("name", "address", "postcode", "location", "opening_times")
opening_times = serializers.SerializerMethodField()
def get_opening_times(self, obj: AdvanceVotingStation):
return obj.opening_times_table
class BallotSerializer(serializers.Serializer):
ballot_paper_id = serializers.SerializerMethodField()
ballot_title = serializers.SerializerMethodField()
poll_open_date = serializers.CharField(read_only=True)
elected_role = serializers.CharField(read_only=True, allow_null=True)
metadata = serializers.DictField(read_only=True, allow_null=True)
cancelled = serializers.BooleanField(read_only=True)
cancellation_reason = serializers.CharField(read_only=True, allow_null=True)
replaced_by = serializers.CharField(read_only=True, allow_null=True)
replaces = serializers.CharField(read_only=True, allow_null=True)
def METHOD_NAME(self, obj):
return obj["election_id"]
def get_ballot_title(self, obj):
return obj["election_title"]
class PostcodeResponseSerializer(serializers.Serializer):
polling_station_known = serializers.BooleanField(read_only=True)
postcode_location = PointField(read_only=True)
custom_finder = serializers.CharField(read_only=True)
advance_voting_station = AdvanceVotingStationSerializer(read_only=True)
council = CouncilDataSerializer(read_only=True)
polling_station = PollingStationGeoSerializer(read_only=True)
addresses = AddressSerializer(read_only=True, many=True)
report_problem_url = serializers.CharField(read_only=True)
metadata = serializers.DictField(read_only=True)
ballots = BallotSerializer(read_only=True, many=True)
class AddressViewSet(ViewSet, LogLookUpMixin):
permission_classes = [IsAuthenticatedOrReadOnly]
http_method_names = ["get", "post", "head", "options"]
lookup_field = "uprn"
serializer_class = PostcodeResponseSerializer
def get_object(self, **kwargs):
assert "uprn" in kwargs
return Address.objects.get(uprn=kwargs["uprn"])
def get_ee_wrapper(self, address, query_params):
kwargs = {}
query_params = parse_qs_to_python(query_params)
if include_current := query_params.get("include_current", False):
kwargs["include_current"] = any(include_current)
return EveryElectionWrapper(point=address.location, **kwargs)
def retrieve(
self, request, uprn=None, format=None, geocoder=geocode_point_only, log=True
):
ret = {}
ret["custom_finder"] = None
# attempt to get address based on uprn
# if we fail, return an error response
try:
address = self.get_object(uprn=uprn)
except ObjectDoesNotExist:
return Response({"detail": "Address not found"}, status=404)
# create singleton list for consistency with /postcode endpoint
ret["addresses"] = [address]
# council object
ret["council"] = address.council
ret["advance_voting_station"] = address.uprntocouncil.advance_voting_station
# attempt to attach point
# in this situation, failure to geocode is non-fatal
try:
geocoded_postcode = geocoder(address.postcode)
location = geocoded_postcode.centroid
except PostcodeError:
location = None
ret["postcode_location"] = location
ret["polling_station_known"] = False
ret["polling_station"] = None
ee = self.get_ee_wrapper(address, request.query_params)
has_election = ee.has_election()
# An address might have an election but we might not know the polling station.
if has_election and address.polling_station_id:
# get polling station if there is an election in this area
polling_station = address.polling_station
if polling_station:
ret["polling_station"] = polling_station
ret["polling_station_known"] = True
ret["metadata"] = ee.get_metadata()
if request.query_params.get("all_future_ballots", None):
ret["ballots"] = ee.get_all_ballots()
else:
ret["ballots"] = ee.get_ballots_for_next_date()
# create log entry
log_data = {}
log_data["we_know_where_you_should_vote"] = ret["polling_station_known"]
log_data["location"] = address.location
log_data["council"] = ret["council"]
log_data["brand"] = "api"
log_data["language"] = ""
log_data["api_user"] = request.user
log_data["has_election"] = has_election
if log:
self.log_postcode(Postcode(address.postcode), log_data, "api")
ret["report_problem_url"] = get_bug_report_url(
request, ret["polling_station_known"]
)
serializer = PostcodeResponseSerializer(
ret, read_only=True, context={"request": request}
)
return Response(serializer.data)
|
2,279 |
rank
|
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from django.db.models import JSONField
from django_extensions.db.models import TimeStampedModel
class ResultSet(TimeStampedModel):
ballot = models.OneToOneField("candidates.Ballot", on_delete=models.CASCADE)
num_turnout_reported = models.PositiveIntegerField(
null=True, verbose_name="Reported Turnout"
)
turnout_percentage = models.FloatField(
validators=[MinValueValidator(0), MaxValueValidator(100)],
blank=True,
null=True,
)
num_spoilt_ballots = models.PositiveIntegerField(
null=True, verbose_name="Spoilt Ballots"
)
total_electorate = models.PositiveIntegerField(
verbose_name="Total Electorate", null=True, blank=True
)
source = models.TextField(null=True)
user = models.ForeignKey(
"auth.User",
related_name="result_sets",
null=True,
on_delete=models.CASCADE,
)
versions = JSONField(default=list)
ip_address = models.GenericIPAddressField(null=True)
def __str__(self):
return "Result for {}".format(self.ballot.ballot_paper_id)
@property
def METHOD_NAME(self):
        return list(self.ballot.result_sets.order_by("-num_turnout_reported")).index(self)
def calculate_turnout_percentage(self):
"""
        Set turnout_percentage to turnout as a percentage of the electorate, rounded to two decimal places
"""
if not all([self.num_turnout_reported, self.total_electorate]):
return
percentage = (self.num_turnout_reported / self.total_electorate) * 100
self.turnout_percentage = min(round(percentage, 2), 100)
def as_dict(self):
"""
A representation of the model and related CandidateResult models
as JSON.
Used for storing versions.
# TODO use API serializer for this?
"""
data = {
"created": self.modified.isoformat(),
"ballot_paper_id": self.ballot.ballot_paper_id,
"turnout": self.num_turnout_reported,
"spoilt_ballots": self.num_spoilt_ballots,
"total_electorate": self.total_electorate,
"source": self.source,
"user": getattr(self.user, "username", None),
"candidate_results": [],
}
for result in self.candidate_results.all():
data["candidate_results"].append(
{
"num_ballots": result.num_ballots,
"elected": result.membership.elected,
"person_id": result.membership.person_id,
"person_name": result.membership.person.name,
}
)
return data
def versions_equal(self, v1, v2):
"""
Compare v1 and v2, ignoring the created key
"""
ignore_keys = ["created"]
comp1 = {k: v for k, v in v1.items() if k not in ignore_keys}
comp2 = {k: v for k, v in v2.items() if k not in ignore_keys}
return comp1 == comp2
def record_version(self, force=False, save=True):
existing = self.versions
this_version = self.as_dict()
changed = False
if existing:
latest = existing[0]
if force or not self.versions_equal(latest, this_version):
changed = True
existing.insert(0, this_version)
else:
changed = True
existing.insert(0, this_version)
self.versions = existing
if save:
self.save()
return (existing, changed)
def save(self, **kwargs):
self.calculate_turnout_percentage()
return super().save(**kwargs)
class CandidateResult(TimeStampedModel):
result_set = models.ForeignKey(
"ResultSet", related_name="candidate_results", on_delete=models.CASCADE
)
membership = models.OneToOneField(
"popolo.Membership", related_name="result", on_delete=models.CASCADE
)
num_ballots = models.PositiveIntegerField()
tied_vote_winner = models.BooleanField(
default=False,
help_text="Did this person win after receiving same votes as another candidate, via coin toss, lots etc",
)
METHOD_NAME = models.PositiveIntegerField(null=True, verbose_name="Results Rank")
class Meta:
ordering = ("-num_ballots",)
unique_together = (("result_set", "membership"),)
def __unicode__(self):
return "{} ({} votes)".format(self.membership.person, self.num_ballots)
|
2,280 |
is conductance based
|
# Copyright (c) 2017 The University of Manchester
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from spinn_utilities.overrides import overrides
from spynnaker.pyNN.models.neuron.input_types import InputTypeConductance
from .abstract_neuron_impl import AbstractNeuronImpl
from spinn_front_end_common.interface.ds import DataType
from spinn_front_end_common.utilities.constants import BYTES_PER_WORD
from spynnaker.pyNN.utilities.struct import Struct, StructRepeat
# The size of the n_steps_per_timestep parameter
_N_STEPS_PER_TIMESTEP_SIZE = 1 * BYTES_PER_WORD
# The default number of steps per timestep
_DEFAULT_N_STEPS_PER_TIMESTEP = 1
_STEPS_PER_TIMESTEP = "n_steps_per_timestep"
_STEPS_PER_TIMESTEP_STRUCT = Struct(
[(DataType.UINT32, _STEPS_PER_TIMESTEP)], repeat_type=StructRepeat.GLOBAL)
class NeuronImplStandard(AbstractNeuronImpl):
"""
The standard componentised neuron implementation.
"""
__slots__ = [
"__model_name",
"__binary",
"__neuron_model",
"__input_type",
"__synapse_type",
"__threshold_type",
"__additional_input_type",
"__components",
"__n_steps_per_timestep"
]
_RECORDABLES = ["v", "gsyn_exc", "gsyn_inh"]
_RECORDABLE_DATA_TYPES = {
"v": DataType.S1615,
"gsyn_exc": DataType.S1615,
"gsyn_inh": DataType.S1615
}
_RECORDABLE_UNITS = {
'v': 'mV',
'gsyn_exc': "uS",
'gsyn_inh': "uS"}
def __init__(
self, model_name, binary, neuron_model, input_type,
synapse_type, threshold_type, additional_input_type=None):
"""
:param str model_name:
:param str binary:
:param AbstractNeuronModel neuron_model:
:param AbstractInputType input_type:
:param AbstractSynapseType synapse_type:
:param AbstractThresholdType threshold_type:
:param additional_input_type:
:type additional_input_type: AbstractAdditionalInput or None
"""
self.__model_name = model_name
self.__binary = binary
self.__neuron_model = neuron_model
self.__input_type = input_type
self.__synapse_type = synapse_type
self.__threshold_type = threshold_type
self.__additional_input_type = additional_input_type
self.__n_steps_per_timestep = _DEFAULT_N_STEPS_PER_TIMESTEP
self.__components = [
self.__neuron_model, self.__input_type, self.__threshold_type,
self.__synapse_type]
if self.__additional_input_type is not None:
self.__components.append(self.__additional_input_type)
@property
def n_steps_per_timestep(self):
return self.__n_steps_per_timestep
@n_steps_per_timestep.setter
def n_steps_per_timestep(self, n_steps_per_timestep):
self.__n_steps_per_timestep = n_steps_per_timestep
@property
@overrides(AbstractNeuronImpl.model_name)
def model_name(self):
return self.__model_name
@property
@overrides(AbstractNeuronImpl.binary_name)
def binary_name(self):
return self.__binary
@property
@overrides(AbstractNeuronImpl.structs)
def structs(self):
structs = [_STEPS_PER_TIMESTEP_STRUCT]
structs.extend(s for c in self.__components for s in c.structs)
return structs
@overrides(AbstractNeuronImpl.get_global_weight_scale)
def get_global_weight_scale(self):
return self.__input_type.get_global_weight_scale()
@overrides(AbstractNeuronImpl.get_n_synapse_types)
def get_n_synapse_types(self):
return self.__synapse_type.get_n_synapse_types()
@overrides(AbstractNeuronImpl.get_synapse_id_by_target)
def get_synapse_id_by_target(self, target):
return self.__synapse_type.get_synapse_id_by_target(target)
@overrides(AbstractNeuronImpl.get_synapse_targets)
def get_synapse_targets(self):
return self.__synapse_type.get_synapse_targets()
@overrides(AbstractNeuronImpl.get_recordable_variables)
def get_recordable_variables(self):
return self._RECORDABLES
@overrides(AbstractNeuronImpl.get_recordable_units)
def get_recordable_units(self, variable):
return self._RECORDABLE_UNITS[variable]
@overrides(AbstractNeuronImpl.get_recordable_data_types)
def get_recordable_data_types(self):
return self._RECORDABLE_DATA_TYPES
@overrides(AbstractNeuronImpl.is_recordable)
def is_recordable(self, variable):
return variable in self._RECORDABLES
@overrides(AbstractNeuronImpl.get_recordable_variable_index)
def get_recordable_variable_index(self, variable):
return self._RECORDABLES.index(variable)
@overrides(AbstractNeuronImpl.add_parameters)
def add_parameters(self, parameters):
parameters[_STEPS_PER_TIMESTEP] = self.__n_steps_per_timestep
for component in self.__components:
component.add_parameters(parameters)
@overrides(AbstractNeuronImpl.add_state_variables)
def add_state_variables(self, state_variables):
for component in self.__components:
component.add_state_variables(state_variables)
@overrides(AbstractNeuronImpl.get_units)
def get_units(self, variable):
for component in self.__components:
if component.has_variable(variable):
return component.get_units(variable)
raise KeyError(
f"The parameter {variable} does not exist in this input "
"conductance component")
@property
@overrides(AbstractNeuronImpl.METHOD_NAME)
def METHOD_NAME(self):
return isinstance(self.__input_type, InputTypeConductance)
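    # __getitem__ below lets callers look up a parameter by name across the
    # composed components, e.g. impl["v_thresh"] (a hypothetical parameter
    # name) returns the attribute from whichever component defines it, or
    # raises AttributeError if none of them do.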
def __getitem__(self, key):
# Find the property in the components...
for component in self.__components:
if hasattr(component, key):
return getattr(component, key)
# ... or fail
raise AttributeError(
f"'{self.__class__.__name__}' object has no attribute {key}")
|
2,281 |
with flux
|
# Copyright (c) 2012-2022 by the GalSim developers team on GitHub
# https://github.com/GalSim-developers
#
# This file is part of GalSim: The modular galaxy image simulation toolkit.
# https://github.com/GalSim-developers/GalSim
#
# GalSim is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
#
import numpy as np
import math
from . import _galsim
from .gsobject import GSObject
from .gsparams import GSParams
from .utilities import lazy_property, doc_inherit
from .errors import GalSimIncompatibleValuesError
class Gaussian(GSObject):
r"""A class describing a 2D Gaussian surface brightness profile.
The Gaussian surface brightness profile is characterized by two properties, its ``flux``
and the characteristic size ``sigma`` where the radial profile of the circular Gaussian
drops off as
.. math::
I(r) \sim e^{-\frac{r^2}{2 \sigma^2}}
A Gaussian can be initialized using one (and only one) of three possible size parameters:
``sigma``, ``fwhm``, or ``half_light_radius``. Exactly one of these three is required.
Parameters:
sigma: The value of sigma of the profile. Typically given in arcsec.
[One of ``sigma``, ``fwhm``, or ``half_light_radius`` is required.]
fwhm: The full-width-half-max of the profile. Typically given in arcsec.
[One of ``sigma``, ``fwhm``, or ``half_light_radius`` is required.]
half_light_radius: The half-light radius of the profile. Typically given in arcsec.
[One of ``sigma``, ``fwhm``, or ``half_light_radius`` is required.]
flux: The flux (in photons/cm^2/s) of the profile. [default: 1]
gsparams: An optional `GSParams` argument. [default: None]
"""
_opt_params = { "flux" : float }
_single_params = [ { "sigma" : float, "half_light_radius" : float, "fwhm" : float } ]
# The FWHM of a Gaussian is 2 sqrt(2 ln2) sigma
_fwhm_factor = 2.3548200450309493
# The half-light-radius is sqrt(2 ln2) sigma
_hlr_factor = 1.1774100225154747
# 1/(2pi)
_inv_twopi = 0.15915494309189535
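    # For reference (a sketch, not part of the original source): given the
    # factors above, Gaussian(sigma=1.0), Gaussian(fwhm=2.3548200450309493)
    # and Gaussian(half_light_radius=1.1774100225154747) all describe the same
    # profile, since fwhm and half_light_radius are converted to sigma in
    # __init__.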
_has_hard_edges = False
_is_axisymmetric = True
_is_analytic_x = True
_is_analytic_k = True
def __init__(self, half_light_radius=None, sigma=None, fwhm=None, flux=1., gsparams=None):
if fwhm is not None :
if sigma is not None or half_light_radius is not None:
raise GalSimIncompatibleValuesError(
"Only one of sigma, fwhm, and half_light_radius may be specified",
fwhm=fwhm, sigma=sigma, half_light_radius=half_light_radius)
else:
sigma = fwhm / Gaussian._fwhm_factor
elif half_light_radius is not None:
if sigma is not None:
raise GalSimIncompatibleValuesError(
"Only one of sigma, fwhm, and half_light_radius may be specified",
fwhm=fwhm, sigma=sigma, half_light_radius=half_light_radius)
else:
sigma = half_light_radius / Gaussian._hlr_factor
elif sigma is None:
raise GalSimIncompatibleValuesError(
"One of sigma, fwhm, and half_light_radius must be specified",
fwhm=fwhm, sigma=sigma, half_light_radius=half_light_radius)
self._sigma = float(sigma)
self._flux = float(flux)
self._gsparams = GSParams.check(gsparams)
self._sigsq = sigma**2
self._inv_sigsq = 1./self._sigsq
self._norm = self.flux * self._inv_sigsq * Gaussian._inv_twopi
@lazy_property
def _sbp(self):
return _galsim.SBGaussian(self._sigma, self._flux, self.gsparams._gsp)
@property
def sigma(self):
"""The sigma of this Gaussian profile
"""
return self._sigma
@property
def half_light_radius(self):
"""The half-light radius of this Gaussian profile
"""
return self.sigma * Gaussian._hlr_factor
@property
def fwhm(self):
"""The FWHM of this Gaussian profile
"""
return self.sigma * Gaussian._fwhm_factor
def __eq__(self, other):
return (self is other or
(isinstance(other, Gaussian) and
self.sigma == other.sigma and
self.flux == other.flux and
self.gsparams == other.gsparams))
def __hash__(self):
return hash(("galsim.Gaussian", self.sigma, self.flux, self.gsparams))
def __repr__(self):
return 'galsim.Gaussian(sigma=%r, flux=%r, gsparams=%r)'%(
self.sigma, self.flux, self.gsparams)
def __str__(self):
s = 'galsim.Gaussian(sigma=%s'%self.sigma
if self.flux != 1.0:
s += ', flux=%s'%self.flux
s += ')'
return s
def __getstate__(self):
d = self.__dict__.copy()
d.pop('_sbp',None)
return d
def __setstate__(self, d):
self.__dict__ = d
@property
def _maxk(self):
return math.sqrt(-2.*math.log(self.gsparams.maxk_threshold))/self.sigma
@property
def _stepk(self):
R = max(math.sqrt(-2.*math.log(self.gsparams.folding_threshold)),
self.gsparams.stepk_minimum_hlr * Gaussian._hlr_factor)
return math.pi / (R * self.sigma)
@property
def _max_sb(self):
return self._norm
def _xValue(self, pos):
rsq = pos.x**2 + pos.y**2
return self._norm * math.exp(-0.5 * rsq * self._inv_sigsq)
def _kValue(self, kpos):
ksq = (kpos.x**2 + kpos.y**2) * self._sigsq
return self._flux * math.exp(-0.5 * ksq)
def _drawReal(self, image, jac=None, offset=(0.,0.), flux_scaling=1.):
_jac = 0 if jac is None else jac.__array_interface__['data'][0]
dx,dy = offset
self._sbp.draw(image._image, image.scale, _jac, dx, dy, flux_scaling)
def _shoot(self, photons, rng):
self._sbp.shoot(photons._pa, rng._rng)
def _drawKImage(self, image, jac=None):
_jac = 0 if jac is None else jac.__array_interface__['data'][0]
self._sbp.drawK(image._image, image.scale, _jac)
@doc_inherit
def METHOD_NAME(self, flux):
return Gaussian(sigma=self.sigma, flux=flux, gsparams=self.gsparams)
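# --- Editor's illustrative sketch (not part of the original galsim source) ---
# Demonstrates the size-parameter relations documented in the class docstring:
# fwhm = 2*sqrt(2 ln 2)*sigma and half_light_radius = sqrt(2 ln 2)*sigma.
# The helper name `_demo_gaussian_sizes` is hypothetical and assumes the
# surrounding module's own imports (GSParams etc.) are available.
def _demo_gaussian_sizes():
    g = Gaussian(sigma=2.0, flux=100.0)
    assert abs(g.fwhm - 2.0 * Gaussian._fwhm_factor) < 1e-12
    assert abs(g.half_light_radius - 2.0 * Gaussian._hlr_factor) < 1e-12
    # Constructing from fwhm (or half_light_radius) recovers the same sigma.
    g2 = Gaussian(fwhm=g.fwhm)
    assert abs(g2.sigma - g.sigma) < 1e-12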
|
2,282 |
test crt path expandvars
|
import os
from pathlib import Path
from monkey_island.cc.server_utils.consts import (
DEFAULT_CRT_PATH,
DEFAULT_DATA_DIR,
DEFAULT_KEY_PATH,
DEFAULT_LOG_LEVEL,
DEFAULT_START_MONGO_DB,
)
from monkey_island.cc.setup.island_config_options import IslandConfigOptions
TEST_CONFIG_FILE_CONTENTS_SPECIFIED = {
"data_dir": "/tmp",
"log_level": "test",
"mongodb": {"start_mongodb": False},
"ssl_certificate": {
"ssl_certificate_file": "/tmp/test.crt",
"ssl_certificate_key_file": "/tmp/test.key",
},
}
TEST_CONFIG_FILE_CONTENTS_UNSPECIFIED = {}
TEST_CONFIG_FILE_CONTENTS_NO_STARTMONGO = {"mongodb": {}}
def test_data_dir_specified():
assert_data_dir_equals(TEST_CONFIG_FILE_CONTENTS_SPECIFIED, "/tmp")
def test_data_dir_uses_default():
assert_data_dir_equals(TEST_CONFIG_FILE_CONTENTS_UNSPECIFIED, DEFAULT_DATA_DIR)
def test_data_dir_expanduser(patched_home_env):
DATA_DIR_NAME = "test_data_dir"
assert_data_dir_equals(
{"data_dir": os.path.join("~", DATA_DIR_NAME)},
patched_home_env / DATA_DIR_NAME,
)
def test_data_dir_expandvars(home_env_variable, patched_home_env):
DATA_DIR_NAME = "test_data_dir"
assert_data_dir_equals(
{"data_dir": os.path.join(home_env_variable, DATA_DIR_NAME)},
patched_home_env / DATA_DIR_NAME,
)
def assert_data_dir_equals(config_file_contents, expected_data_dir):
assert_island_config_option_equals(config_file_contents, "data_dir", Path(expected_data_dir))
def test_log_level():
options = IslandConfigOptions(TEST_CONFIG_FILE_CONTENTS_SPECIFIED)
assert options.log_level == "test"
options = IslandConfigOptions(TEST_CONFIG_FILE_CONTENTS_UNSPECIFIED)
assert options.log_level == DEFAULT_LOG_LEVEL
def test_mongodb():
options = IslandConfigOptions(TEST_CONFIG_FILE_CONTENTS_SPECIFIED)
assert not options.start_mongodb
options = IslandConfigOptions(TEST_CONFIG_FILE_CONTENTS_UNSPECIFIED)
assert options.start_mongodb == DEFAULT_START_MONGO_DB
options = IslandConfigOptions(TEST_CONFIG_FILE_CONTENTS_NO_STARTMONGO)
assert options.start_mongodb == DEFAULT_START_MONGO_DB
def test_crt_path_uses_default():
assert_ssl_certificate_file_equals(TEST_CONFIG_FILE_CONTENTS_UNSPECIFIED, DEFAULT_CRT_PATH)
def test_crt_path_specified():
assert_ssl_certificate_file_equals(
TEST_CONFIG_FILE_CONTENTS_SPECIFIED,
TEST_CONFIG_FILE_CONTENTS_SPECIFIED["ssl_certificate"]["ssl_certificate_file"],
)
def test_crt_path_expanduser(patched_home_env):
FILE_NAME = "test.crt"
assert_ssl_certificate_file_equals(
{"ssl_certificate": {"ssl_certificate_file": os.path.join("~", FILE_NAME)}},
patched_home_env / FILE_NAME,
)
def METHOD_NAME(home_env_variable, patched_home_env):
FILE_NAME = "test.crt"
assert_ssl_certificate_file_equals(
{"ssl_certificate": {"ssl_certificate_file": os.path.join(home_env_variable, FILE_NAME)}},
patched_home_env / FILE_NAME,
)
def assert_ssl_certificate_file_equals(config_file_contents, expected_ssl_certificate_file):
assert_island_config_option_equals(
config_file_contents, "crt_path", Path(expected_ssl_certificate_file)
)
def test_key_path_uses_default():
assert_ssl_certificate_key_file_equals(TEST_CONFIG_FILE_CONTENTS_UNSPECIFIED, DEFAULT_KEY_PATH)
def test_key_path_specified():
assert_ssl_certificate_key_file_equals(
TEST_CONFIG_FILE_CONTENTS_SPECIFIED,
TEST_CONFIG_FILE_CONTENTS_SPECIFIED["ssl_certificate"]["ssl_certificate_key_file"],
)
def test_key_path_expanduser(patched_home_env):
FILE_NAME = "test.key"
assert_ssl_certificate_key_file_equals(
{"ssl_certificate": {"ssl_certificate_key_file": os.path.join("~", FILE_NAME)}},
patched_home_env / FILE_NAME,
)
def test_key_path_expandvars(home_env_variable, patched_home_env):
FILE_NAME = "test.key"
assert_ssl_certificate_key_file_equals(
{
"ssl_certificate": {
"ssl_certificate_key_file": os.path.join(home_env_variable, FILE_NAME)
}
},
patched_home_env / FILE_NAME,
)
def assert_ssl_certificate_key_file_equals(config_file_contents, expected_ssl_certificate_file):
assert_island_config_option_equals(
config_file_contents, "key_path", Path(expected_ssl_certificate_file)
)
def assert_island_config_option_equals(config_file_contents, option_name, expected_value):
options = IslandConfigOptions(config_file_contents)
assert getattr(options, option_name) == expected_value
def test_start_mongo_overridden(patched_home_env):
config = IslandConfigOptions()
assert config.start_mongodb
config.update({"mongodb": {"start_mongodb": False}})
assert not config.start_mongodb
def test_crt_path_overridden(patched_home_env):
expected_path = Path("/fake/file.crt")
config = IslandConfigOptions()
assert config.crt_path != expected_path
config.update({"ssl_certificate": {"ssl_certificate_file": str(expected_path)}})
assert config.crt_path == expected_path
def test_key_path_overridden(patched_home_env):
expected_path = Path("/fake/file.key")
config = IslandConfigOptions()
assert config.key_path != expected_path
config.update({"ssl_certificate": {"ssl_certificate_key_file": str(expected_path)}})
assert config.key_path == expected_path
|
2,283 |
options
|
#!/usr/bin/env python
# encoding: utf-8
# Federico Pellegrin, 2016-2019 (fedepell) adapted for Python
"""
This tool helps with finding Python Qt5 tools and libraries,
and provides translation from QT5 files to Python code.
The following snippet illustrates the tool usage::
def options(opt):
opt.load('py pyqt5')
def configure(conf):
conf.load('py pyqt5')
def build(bld):
bld(
features = 'py pyqt5',
source = 'main.py textures.qrc aboutDialog.ui',
)
Here, the UI description and resource files will be processed
to generate code.
Usage
=====
Load the "pyqt5" tool.
Add the qrc resource files and ui definition files to the sources
list; they will be translated into Python code with the system
tools (PyQt5, PySide2 and PyQt4 are searched, in this order) and
then compiled.
"""
try:
from xml.sax import make_parser
from xml.sax.handler import ContentHandler
except ImportError:
has_xml = False
ContentHandler = object
else:
has_xml = True
import os
from waflib.Tools import python
from waflib import Task, Options
from waflib.TaskGen import feature, extension
from waflib.Configure import conf
from waflib import Logs
EXT_RCC = ['.qrc']
"""
File extension for the resource (.qrc) files
"""
EXT_UI = ['.ui']
"""
File extension for the user interface (.ui) files
"""
class XMLHandler(ContentHandler):
"""
Parses ``.qrc`` files
"""
def __init__(self):
self.buf = []
self.files = []
def startElement(self, name, attrs):
if name == 'file':
self.buf = []
def endElement(self, name):
if name == 'file':
self.files.append(str(''.join(self.buf)))
def characters(self, cars):
self.buf.append(cars)
@extension(*EXT_RCC)
def create_pyrcc_task(self, node):
"Creates rcc and py task for ``.qrc`` files"
rcnode = node.change_ext('.py')
self.create_task('pyrcc', node, rcnode)
if getattr(self, 'install_from', None):
self.install_from = self.install_from.get_bld()
else:
self.install_from = self.path.get_bld()
self.install_path = getattr(self, 'install_path', '${PYTHONDIR}')
self.process_py(rcnode)
@extension(*EXT_UI)
def create_pyuic_task(self, node):
"Create uic tasks and py for user interface ``.ui`` definition files"
uinode = node.change_ext('.py')
self.create_task('ui5py', node, uinode)
if getattr(self, 'install_from', None):
self.install_from = self.install_from.get_bld()
else:
self.install_from = self.path.get_bld()
self.install_path = getattr(self, 'install_path', '${PYTHONDIR}')
self.process_py(uinode)
@extension('.ts')
def add_pylang(self, node):
"""Adds all the .ts file into ``self.lang``"""
self.lang = self.to_list(getattr(self, 'lang', [])) + [node]
@feature('pyqt5')
def apply_pyqt5(self):
"""
The additional parameters are:
:param lang: list of translation files (\\*.ts) to process
:type lang: list of :py:class:`waflib.Node.Node` or string without the .ts extension
    :param langname: if given, transform the \\*.ts files into a .qrc file to include in the binary file
:type langname: :py:class:`waflib.Node.Node` or string without the .qrc extension
"""
if getattr(self, 'lang', None):
qmtasks = []
for x in self.to_list(self.lang):
if isinstance(x, str):
x = self.path.find_resource(x + '.ts')
qmtasks.append(self.create_task('ts2qm', x, x.change_ext('.qm')))
if getattr(self, 'langname', None):
qmnodes = [k.outputs[0] for k in qmtasks]
rcnode = self.langname
if isinstance(rcnode, str):
rcnode = self.path.find_or_declare(rcnode + '.qrc')
t = self.create_task('qm2rcc', qmnodes, rcnode)
create_pyrcc_task(self, t.outputs[0])
class pyrcc(Task.Task):
"""
Processes ``.qrc`` files
"""
color = 'BLUE'
run_str = '${QT_PYRCC} ${SRC} -o ${TGT}'
ext_out = ['.py']
def rcname(self):
return os.path.splitext(self.inputs[0].name)[0]
def scan(self):
"""Parse the *.qrc* files"""
if not has_xml:
Logs.error('No xml.sax support was found, rcc dependencies will be incomplete!')
return ([], [])
parser = make_parser()
curHandler = XMLHandler()
parser.setContentHandler(curHandler)
fi = open(self.inputs[0].abspath(), 'r')
try:
parser.parse(fi)
finally:
fi.close()
nodes = []
names = []
root = self.inputs[0].parent
for x in curHandler.files:
nd = root.find_resource(x)
if nd:
nodes.append(nd)
else:
names.append(x)
return (nodes, names)
class ui5py(Task.Task):
"""
Processes ``.ui`` files for python
"""
color = 'BLUE'
run_str = '${QT_PYUIC} ${SRC} -o ${TGT}'
ext_out = ['.py']
class ts2qm(Task.Task):
"""
Generates ``.qm`` files from ``.ts`` files
"""
color = 'BLUE'
run_str = '${QT_LRELEASE} ${QT_LRELEASE_FLAGS} ${SRC} -qm ${TGT}'
class qm2rcc(Task.Task):
"""
Generates ``.qrc`` files from ``.qm`` files
"""
color = 'BLUE'
after = 'ts2qm'
def run(self):
"""Create a qrc file including the inputs"""
txt = '\n'.join(['<file>%s</file>' % k.path_from(self.outputs[0].parent) for k in self.inputs])
code = '<!DOCTYPE RCC><RCC version="1.0">\n<qresource>\n%s\n</qresource>\n</RCC>' % txt
self.outputs[0].write(code)
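# Editor's note (illustrative, not part of the original tool): for two inputs
# de.qm and fr.qm located next to the target, qm2rcc.run() above writes a
# resource file of the form
#   <!DOCTYPE RCC><RCC version="1.0">
#   <qresource>
#   <file>de.qm</file>
#   <file>fr.qm</file>
#   </qresource>
#   </RCC>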
def configure(self):
self.find_pyqt5_binaries()
# warn about this during the configuration too
if not has_xml:
Logs.error('No xml.sax support was found, rcc dependencies will be incomplete!')
@conf
def find_pyqt5_binaries(self):
"""
Detects PyQt5 or PySide2 programs such as pyuic5/pyside2-uic, pyrcc5/pyside2-rcc
"""
env = self.env
if getattr(Options.METHOD_NAME, 'want_pyqt5', True):
self.find_program(['pyuic5'], var='QT_PYUIC')
self.find_program(['pyrcc5'], var='QT_PYRCC')
self.find_program(['pylupdate5'], var='QT_PYLUPDATE')
elif getattr(Options.METHOD_NAME, 'want_pyside2', True):
self.find_program(['pyside2-uic'], var='QT_PYUIC')
self.find_program(['pyside2-rcc'], var='QT_PYRCC')
self.find_program(['pyside2-lupdate'], var='QT_PYLUPDATE')
elif getattr(Options.METHOD_NAME, 'want_pyqt4', True):
self.find_program(['pyuic4'], var='QT_PYUIC')
self.find_program(['pyrcc4'], var='QT_PYRCC')
self.find_program(['pylupdate4'], var='QT_PYLUPDATE')
else:
self.find_program(['pyuic5','pyside2-uic','pyuic4'], var='QT_PYUIC')
self.find_program(['pyrcc5','pyside2-rcc','pyrcc4'], var='QT_PYRCC')
self.find_program(['pylupdate5', 'pyside2-lupdate','pylupdate4'], var='QT_PYLUPDATE')
if not env.QT_PYUIC:
self.fatal('cannot find the uic compiler for python for qt5')
if not env.QT_PYRCC:
self.fatal('cannot find the rcc compiler for python for qt5')
self.find_program(['lrelease-qt5', 'lrelease'], var='QT_LRELEASE')
def METHOD_NAME(opt):
"""
Command-line options
"""
pyqt5opt=opt.add_option_group("Python QT5 Options")
pyqt5opt.add_option('--pyqt5-pyqt5', action='store_true', default=False, dest='want_pyqt5', help='use PyQt5 bindings as python QT5 bindings (default PyQt5 is searched first, PySide2 after, PyQt4 last)')
pyqt5opt.add_option('--pyqt5-pyside2', action='store_true', default=False, dest='want_pyside2', help='use PySide2 bindings as python QT5 bindings (default PyQt5 is searched first, PySide2 after, PyQt4 last)')
pyqt5opt.add_option('--pyqt5-pyqt4', action='store_true', default=False, dest='want_pyqt4', help='use PyQt4 bindings as python QT5 bindings (default PyQt5 is searched first, PySide2 after, PyQt4 last)')
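# Editor's illustrative sketch (hypothetical wscript, not part of the original
# tool): how the `lang`/`langname` parameters documented in apply_pyqt5 are
# typically wired up; the file names are made up for illustration only.
#
# def build(bld):
#     bld(
#         features='py pyqt5',
#         source='main.py res.qrc dialog.ui',
#         lang='linguist/de linguist/fr',  # each *.ts file becomes a ts2qm task
#         langname='translations',         # *.qm files are collected into translations.qrc
#     )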
|
2,284 |
connection secrets
|
import pytest
from fides.api.schemas.connection_configuration.connection_config import (
mask_sensitive_fields,
)
class TestMaskSensitiveValues:
@pytest.fixture(scope="function")
def secret_schema(self):
return {
"additionalProperties": False,
"description": "Aircall secrets schema",
"properties": {
"api_id": {"sensitive": False, "title": "API ID", "type": "string"},
"api_token": {
"sensitive": True,
"title": "API token",
"type": "string",
},
"domain": {
"default": "api.aircall.io",
"sensitive": False,
"title": "Domain",
"type": "string",
},
},
"required": ["api_id", "api_token"],
"title": "aircall_schema",
"type": "object",
}
@pytest.fixture(scope="function")
def secret_schema_with_dataset_references(self):
return {
"title": "doordash_schema",
"description": "Doordash secrets schema",
"type": "object",
"properties": {
"domain": {
"title": "Domain",
"default": "openapi.doordash.com",
"sensitive": False,
"type": "string",
},
"developer_id": {
"title": "Developer ID",
"sensitive": False,
"type": "string",
},
"key_id": {"title": "Key ID", "sensitive": False, "type": "string"},
"signing_secret": {
"title": "Signing secret",
"sensitive": True,
"type": "string",
},
"doordash_delivery_id": {
"title": "Doordash Delivery ID",
"external_reference": True,
"allOf": [{"$ref": "#/definitions/FidesDatasetReference"}],
},
},
"required": [
"developer_id",
"key_id",
"signing_secret",
"doordash_delivery_id",
],
"additionalProperties": False,
"definitions": {
"EdgeDirection": {
"title": "EdgeDirection",
"description": "Direction of a FidesDataSetReference",
"enum": ["from", "to"],
"type": "string",
},
"FidesDatasetReference": {
"title": "FidesDatasetReference",
"description": "Reference to a field from another Collection",
"type": "object",
"properties": {
"dataset": {
"title": "Dataset",
"pattern": "^[a-zA-Z0-9_.<>-]+$",
"type": "string",
},
"field": {"title": "Field", "type": "string"},
"direction": {"$ref": "#/definitions/EdgeDirection"},
},
"required": ["dataset", "field"],
},
},
}
@pytest.fixture(scope="function")
def METHOD_NAME(self):
return {
"api_id": "secret-test",
"api_token": "testing with new value",
"domain": "api.aircall.io",
}
def test_mask_sensitive_fields(self, secret_schema, METHOD_NAME):
masked_secrets = mask_sensitive_fields(METHOD_NAME, secret_schema)
assert masked_secrets == {
"api_id": "secret-test",
"api_token": "**********",
"domain": "api.aircall.io",
}
def test_mask_dataset_reference_fields(self, secret_schema_with_dataset_references):
masked_secrets = mask_sensitive_fields(
{
"domain": "openapi.doordash.com",
"developer_id": "123",
"key_id": "123",
"signing_secret": "123",
"doordash_delivery_id": {
"dataset": "shop",
"field": "customer.id",
"direction": "from",
},
},
secret_schema_with_dataset_references,
)
assert masked_secrets == {
"domain": "openapi.doordash.com",
"developer_id": "123",
"key_id": "123",
"signing_secret": "**********",
"doordash_delivery_id": {
"dataset": "shop",
"field": "customer.id",
"direction": "from",
},
}
def test_mask_sensitive_fields_remove_non_schema_values(
self, METHOD_NAME, secret_schema
):
METHOD_NAME["non_schema_value"] = "this should be removed"
METHOD_NAME["another_non_schema_value"] = "this should also be removed"
masked_secrets = mask_sensitive_fields(METHOD_NAME, secret_schema)
keys = masked_secrets.keys()
assert "non_schema_value" not in keys
assert "another_non_schema_value" not in keys
def test_return_none_if_no_secrets(self, secret_schema):
masked_secrets = mask_sensitive_fields(None, secret_schema)
assert masked_secrets is None
|
2,285 |
corr2 coeff
|
# mlxtend Machine Learning Library Extensions
#
# A function for plotting a PCA correlation circle
# File Author: Gabriel Azevedo Ferreira <[email protected]>
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from mlxtend.externals.adjust_text import adjust_text
from mlxtend.feature_extraction import PrincipalComponentAnalysis
def METHOD_NAME(A, B):
"""
Compute correlation coefficients and return as a np array
"""
A, B = np.array(A), np.array(B)
    # Row-wise mean of input arrays & subtract from input arrays themselves
A_mA = A - A.mean(1)[:, None]
B_mB = B - B.mean(1)[:, None]
# Sum of squares across rows
ssA = (A_mA**2).sum(1)
ssB = (B_mB**2).sum(1)
# Finally get corr coeff
return np.dot(A_mA, B_mB.T) / np.sqrt(np.dot(ssA[:, None], ssB[None]))
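# --- Editor's illustrative sketch (not part of the original mlxtend source) ---
# The function above computes row-wise Pearson correlations: for inputs of
# shape (n_rows_A, n_samples) and (n_rows_B, n_samples) the result has shape
# (n_rows_A, n_rows_B). The helper name `_demo_rowwise_corr` is hypothetical.
def _demo_rowwise_corr():
    rng = np.random.RandomState(0)
    A = rng.randn(3, 50)
    B = np.vstack([A[0], -A[1]])       # rows perfectly (anti-)correlated with A
    C = METHOD_NAME(A, B)              # result has shape (3, 2)
    assert C.shape == (3, 2)
    assert abs(C[0, 0] - 1.0) < 1e-9   # A[0] vs A[0]
    assert abs(C[1, 1] + 1.0) < 1e-9   # A[1] vs -A[1]
    return C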
def create_correlation_table(A, B, names_cols_A, names_cols_B):
"""
Compute correlation coefficients and return as a DataFrame.
    A and B: 2d array like.
    The columns represent the different variables and the rows are
    the samples of those variables
    names_cols_A/B : name to be added to the final pandas table
    return: pandas DataFrame with the correlations. Columns and Indexes
    represent the different variables of A and B (respectively)
"""
# corrs = np.corrcoef(np.transpose(A), np.transpose(B)
# )[len(names_cols_A):, :len(names_cols_A)]
corrs = METHOD_NAME(A.T, B.T).T
df_corrs = pd.DataFrame(corrs, columns=names_cols_A, index=names_cols_B)
return df_corrs
def plot_pca_correlation_graph(
X,
variables_names,
dimensions=(1, 2),
figure_axis_size=6,
X_pca=None,
explained_variance=None,
):
"""
Compute the PCA for X and plots the Correlation graph
Parameters
----------
X : 2d array like.
The columns represent the different variables and the rows are the
        samples of those variables
variables_names : array like
Name of the columns (the variables) of X
dimensions: tuple with two elements.
dimensions to be plotted (x,y)
figure_axis_size :
size of the final frame. The figure created is a square with length
and width equal to figure_axis_size.
X_pca : np.ndarray, shape = [n_samples, n_components].
Optional.
`X_pca` is the matrix of the transformed components from X.
If not provided, the function computes PCA automatically using
mlxtend.feature_extraction.PrincipalComponentAnalysis
        Expected `n_components >= max(dimensions)`
explained_variance : 1 dimension np.ndarray, length = n_components
Optional.
`explained_variance` are the eigenvalues from the diagonalized
        covariance matrix on the PCA transformation.
        If not provided, the function computes PCA independently
        Expected `n_components == X.shape[1]`
Returns
----------
matplotlib_figure, correlation_matrix
Examples
-----------
For usage examples, please see
https://rasbt.github.io/mlxtend/user_guide/plotting/plot_pca_correlation_graph/
"""
X = np.array(X)
X = X - X.mean(axis=0)
n_comp = max(dimensions)
if (X_pca is None) and (explained_variance is None):
pca = PrincipalComponentAnalysis(n_components=n_comp)
pca.fit(X)
X_pca = pca.transform(X)
explained_variance = pca.e_vals_
elif (X_pca is not None) and (explained_variance is None):
raise ValueError(
"If `X_pca` is not None, the `explained variance`"
" values should not be `None`."
)
elif (X_pca is None) and (explained_variance is not None):
raise ValueError(
"If `explained variance` is not None, the `X_pca`"
" values should not be `None`."
)
elif (X_pca is not None) and (explained_variance is not None):
if X_pca.shape[1] != len(explained_variance):
raise ValueError(
f"Number of principal components must "
f"match the number "
f"of eigenvalues. Got "
f"{X_pca.shape[1]} "
f"!= "
f"{len(explained_variance)}"
)
if X_pca.shape[1] < n_comp:
raise ValueError(
f"Input array `X_pca` contains fewer principal"
f" components than expected based on `dimensions`."
f" Got {X_pca.shape[1]} components in X_pca, expected"
f" at least `max(dimensions)={n_comp}`."
)
if len(explained_variance) < n_comp:
raise ValueError(
f"Input array `explained_variance` contains fewer"
f" elements than expected. Got"
f" {len(explained_variance)} elements, expected"
f"`X.shape[1]={X.shape[1]}`."
)
corrs = create_correlation_table(
X_pca, X, ["Dim " + str(i + 1) for i in range(n_comp)], variables_names
)
tot = sum(X.var(0)) * X.shape[0] / (X.shape[0] - 1)
explained_var_ratio = [(i / tot) * 100 for i in explained_variance]
# Plotting circle
fig_res = plt.figure(figsize=(figure_axis_size, figure_axis_size))
    circle1 = plt.Circle((0, 0), radius=1, color="k", fill=False)
fig = plt.gcf()
fig.gca().add_artist(circle1)
# Plotting arrows
texts = []
for name, row in corrs.iterrows():
x = row["Dim " + str(dimensions[0])]
y = row["Dim " + str(dimensions[1])]
plt.arrow(0.0, 0.0, x, y, color="k", length_includes_head=True, head_width=0.05)
plt.plot([0.0, x], [0.0, y], "k-")
texts.append(plt.text(x, y, name, fontsize=2 * figure_axis_size))
# Plotting vertical lines
plt.plot([-1.1, 1.1], [0, 0], "k--")
plt.plot([0, 0], [-1.1, 1.1], "k--")
# Adjusting text
adjust_text(texts)
# Setting limits and title
plt.xlim((-1.1, 1.1))
plt.ylim((-1.1, 1.1))
plt.title("Correlation Circle", fontsize=figure_axis_size * 3)
plt.xlabel(
"Dim "
+ str(dimensions[0])
+ " (%s%%)" % str(explained_var_ratio[dimensions[0] - 1])[:4],
fontsize=figure_axis_size * 2,
)
plt.ylabel(
"Dim "
+ str(dimensions[1])
+ " (%s%%)" % str(explained_var_ratio[dimensions[1] - 1])[:4],
fontsize=figure_axis_size * 2,
)
return fig_res, corrs
|
2,286 |
connect
|
#!/usr/bin/env pmpython
#
# Copyright (C) 2014-2016,2018,2020 Red Hat.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# pylint: disable=bad-continuation
#
""" Tell how long the system has been running """
import sys
from pcp import pmapi
from cpmapi import PM_TYPE_U32, PM_TYPE_U64, PM_TYPE_FLOAT
from cpmapi import PM_CONTEXT_ARCHIVE, PM_MODE_FORW, PM_ERR_VALUE
def print_timestamp(stamp):
""" Report the sample time (struct tm) in HH:MM:SS form """
return " %02d:%02d:%02d" % (stamp.tm_hour, stamp.tm_min, stamp.tm_sec)
def print_uptime(seconds):
""" Report on system up-time in days, hours and minutes """
days = int(seconds / (60 * 60 * 24))
minutes = int(seconds / 60)
hours = int(minutes / 60)
hours = int(hours % 24)
minutes = int(minutes % 60)
result = " up"
if days > 1:
result += " %d days," % days
elif days != 0:
result += " 1 day,"
if hours != 0:
result += ' %2d:%02d,' % (hours, minutes)
else:
result += ' %d min,' % minutes
return result
def print_users(nusers):
""" Report the number of logged in users at sample time """
if nusers == 1:
return ' 1 user, '
else:
return ' %2d users, ' % nusers
def print_load(one, five, fifteen):
""" Report 1, 5, 15 minute load averages at sample time """
return ' load average: %.2f, %.2f, %.2f' % (one, five, fifteen)
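# Editor's note (illustrative, not part of the original utility): expected
# output of the helpers above, e.g. 93784 seconds is 1 day, 2 hours, 3 minutes.
#   print_uptime(93784)        -> " up 1 day,  2:03,"
#   print_users(1)             -> " 1 user, "
#   print_load(0.1, 0.2, 0.3)  -> " load average: 0.10, 0.20, 0.30"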
class Uptime(object):
""" Gives a one line display of the following information:
The current time;
How long the system has been running;
How many users are currently logged on; and
The system load averages for the past 1, 5, and 15 minutes.
Knows about some of the default PCP arguments - can function
using remote hosts or historical data, using the timezone of
the metric source, at an offset within an archive, and so on.
"""
def __init__(self):
""" Construct object - prepare for command line handling """
self.context = None
self.opts = pmapi.pmOptions()
self.opts.pmSetShortOptions("V?")
self.opts.pmSetLongOptionHeader("Options")
self.opts.pmSetLongOptionVersion()
self.opts.pmSetLongOptionHelp()
def execute(self):
""" Using a PMAPI context (could be either host or archive),
fetch and report a fixed set of values related to uptime.
"""
metrics = ('kernel.all.uptime', 'kernel.all.nusers', 'kernel.all.load')
pmids = self.context.pmLookupName(metrics)
descs = self.context.pmLookupDescs(pmids)
result = self.context.pmFetch(pmids)
if result.contents.numpmid != len(metrics):
raise pmapi.pmErr(PM_ERR_VALUE)
uptime = ''
sample_time = result.contents.timestamp.tv_sec
time_struct = self.context.pmLocaltime(sample_time)
uptime += print_timestamp(time_struct)
atom = self.context.pmExtractValue(
result.contents.get_valfmt(0),
result.contents.get_vlist(0, 0),
descs[0].contents.type, PM_TYPE_U64)
uptime += print_uptime(atom.ull)
atom = self.context.pmExtractValue(
result.contents.get_valfmt(1),
result.contents.get_vlist(1, 0),
descs[1].contents.type, PM_TYPE_U32)
uptime += print_users(atom.ul)
averages = [None, None, None]
for inst in range(3):
averages[inst] = self.context.pmExtractValue(
result.contents.get_valfmt(2),
result.contents.get_vlist(2, inst),
descs[2].contents.type, PM_TYPE_FLOAT)
uptime += print_load(averages[0].f, averages[1].f, averages[2].f)
print(uptime)
self.context.pmFreeResult(result)
def METHOD_NAME(self):
""" Establish a PMAPI context to archive, host or local, via args """
self.context = pmapi.pmContext.fromOptions(self.opts, sys.argv)
if self.context.type == PM_CONTEXT_ARCHIVE:
origin = self.opts.pmGetOptionOrigin()
self.context.pmSetMode(PM_MODE_FORW, origin, 0)
if __name__ == '__main__':
try:
UPTIME = Uptime()
UPTIME.METHOD_NAME()
UPTIME.execute()
except pmapi.pmErr as error:
print("%s: %s" % (error.progname(), error.message()))
except pmapi.pmUsageErr as usage:
usage.message()
except KeyboardInterrupt:
pass
|
2,287 |
get qualities
|
import logging
import subprocess
import attr
from ..exceptions import InvalidConfigError
from ..factory import target_factory
from ..protocol import VideoProtocol
from .common import Driver
@target_factory.reg_driver
@attr.s(eq=False)
class USBVideoDriver(Driver, VideoProtocol):
bindings = {
"video": {"USBVideo", "NetworkUSBVideo"},
}
def __attrs_post_init__(self):
super().__attrs_post_init__()
self.logger = logging.getLogger(f"{self}")
self._prepared = False
def METHOD_NAME(self):
match = (self.video.vendor_id, self.video.model_id)
if match == (0x046d, 0x082d):
return ("mid", [
("low", "video/x-h264,width=640,height=360,framerate=5/1"),
("mid", "video/x-h264,width=1280,height=720,framerate=15/2"),
("high", "video/x-h264,width=1920,height=1080,framerate=10/1"),
])
if match == (0x046d, 0x0892):
return ("mid", [
("low", "image/jpeg,width=640,height=360,framerate=5/1"),
("mid", "image/jpeg,width=1280,height=720,framerate=15/2"),
("high", "image/jpeg,width=1920,height=1080,framerate=10/1"),
])
if match == (0x046d, 0x08e5): # Logitech HD Pro Webcam C920
return ("mid", [
("low", "image/jpeg,width=640,height=360,framerate=5/1"),
("mid", "image/jpeg,width=1280,height=720,framerate=15/2"),
("high", "image/jpeg,width=1920,height=1080,framerate=10/1"),
])
if match == (0x1224, 0x2825): # LogiLink UA0371
return ("mid", [
("low", "image/jpeg,width=640,height=480,framerate=30/1"),
("mid", "image/jpeg,width=1280,height=720,framerate=30/1"),
("high", "image/jpeg,width=1920,height=1080,framerate=30/1"),
])
if match == (0x05a3, 0x9331): # WansView Webcam 102
return ("mid", [
("low","video/x-h264,width=640,height=360,framerate=30/1"),
("mid","video/x-h264,width=1280,height=720,framerate=30/1"),
("high","video/x-h264,width=1920,height=1080,framerate=30/1"),
])
if match == (0x534d, 0x2109): # MacroSilicon
return ("mid", [
("low", "image/jpeg,width=720,height=480,framerate=10/1"),
("mid", "image/jpeg,width=1280,height=720,framerate=10/1"),
("high", "image/jpeg,width=1920,height=1080,framerate=10/1"),
])
if match == (0x1d6c, 0x0103): # HD 2MP WEBCAM
return ("mid", [
("low", "video/x-h264,width=640,height=480,framerate=25/1"),
("mid", "video/x-h264,width=1280,height=720,framerate=25/1"),
("high", "video/x-h264,width=1920,height=1080,framerate=25/1"),
])
if match == (0x0c45, 0x636d): # AUKEY PC-LM1E
return ("mid", [
("low", "image/jpeg,width=640,height=480,pixel-aspect-ratio=1/1,framerate=30/1"),
("mid", "image/jpeg,width=864,height=480,pixel-aspect-ratio=1/1,framerate=30/1"),
("high", "image/jpeg,width=1280,height=1024,pixel-aspect-ratio=1/1,framerate=30/1"),
])
self.logger.warning(
"Unkown USB video device {:04x}:{:04x}, using fallback pipeline."
.format(*match))
return ("mid", [
("low", "image/jpeg,width=640,height=480,framerate=30/1"),
("mid", "image/jpeg,width=1280,height=720,framerate=30/1"),
("high", "image/jpeg,width=1920,height=1080,framerate=30/1"),
])
def select_caps(self, hint=None):
default, variants = self.METHOD_NAME()
variant = hint if hint else default
for name, caps in variants:
if name == variant:
return caps
raise InvalidConfigError(
f"Unknown video format {variant} for device {self.video.vendor_id:04x}:{self.video.model_id:04x}" # pylint: disable=line-too-long
)
def get_pipeline(self, path, caps, controls=None):
match = (self.video.vendor_id, self.video.model_id)
if match == (0x046d, 0x082d):
controls = controls or "focus_auto=1"
inner = "h264parse"
elif match == (0x046d, 0x0892):
controls = controls or "focus_auto=1"
inner = None
elif match == (0x046d, 0x08e5):
controls = controls or "focus_auto=1"
inner = None
elif match == (0x1224, 0x2825): # LogiLink UA0371
inner = None # just forward the jpeg frames
elif match == (0x05a3, 0x9331): # WansView Webcam 102
inner = "h264parse"
elif match == (0x534d, 0x2109):
inner = None # just forward the jpeg frames
elif match == (0x1d6c, 0x0103):
controls = controls or "focus_auto=1"
inner = "h264parse"
        elif match == (0x0c45, 0x636d):  # AUKEY PC-LM1E
controls = controls or "focus_auto=1"
inner = None # just forward the jpeg frames
else: # fallback pipeline
inner = None # just forward the jpeg frames
pipeline = f"v4l2src device={path} "
if controls:
pipeline += f"extra-controls=c,{controls} "
pipeline += f"! {caps} "
if inner:
pipeline += f"! {inner} "
pipeline += "! matroskamux streamable=true ! fdsink"
return pipeline
@Driver.check_active
def stream(self, caps_hint=None, controls=None):
caps = self.select_caps(caps_hint)
pipeline = self.get_pipeline(self.video.path, caps, controls)
tx_cmd = self.video.command_prefix + ["gst-launch-1.0", "-q"]
tx_cmd += pipeline.split()
rx_cmd = ["gst-launch-1.0"]
rx_cmd += "playbin uri=fd://0".split()
tx = subprocess.Popen(
tx_cmd,
stdin=subprocess.DEVNULL,
stdout=subprocess.PIPE,
)
rx = subprocess.Popen(
rx_cmd,
stdin=tx.stdout,
stdout=subprocess.DEVNULL,
)
# wait until one subprocess has terminated
while True:
try:
tx.wait(timeout=0.1)
break
except subprocess.TimeoutExpired:
pass
try:
rx.wait(timeout=0.1)
break
except subprocess.TimeoutExpired:
pass
rx.terminate()
tx.terminate()
rx.communicate()
tx.communicate()
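# Editor's illustrative sketch (not part of the original labgrid driver): how
# select_caps() and get_pipeline() combine; the driver instance and device
# path below are hypothetical.
#
# caps = driver.select_caps(hint="high")   # one of the caps strings tabulated above
# pipeline = driver.get_pipeline("/dev/video0", caps)
# # -> "v4l2src device=/dev/video0 ! <caps> [! h264parse] ! matroskamux streamable=true ! fdsink"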
|
2,288 |
single lane gui class
|
from __future__ import absolute_import
###############################################################################
# ilastik: interactive learning and segmentation toolkit
#
# Copyright (C) 2011-2014, the ilastik developers
# <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# In addition, as a special exception, the copyright holders of
# ilastik give you permission to combine ilastik with applets,
# workflows and plugins which are not covered under the GNU
# General Public License.
#
# See the LICENSE file for details. License information is also available
# on the ilastik web site at:
# http://ilastik.org/license.html
###############################################################################
from ilastik.applets.base.standardApplet import StandardApplet
from .opPixelClassification import OpPixelClassification
from .pixelClassificationSerializer import PixelClassificationSerializer, Ilastik05ImportDeserializer
class PixelClassificationApplet(StandardApplet):
"""
Implements the pixel classification "applet", which allows the ilastik shell to use it.
"""
def __init__(self, workflow, projectFileGroupName):
self._topLevelOperator = OpPixelClassification(parent=workflow)
def on_classifier_changed(slot, roi):
if (
self._topLevelOperator.classifier_cache.Output.ready()
and self._topLevelOperator.classifier_cache.fixAtCurrent.value is True
and self._topLevelOperator.classifier_cache.Output.value is None
):
                # When the classifier is deleted (e.g. because the number of features has changed),
                # notify the workflow. (Export applet should be disabled.)
self.appletStateUpdateRequested()
self._topLevelOperator.classifier_cache.Output.notifyDirty(on_classifier_changed)
super(PixelClassificationApplet, self).__init__("Training")
# We provide two independent serializing objects:
# one for the current scheme and one for importing old projects.
self._serializableItems = [
PixelClassificationSerializer(
self._topLevelOperator, projectFileGroupName
), # Default serializer for new projects
Ilastik05ImportDeserializer(self._topLevelOperator),
] # Legacy (v0.5) importer
self._gui = None
# GUI needs access to the serializer to enable/disable prediction storage
self.predictionSerializer = self._serializableItems[0]
        # FIXME: For now, we can connect the progress signal from the classifier training operator
        # directly to the applet's overall progress signal, because it's the only thing we report progress for at the moment.
        # If we start reporting progress for multiple tasks that might occur simultaneously,
# we'll need to aggregate the progress updates.
self._topLevelOperator.opTrain.progressSignal.subscribe(self.progressSignal)
def getMultiLaneGui(self):
"""
Override from base class. The label that is initially selected needs to be selected after volumina knows
the current layer stack. Which is only the case when the gui objects LayerViewerGui.updateAllLayers run at
least once after object init.
"""
from .pixelClassificationGui import PixelClassificationGui # Prevent imports of QT classes in headless mode
multi_lane_gui = super(PixelClassificationApplet, self).getMultiLaneGui()
guis = multi_lane_gui.getGuis()
if len(guis) > 0 and isinstance(guis[0], PixelClassificationGui) and not guis[0].isInitialized:
guis[0].selectLabel(0)
guis[0].isInitialized = True
return multi_lane_gui
@property
def topLevelOperator(self):
return self._topLevelOperator
@property
def dataSerializers(self):
return self._serializableItems
@property
def METHOD_NAME(self):
from .pixelClassificationGui import PixelClassificationGui # prevent imports of QT classes in headless mode
return PixelClassificationGui
|
2,289 |
list all components of languageand type
|
import glob
import json
import logging
import os
import sys
from dataclasses import dataclass
import nlu
COMPONENT_INFO_FILE_NAME = 'component_infos.json'
logger = logging.getLogger('nlu')
class AllComponentsInfo:
def __init__(self):
''' Initialize every NLU component_to_resolve info object and provide access to them'''
self.all_components = {}
self.classifiers = {}
self.embeddings = {}
self.normalizers = {}
self.pretrained_pipelines = {}
self.selectors = {}
self.spell_checkers = {}
self.stemmers = {}
self.tokenizers = {}
self.utils = {}
self.all_multi_lang_base_ner_languages = ['en', 'fr', 'de', 'it', 'pl', 'pt', 'ru', 'es']
self.all_multi_lang_xtreme_ner_languages = ['af', 'ar', 'bg', 'bn', 'de', 'el', 'en', 'es', 'et', 'eu', 'fa',
'fi', 'fr', 'he', 'hi', 'hu', 'id', 'it', 'ja', 'jv', 'ka', 'kk',
'ko', 'ml', 'mr', 'ms', 'my', 'nl', 'pt', 'ru', 'sw', 'ta', 'te',
'th', 'tl', 'tr', 'ur', 'vi', 'yo', 'zh']
self.all_right_to_left_langs_with_pretrained_tokenizer = ['zh', 'ko', 'ja']
self.all_pretrained_pipe_languages = ['en', 'nl', 'fr', 'de', 'it', 'no', 'pl', 'pt', 'ru', 'es', 'xx', ]
self.all_pretrained_model_languages = ['vi', 'mt', 'ta', 'af', 'cy', 'et', 'bh', 'am', 'da', 'fr', 'de', 'it',
'nb', 'no', 'nn', 'pl', 'pt', 'ru', 'es', 'af', 'ar', 'hy', 'eu', 'bn',
'br', 'bg', 'ca', 'cs', 'eo', 'fi', 'gl', 'el', 'ha', 'he', 'hi', 'hu',
'id', 'ga', 'ja', 'la', 'lv', 'mr', 'fa', 'ro', 'sk', 'sl', 'so', 'st',
'sw', 'sv', 'th', 'tr', 'uk', 'yo', 'zu', 'zh', 'xx', 'ur', 'ko',
'yi', 'uk', 'te', 'ta', 'sd', 'pa', 'ne', 'ml', 'mr', 'kn', 'id', 'gu',
'bs',
'ig', 'lg', 'lou', 'pcm', 'wo', 'rw', 'is',
] + self.all_multi_lang_xtreme_ner_languages
self.all_languages = set(self.all_pretrained_pipe_languages).union(set(self.all_pretrained_model_languages))
self.all_classifier_classes = []
# this maps a requested token to a class
self.all_nlu_actions = ['tokenize', 'pos', 'ner', 'embed', 'classify', 'sentiment', 'emotion', 'spell',
                                'dependency', 'dep', 'dep.untyped', 'match', 'sentence_detector', 'spell', 'stopwords',
'labled_dependency',
'lemma', 'norm', 'select', 'pretrained_pipe', 'util', 'embed_sentence', 'embed_chunk',
'ngram']
# all_component_paths_regex = nlu.nlu_package_location + 'components/*/*/'
# all_component_paths = glob.glob(all_component_paths_regex)
# for path in all_component_paths:
# if '__py' in path: continue
# # logger.info('Loading info dict @ path'+ path)
# component = ComponentInfo.from_directory(path)
# self.all_components[component.name] = component
# if component.type == 'classifier': self.classifiers[component.name] = component
# if component.type == 'embedding': self.embeddings[component.name] = component
# if component.type == 'normalizer': self.normalizers[component.name] = component
# if component.type == 'pretrained_pipeline': self.pretrained_pipelines[component.name] = component
# if component.type == 'selector': self.selectors[component.name] = component
# if component.type == 'spell_checker': self.spell_checkers[component.name] = component
# if component.type == 'stemmer': self.stemmers[component.name] = component
# if component.type == 'tokenizer': self.tokenizers[component.name] = component
# if component.type == 'util': self.utils[component.name] = component
def list_all_components(self):
print("--------------Avaiable Components in NLU :--------------")
for name in self.all_components.keys(): print(name)
def get_component_info_by_name(self, name):
return self.all_components[name]
def list_all_components_of_type(self, component_type='embeddings'):
pass
@staticmethod
def list_all_components_of_language(component_lang='ger'):
pass
@staticmethod
def METHOD_NAME(component_lang='ger', component_type='embeddings'):
pass
@staticmethod
def get_default_component_of_type():
pass
@staticmethod
def list_avaiable_output_types():
pass
@staticmethod
def get_all_component_info_obj():
pass
@dataclass
class ComponentInfo:
name: str
description: str # general annotator/model_anno_obj/component_to_resolve/pipeline info
outputs: list # this is which columns/output types this component_to_resolve is providing
inputs: list # this tells us which columns/input types the component_to_resolve is depending on
type: str # this tells us which kind of component_to_resolve this is
output_level: str # document, sentence, token, chunk, input_dependent or model_dependent
spark_input_column_names: list # default expected name for input columns when forking with spark nlp annotators on spark DFs
spark_output_column_names: list # default expected name for output columns when forking with spark nlp annotators on spark DFs
provider: str # Who provides the implementation of this annotator, Spark-NLP for base. Would be
license: str # open source or private
computation_context: str # Will this component_to_resolve do its computation in Spark land (like all of Spark NLP annotators do) or does it require some other computation engine or library like Tensorflow, Numpy, HuggingFace, etc..
output_context: str # Will this components final result
trainable: bool
@classmethod
def from_directory(cls, component_info_dir):
"""Create ComponentInfo class from the component_infos.json which is provided for every component_to_resolve
        @param component_info_dir: `str` The directory containing the metadata file. This
            should be the root directory of a specific dataset version.
"""
if not component_info_dir:
raise ValueError("Calling DatasetInfo.from_directory() with undefined dataset_info_dir.")
component_info_dir = component_info_dir.replace('//', '/')
with open(os.path.join(component_info_dir, COMPONENT_INFO_FILE_NAME), "r", encoding="utf8") as f:
dataset_info_dict = json.load(f)
try:
return cls(**dataset_info_dict) # dataset_info_dict
except:
print(" Exception Occured! For Path", component_info_dir,
" Json file most likely has missing features. Todo nicer output error info", sys.exc_info()[0])
raise
|
2,290 |
on kill
|
"""
Killing KILL_REQUIREMENT other players in under TIME_REQUIREMENT seconds
sends you into a rampage lasting RAMPAGE_DURATION seconds.
The rampage refills and drastically increases your weapons' rate of fire.
By default this means 3 kills in under 8 seconds to activate. For reference,
lines disappear from the killfeed 10 seconds after they appear.
Intended for use in frantic last team standing or free for all matches.
.. codeauthor:: hompy
"""
from collections import deque
from twisted.internet.reactor import callLater, seconds
from twisted.internet.task import LoopingCall
from pyspades import contained as loaders
from pyspades.common import make_color
from pyspades.constants import (
GRENADE_KILL, RIFLE_WEAPON, SMG_WEAPON, SHOTGUN_WEAPON)
KILL_REQUIREMENT = 3
TIME_REQUIREMENT = 8.0
GRENADE_KILLS_COUNT = True
RAMPAGE_REFILLS = True
RAMPAGE_RELOADS = True
RAMPAGE_DURATION = 20.0
RAPID_INTERVALS = {
RIFLE_WEAPON: 0.16,
SMG_WEAPON: 0.08,
SHOTGUN_WEAPON: 0.18
}
RAMPAGE_FOG_COLOR = (255, 0, 0)
def RAMPAGE_FOG_FUNC():
return RAMPAGE_FOG_COLOR
ANNOUNCE_RAMPAGE = True
S_RAMPAGE_START = '{player} IS ON A RAMPAGE!!'
S_RAMPAGE_KILLED = "{victim}'s rampage was ended by {killer}"
def resend_tool(player):
set_tool = loaders.SetTool()
set_tool.player_id = player.player_id
set_tool.value = player.tool
if player.weapon_object.shoot:
player.protocol.broadcast_contained(set_tool)
else:
player.send_contained(set_tool)
def rapid_cycle(player):
resend_tool(player)
if not player.weapon_object.shoot:
player.rampage_rapid_loop.stop()
def send_fog(player, color):
fog_color = loaders.FogColor()
fog_color.color = make_color(*color)
player.send_contained(fog_color)
def fog_switch(player, colorgetter_a, colorgetter_b):
if player.rampage:
send_fog(player, colorgetter_a())
player.rampage_warning_call = callLater(0.5, fog_switch, player,
colorgetter_b, colorgetter_a)
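# --- Editor's illustrative sketch (not part of the original script) ---
# The rampage trigger below keeps a fixed-size deque of kill timestamps; once
# it holds KILL_REQUIREMENT entries and the oldest one is within
# TIME_REQUIREMENT seconds of "now", the rampage starts. Helper name is
# hypothetical.
def _demo_kill_window(now=100.0, kill_times=(93.0, 96.0, 99.5)):
    window = deque(kill_times, maxlen=KILL_REQUIREMENT)
    return len(window) == KILL_REQUIREMENT and window[0] >= now - TIME_REQUIREMENT
# _demo_kill_window() -> True (3 kills within 8 seconds)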
def apply_script(protocol, connection, config):
class RampageConnection(connection):
rampage = False
rampage_rapid_loop = None
rampage_call = None
rampage_warning_call = None
rampage_kills = None
rampage_reenable_rapid_hack_detect = None
def start_rampage(self):
self.rampage = True
self.rampage_kills.clear()
self.rampage_reenable_rapid_hack_detect = self.rapid_hack_detect
self.rapid_hack_detect = False
self.rampage_call = callLater(RAMPAGE_DURATION, self.end_rampage)
if RAMPAGE_DURATION > 4.0:
self.rampage_warning_call = callLater(
RAMPAGE_DURATION - 3.0,
fog_switch,
self,
self.protocol.get_fog_color,
RAMPAGE_FOG_FUNC)
if RAMPAGE_REFILLS:
self.refill()
if RAMPAGE_RELOADS:
weapon = self.weapon_object
was_shooting = weapon.shoot
weapon.reset()
weapon_reload = loaders.WeaponReload()
weapon_reload.player_id = self.player_id
weapon_reload.clip_ammo = weapon.current_ammo
weapon_reload.reserve_ammo = weapon.current_stock
weapon.set_shoot(was_shooting)
self.send_contained(weapon_reload)
send_fog(self, RAMPAGE_FOG_COLOR)
if ANNOUNCE_RAMPAGE:
message = S_RAMPAGE_START.format(player=self.name)
self.protocol.broadcast_chat(message, global_message=None)
def end_rampage(self):
self.rampage = False
self.rapid_hack_detect = self.rampage_reenable_rapid_hack_detect
if self.rampage_call and self.rampage_call.active():
self.rampage_call.cancel()
self.rampage_call = None
if (self.rampage_warning_call and
self.rampage_warning_call.active()):
self.rampage_warning_call.cancel()
self.rampage_warning_call = None
if self.rampage_rapid_loop and self.rampage_rapid_loop.running:
self.rampage_rapid_loop.stop()
send_fog(self, self.protocol.fog_color)
def on_connect(self):
self.rampage_rapid_loop = LoopingCall(rapid_cycle, self)
self.rampage_kills = deque(maxlen=KILL_REQUIREMENT)
connection.on_connect(self)
def on_disconnect(self):
if self.rampage:
self.end_rampage()
self.rampage_rapid_loop = None
connection.on_disconnect(self)
def on_reset(self):
if self.rampage:
self.end_rampage()
connection.on_reset(self)
def METHOD_NAME(self, killer, type, grenade):
was_rampaging = self.rampage
if self.rampage:
self.end_rampage()
if killer is not None and killer is not self:
if was_rampaging and ANNOUNCE_RAMPAGE:
message = S_RAMPAGE_KILLED.format(victim=self.name,
killer=killer.name)
self.protocol.broadcast_chat(message, global_message=None)
if (not killer.rampage and killer.hp and
killer.team is not self.team and
(GRENADE_KILLS_COUNT or type != GRENADE_KILL)):
now = seconds()
killer.rampage_kills.append(now)
if (len(killer.rampage_kills) == KILL_REQUIREMENT and
killer.rampage_kills[0] >= now - TIME_REQUIREMENT):
killer.start_rampage()
return connection.METHOD_NAME(self, killer, type, grenade)
def on_grenade_thrown(self, grenade):
if self.rampage:
resend_tool(self)
connection.on_grenade_thrown(self, grenade)
def on_shoot_set(self, fire):
if (self.rampage and fire and
self.rampage_rapid_loop and
not self.rampage_rapid_loop.running):
interval = RAPID_INTERVALS[self.weapon]
self.rampage_rapid_loop.start(interval, now=False)
connection.on_shoot_set(self, fire)
def send_fog_rule(player):
return not player.rampage
class RampageProtocol(protocol):
def set_fog_color(self, color):
self.fog_color = color
fog_color = loaders.FogColor()
fog_color.color = make_color(*color)
self.broadcast_contained(fog_color, save=True, rule=send_fog_rule)
return RampageProtocol, RampageConnection
|
2,291 |
type
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetReplicationProtectionContainerMappingResult',
'AwaitableGetReplicationProtectionContainerMappingResult',
'get_replication_protection_container_mapping',
'get_replication_protection_container_mapping_output',
]
@pulumi.output_type
class GetReplicationProtectionContainerMappingResult:
"""
Protection container mapping object.
"""
def __init__(__self__, id=None, location=None, name=None, properties=None, METHOD_NAME=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", METHOD_NAME)
@property
@pulumi.getter
def id(self) -> str:
"""
Resource Id
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource Location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource Name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> 'outputs.ProtectionContainerMappingPropertiesResponse':
"""
The custom data.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
Resource Type
"""
return pulumi.get(self, "type")
class AwaitableGetReplicationProtectionContainerMappingResult(GetReplicationProtectionContainerMappingResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetReplicationProtectionContainerMappingResult(
id=self.id,
location=self.location,
name=self.name,
properties=self.properties,
METHOD_NAME=self.METHOD_NAME)
def get_replication_protection_container_mapping(fabric_name: Optional[str] = None,
mapping_name: Optional[str] = None,
protection_container_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
resource_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetReplicationProtectionContainerMappingResult:
"""
Gets the details of a protection container mapping.
:param str fabric_name: Fabric name.
:param str mapping_name: Protection Container mapping name.
:param str protection_container_name: Protection container name.
:param str resource_group_name: The name of the resource group where the recovery services vault is present.
:param str resource_name: The name of the recovery services vault.
"""
__args__ = dict()
__args__['fabricName'] = fabric_name
__args__['mappingName'] = mapping_name
__args__['protectionContainerName'] = protection_container_name
__args__['resourceGroupName'] = resource_group_name
__args__['resourceName'] = resource_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:recoveryservices/v20230601:getReplicationProtectionContainerMapping', __args__, opts=opts, typ=GetReplicationProtectionContainerMappingResult).value
return AwaitableGetReplicationProtectionContainerMappingResult(
id=pulumi.get(__ret__, 'id'),
location=pulumi.get(__ret__, 'location'),
name=pulumi.get(__ret__, 'name'),
properties=pulumi.get(__ret__, 'properties'),
METHOD_NAME=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_replication_protection_container_mapping)
def get_replication_protection_container_mapping_output(fabric_name: Optional[pulumi.Input[str]] = None,
mapping_name: Optional[pulumi.Input[str]] = None,
protection_container_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetReplicationProtectionContainerMappingResult]:
"""
Gets the details of a protection container mapping.
:param str fabric_name: Fabric name.
:param str mapping_name: Protection Container mapping name.
:param str protection_container_name: Protection container name.
:param str resource_group_name: The name of the resource group where the recovery services vault is present.
:param str resource_name: The name of the recovery services vault.
"""
...
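# Editor's illustrative sketch (not part of the generated SDK): typical
# invocation of the plain (non-Output) form above; all argument values are
# hypothetical.
#
# mapping = get_replication_protection_container_mapping(
#     fabric_name="fabric1",
#     mapping_name="mapping1",
#     protection_container_name="container1",
#     resource_group_name="rg1",
#     resource_name="vault1",
# )
# pulumi.export("mappingId", mapping.id)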
|
2,292 |
slice indices
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from fairseq.data import FairseqDataset, plasma_utils
from fairseq.data.indexed_dataset import best_fitting_int_dtype
class TokenBlockDataset(FairseqDataset):
"""Break a Dataset of tokens into blocks.
Args:
dataset (~torch.utils.data.Dataset): dataset to break into blocks
sizes (List[int]): sentence lengths (required for 'complete' and 'eos')
block_size (int): maximum block size (ignored in 'eos' break mode)
break_mode (str, optional): Mode used for breaking tokens. Values can
be one of:
- 'none': break tokens into equally sized blocks (up to block_size)
- 'complete': break tokens into blocks (up to block_size) such that
blocks contains complete sentences, although block_size may be
exceeded if some sentences exceed block_size
- 'complete_doc': similar to 'complete' mode, but do not
cross document boundaries
- 'eos': each block contains one sentence (block_size is ignored)
include_targets (bool, optional): return next tokens as targets
(default: False).
document_sep_len (int, optional): document separator size (required for
'complete_doc' break mode). Typically 1 if the sentences have eos
and 0 otherwise.
"""
def __init__(
self,
dataset,
sizes,
block_size,
pad,
eos,
break_mode=None,
include_targets=False,
document_sep_len=1,
):
try:
from fairseq.data.token_block_utils_fast import (
_get_slice_indices_fast,
_get_block_to_dataset_index_fast,
)
except ImportError:
raise ImportError(
"Please build Cython components with: `pip install --editable .` "
"or `python setup.py build_ext --inplace`"
)
super().__init__()
self.dataset = dataset
self.pad = pad
self.eos = eos
self.include_targets = include_targets
assert len(dataset) == len(sizes)
assert len(dataset) > 0
if isinstance(sizes, list):
sizes = np.array(sizes, dtype=np.int64)
else:
if torch.is_tensor(sizes):
sizes = sizes.numpy()
sizes = sizes.astype(np.int64)
break_mode = break_mode if break_mode is not None else "none"
# For "eos" break-mode, block_size is not required parameters.
if break_mode == "eos" and block_size is None:
block_size = 0
METHOD_NAME = _get_slice_indices_fast(
sizes, str(break_mode), block_size, document_sep_len
)
self._sizes = METHOD_NAME[:, 1] - METHOD_NAME[:, 0]
# build index mapping block indices to the underlying dataset indices
if break_mode == "eos":
# much faster version for eos break mode
block_to_dataset_index = np.stack(
[
np.arange(len(sizes)), # starting index in dataset
np.zeros(
len(sizes), dtype=np.compat.long
), # starting offset within starting index
np.arange(len(sizes)), # ending index in dataset
],
1,
)
else:
block_to_dataset_index = _get_block_to_dataset_index_fast(
sizes, METHOD_NAME,
)
size_dtype = np.uint16 if block_size < 65535 else np.uint32
slice_indices_dtype = best_fitting_int_dtype(METHOD_NAME[-1].max())
self._slice_indices = plasma_utils.PlasmaArray(
METHOD_NAME.astype(slice_indices_dtype)
)
self._sizes = plasma_utils.PlasmaArray(self._sizes.astype(size_dtype))
self._block_to_dataset_index = plasma_utils.PlasmaArray(
block_to_dataset_index.astype(slice_indices_dtype)
)
@property
def METHOD_NAME(self):
return self._slice_indices.array
@property
def sizes(self):
return self._sizes.array
@property
def block_to_dataset_index(self):
return self._block_to_dataset_index.array
def attr(self, attr: str, index: int):
start_ds_idx, _, _ = self.block_to_dataset_index[index]
return self.dataset.attr(attr, start_ds_idx)
def __getitem__(self, index):
start_ds_idx, start_offset, end_ds_idx = self.block_to_dataset_index[index]
buffer = torch.cat(
[self.dataset[idx] for idx in range(start_ds_idx, end_ds_idx + 1)]
)
slice_s, slice_e = self.METHOD_NAME[index]
length = slice_e - slice_s
s, e = start_offset, start_offset + length
item = buffer[s:e]
if self.include_targets:
# *target* is the original sentence (=item)
# *source* is shifted right by 1 (maybe left-padded with eos)
# *past_target* is shifted right by 2 (left-padded as needed)
if s == 0:
source = torch.cat([item.new([self.eos]), buffer[0 : e - 1]])
past_target = torch.cat(
[item.new([self.pad, self.eos]), buffer[0 : e - 2]]
)
else:
source = buffer[s - 1 : e - 1]
if s == 1:
past_target = torch.cat([item.new([self.eos]), buffer[0 : e - 2]])
else:
past_target = buffer[s - 2 : e - 2]
return source, item, past_target
return item
def __len__(self):
return len(self.METHOD_NAME)
@property
def supports_prefetch(self):
return getattr(self.dataset, "supports_prefetch", False)
def prefetch(self, indices):
self.dataset.prefetch(
{
ds_idx
for index in indices
for start_ds_idx, _, end_ds_idx in [self.block_to_dataset_index[index]]
for ds_idx in range(start_ds_idx, end_ds_idx + 1)
}
)
|
2,293 |
read file
|
#!/usr/bin/env python
import argparse
import sys
import os
from . import config
from . import rule_list
from . import vhdlFile
def parse_command_line_arguments():
'''Parses the command line arguments and returns them.'''
parser = argparse.ArgumentParser(
prog='VHDL Style Guide (VSG) Parser',
description='''Outputs formatted rule documentation.''')
return parser.parse_args()
def main():
'''Main routine of parser output'''
fExitStatus = 0
commandLineArguments = parse_command_line_arguments()
create_rule_documentation()
sys.exit(fExitStatus)
def create_rule_documentation():
oRuleList = build_rule_list()
dRules = build_rule_dictionary(oRuleList)
lRuleNames = get_names_of_rule_classes(oRuleList)
for sRuleName in lRuleNames:
build_rule_class_doc(sRuleName, dRules)
def build_rule_class_doc(sRuleName, dRules):
# for sRuleName in lRuleName:
lRuleClassDoc = []
lRuleClassDoc.append('.. include:: includes.rst')
lRuleClassDoc.extend(blank_line())
if sRuleName == 'context_ref':
sTitle = 'Context Reference Rules'
elif sRuleName == 'exit_statement':
sTitle = 'Exit Rules'
else:
sTitle = (sRuleName.title() + ' Rules').replace('_', ' ')
lRuleClassDoc.append(sTitle)
lRuleClassDoc.append('-'*len(sTitle))
lRuleClassDoc.extend(blank_line())
lRuleClassDoc.extend(import_preamble_doc(sRuleName))
lRuleClassDoc.extend(do_something(list(dRules[sRuleName])))
write_file(f'{sRuleName}_rules.rst', lRuleClassDoc)
def import_preamble_doc(sRuleName):
lReturn = []
if sRuleName == 'range':
sFileName = 'vsg/rules/ranges/preamble_doc.rst'
else:
sFileName = f'vsg/rules/{sRuleName}/preamble_doc.rst'
if os.path.exists(sFileName):
lReturn = METHOD_NAME(sFileName)
lReturn.extend(blank_line())
return lReturn
def build_rule_list():
oVhdlFile = vhdlFile.vhdlFile([''])
oConfig = config.config()
return rule_list.rule_list(oVhdlFile, oConfig)
def build_rule_dictionary(oRuleList):
lRuleName = []
dRules = {}
for oRule in oRuleList.rules:
if oRule.name not in lRuleName:
lRuleName.append(oRule.name)
dRules[oRule.name] = []
dRules[oRule.name].append(oRule)
return dRules
def get_names_of_rule_classes(oRuleList):
lRuleName = []
for oRule in oRuleList.rules:
if oRule.name not in lRuleName:
lRuleName.append(oRule.name)
return lRuleName
def do_something(lRules):
lRuleDoc = []
for oRule in lRules:
lRuleDoc.extend(generate_rule_header(oRule))
lRuleDoc.extend(blank_line())
if not oRule.deprecated and not oRule.proposed:
lRuleDoc.extend(generate_icons(oRule))
lRuleDoc.extend(blank_line())
lRuleDoc.extend(add_doc_string(oRule))
return lRuleDoc
def add_doc_string(oRule):
lReturn = []
sReturn = oRule.__doc__
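# strip the docstring's leading indentation and turn the [Violation]/[Fix] markers into reST code-block directives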
iFirstCharacter = find_index_of_first_character(sReturn)
sReturn = sReturn[iFirstCharacter:]
sReturn = sReturn.replace('\n' + ' '*(iFirstCharacter-1), '\n')
sReturn = sReturn.replace('\n' + ' '*(iFirstCharacter-2) + '\n', '\n\n')
sReturn = sReturn.replace('[Violation]', '**Violation**\n\n.. code-block:: vhdl')
sReturn = sReturn.replace('[Fix]', '**Fix**\n\n.. code-block:: vhdl')
lReturn.append(sReturn)
return lReturn
def find_index_of_first_character(sReturn):
for iChar, sChar in enumerate(sReturn):
if sChar != ' ' and sChar != '\n':
return iChar
return None
def generate_rule_header(oRule):
lReturn = []
lReturn.append(oRule.unique_id)
lReturn.append('#'*len(oRule.unique_id))
return lReturn
def blank_line():
lReturn = []
lReturn.append('')
return lReturn
def write_file(sFilename, lLines):
with open(sFilename, 'w') as oFile:
for sLine in lLines:
oFile.write(sLine + '\n')
def METHOD_NAME(sFileName):
lLines = []
with open(sFileName) as oFile:
for sLine in oFile:
lLines.append(sLine.rstrip())
return lLines
def generate_icons(oRule):
sIcons = ''
sIcons += create_phase_icon(oRule)
sIcons += create_disabled_icon(oRule)
sIcons += create_severity_icon(oRule)
sIcons += create_group_icons(oRule)
return [sIcons]
def create_phase_icon(oRule):
return '|phase_' + str(oRule.phase) + '|'
def create_disabled_icon(oRule):
sReturn = ''
if oRule.disable:
sReturn += (' ')
sReturn += ('|disabled|')
return sReturn
def create_severity_icon(oRule):
sReturn = ' '
sReturn += ('|' + oRule.severity.name.lower() + '|')
return sReturn
def create_group_icons(oRule):
sReturn = ''
for sGroup in oRule.groups:
sReturn += ' '
sReturn += '|' + sGroup.replace('::', '_') + '|'
return sReturn
if __name__ == '__rule_doc_gen__':
main()
|
2,294 |
build
|
#!/usr/bin/env python3
import argparse
import os
import shutil
import requests
import sys
import json
from pathlib import Path
## Configure command-line options
parser = argparse.ArgumentParser()
parser.add_argument('action', help="Action to execute", choices=["build", "push", "delete"], default="build")
parser.add_argument('--organisation', help="Organisation, e.g. opencb", default="opencb")
parser.add_argument('--images', help="comma separated list of images to be made, e.g. app", default="app")
parser.add_argument('--tag', help="the tag for this code, e.g. v2.0.0")
parser.add_argument('--build-folder', help="the location of the build folder, if not default location")
parser.add_argument('--username', help="credentials for dockerhub (REQUIRED if deleting from DockerHub)")
parser.add_argument('--password', help="credentials for dockerhub (REQUIRED if deleting from DockerHub)")
## Some ANSI colors to print shell output
shell_colors = {
'red': '\033[91m',
'green': '\033[92m',
'blue': '\033[94m',
'magenta': '\033[95m',
'bold': '\033[1m',
'reset': '\033[0m'
}
def error(message):
sys.stderr.write(shell_colors['red'] + 'ERROR: %s\n' % message + shell_colors['reset'])
sys.exit(2)
def run(command):
print(shell_colors['bold'] + command + shell_colors['reset'])
code = os.system(command)
if code != 0:
error("Error executing: " + command)
def print_header(str):
print(shell_colors['magenta'] + "*************************************************" + shell_colors['reset'])
print(shell_colors['magenta'] + str + shell_colors['reset'])
print(shell_colors['magenta'] + "*************************************************" + shell_colors['reset'])
def package_json():
basedir = str(Path(__file__).resolve().parents[1])
p = Path(basedir + "/package.json")
with open(p, "r") as package_json:
data=package_json.read()
return json.loads(data)
# def login(loginRequired=False):
# if args.username is None or args.password is None:
# if loginRequired:
# error("Username and password are required")
# else:
# return
#
# code = os.system("docker login -u " + args.username + " --password " + args.password)
# if code != 0:
# error("Error executing: docker login")
def METHOD_NAME():
print_header('Building docker images: ' + ', '.join(images))
## IMPORTANT: we cannot build Docker images using directories outside the build context.
## A simple solution is to copy 'custom-sites' into the 'build' folder and then run 'docker build' from there.
if os.path.exists('custom-sites'):
print(shell_colors['blue'] + "Copying 'custom-sites' folder into 'build' ...\n" + shell_colors['reset'])
shutil.rmtree("build/custom-sites", ignore_errors=True)
shutil.copytree("custom-sites", "build/custom-sites")
for image in images:
if image == "app":
site = "src/sites"
else:
## We must create 'img' if it does not exist, otherwise the Dockerfile build will fail
Path("build/custom-sites/" + image + "/iva/img").mkdir(exist_ok=True)
site = "build/custom-sites" + "/" + image
print(shell_colors['blue'] + "Building " + organisation + "/iva-" + image + ":" + tag + " ..." + shell_colors['reset'])
run("docker build -t " + organisation + "/iva-" + image + ":" + tag + " --build-arg SITE=" + site + " -f " + build_folder + "/docker/iva-app/Dockerfile " + build_folder)
def tag_latest(image):
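# compare this tag against the tags already published on Docker Hub; if it is at least as new, also tag and push 'latest'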
latest_tag = os.popen(("curl -s https://registry.hub.docker.com/v1/repositories/" + organisation + "/iva-" + image + "/tags"
+ " | jq -r .[].name"
+ " | grep -v latest"
+ " | sort -h"
+ " | head"))
if tag >= latest_tag.read():
print(shell_colors['blue'] + "Pushing " + organisation + "/iva-" + image + ":latest" + shell_colors['reset'])
run("docker tag " + organisation + "/iva-" + image + ":" + tag + " " + organisation + "/iva-" + image + ":latest")
run("docker push " + organisation + "/iva-" + image + ":latest")
def push():
print_header('Pushing to DockerHub: ' + ', '.join(images))
for image in images:
print()
print(shell_colors['blue'] + "Pushing " + organisation + "/iva-" + image + ":" + tag + " ..." + shell_colors['reset'])
run("docker push " + organisation + "/iva-" + image + ":" + tag)
tag_latest(image)
def delete():
print_header('Deleting from DockerHub: ' + ', '.join(images))
if args.username is None or args.password is None:
error("Username and password are required")
headers = {
'Content-Type': 'application/json',
}
data = '{"username": "' + args.username + '", "password": "' + args.password + '"}'
response = requests.post('https://hub.docker.com/v2/users/login/', headers=headers, data=data)
json_response = json.loads(response.content)
if response.status_code != 200:
error("dockerhub login failed")
for i in images:
print()
print(shell_colors['blue'] + "Deleting image on Docker hub for " + organisation + "/iva-" + i + ":" + tag + shell_colors['reset'])
headers = {
'Authorization': 'JWT ' + json_response["token"]
}
requests.delete("https://hub.docker.com/v2/repositories/" + organisation + "/iva-" + i + "/tags/" + tag + "/", headers=headers)
## Parse command-line parameters and init basedir, tag and build_folder
args = parser.parse_args()
# 1. init basedir: root of the iva repo
basedir = str(Path(__file__).resolve().parents[1])
# init organisation: set organisation to default value if not set
if args.organisation is not None:
organisation = args.organisation
else:
organisation = "opencb"
# 2. init tag: set tag to default value if not set
if args.tag is not None:
tag = args.tag
else:
tag = package_json()["version"]
# 3. init build_folder: set build folder to default value if not set
if args.build_folder is not None:
build_folder = args.build_folder
else:
build_folder = basedir
if not os.path.isdir(build_folder):
error("Build folder does not exist: " + build_folder)
# 4. init images: get a list with all images
if args.images is None:
images = ["app"]
elif args.images == "all":
if not os.path.isdir('custom-sites'):
error("Custom sites folder does not exist (required if images is set to 'all')")
# Get all folders in 'custom-sites'
images = [d for d in os.listdir("custom-sites") if os.path.isdir(os.path.join("custom-sites", d)) and not d.startswith(".")]
else:
images = args.images.split(",")
## Execute the action
if args.action == "build":
# login(loginRequired=False)
METHOD_NAME()
elif args.action == "push":
# login(loginRequired=False)
METHOD_NAME()
push()
elif args.action == "delete":
delete()
else:
error("Unknown action: " + args.action)
|
2,295 |
setup
|
#
# Copyright (c) 2020 Alex Richardson
#
# This software was developed by SRI International and the University of
# Cambridge Computer Laboratory (Department of Computer Science and
# Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
# DARPA SSITH research programme.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
import sys
from .cross.bbl import BuildBBLNoPayload
from .cross.cheribsd import BuildCheriBsdMfsKernel, ConfigPlatform
from .project import AutotoolsProject, BuildType, CheriConfig, DefaultInstallDir, GitRepository, MakeCommandKind
from .simple_project import SimpleProject
from ..config.compilation_targets import CompilationTargets
class BuildCheriSpike(AutotoolsProject):
target = "spike"
repository = GitRepository("https://github.com/CTSRD-CHERI/riscv-isa-sim",
default_branch="cheri", force_branch=True)
native_install_dir = DefaultInstallDir.CHERI_SDK
default_build_type = BuildType.RELEASE
lto_by_default = True
prefer_full_lto_over_thin_lto = True
lto_set_ld = False
make_kind = MakeCommandKind.GnuMake
def check_system_dependencies(self) -> None:
super().check_system_dependencies()
self.check_required_system_tool("dtc", apt="device-tree-compiler", homebrew="dtc")
def METHOD_NAME(self):
super().METHOD_NAME()
self.configure_args.append("--enable-cheri")
self.configure_args.append("--disable-rvfi-dii")
# We have to pass LDFLAGS as part of CC/CXX since the build system is dumb.
common_flags = self.default_compiler_flags + self.default_ldflags
self.configure_environment["CC"] = self.commandline_to_str([str(self.CC), *common_flags, *self.CFLAGS])
self.configure_environment["CXX"] = self.commandline_to_str([str(self.CXX), *common_flags, *self.CXXFLAGS])
@classmethod
def get_simulator_binary(cls, caller):
return cls.get_install_dir(caller, cross_target=CompilationTargets.NATIVE) / "bin/spike"
class RunCheriSpikeBase(SimpleProject):
do_not_add_to_targets = True
_bbl_xtarget = CompilationTargets.FREESTANDING_RISCV64_PURECAP
_bbl_class = BuildBBLNoPayload.get_class_for_target(_bbl_xtarget)
_source_class = None
@classmethod
def dependencies(cls, _: CheriConfig) -> "tuple[str, ...]":
return cls._source_class.target, cls._bbl_class.target, BuildCheriSpike.target
def process(self):
kernel_project = self._source_class.get_instance(self)
kernel_config = kernel_project.default_kernel_config(ConfigPlatform.QEMU)
kernel = kernel_project.get_kernel_install_path(kernel_config)
# We always want output even with --quiet
self.run_cmd([BuildCheriSpike.get_simulator_binary(self), "+payload=" + str(kernel),
self._bbl_class.get_installed_kernel_path(self, cross_target=self._bbl_xtarget)],
give_tty_control=True, stdout=sys.stdout, stderr=sys.stderr)
class RunCheriBsdSpike(RunCheriSpikeBase):
target = "run-spike"
_source_class = BuildCheriBsdMfsKernel
supported_architectures = (CompilationTargets.CHERIBSD_RISCV_PURECAP, CompilationTargets.CHERIBSD_RISCV_NO_CHERI,
CompilationTargets.CHERIBSD_RISCV_HYBRID)
|
2,296 |
test get text unit name no context
|
from unittest import mock
import unittest
from ict import GettextUtils
from ict import IctMetadataBuilder
from ict import TagsBlockEncoder
# Keep this snippet in sync with the Javascript implementation IctMetadataExtractor.js, TagsBlockDecoder.js, etc
# in this directory
# python3 -m venv ~/p3-env
# . ~/p3-env/bin/activate
# python test_ict.py
class GettextUtilsTestCase(unittest.TestCase):
def test_po_plural_form_to_cldr_form_en_US(self):
self.assertEqual('one', GettextUtils.po_plural_form_to_cldr_form(0, 'en-US'))
self.assertEqual('other', GettextUtils.po_plural_form_to_cldr_form(1, 'en-US'))
def test_po_plural_form_to_cldr_form_fr_FR(self):
self.assertEqual('one', GettextUtils.po_plural_form_to_cldr_form(0, 'fr-FR'))
self.assertEqual('other', GettextUtils.po_plural_form_to_cldr_form(1, 'fr-FR'))
def test_po_plural_form_to_cldr_form_ja_JP(self):
self.assertEqual('other', GettextUtils.po_plural_form_to_cldr_form(0, 'ja-JP'))
def test_po_plural_form_to_cldr_form_ru_RU(self):
self.assertEqual('one', GettextUtils.po_plural_form_to_cldr_form(0, 'ru-RU'))
self.assertEqual('few', GettextUtils.po_plural_form_to_cldr_form(1, 'ru-RU'))
self.assertEqual('many', GettextUtils.po_plural_form_to_cldr_form(2, 'ru-RU'))
def test_po_plural_form_to_cldr_form_unsupported(self):
self.assertEqual('one', GettextUtils.po_plural_form_to_cldr_form(0, 'unsupported'))
self.assertEqual('other', GettextUtils.po_plural_form_to_cldr_form(1, 'unsupported'))
self.assertEqual('other', GettextUtils.po_plural_form_to_cldr_form(2, 'unsupported'))
def METHOD_NAME(self):
self.assertEqual('msg', GettextUtils.get_text_unit_name('msg', None, None, 'en'))
def test_get_text_unit_name_with_context(self):
self.assertEqual('msg --- ctx', GettextUtils.get_text_unit_name('msg', 'ctx', None, 'en'))
def test_get_text_unit_name_with_plural_en_singular(self):
self.assertEqual('msg --- ctx _one', GettextUtils.get_text_unit_name('msg', 'ctx', 0, 'en'))
def test_get_text_unit_name_with_plural_en_plural(self):
self.assertEqual('msg --- ctx _other', GettextUtils.get_text_unit_name('msg', 'ctx', 1, 'en'))
def test_get_text_unit_name_with_plural_ru_plural_one(self):
self.assertEqual('msg --- ctx _one', GettextUtils.get_text_unit_name('msg', 'ctx', 0, 'ru-RU'))
def test_get_text_unit_name_with_plural_ru_plural_few(self):
self.assertEqual('msg --- ctx _few', GettextUtils.get_text_unit_name('msg', 'ctx', 1, 'ru-RU'))
def test_get_text_unit_name_with_plural_ru_plural_many(self):
self.assertEqual('msg --- ctx _many', GettextUtils.get_text_unit_name('msg', 'ctx', 2, 'ru-RU'))
class IctMetadataBuilderTestCase(unittest.TestCase):
def test_unicode_to_tags_block(self):
expected = (
'\U000e0022\U000e0063\U000e006d\U000e0056\U000e0077\U000e0062\U000e0078\U000e004e'
'\U000e0068\U000e0063\U000e0033\U000e004e\U000e006c\U000e0064\U000e0042\U000e004e'
'\U000e0030\U000e005a\U000e0058\U000e0068\U000e0030\U000e0064\U000e0057\U000e0035'
'\U000e0070\U000e0064\U000e0042\U000e004e\U000e006d\U000e0063\U000e0069\U000e0031'
'\U000e0047\U000e0055\U000e0068\U000e004e\U000e007a\U000e0064\U000e0047\U000e0046'
'\U000e006a\U000e0061\U000e0077\U000e003d\U000e003d\U000e0023translation3\U000e0024'
)
actual = IctMetadataBuilder.get_translation_with_metadata(
'repo', 'asset', 'textunit', 'fr-FR', 'stack', 'translation3'
)
# _print_actual_for_test_update(actual)
self.assertEqual(expected, actual)
class TagsBlockEncoderTestCase(unittest.TestCase):
def test_unicode_to_tags_block_basic_ascii(self):
expected = '\U000e0059\U000e0057\U000e0046\U000e0069\U000e0059\U000e006d\U000e004e\U000e006a'
actual = TagsBlockEncoder.unicode_to_tags_block('aabbcc')
# _print_actual_for_test_update(actual)
self.assertEqual(expected, actual)
def test_unicode_to_tags_block_japanese(self):
expected = (
'\U000e0035\U000e0070\U000e0065\U000e006c\U000e0035\U000e0070\U000e0079\U000e0073'
'\U000e0035\U000e005a\U000e0075\U000e0039'
)
actual = TagsBlockEncoder.unicode_to_tags_block('日本国')
# _print_actual_for_test_update(actual)
self.assertEqual(expected, actual)
def _print_actual_for_test_update(actual):
print(actual.encode('raw_unicode_escape').decode('utf-8'))
if __name__ == '__main__':
unittest.main()
|
2,297 |
setup network
|
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test PrioritiseTransaction code
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.mininode import COIN, MAX_BLOCK_BASE_SIZE
class PrioritiseTransactionTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 2
self.txouts = gen_return_txouts()
def METHOD_NAME(self):
self.nodes = []
self.is_network_split = False
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-printpriority=1"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-debug", "-printpriority=1"]))
connect_nodes(self.nodes[0], 1)
self.relayfee = self.nodes[0].getnetworkinfo()['relayfee']
def run_test(self):
utxo_count = 90
utxos = create_confirmed_utxos(self.relayfee, self.nodes[0], utxo_count)
base_fee = self.relayfee*100 # our transactions are smaller than 100kb
txids = []
# Create 3 batches of transactions at 3 different fee rate levels
range_size = utxo_count // 3
for i in range(3):
txids.append([])
start_range = i * range_size
end_range = start_range + range_size
txids[i] = create_lots_of_big_transactions(self.nodes[0], self.txouts, utxos[start_range:end_range], end_range - start_range, (i+1)*base_fee)
# Make sure that the size of each group of transactions exceeds
# MAX_BLOCK_BASE_SIZE -- otherwise the test needs to be revised to create
# more transactions.
mempool = self.nodes[0].getrawmempool(True)
sizes = [0, 0, 0]
for i in range(3):
for j in txids[i]:
assert(j in mempool)
sizes[i] += mempool[j]['size']
assert(sizes[i] > MAX_BLOCK_BASE_SIZE) # Fail => raise utxo_count
# add a fee delta to something in the cheapest bucket and make sure it gets mined
# also check that a different entry in the cheapest bucket is NOT mined (lower
# the priority to ensure it's not mined due to priority)
self.nodes[0].prioritisetransaction(txids[0][0], 0, int(3*base_fee*COIN))
self.nodes[0].prioritisetransaction(txids[0][1], -1e15, 0)
self.nodes[0].generate(1)
mempool = self.nodes[0].getrawmempool()
print("Assert that prioritised transaction was mined")
assert(txids[0][0] not in mempool)
assert(txids[0][1] in mempool)
high_fee_tx = None
for x in txids[2]:
if x not in mempool:
high_fee_tx = x
# Something high-fee should have been mined!
assert(high_fee_tx != None)
# Add a prioritisation before a tx is in the mempool (de-prioritising a
# high-fee transaction so that it's now low fee).
self.nodes[0].prioritisetransaction(high_fee_tx, -1e15, -int(2*base_fee*COIN))
# Add everything back to mempool
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# Check to make sure our high fee rate tx is back in the mempool
mempool = self.nodes[0].getrawmempool()
assert(high_fee_tx in mempool)
# Now verify the modified-high feerate transaction isn't mined before
# the other high fee transactions. Keep mining until our mempool has
# decreased by all the high fee size that we calculated above.
while (self.nodes[0].getmempoolinfo()['bytes'] > sizes[0] + sizes[1]):
self.nodes[0].generate(1)
# High fee transaction should not have been mined, but other high fee rate
# transactions should have been.
mempool = self.nodes[0].getrawmempool()
print("Assert that de-prioritised transaction is still in mempool")
assert(high_fee_tx in mempool)
for x in txids[2]:
if (x != high_fee_tx):
assert(x not in mempool)
# Create a free, low priority transaction. Should be rejected.
utxo_list = self.nodes[0].listunspent()
assert(len(utxo_list) > 0)
utxo = utxo_list[0]
inputs = []
outputs = {}
inputs.append({"txid" : utxo["txid"], "vout" : utxo["vout"]})
outputs[self.nodes[0].getnewaddress()] = utxo["amount"] - self.relayfee
raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
tx_hex = self.nodes[0].signrawtransaction(raw_tx)["hex"]
txid = self.nodes[0].sendrawtransaction(tx_hex)
# A tx that spends an in-mempool tx has 0 priority, so we can use it to
# test the effect of using prioritise transaction for mempool acceptance
inputs = []
inputs.append({"txid": txid, "vout": 0})
outputs = {}
outputs[self.nodes[0].getnewaddress()] = utxo["amount"] - self.relayfee
raw_tx2 = self.nodes[0].createrawtransaction(inputs, outputs)
tx2_hex = self.nodes[0].signrawtransaction(raw_tx2)["hex"]
tx2_id = self.nodes[0].decoderawtransaction(tx2_hex)["txid"]
try:
self.nodes[0].sendrawtransaction(tx2_hex)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26) # insufficient fee
assert(tx2_id not in self.nodes[0].getrawmempool())
else:
assert(False)
# This is a less than 1000-byte transaction, so just set the fee
# to be the minimum for a 1000 byte transaction and check that it is
# accepted.
self.nodes[0].prioritisetransaction(tx2_id, 0, int(self.relayfee*COIN))
print("Assert that prioritised free transaction is accepted to mempool")
assert_equal(self.nodes[0].sendrawtransaction(tx2_hex), tx2_id)
assert(tx2_id in self.nodes[0].getrawmempool())
# Test that calling prioritisetransaction is sufficient to trigger
# getblocktemplate to (eventually) return a new block.
mock_time = int(time.time())
self.nodes[0].setmocktime(mock_time)
template = self.nodes[0].getblocktemplate()
self.nodes[0].prioritisetransaction(txid, 0, -int(self.relayfee*COIN))
self.nodes[0].setmocktime(mock_time+10)
new_template = self.nodes[0].getblocktemplate()
assert(template != new_template)
if __name__ == '__main__':
PrioritiseTransactionTest().main()
|
2,298 |
test save report permission
|
import os
import re
import tempfile
import unittest
from unittest.mock import patch
from AnyQt.QtCore import Qt, QRectF
from AnyQt.QtGui import QFont, QBrush, QPixmap, QColor, QIcon
from AnyQt.QtWidgets import QGraphicsScene
from orangewidget.report.owreport import OWReport, HAVE_REPORT
from orangewidget import gui
from orangewidget.utils.itemmodels import PyTableModel
from orangewidget.widget import OWBaseWidget
from orangewidget.tests.base import GuiTest
class TstWidget(OWBaseWidget):
def send_report(self):
self.report_caption("AA")
class TestReport(GuiTest):
def test_report(self):
count = 5
rep = OWReport()
for _ in range(count):
widget = TstWidget()
widget.create_report_html()
rep.make_report(widget)
self.assertEqual(rep.table_model.rowCount(), count)
def test_report_table(self):
rep = OWReport()
model = PyTableModel([['x', 1, 2],
['y', 2, 2]])
model.setHorizontalHeaderLabels(['a', 'b', 'c'])
model.setData(model.index(0, 0), Qt.AlignHCenter | Qt.AlignTop, Qt.TextAlignmentRole)
model.setData(model.index(1, 0), QFont('', -1, QFont.Bold), Qt.FontRole)
model.setData(model.index(1, 2), QBrush(Qt.red), Qt.BackgroundRole)
view = gui.TableView()
view.show()
view.setModel(model)
rep.report_table('Name', view)
self.maxDiff = None
self.assertEqual(
rep.report_html,
"""
<h2>Name</h2><table>
<tr><th style="color:black; background:transparent; text-align:left; vertical-align:middle;">a</th>
<th style="color:black; background:transparent; text-align:left; vertical-align:middle;">b</th>
<th style="color:black; background:transparent; text-align:left; vertical-align:middle;">c</th>
</tr><tr><td style="color:black; background:transparent; text-align:center; vertical-align:top;">x</td>
<td style="color:black; background:transparent; text-align:right; vertical-align:middle;">1</td>
<td style="color:black; background:transparent; text-align:right; vertical-align:middle;">2</td>
</tr><tr><td style="color:black; background:transparent; font-weight: bold; text-align:left; vertical-align:middle;">y</td>
<td style="color:black; background:transparent; text-align:right; vertical-align:middle;">2</td>
<td style="color:black; background:#ff0000; text-align:right; vertical-align:middle;">2</td>
</tr></table>
""".strip())
def METHOD_NAME(self):
"""
Permission Error may occur when trying to save report.
GH-2147
"""
rep = OWReport()
filenames = ["f.report", "f.html"]
for filename in filenames:
with patch("orangewidget.report.owreport.open",
create=True, side_effect=PermissionError),\
patch("AnyQt.QtWidgets.QFileDialog.getSaveFileName",
return_value=(filename, 'Report (*.report)')),\
patch("AnyQt.QtWidgets.QMessageBox.exec",
return_value=True), \
patch("orangewidget.report.owreport.log.error") as log:
rep.save_report()
log.assert_called()
def test_save_report(self):
rep = OWReport()
widget = TstWidget()
widget.create_report_html()
rep.make_report(widget)
temp_dir = tempfile.mkdtemp()
temp_name = os.path.join(temp_dir, "f.report")
try:
with patch("AnyQt.QtWidgets.QFileDialog.getSaveFileName",
return_value=(temp_name, 'Report (*.report)')), \
patch("AnyQt.QtWidgets.QMessageBox.exec",
return_value=True):
rep.save_report()
finally:
os.remove(temp_name)
os.rmdir(temp_dir)
@patch("AnyQt.QtWidgets.QFileDialog.getSaveFileName",
return_value=(False, 'HTML (*.html)'))
def test_save_report_formats(self, mock):
rep = OWReport()
widget = TstWidget()
widget.create_report_html()
rep.make_report(widget)
rep.report_view = False
rep.save_report()
formats = mock.call_args_list[-1][0][-1].split(';;')
self.assertEqual(["Report (*.report)"], formats)
rep.report_view = True
rep.save_report()
formats = mock.call_args_list[-1][0][-1].split(';;')
self.assertEqual(['HTML (*.html)', 'PDF (*.pdf)', 'Report (*.report)'], formats)
def test_show_webengine_warning_only_once(self):
rep = OWReport()
widget = TstWidget()
with patch("AnyQt.QtWidgets.QMessageBox.critical", return_value=True) as p:
widget.show_report()
self.assertEqual(0 if HAVE_REPORT else 1, len(p.call_args_list))
widget.show_report()
self.assertEqual(0 if HAVE_REPORT else 1, len(p.call_args_list))
def test_disable_saving_empty(self):
"""Test if save and print buttons are disabled on empty report"""
rep = OWReport()
self.assertFalse(rep.save_button.isEnabled())
self.assertFalse(rep.print_button.isEnabled())
widget = TstWidget()
widget.create_report_html()
rep.make_report(widget)
self.assertTrue(rep.save_button.isEnabled())
self.assertTrue(rep.print_button.isEnabled())
rep.clear()
self.assertFalse(rep.save_button.isEnabled())
self.assertFalse(rep.print_button.isEnabled())
def test_report_table_with_images(self):
def basic_icon():
pixmap = QPixmap(15, 15)
pixmap.fill(QColor("red"))
return QIcon(pixmap)
def basic_scene():
scene = QGraphicsScene()
scene.addRect(QRectF(0, 0, 100, 100))
return scene
rep = OWReport()
model = PyTableModel([['x', 1, 2]])
model.setHorizontalHeaderLabels(['a', 'b', 'c'])
model.setData(model.index(0, 1), basic_icon(), Qt.DecorationRole)
model.setData(model.index(0, 2), basic_scene(), Qt.DisplayRole)
view = gui.TableView()
view.show()
view.setModel(model)
rep.report_table('Name', view)
self.assertIsNotNone(
re.search('<img(.*) src="data:image/png;base64,', rep.report_html))
if __name__ == "__main__":
unittest.main()
|
2,299 |
supported extensions
|
# -*- coding: utf-8 -*-
# Copyright © 2016-2019, Chris Warrick.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Post scanner for package indexes."""
from __future__ import unicode_literals
import glob
import sys
import os
from nikola import utils
from nikola.post import Post
from nikola.plugin_categories import PostScanner
LOGGER = utils.get_logger('pkgindex_scan', utils.STDERR_HANDLER)
class PackageIndexScanner(PostScanner):
"""Scanner for package indexes."""
name = "pkgindex_scan"
def scan(self):
"""Scan posts in a package index."""
if 'PKGINDEX_CONFIG' not in self.site.config:
return []
config = self.site.config['PKGINDEX_CONFIG']
compiler = self.site.get_compiler('sample' + config['extension'])
if not self.site.quiet:
print("Scanning package index posts...", end='', file=sys.stderr)
timeline = []
self.site.pkgindex_entries = {}
self.site.pkgindex_by_name = {}
self.site.pkgindex_multiver = {}
for topdir, dirsettings in self.site.config['PKGINDEX_DIRS'].items():
destination, template_name = dirsettings
self.site.pkgindex_entries[topdir] = []
for pkgdir in glob.glob(topdir + "/*"):
if not os.path.isdir(pkgdir):
# Ignore non-directories
continue
post = Post(
os.path.join(pkgdir, 'README.md'),
self.site.config,
destination,
False,
self.site.MESSAGES,
template_name,
compiler
)
try:
post._is_two_file = True
except AttributeError:
post.is_two_file = True
for d in post.meta.values():
d['is_special_entry'] = False
timeline.append(post)
self.site.pkgindex_entries[topdir].append(post)
self._update_name_multiver(post)
if 'special_entries' in config:
for source_path, destination, template_name, topdir in config['special_entries']:
post = Post(
source_path,
self.site.config,
destination,
False,
self.site.MESSAGES,
template_name,
compiler
)
try:
post._is_two_file = True
except AttributeError:
post.is_two_file = True
for d in post.meta.values():
d['is_special_entry'] = True
timeline.append(post)
self.site.pkgindex_entries[topdir].append(post)
self._update_name_multiver(post)
# But wait, we need to change tags on multiver stuff!
# This is kinda... hacky...
maxver = config['versions_supported'][-1]
for versions in self.site.pkgindex_multiver.values():
versions = sorted(versions, key=lambda post: post.meta('dirver'))
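# map every supported version to the newest post whose dirver does not exceed it (later posts overwrite earlier entries)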
v2p = {}
for post in versions:
dirver = post.meta('dirver')
for v in range(dirver, maxver + 1):
v2p[v] = post
p2v = {}
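# invert the mapping so each post carries the full list of versions it covers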
for v, p in v2p.items():
if p in p2v:
p2v[p].append(v)
else:
p2v[p] = [v]
for post, versions in p2v.items():
# And finally, update tags.
tags = post._tags[self.site.default_lang]
tags = [i for i in tags if not (i.startswith('v') and i[1:].isdigit())]
tags += ['v{0}'.format(i) for i in versions]
tags.append('multiver')
post._tags[self.site.default_lang] = tags
post.meta['en']['tags'] = tags
post.meta['en']['multiver'] = True
post.meta['en']['allver'] = versions
if not post.meta['en']['maxver'] and versions[-1] != maxver:
post.meta['en']['maxver'] = versions[-1]
# And generate self.site.pkgindex_by_version
self.site.pkgindex_by_version = {i: [] for i in config['versions_supported']}
for l in self.site.pkgindex_entries.values():
for post in l:
for version in post.meta['en']['allver']:
self.site.pkgindex_by_version[version].append(post)
return timeline
def METHOD_NAME(self):
"""Return a list of supported file extensions, or None if such a list isn't known beforehand."""
if 'PKGINDEX_CONFIG' not in self.site.config:
return None
return [self.site.config['PKGINDEX_CONFIG']['extension']]
def _update_name_multiver(self, post):
name = post.meta('slug')
if name in self.site.pkgindex_by_name:
self.site.pkgindex_by_name[name].append(post)
multiver = True
else:
self.site.pkgindex_by_name[name] = [post]
multiver = False
if multiver:
self.site.pkgindex_multiver[name] = self.site.pkgindex_by_name[name]
|