id (int64, 0-300k) | label (string, lengths 1-74, ⌀ = null) | text (string, lengths 4k-8k) |
---|---|---|
2,100 | watchdog |
"""
Pytest configuration that spins up a single localstack instance that is shared across test modules.
See: https://docs.pytest.org/en/6.2.x/fixture.html#conftest-py-sharing-fixtures-across-multiple-files
It is thread/process safe to run with pytest-parallel, but not with pytest-xdist.
"""
import logging
import multiprocessing as mp
import os
import threading
import pytest
from localstack import config, constants
from localstack.config import is_env_true
from localstack.constants import ENV_INTERNAL_TEST_RUN
from localstack.runtime import events
from localstack.services import infra
from localstack.utils.common import safe_requests
from tests.aws.services.es.test_es import install_async as es_install_async
from tests.aws.services.opensearch.test_opensearch import install_async as opensearch_install_async
from tests.aws.test_terraform import TestTerraform
logger = logging.getLogger(__name__)
localstack_started = mp.Event() # event indicating whether localstack has been started
localstack_stop = mp.Event() # event that can be triggered to stop localstack
localstack_stopped = mp.Event() # event indicating that localstack has been stopped
startup_monitor_event = mp.Event() # event that can be triggered to start localstack
# collection of functions that should be executed to initialize tests
test_init_functions = set()
if config.is_collect_metrics_mode():
pytest_plugins = "localstack.testing.pytest.metric_collection"
@pytest.hookimpl()
def pytest_configure(config):
# first pytest lifecycle hook
_start_monitor()
def pytest_runtestloop(session):
# second pytest lifecycle hook (before test runner starts)
# collect test classes
test_classes = set()
for item in session.items:
if item.parent and item.parent.cls:
test_classes.add(item.parent.cls)
# The OpenSearch/Elasticsearch tests are plain pytest functions, not unittest classes, so we check based on the item parent's name.
# Any tests that rely on OpenSearch/Elasticsearch must be special-cased by adding them to the list below
parent_name = str(item.parent).lower()
if any(opensearch_test in parent_name for opensearch_test in ["opensearch", "firehose"]):
test_init_functions.add(opensearch_install_async)
if any(opensearch_test in parent_name for opensearch_test in ["test_es", "firehose"]):
test_init_functions.add(es_install_async)
# add init functions for certain tests that download/install things
for test_class in test_classes:
# set flag that terraform will be used
if TestTerraform is test_class:
logger.info("will initialize TestTerraform")
test_init_functions.add(TestTerraform.init_async)
continue
if not session.items:
return
if session.config.option.collectonly:
return
# trigger localstack startup in startup_monitor and wait until it becomes ready
startup_monitor_event.set()
localstack_started.wait()
@pytest.hookimpl()
def pytest_unconfigure(config):
# last pytest lifecycle hook (before pytest exits)
_trigger_stop()
# wait for localstack to stop. We do not want to exit immediately, otherwise new threads during shutdown will fail
if not localstack_stopped.wait(timeout=10):
logger.warning("LocalStack did not exit in time!")
def _start_monitor():
threading.Thread(target=startup_monitor).start()
def _trigger_stop():
localstack_stop.set()
startup_monitor_event.set()
def startup_monitor() -> None:
"""
The startup monitor is a thread that waits for the startup_monitor_event and, once the event is true, starts a
localstack instance in its own thread context.
"""
logger.info("waiting on localstack_start signal")
startup_monitor_event.wait()
if localstack_stop.is_set():
# this is called if _trigger_stop() is called before any test has requested the localstack_runtime fixture.
logger.info("ending startup_monitor")
localstack_stopped.set()
return
if is_env_true("TEST_SKIP_LOCALSTACK_START") or os.environ.get("TEST_TARGET") == "AWS_CLOUD":
logger.info("TEST_SKIP_LOCALSTACK_START is set, not starting localstack")
localstack_started.set()
localstack_stopped.set()
return
logger.info("running localstack")
run_localstack()
def run_localstack():
"""
Start localstack and block until it terminates. Terminate localstack by calling _trigger_stop().
"""
# configure
os.environ[ENV_INTERNAL_TEST_RUN] = "1"
safe_requests.verify_ssl = False
config.FORCE_SHUTDOWN = False
config.GATEWAY_LISTEN = [config.HostAndPort(host="0.0.0.0", port=constants.DEFAULT_PORT_EDGE)]
def METHOD_NAME():
logger.info("waiting stop event")
localstack_stop.wait() # triggered by _trigger_stop()
logger.info("stopping infra")
infra.stop_infra()
monitor = threading.Thread(target=METHOD_NAME)
monitor.start()
logger.info("starting localstack infrastructure")
infra.start_infra(asynchronous=True)
for fn in test_init_functions:
try:
# asynchronous init functions
fn()
except Exception:
logger.exception("exception while running init function for test")
logger.info("waiting for infra to be ready")
events.infra_ready.wait() # wait for infra to start (threading event)
localstack_started.set() # set conftest inter-process Event
logger.info("waiting for shutdown")
try:
logger.info("waiting for watchdog to join")
monitor.join()
finally:
logger.info("ok bye")
localstack_stopped.set()
@pytest.fixture(scope="session", autouse=True)
def localstack_runtime():
"""
This is a dummy fixture. Every test requests it, but it does nothing except signal the `startup_monitor` function:
it triggers the LocalStack startup the first time it is requested and then blocks until LocalStack is running.
"""
if localstack_started.is_set():
# called by all tests after the startup has completed and the initial tests are unblocked
yield
return
startup_monitor_event.set()
localstack_started.wait()
yield
return
|
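The conftest above coordinates the pytest lifecycle hooks and a background monitor thread purely through shared events. A minimal, self-contained sketch of that handshake (illustration only; it uses plain threading and made-up names, nothing below is LocalStack-specific):

import threading

startup_requested = threading.Event()  # stands in for startup_monitor_event
runtime_started = threading.Event()    # stands in for localstack_started

def monitor():
    # block until some test asks for the runtime, then start it and unblock everyone waiting
    startup_requested.wait()
    # ... start the runtime here ...
    runtime_started.set()

threading.Thread(target=monitor, daemon=True).start()

# what the session-scoped fixture effectively does:
startup_requested.set()
runtime_started.wait()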
2,101 | num masked values |
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from typing import Optional
import numpy as np
from gluonts.time_feature import get_seasonality
def calculate_seasonal_error(
past_data: np.ndarray,
freq: Optional[str] = None,
seasonality: Optional[int] = None,
):
r"""
.. math::
seasonal\_error = mean(|Y[t] - Y[t-m]|)
where m is the seasonal frequency. See [HA21]_ for more details.
"""
# Check if the length of the time series is larger than the seasonal
# frequency
if not seasonality:
assert freq is not None, "Either freq or seasonality must be provided"
seasonality = get_seasonality(freq)
if seasonality < len(past_data):
forecast_freq = seasonality
else:
# edge case: the seasonal freq is larger than the length of ts
# revert to freq=1
# logging.info('The seasonal frequency is larger than the length of the
# time series. Reverting to freq=1.')
forecast_freq = 1
y_t = past_data[:-forecast_freq]
y_tm = past_data[forecast_freq:]
return np.mean(abs(y_t - y_tm))
def mse(target: np.ndarray, forecast: np.ndarray) -> float:
r"""
.. math::
mse = mean((Y - \hat{Y})^2)
See [HA21]_ for more details.
"""
return np.mean(np.square(target - forecast))
def abs_error(target: np.ndarray, forecast: np.ndarray) -> float:
r"""
.. math::
abs\_error = sum(|Y - \hat{Y}|)
"""
return np.sum(np.abs(target - forecast))
def quantile_loss(target: np.ndarray, forecast: np.ndarray, q: float) -> float:
r"""
.. math::
quantile\_loss = 2 * sum(|(Y - \hat{Y}) * ((Y <= \hat{Y}) - q)|)
"""
return 2 * np.sum(np.abs((forecast - target) * ((target <= forecast) - q)))
def coverage(target: np.ndarray, forecast: np.ndarray) -> float:
r"""
.. math::
coverage = mean(Y <= \hat{Y})
"""
return float(np.mean(target <= forecast))
def mase(
target: np.ndarray,
forecast: np.ndarray,
seasonal_error: float,
) -> float:
r"""
.. math::
mase = mean(|Y - \hat{Y}|) / seasonal\_error
See [HA21]_ for more details.
"""
return np.mean(np.abs(target - forecast)) / seasonal_error
def mape(target: np.ndarray, forecast: np.ndarray) -> float:
r"""
.. math::
mape = mean(|Y - \hat{Y}| / |Y|))
See [HA21]_ for more details.
"""
return np.mean(np.abs(target - forecast) / np.abs(target))
def smape(target: np.ndarray, forecast: np.ndarray) -> float:
r"""
.. math::
smape = 2 * mean(|Y - \hat{Y}| / (|Y| + |\hat{Y}|))
See [HA21]_ for more details.
"""
return 2 * np.mean(
np.abs(target - forecast) / (np.abs(target) + np.abs(forecast))
)
def msis(
target: np.ndarray,
lower_quantile: np.ndarray,
upper_quantile: np.ndarray,
seasonal_error: float,
alpha: float,
) -> float:
r"""
.. math::
msis = mean(U - L + 2/alpha * (L-Y) * I[Y<L] + 2/alpha * (Y-U) * I[Y>U]) / seasonal\_error
See [SSA20]_ for more details.
""" # noqa: E501
numerator = np.mean(
upper_quantile
- lower_quantile
+ 2.0 / alpha * (lower_quantile - target) * (target < lower_quantile)
+ 2.0 / alpha * (target - upper_quantile) * (target > upper_quantile)
)
return numerator / seasonal_error
def abs_target_sum(target) -> float:
r"""
.. math::
abs\_target\_sum = sum(|Y|)
"""
return np.sum(np.abs(target))
def abs_target_mean(target) -> float:
r"""
.. math::
abs\_target\_mean = mean(|Y|)
"""
return np.mean(np.abs(target))
def METHOD_NAME(target) -> float:
"""
Count number of masked values in target
"""
if np.ma.isMaskedArray(target):
return np.ma.count_masked(target)
else:
return 0
|
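The metrics row above implements the seasonal error and MASE from [HA21]. A tiny numeric sanity check of those two definitions (toy data and a hand-picked seasonality m = 2; the numbers are made up for illustration):

import numpy as np

past_data = np.array([1.0, 2.0, 3.0, 5.0, 4.0, 7.0])
m = 2  # assumed seasonality
seasonal_error = np.mean(np.abs(past_data[:-m] - past_data[m:]))  # mean(|[2, 3, 1, 2]|) = 2.0

target = np.array([5.0, 6.0])
forecast = np.array([4.0, 8.0])
mase = np.mean(np.abs(target - forecast)) / seasonal_error  # 1.5 / 2.0 = 0.75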
2,102 | test icons |
from homeassistant.components.humidifier import HumidifierEntityFeature
from homeassistant.components.humidifier.const import (
MODE_AUTO,
MODE_BOOST,
MODE_NORMAL,
MODE_SLEEP,
)
from homeassistant.components.sensor import SensorDeviceClass
from homeassistant.const import PERCENTAGE
from ..const import WETAIR_WAWH1210_HUMIDIFIER_PAYLOAD
from ..helpers import assert_device_properties_set
from ..mixins.light import BasicLightTests
from ..mixins.lock import BasicLockTests
from ..mixins.sensor import MultiSensorTests
from ..mixins.switch import MultiSwitchTests, SwitchableTests
from .base_device_tests import TuyaDeviceTestCase
SWITCH_DPS = "1"
LIGHT_DPS = "5"
SOUND_DPS = "8"
HUMIDITY_DPS = "13"
CURRENTHUMID_DPS = "14"
UNKNOWN22_DPS = "22"
PRESET_DPS = "24"
IONIZER_DPS = "25"
LOCK_DPS = "29"
LEVEL_DPS = "101"
class TestWetairWAWH1210LWHumidifier(
BasicLightTests,
BasicLockTests,
MultiSensorTests,
MultiSwitchTests,
SwitchableTests,
TuyaDeviceTestCase,
):
__test__ = True
def setUp(self):
self.setUpForConfig(
"wetair_wawh1210lw_humidifier.yaml", WETAIR_WAWH1210_HUMIDIFIER_PAYLOAD
)
self.subject = self.entities.get("humidifier_humidifier")
self.setUpSwitchable(SWITCH_DPS, self.subject)
self.setUpBasicLight(LIGHT_DPS, self.entities.get("light_display"))
self.setUpBasicLock(LOCK_DPS, self.entities.get("lock_child_lock"))
self.setUpMultiSensors(
[
{
"dps": CURRENTHUMID_DPS,
"name": "sensor_current_humidity",
"device_class": SensorDeviceClass.HUMIDITY,
"state_class": "measurement",
"unit": PERCENTAGE,
},
{
"dps": LEVEL_DPS,
"name": "sensor_water_level",
"unit": PERCENTAGE,
},
]
)
self.setUpMultiSwitch(
[
{
"dps": SOUND_DPS,
"name": "switch_sound",
},
{
"dps": IONIZER_DPS,
"name": "switch_ionizer",
},
]
)
self.mark_secondary(
[
"light_display",
"lock_child_lock",
"sensor_current_humidity",
"sensor_water_level",
"switch_sound",
]
)
def test_supported_features(self):
self.assertEqual(self.subject.supported_features, HumidifierEntityFeature.MODES)
def METHOD_NAME(self):
self.dps[SWITCH_DPS] = True
self.assertEqual(self.subject.icon, "mdi:air-humidifier")
self.dps[SWITCH_DPS] = False
self.assertEqual(self.subject.icon, "mdi:air-humidifier-off")
def test_min_target_humidity(self):
self.assertEqual(self.subject.min_humidity, 30)
def test_max_target_humidity(self):
self.assertEqual(self.subject.max_humidity, 80)
def test_target_humidity(self):
self.dps[HUMIDITY_DPS] = 55
self.assertEqual(self.subject.target_humidity, 55)
def test_available_modes(self):
self.assertCountEqual(
self.subject.available_modes,
[MODE_AUTO, MODE_BOOST, MODE_NORMAL, MODE_SLEEP],
)
def test_mode(self):
self.dps[PRESET_DPS] = "AUTO"
self.assertEqual(self.subject.mode, MODE_AUTO)
self.dps[PRESET_DPS] = "MIDDLE"
self.assertEqual(self.subject.mode, MODE_NORMAL)
self.dps[PRESET_DPS] = "HIGH"
self.assertEqual(self.subject.mode, MODE_BOOST)
self.dps[PRESET_DPS] = "SLEEP"
self.assertEqual(self.subject.mode, MODE_SLEEP)
async def test_set_mode_to_auto(self):
async with assert_device_properties_set(
self.subject._device, {PRESET_DPS: "AUTO"}
):
await self.subject.async_set_mode(MODE_AUTO)
async def test_set_mode_to_normal(self):
async with assert_device_properties_set(
self.subject._device, {PRESET_DPS: "MIDDLE"}
):
await self.subject.async_set_mode(MODE_NORMAL)
async def test_set_mode_to_boost(self):
async with assert_device_properties_set(
self.subject._device, {PRESET_DPS: "HIGH"}
):
await self.subject.async_set_mode(MODE_BOOST)
async def test_set_mode_to_sleep(self):
async with assert_device_properties_set(
self.subject._device, {PRESET_DPS: "SLEEP"}
):
await self.subject.async_set_mode(MODE_SLEEP)
def test_extra_state_attributes(self):
self.dps[UNKNOWN22_DPS] = 22
self.assertDictEqual(
self.subject.extra_state_attributes,
{"unknown_22": 22},
)
|
2,103 | test check lock up to date |
from __future__ import annotations
from typing import TYPE_CHECKING
import pytest
from poetry.packages import Locker
if TYPE_CHECKING:
import httpretty
from cleo.testers.command_tester import CommandTester
from pytest_mock import MockerFixture
from poetry.poetry import Poetry
from tests.types import CommandTesterFactory
from tests.types import FixtureDirGetter
from tests.types import ProjectFactory
@pytest.fixture()
def tester(command_tester_factory: CommandTesterFactory) -> CommandTester:
return command_tester_factory("check")
def _project_factory(
fixture_name: str,
project_factory: ProjectFactory,
fixture_dir: FixtureDirGetter,
) -> Poetry:
source = fixture_dir(fixture_name)
pyproject_content = (source / "pyproject.toml").read_text(encoding="utf-8")
poetry_lock_content = (source / "poetry.lock").read_text(encoding="utf-8")
return project_factory(
name="foobar",
pyproject_content=pyproject_content,
poetry_lock_content=poetry_lock_content,
source=source,
)
@pytest.fixture
def poetry_with_outdated_lockfile(
project_factory: ProjectFactory, fixture_dir: FixtureDirGetter
) -> Poetry:
return _project_factory("outdated_lock", project_factory, fixture_dir)
@pytest.fixture
def poetry_with_up_to_date_lockfile(
project_factory: ProjectFactory, fixture_dir: FixtureDirGetter
) -> Poetry:
return _project_factory("up_to_date_lock", project_factory, fixture_dir)
def test_check_valid(tester: CommandTester) -> None:
tester.execute()
expected = """\
All set!
"""
assert tester.io.fetch_output() == expected
def test_check_invalid(
mocker: MockerFixture, tester: CommandTester, fixture_dir: FixtureDirGetter
) -> None:
from poetry.toml import TOMLFile
mocker.patch(
"poetry.poetry.Poetry.file",
return_value=TOMLFile(fixture_dir("invalid_pyproject") / "pyproject.toml"),
new_callable=mocker.PropertyMock,
)
tester.execute("--lock")
expected = """\
Error: 'description' is a required property
Error: Project name (invalid) is same as one of its dependencies
Error: Unrecognized classifiers: ['Intended Audience :: Clowns'].
Error: Declared README file does not exist: never/exists.md
Error: poetry.lock was not found.
Warning: A wildcard Python dependency is ambiguous.\
Consider specifying a more explicit one.
Warning: The "pendulum" dependency specifies the "allows-prereleases" property,\
which is deprecated. Use "allow-prereleases" instead.
Warning: Deprecated classifier 'Natural Language :: Ukranian'.\
Must be replaced by ['Natural Language :: Ukrainian'].
Warning: Deprecated classifier\
'Topic :: Communications :: Chat :: AOL Instant Messenger'.\
Must be removed.
"""
assert tester.io.fetch_error() == expected
def test_check_private(
mocker: MockerFixture, tester: CommandTester, fixture_dir: FixtureDirGetter
) -> None:
mocker.patch(
"poetry.factory.Factory.locate",
return_value=fixture_dir("private_pyproject") / "pyproject.toml",
)
tester.execute()
expected = """\
All set!
"""
assert tester.io.fetch_output() == expected
@pytest.mark.parametrize(
("options", "expected", "expected_status"),
[
("", "All set!\n", 0),
("--lock", "Error: poetry.lock was not found.\n", 1),
],
)
def test_check_lock_missing(
mocker: MockerFixture,
tester: CommandTester,
fixture_dir: FixtureDirGetter,
options: str,
expected: str,
expected_status: int,
) -> None:
from poetry.toml import TOMLFile
mocker.patch(
"poetry.poetry.Poetry.file",
return_value=TOMLFile(fixture_dir("private_pyproject") / "pyproject.toml"),
new_callable=mocker.PropertyMock,
)
status_code = tester.execute(options)
assert status_code == expected_status
if status_code == 0:
assert tester.io.fetch_output() == expected
else:
assert tester.io.fetch_error() == expected
@pytest.mark.parametrize("options", ["", "--lock"])
def test_check_lock_outdated(
command_tester_factory: CommandTesterFactory,
poetry_with_outdated_lockfile: Poetry,
http: type[httpretty.httpretty],
options: str,
) -> None:
http.disable()
locker = Locker(
lock=poetry_with_outdated_lockfile.pyproject.file.path.parent / "poetry.lock",
local_config=poetry_with_outdated_lockfile.locker._local_config,
)
poetry_with_outdated_lockfile.set_locker(locker)
tester = command_tester_factory("check", poetry=poetry_with_outdated_lockfile)
status_code = tester.execute(options)
expected = (
"Error: poetry.lock is not consistent with pyproject.toml. "
"Run `poetry lock [--no-update]` to fix it.\n"
)
assert tester.io.fetch_error() == expected
# exit with an error
assert status_code == 1
@pytest.mark.parametrize("options", ["", "--lock"])
def METHOD_NAME(
command_tester_factory: CommandTesterFactory,
poetry_with_up_to_date_lockfile: Poetry,
http: type[httpretty.httpretty],
options: str,
) -> None:
http.disable()
locker = Locker(
lock=poetry_with_up_to_date_lockfile.pyproject.file.path.parent / "poetry.lock",
local_config=poetry_with_up_to_date_lockfile.locker._local_config,
)
poetry_with_up_to_date_lockfile.set_locker(locker)
tester = command_tester_factory("check", poetry=poetry_with_up_to_date_lockfile)
status_code = tester.execute(options)
expected = "All set!\n"
assert tester.io.fetch_output() == expected
# exit without an error
assert status_code == 0
|
2,104 | port end |
#! /usr/bin/python
try:
import time
import string
from ctypes import create_string_buffer
from sonic_sfp.sfputilbase import SfpUtilBase
except ImportError as e:
raise ImportError(str(e) + "- required module not found")
class SfpUtil(SfpUtilBase):
"""Platform specific sfputil class"""
_port_start = 0
_port_end = 31
ports_in_block = 32
_port_to_eeprom_mapping = {}
_qsfp_ports = list(range(0, ports_in_block + 1))
def __init__(self):
# Override port_to_eeprom_mapping for class initialization
eeprom_path = '/sys/bus/i2c/devices/{0}-0050/sfp_eeprom'
for x in range(self.port_start, self.METHOD_NAME + 1):
self._port_to_eeprom_mapping[x] = eeprom_path.format(x + 18)
SfpUtilBase.__init__(self)
def reset(self, port_num):
raise NotImplementedError
def get_presence(self, port_num):
# Check for invalid port_num
if port_num < self._port_start or port_num > self._port_end:
return False
path = "/sys/bus/i2c/devices/{0}-0050/sfp_is_present"
port_ps = path.format(port_num+18)
reg_value = '0'
try:
reg_file = open(port_ps)
reg_value = reg_file.readline().rstrip()
reg_file.close()
except IOError as e:
print("Error: unable to access file: %s" % str(e))
return False
if reg_value == '1':
return True
return False
@property
def port_start(self):
return self._port_start
@property
def METHOD_NAME(self):
return self._port_end
@property
def qsfp_ports(self):
return list(range(0, self.ports_in_block + 1))
@property
def port_to_eeprom_mapping(self):
return self._port_to_eeprom_mapping
def get_transceiver_change_event(self):
"""
TODO: This function need to be implemented
when decide to support monitoring SFP(Xcvrd)
on this platform.
"""
raise NotImplementedError
def get_low_power_mode(self, port_num):
# Check for invalid port_num
if port_num < self._port_start or port_num > self._port_end:
return False
try:
eeprom = None
if not self.get_presence(port_num):
return False
eeprom = open(self.port_to_eeprom_mapping[port_num], "rb")
eeprom.seek(93)
lpmode = ord(eeprom.read(1))
if ((lpmode & 0x3) == 0x3):
return True # Low Power Mode if "Power override" bit is 1 and "Power set" bit is 1
else:
# High Power Mode if one of the following conditions is matched:
# 1. "Power override" bit is 0
# 2. "Power override" bit is 1 and "Power set" bit is 0
return False
except IOError as e:
print("Error: unable to open file: %s" % str(e))
return False
finally:
if eeprom is not None:
eeprom.close()
time.sleep(0.01)
def set_low_power_mode(self, port_num, lpmode):
# Check for invalid port_num
if port_num < self._port_start or port_num > self._port_end:
return False
try:
eeprom = None
if not self.get_presence(port_num):
return False # Port is not present, unable to set the eeprom
# Fill in write buffer
regval = 0x3 if lpmode else 0x1 # 0x3:Low Power Mode, 0x1:High Power Mode
buffer = create_string_buffer(1)
buffer[0] = chr(regval)
# Write to eeprom
eeprom = open(self.port_to_eeprom_mapping[port_num], "r+b")
eeprom.seek(93)
eeprom.write(buffer[0])
return True
except IOError as e:
print("Error: unable to open file: %s" % str(e))
return False
finally:
if eeprom is not None:
eeprom.close()
time.sleep(0.01)
|
2,105 | parse chromium |
# Copyright (C) 2000-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Main functions for link parsing
"""
from .. import strformat, url as urlutil
from ..htmlutil import linkparse
from ..bookmarks import firefox
def parse_url(url_data):
"""Parse a URL."""
if url_data.is_directory():
# both ftp and file links represent directories as HTML data
key = "html"
elif (
url_data.is_file()
and firefox.has_sqlite
and firefox.extension.search(url_data.url)
):
key = "firefox"
elif url_data.scheme == "itms-services":
key = "itms_services"
else:
# determine parse routine according to content types
mime = url_data.content_type
key = url_data.ContentMimetypes[mime]
funcname = "parse_" + key
if funcname in globals():
globals()[funcname](url_data)
else:
url_data.aggregate.plugin_manager.run_parser_plugins(url_data, pagetype=key)
def parse_html(url_data):
"""Parse into HTML content and search for URLs to check.
Found URLs are added to the URL queue.
"""
linkparse.find_links(url_data.get_soup(), url_data.add_url, linkparse.LinkTags)
def parse_opera(url_data):
"""Parse an opera bookmark file."""
from ..bookmarks.opera import parse_bookmark_data
for url, name, lineno in parse_bookmark_data(url_data.get_content()):
url_data.add_url(url, line=lineno, name=name)
def METHOD_NAME(url_data):
"""Parse a Chromium or Google Chrome bookmark file."""
from ..bookmarks.chromium import parse_bookmark_data
for url, name in parse_bookmark_data(url_data.get_content()):
url_data.add_url(url, name=name)
def parse_safari(url_data):
"""Parse a Safari bookmark file."""
from ..bookmarks.safari import parse_bookmark_data
for url, name in parse_bookmark_data(url_data.get_raw_content()):
url_data.add_url(url, name=name)
def parse_text(url_data):
"""Parse a text file with one url per line; comment and blank
lines are ignored."""
lineno = 0
for line in url_data.get_content().splitlines():
lineno += 1
line = line.strip()
if not line or line.startswith('#'):
continue
url_data.add_url(line, line=lineno)
def parse_css(url_data):
"""
Parse a CSS file for url() patterns.
"""
lineno = 0
linkfinder = linkparse.css_url_re.finditer
strip_comments = linkparse.strip_c_comments
for line in strip_comments(url_data.get_content()).splitlines():
lineno += 1
for mo in linkfinder(line):
column = mo.start("url")
url = strformat.unquote(mo.group("url").strip())
url_data.add_url(url, line=lineno, column=column)
def parse_swf(url_data):
"""Parse a SWF file for URLs."""
linkfinder = linkparse.swf_url_re.finditer
for mo in linkfinder(url_data.get_raw_content()):
# We're scraping binary data for anything that looks like an URL using
# a regex that matches only ASCII characters. Any non-ASCII characters
# in the URL are expected to be %-encoded.
url = mo.group().decode('ascii')
url_data.add_url(url)
def parse_wml(url_data):
"""Parse into WML content and search for URLs to check.
Found URLs are added to the URL queue.
"""
linkparse.find_links(url_data.get_soup(), url_data.add_url, linkparse.WmlTags)
def parse_firefox(url_data):
"""Parse a Firefox3 bookmark file."""
filename = url_data.get_os_filename()
for url, name in firefox.parse_bookmark_file(filename):
url_data.add_url(url, name=name)
def parse_itms_services(url_data):
"""Get "url" CGI parameter value as child URL."""
query = url_data.urlparts[3]
for k, v, sep in urlutil.parse_qsl(
query, encoding=url_data.encoding, keep_blank_values=True
):
if k == "url":
url_data.add_url(v)
break
from .sitemap import parse_sitemap, parse_sitemapindex # noqa: F401
|
2,106 | test register single module |
import json
import pytest
import core.event
import core.config
import core.output
import core.module
class SampleModule(core.module.Module):
pass
@pytest.fixture(autouse=True)
def clear_events():
core.event.clear()
@pytest.fixture
def i3():
return core.output.i3()
@pytest.fixture
def module_a(mocker):
widget = mocker.MagicMock()
widget.full_text.return_value = "test"
widget.id = "a"
widget.hidden = False
return SampleModule(config=core.config.Config([]), widgets=[widget, widget, widget])
@pytest.fixture
def module_b(mocker):
widget = mocker.MagicMock()
widget.full_text.return_value = "test"
widget.id = "b"
return SampleModule(config=core.config.Config([]), widgets=[widget, widget, widget])
@pytest.fixture
def paddedTheme():
return core.theme.Theme(raw_data={"defaults": {"padding": " "}})
@pytest.fixture
def separatorTheme():
return core.theme.Theme(
raw_data={"defaults": {"separator": "***", "fg": "red", "bg": "blue"}}
)
@pytest.fixture
def block_a(separatorTheme, module_a):
return core.output.block(
theme=separatorTheme, module=module_a, widget=module_a.widget(),
)
def test_start(i3):
all_data = i3.start()
data = all_data["blocks"]
assert data["version"] == 1
assert data["click_events"] == True
assert all_data["suffix"] == "\n["
def test_stop(i3):
assert i3.stop()["suffix"] == "\n]"
def test_no_modules_by_default(i3):
assert i3.modules() == []
def METHOD_NAME(i3, module_a):
i3.modules(module_a)
assert i3.modules() == [module_a]
def test_register_multiple_modules(i3, module_a):
i3.modules([module_a, module_a, module_a])
assert i3.modules() == [module_a, module_a, module_a]
def test_toggle_module(i3, module_a, module_b):
i3.modules([module_a, module_b])
i3.update()
i3.toggle_minimize({ "instance": module_a.widget().id })
i3.update()
assert i3.content()[module_a.widget().id]["minimized"] == True
# assert module_a.widget().minimized == True
# assert module_b.widget().minimized == False
#
# i3.toggle_minimize({ "instance": module_a.widget().id })
# i3.toggle_minimize({ "instance": module_b.widget().id })
#
# assert module_a.widget().minimized == False
# assert module_b.widget().minimized == True
def test_draw_existing_module(mocker, i3):
i3.test_draw = mocker.MagicMock(
return_value={"blocks": {"test": True}, "suffix": "end"}
)
i3.draw("test_draw")
i3.test_draw.assert_called_once_with()
def test_empty_status_line(i3):
data = i3.statusline()
assert data["blocks"] == []
assert data["suffix"] == ","
def test_statusline(i3, module_a):
i3.modules([module_a, module_a, module_a])
i3.update()
data = i3.statusline()
assert len(data["blocks"]) == len(module_a.widgets()) * 3
def test_padding(i3, paddedTheme, module_a):
i3.theme(paddedTheme)
blk = core.output.block(i3.theme(), module_a, module_a.widget())
blk.set("full_text", "abc")
result = blk.dict()["full_text"]
assert result == " abc "
def test_no_separator(i3, module_a):
result = i3.separator_block(module_a, module_a.widget())
assert result == []
def test_separator(i3, separatorTheme, module_a):
i3.theme(separatorTheme)
result = i3.separator_block(module_a, module_a.widget())
assert len(result) == 1
assert result[0].dict()["full_text"] == "***"
assert result[0].dict().get("_decorator") == True
assert result[0].dict()["color"] == separatorTheme.get("bg", module_a.widget())
def test_dump_json(mocker):
obj = mocker.MagicMock()
obj.dict = mocker.MagicMock()
core.output.dump_json(obj)
obj.dict_assert_called_once_with()
def test_assign():
src = {"a": "x", "b": "y", "c": "z"}
dst = {}
core.output.assign(src, dst, "a")
assert src["a"] == dst["a"]
core.output.assign(src, dst, "123", "b")
assert src["b"] == dst["123"]
core.output.assign(src, dst, "blub", default="def")
assert dst["blub"] == "def"
def test_pango_detection(block_a):
assert block_a.is_pango({}) == False
assert block_a.is_pango({"pango": {}}) == True
def test_pangoize(block_a):
assert block_a.pangoize("test") == "test"
assert not "markup" in block_a.dict()
pango = block_a.pangoize({"pango": {"attr": "blub", "x": "y", "full_text": "test"}})
assert 'attr="blub"' in pango
assert 'x="y"' in pango
assert "<span " in pango
assert ">test</span>" in pango
assert block_a.dict()["markup"] == "pango"
def test_padding(block_a):
block_a.set("padding", "***")
block_a.set("full_text", "test")
assert block_a.dict()["full_text"] == "***test***"
def test_pre_suffix(block_a):
block_a.set("padding", "*")
block_a.set("prefix", "pre")
block_a.set("suffix", "suf")
block_a.set("full_text", "test")
assert block_a.dict()["full_text"] == "*pre*test*suf*"
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
2,107 | prepare get categories |
import contextlib
from nonebot.typing import T_State
from nonebot.matcher import Matcher
from nonebot.params import Arg, ArgPlainText
from nonebot.adapters import Message, MessageTemplate
from nonebot_plugin_saa import Text, PlatformTarget, SupportedAdapters
from ..types import Target
from ..config import config
from ..apis import check_sub_target
from ..platform import Platform, platform_manager
from ..config.db_config import SubscribeDupException
from .utils import common_platform, ensure_user_info, gen_handle_cancel
def do_add_sub(add_sub: type[Matcher]):
handle_cancel = gen_handle_cancel(add_sub, "已中止订阅")
add_sub.handle()(ensure_user_info(add_sub))
@add_sub.handle()
async def init_promote(state: T_State):
state["_prompt"] = (
"请输入想要订阅的平台,目前支持,请输入冒号左边的名称:\n"
+ "".join(
[f"{platform_name}: {platform_manager[platform_name].name}\n" for platform_name in common_platform]
)
+ "要查看全部平台请输入:“全部”\n中止订阅过程请输入:“取消”"
)
@add_sub.got("platform", MessageTemplate("{_prompt}"), [handle_cancel])
async def parse_platform(state: T_State, platform: str = ArgPlainText()) -> None:
if platform == "全部":
message = "全部平台\n" + "\n".join(
[f"{platform_name}: {platform.name}" for platform_name, platform in platform_manager.items()]
)
await add_sub.reject(message)
elif platform == "取消":
await add_sub.finish("已中止订阅")
elif platform in platform_manager:
state["platform"] = platform
else:
await add_sub.reject("平台输入错误")
@add_sub.handle()
async def prepare_get_id(matcher: Matcher, state: T_State):
cur_platform = platform_manager[state["platform"]]
if cur_platform.has_target:
state["_prompt"] = (
("1." + cur_platform.parse_target_promot + "\n2.") if cur_platform.parse_target_promot else ""
) + "请输入订阅用户的id\n查询id获取方法请回复:“查询”"
else:
matcher.set_arg("raw_id", None) # type: ignore
state["id"] = "default"
state["name"] = await check_sub_target(state["platform"], Target(""))
@add_sub.got("raw_id", MessageTemplate("{_prompt}"), [handle_cancel])
async def got_id(state: T_State, raw_id: Message = Arg()):
raw_id_text = raw_id.extract_plain_text()
try:
if raw_id_text == "查询":
url = "https://nonebot-bison.netlify.app/usage/#%E6%89%80%E6%94%AF%E6%8C%81%E5%B9%B3%E5%8F%B0%E7%9A%84-uid"
msg = Text(url)
with contextlib.suppress(ImportError):
from nonebot.adapters.onebot.v11 import MessageSegment
title = "Bison所支持的平台UID"
content = "查询相关平台的uid格式或获取方式"
image = "https://s3.bmp.ovh/imgs/2022/03/ab3cc45d83bd3dd3.jpg"
msg.overwrite(
SupportedAdapters.onebot_v11,
MessageSegment.share(url=url, title=title, content=content, image=image),
)
await msg.reject()
platform = platform_manager[state["platform"]]
with contextlib.suppress(ImportError):
from nonebot.adapters.onebot.v11 import Message
from nonebot.adapters.onebot.v11.utils import unescape
if isinstance(raw_id, Message):
raw_id_text = unescape(raw_id_text)
raw_id_text = await platform.parse_target(raw_id_text)
name = await check_sub_target(state["platform"], raw_id_text)
if not name:
await add_sub.reject("id输入错误")
state["id"] = raw_id_text
state["name"] = name
except Platform.ParseTargetException:
await add_sub.reject("不能从你的输入中提取出id,请检查你输入的内容是否符合预期")
else:
await add_sub.send(
f"即将订阅的用户为:{state['platform']} {state['name']} {state['id']}\n如有错误请输入“取消”重新订阅"
)
@add_sub.handle()
async def METHOD_NAME(matcher: Matcher, state: T_State):
if not platform_manager[state["platform"]].categories:
matcher.set_arg("raw_cats", None) # type: ignore
state["cats"] = []
return
state["_prompt"] = "请输入要订阅的类别,以空格分隔,支持的类别有:{}".format(
" ".join(list(platform_manager[state["platform"]].categories.values()))
)
@add_sub.got("raw_cats", MessageTemplate("{_prompt}"), [handle_cancel])
async def parser_cats(state: T_State, raw_cats: Message = Arg()):
raw_cats_text = raw_cats.extract_plain_text()
res = []
if platform_manager[state["platform"]].categories:
for cat in raw_cats_text.split():
if cat not in platform_manager[state["platform"]].reverse_category:
await add_sub.reject(f"不支持 {cat}")
res.append(platform_manager[state["platform"]].reverse_category[cat])
state["cats"] = res
@add_sub.handle()
async def prepare_get_tags(matcher: Matcher, state: T_State):
if not platform_manager[state["platform"]].enable_tag:
matcher.set_arg("raw_tags", None) # type: ignore
state["tags"] = []
return
state["_prompt"] = (
'请输入要订阅/屏蔽的标签(不含#号)\n多个标签请使用空格隔开\n订阅所有标签输入"全部标签"\n具体规则回复"详情"'
)
@add_sub.got("raw_tags", MessageTemplate("{_prompt}"), [handle_cancel])
async def parser_tags(state: T_State, raw_tags: Message = Arg()):
raw_tags_text = raw_tags.extract_plain_text()
if raw_tags_text == "详情":
await add_sub.reject(
"订阅标签直接输入标签内容\n"
"屏蔽标签请在标签名称前添加~号\n"
"详见https://nonebot-bison.netlify.app/usage/#%E5%B9%B3%E5%8F%B0%E8%AE%A2%E9%98%85%E6%A0%87%E7%AD%BE-tag"
)
if raw_tags_text in ["全部标签", "全部", "全标签"]:
state["tags"] = []
else:
state["tags"] = raw_tags_text.split()
@add_sub.handle()
async def add_sub_process(state: T_State, user: PlatformTarget = Arg("target_user_info")):
try:
await config.add_subscribe(
user=user,
target=state["id"],
target_name=state["name"],
platform_name=state["platform"],
cats=state.get("cats", []),
tags=state.get("tags", []),
)
except SubscribeDupException:
await add_sub.finish(f"添加 {state['name']} 失败: 已存在该订阅")
except Exception as e:
await add_sub.finish(f"添加 {state['name']} 失败: {e}")
await add_sub.finish("添加 {} 成功".format(state["name"]))
|
2,108 | set value |
from datetime import datetime
from types import ModuleType
from typing import Dict, List, Optional, Sequence, TypedDict, Union, cast
import sentry_sdk
from rest_framework.exceptions import ValidationError
from rest_framework.request import Request
from rest_framework.response import Response
from sentry.api.api_publish_status import ApiPublishStatus
from sentry.api.base import region_silo_endpoint
from sentry.api.bases import OrganizationEventsV2EndpointBase
from sentry.models import Organization
from sentry.search.events import fields
from sentry.snuba import discover, metrics_performance
from sentry.snuba.metrics.extraction import to_standard_metrics_query
from sentry.snuba.referrer import Referrer
from sentry.utils.snuba import SnubaTSResult
class CountResult(TypedDict):
count: Optional[float]
# The type returned by get_event_stats_data is actually [int, List[CountResult]], where the first
# element is the timestamp
MetricVolumeRow = List[Union[int, List[CountResult]]]
@region_silo_endpoint
class OrganizationMetricsEstimationStatsEndpoint(OrganizationEventsV2EndpointBase):
publish_status = {
"GET": ApiPublishStatus.UNKNOWN,
}
"""Gets the estimated volume of an organization's metric events."""
def get(self, request: Request, organization: Organization) -> Response:
with sentry_sdk.start_span(
op="discover.metrics.endpoint", description="get_full_metrics"
) as span:
span.set_data("organization", organization)
try:
# the discover stats
discover_stats = self.get_event_stats_data(
request,
organization,
get_stats_generator(use_discover=True, remove_on_demand=False),
)
# the closest we have to the stats in discover that can also be queried in metrics
base_discover = self.get_event_stats_data(
request,
organization,
get_stats_generator(use_discover=True, remove_on_demand=True),
)
# the closest we have to the stats in metrics, with no on_demand metrics
base_metrics = self.get_event_stats_data(
request,
organization,
get_stats_generator(use_discover=False, remove_on_demand=True),
)
estimated_volume = estimate_volume(
discover_stats["data"], base_discover["data"], base_metrics["data"]
)
discover_stats["data"] = estimated_volume
except ValidationError:
return Response(
{"detail": "Comparison period is outside retention window"}, status=400
)
return Response(discover_stats, status=200)
def get_stats_generator(use_discover: bool, remove_on_demand: bool):
"""
Returns a get_stats function that can fetch from either metrics or discover and
with or without on_demand metrics.
"""
def get_discover_stats(
query_columns: Sequence[str],
query: str,
params: Dict[str, str],
rollup: int,
zerofill_results: bool,
comparison_delta: Optional[datetime],
) -> SnubaTSResult:
# use discover or metrics_performance depending on the dataset
if use_discover:
module: ModuleType = discover
else:
module = metrics_performance
# (RaduW) horrible hack check function definition
query_columns = to_metrics_columns(query_columns)
if remove_on_demand:
query = to_standard_metrics_query(query)
return module.timeseries_query(
selected_columns=query_columns,
query=query,
params=params,
rollup=rollup,
referrer=Referrer.API_ORGANIZATION_METRICS_ESTIMATION_STATS.value,
zerofill_results=True,
has_metrics=True,
)
return get_discover_stats
def estimate_volume(
indexed_data: List[MetricVolumeRow],
base_index: List[MetricVolumeRow],
base_metrics: List[MetricVolumeRow],
) -> List[MetricVolumeRow]:
"""
Estimates the volume of an on-demand metric by scaling the counts of the indexed metric with an estimated
sampling rate deduced from the ratio of the base_index and base_metrics time series.
The idea is that if we could multiply the indexed data by the actual sampling rate at each interval, we would
obtain a good estimate of the volume. To get the actual sampling rate at any time, we query both the indexed and
the metric data for the base metric (not the derived metric); their ratio approximates the sample rate.
"""
assert _is_data_aligned(indexed_data, base_index)
assert _is_data_aligned(indexed_data, base_metrics)
index_total = 0.0
for elm in base_index:
index_total += _get_value(elm)
metrics_total = 0.0
for elm in base_metrics:
metrics_total += _get_value(elm)
if index_total == 0.0:
return indexed_data # there is no way to estimate the volume
avg_inverted_rate = metrics_total / index_total
for idx in range(len(indexed_data)):
indexed = _get_value(base_index[idx])
metrics = _get_value(base_metrics[idx])
if indexed != 0:
inverted_rate = metrics / indexed
else:
inverted_rate = avg_inverted_rate
METHOD_NAME(indexed_data[idx], _get_value(indexed_data[idx]) * inverted_rate)
return indexed_data
def _get_value(elm: MetricVolumeRow) -> float:
ret_val = cast(List[CountResult], elm[1])[0].get("count")
if ret_val is None:
return 0.0
return ret_val
def METHOD_NAME(elm: MetricVolumeRow, value: float) -> None:
cast(List[CountResult], elm[1])[0]["count"] = value
def _is_data_aligned(left: List[MetricVolumeRow], right: List[MetricVolumeRow]) -> bool:
"""
Checks if the two timeseries are aligned (represent the same time intervals).
Checks the length and the first and last timestamp (assumes they are correctly constructed, no
check for individual intervals)
"""
if len(left) != len(right):
return False
if len(left) == 0:
return True
return left[0][0] == right[0][0] and left[-1][0] == right[-1][0]
def to_metrics_columns(query_columns: Sequence[str]):
"""
(RaduW): This is a hack to convert the columns from discover to metrics;
specifically, it converts 'apdex(XXX)' to 'apdex()'.
For metrics we need to fix the acceptable rate at tag creation time
(i.e. in Relay), so at query creation time we can only work with whatever was
collected, and apdex() therefore does not accept a parameter.
If things get more complicated, create a more robust parsing solution.
"""
ret_val = []
for column in query_columns:
if fields.is_function(column):
column = discover_to_metrics_function_call(column)
ret_val.append(column)
return ret_val
def discover_to_metrics_function_call(column: str):
"""
(RaduW): Hacky function cleanup, converts discover function calls to
metric function calls
"""
if fields.is_function(column):
function, params, alias = fields.parse_function(column)
if function.lower() == "apdex":
ret_val = "apdex()"
if alias:
ret_val += f" AS {alias}"
return ret_val
return column
|
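The estimate_volume helper in the row above scales indexed counts by an inverted sampling rate derived from the base time series. A toy walk-through of that arithmetic (hypothetical numbers; the [timestamp, [{"count": value}]] row shape mirrors MetricVolumeRow):

# on-demand metric as measured from indexed (sampled) events
indexed = [[0, [{"count": 10.0}]], [60, [{"count": 20.0}]]]
# base metric, indexed and metrics-backed respectively
base_index = [[0, [{"count": 5.0}]], [60, [{"count": 0.0}]]]
base_metrics = [[0, [{"count": 50.0}]], [60, [{"count": 40.0}]]]

avg_inverted_rate = (50.0 + 40.0) / (5.0 + 0.0)  # 18.0, the fallback when an interval has no indexed events
# interval 0: inverted rate = 50 / 5 = 10.0 -> estimated volume 10.0 * 10.0 = 100.0
# interval 1: indexed count is 0, fall back to 18.0 -> estimated volume 20.0 * 18.0 = 360.0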
2,109 | create |
from rest_framework import viewsets
from .models import ListModel
from . import serializers
from utils.page import MyPageNumberPagination
from rest_framework.filters import OrderingFilter
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework.response import Response
from .filter import Filter
from rest_framework.exceptions import APIException
class APIViewSet(viewsets.ModelViewSet):
"""
retrieve:
Return a data list (get)
list:
Return a data list (all)
create:
Create a data line (post)
delete:
Delete a data line (delete)
partial_update:
Partially update a data line (patch: partial_update)
update:
Update a data line (put: update)
"""
pagination_class = MyPageNumberPagination
filter_backends = [DjangoFilterBackend, OrderingFilter, ]
ordering_fields = ['id', "create_time", "update_time", ]
filter_class = Filter
def get_project(self):
try:
id = self.kwargs.get('pk')
return id
except:
return None
def get_queryset(self):
id = self.get_project()
if self.request.user:
if id is None:
return ListModel.objects.filter(openid=self.request.auth.openid, is_delete=False)
else:
return ListModel.objects.filter(openid=self.request.auth.openid, id=id, is_delete=False)
else:
return ListModel.objects.none()
def get_serializer_class(self):
if self.action in ['list', 'retrieve', 'destroy']:
return serializers.GoodsoriginGetSerializer
elif self.action in ['create']:
return serializers.GoodsoriginPostSerializer
elif self.action in ['update']:
return serializers.GoodsoriginUpdateSerializer
elif self.action in ['partial_update']:
return serializers.GoodsoriginPartialUpdateSerializer
else:
return self.http_method_not_allowed(request=self.request)
def METHOD_NAME(self, request, *args, **kwargs):
data = self.request.data
data['openid'] = self.request.auth.openid
if ListModel.objects.filter(openid=data['openid'], goods_origin=data['goods_origin'], is_delete=False).exists():
raise APIException({"detail": "Data exists"})
else:
serializer = self.get_serializer(data=data)
serializer.is_valid(raise_exception=True)
serializer.save()
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=200, headers=headers)
def update(self, request, pk):
qs = self.get_object()
if qs.openid != self.request.auth.openid:
raise APIException({"detail": "Cannot update data which not yours"})
data = self.request.data
if ListModel.objects.filter(openid=self.request.auth.openid,goods_origin=data['goods_origin'], is_delete=False).exists():
raise APIException({"detail": "Data exists"})
else:
serializer = self.get_serializer(qs, data=data)
serializer.is_valid(raise_exception=True)
serializer.save()
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=200, headers=headers)
def partial_update(self, request, pk):
qs = self.get_object()
if qs.openid != self.request.auth.openid:
raise APIException({"detail": "Cannot partial_update data which not yours"})
else:
data = self.request.data
serializer = self.get_serializer(qs, data=data, partial=True)
serializer.is_valid(raise_exception=True)
serializer.save()
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=200, headers=headers)
def destroy(self, request, pk):
qs = self.get_object()
if qs.openid != self.request.auth.openid:
raise APIException({"detail": "Cannot delete data which not yours"})
else:
qs.is_delete = True
qs.save()
serializer = self.get_serializer(qs, many=False)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=200, headers=headers)
|
2,110 | check test type |
import os
import shutil
import sys
import time
from typing import Any, NoReturn, Optional
from .setting import (
CompilerType,
LOG_DIR,
PROFILE_DIR,
TestList,
TestPlatform,
TestType,
)
def convert_time(seconds: float) -> str:
seconds = int(round(seconds))
seconds = seconds % (24 * 3600)
hour = seconds // 3600
seconds %= 3600
minutes = seconds // 60
seconds %= 60
return "%d:%02d:%02d" % (hour, minutes, seconds)
def print_time(message: str, start_time: float, summary_time: bool = False) -> None:
with open(os.path.join(LOG_DIR, "log.txt"), "a+") as log_file:
end_time = time.time()
print(message, convert_time(end_time - start_time), file=log_file)
if summary_time:
print("\n", file=log_file)
def print_log(*args: Any) -> None:
with open(os.path.join(LOG_DIR, "log.txt"), "a+") as log_file:
print(f"[LOG] {' '.join(args)}", file=log_file)
def print_error(*args: Any) -> None:
with open(os.path.join(LOG_DIR, "log.txt"), "a+") as log_file:
print(f"[ERROR] {' '.join(args)}", file=log_file)
def remove_file(path: str) -> None:
if os.path.exists(path):
os.remove(path)
def remove_folder(path: str) -> None:
shutil.rmtree(path)
def create_folder(*paths: Any) -> None:
for path in paths:
os.makedirs(path, exist_ok=True)
# clean up all the files generated by coverage tool
def clean_up() -> None:
# remove profile folder
remove_folder(PROFILE_DIR)
sys.exit("Clean Up Successfully!")
def convert_to_relative_path(whole_path: str, base_path: str) -> str:
# ("profile/raw", "profile") -> "raw"
if base_path not in whole_path:
raise RuntimeError(base_path + " is not in " + whole_path)
return whole_path[len(base_path) + 1 :]
def replace_extension(filename: str, ext: str) -> str:
return filename[: filename.rfind(".")] + ext
# a file is related if it's in one of the test_list folder
def related_to_test_list(file_name: str, test_list: TestList) -> bool:
for test in test_list:
if test.name in file_name:
return True
return False
def get_raw_profiles_folder() -> str:
return os.environ.get("RAW_PROFILES_FOLDER", os.path.join(PROFILE_DIR, "raw"))
def detect_compiler_type(platform: TestPlatform) -> CompilerType:
if platform == TestPlatform.OSS:
from package.oss.utils import ( # type: ignore[assignment, import, misc]
detect_compiler_type,
)
cov_type = detect_compiler_type() # type: ignore[call-arg]
else:
from caffe2.fb.code_coverage.tool.package.fbcode.utils import ( # type: ignore[import]
detect_compiler_type,
)
cov_type = detect_compiler_type()
check_compiler_type(cov_type)
return cov_type # type: ignore[no-any-return]
def get_test_name_from_whole_path(path: str) -> str:
# code_coverage_tool/profile/merged/haha.merged -> haha
start = path.rfind("/")
end = path.rfind(".")
assert start >= 0 and end >= 0
return path[start + 1 : end]
def check_compiler_type(cov_type: Optional[CompilerType]) -> None:
if cov_type is not None and cov_type in [CompilerType.GCC, CompilerType.CLANG]:
return
raise Exception(
f"Can't parse compiler type: {cov_type}.",
" Please set environment variable COMPILER_TYPE as CLANG or GCC",
)
def check_platform_type(platform_type: TestPlatform) -> None:
if platform_type in [TestPlatform.OSS, TestPlatform.FBCODE]:
return
raise Exception(
f"Can't parse platform type: {platform_type}.",
" Please set environment variable COMPILER_TYPE as OSS or FBCODE",
)
def METHOD_NAME(test_type: str, target: str) -> None:
if test_type in [TestType.CPP.value, TestType.PY.value]:
return
raise Exception(
f"Can't parse test type: {test_type}.",
f" Please check the type of buck target: {target}",
)
def raise_no_test_found_exception(
cpp_binary_folder: str, python_binary_folder: str
) -> NoReturn:
raise RuntimeError(
f"No cpp and python tests found in folder **{cpp_binary_folder} and **{python_binary_folder}**"
)
|
2,111 | test roc auc score with sklearn single |
import numpy as np
import pytest
import sklearn
from autogluon.core.metrics import confusion_matrix, log_loss, quadratic_kappa, roc_auc
from autogluon.core.metrics.softclass_metrics import soft_log_loss
def test_confusion_matrix_with_valid_inputs_without_labels_and_weights():
# Given
input_solution = [2, 0, 2, 2, 0, 1]
input_prediction = [0, 0, 2, 2, 0, 2]
expected_output = np.array([[2, 0, 0], [0, 0, 1], [1, 0, 2]])
# When
observed_output = confusion_matrix(input_solution, input_prediction)
# Then
assert np.array_equal(expected_output, observed_output)
def test_confusion_matrix_with_valid_inputs_with_labels_and_without_weights():
# Given
input_solution = ["cat", "ant", "cat", "cat", "ant", "bird"]
input_prediction = ["ant", "ant", "cat", "cat", "ant", "cat"]
labels = ["ant", "bird", "cat"]
expected_output = np.array([[2, 0, 0], [0, 0, 1], [1, 0, 2]])
# When
observed_output = confusion_matrix(input_solution, input_prediction, labels=labels)
# Then
assert np.array_equal(expected_output, observed_output)
def test_confusion_matrix_with_valid_inputs_with_labels_and_with_weights():
# Given
input_solution = ["cat", "ant", "cat", "cat", "ant", "bird"]
input_prediction = ["ant", "ant", "cat", "cat", "ant", "cat"]
labels = ["ant", "bird", "cat"]
weights = [0.1, 0.3, 1.0, 0.8, 0.2, 2.0]
expected_output = np.array([[0.5, 0.0, 0.0], [0.0, 0.0, 2.0], [0.1, 0.0, 1.8]])
# When
observed_output = confusion_matrix(input_solution, input_prediction, labels=labels, weights=weights)
# Then
assert np.array_equal(expected_output, observed_output)
def test_confusion_matrix_with_valid_inputs_with_lesser_number_of_labels_and_without_weights():
# Given
input_solution = ["cat", "ant", "cat", "cat", "ant", "bird"]
input_prediction = ["ant", "ant", "cat", "cat", "ant", "cat"]
labels = ["bird", "cat"]
expected_output = np.array([[0, 1], [0, 2]])
# When
observed_output = confusion_matrix(input_solution, input_prediction, labels=labels)
# Then
assert np.array_equal(expected_output, observed_output)
def test_confusion_matrix_with_unequal_samples():
# Given
input_solution = ["cat", "ant", "cat"]
input_prediction = ["ant", "ant", "cat", "cat", "ant", "cat"]
# When-Then
with pytest.raises(ValueError):
observed_output = confusion_matrix(input_solution, input_prediction)
def test_confusion_matrix_with_multioutput_samples():
# Given
input_solution = [["cat", "ant", "cat"]]
input_prediction = [["ant", "ant", "cat"]]
# When-Then
with pytest.raises(ValueError):
observed_output = confusion_matrix(input_solution, input_prediction)
def test_confusion_matrix_with_empty_labels():
# Given
input_solution = ["cat", "ant", "cat", "cat", "ant", "bird"]
input_prediction = ["ant", "ant", "cat", "cat", "ant", "cat"]
labels = []
# When-Then
with pytest.raises(ValueError):
observed_output = confusion_matrix(input_solution, input_prediction, labels=labels)
def test_confusion_matrix_with_multiDimensional_labels():
# Given
input_solution = ["cat", "ant", "cat", "cat", "ant", "bird"]
input_prediction = ["ant", "ant", "cat", "cat", "ant", "cat"]
labels = [["ant", "bird"], "cat"]
# When-Then
with pytest.raises(ValueError):
observed_output = confusion_matrix(input_solution, input_prediction, labels=labels)
def test_confusion_matrix_with_invalid_weights():
# Given
input_solution = ["cat", "ant", "cat", "cat", "ant", "bird"]
input_prediction = ["ant", "ant", "cat", "cat", "ant", "cat"]
labels = [[1, 2], 0.1, [0.1], 3, 1]
# When-Then
with pytest.raises(ValueError):
observed_output = confusion_matrix(input_solution, input_prediction, labels=labels)
def test_confusion_matrix_with_empty_inputs():
# Given
input_solution = []
input_prediction = []
labels = ["bird", "cat"]
expected_output = np.array([[0, 0], [0, 0]])
# When
observed_output = confusion_matrix(input_solution, input_prediction, labels=labels)
# Then
assert np.array_equal(expected_output, observed_output)
@pytest.mark.parametrize(
"gt,probs",
[
([0, 2, 1, 0], [[0.1, 0.2, 0.7], [0.2, 0.1, 0.7], [0.3, 0.4, 0.3], [0.01, 0.9, 0.09]]),
([0, 2, 0, 0], [[0.1, 0.2, 0.7], [0.2, 0.1, 0.7], [0.3, 0.4, 0.3], [0.01, 0.9, 0.09]]),
],
)
def test_log_loss(gt, probs):
gt = np.array(gt, dtype=np.int64)
probs = np.array(probs, dtype=np.float32)
ag_loss = log_loss(gt, probs)
expected = np.log(probs[np.arange(probs.shape[0]), gt]).mean()
np.testing.assert_allclose(ag_loss, expected)
@pytest.mark.parametrize(
"gt,probs",
[([[0.2, 0.3, 0.5], [0.1, 0.6, 0.3], [0.9, 0.05, 0.05], [0.3, 0.5, 0.2]], [[0.1, 0.2, 0.7], [0.2, 0.1, 0.7], [0.3, 0.4, 0.3], [0.01, 0.9, 0.09]])],
)
def test_soft_log_loss(gt, probs):
gt = np.array(gt, dtype=np.float32)
probs = np.array(probs, dtype=np.float32)
ag_loss = soft_log_loss(gt, probs)
expected = -1.4691482
np.testing.assert_allclose(ag_loss, expected)
def test_log_loss_single_binary_class():
gt = np.array([1, 1, 1])
probs = np.array([0.1, 0.2, 0.3])
np.testing.assert_allclose(log_loss(gt, probs), np.log(probs).mean())
np.testing.assert_allclose(log_loss(1 - gt, probs), np.log(1 - probs).mean())
@pytest.mark.parametrize(
"gt,probs",
[
([0, 2, 1, 1], [[0.1, 0.2, 0.7], [0.2, 0.1, 0.7], [0.3, 0.4, 0.3], [0.01, 0.9, 0.09]]),
([0, 1, 0, 1], [0.1, 0.2, 0.3, 0.4]),
],
)
def test_log_loss_with_sklearn(gt, probs):
gt = np.array(gt, dtype=np.int64)
probs = np.array(probs, dtype=np.float32)
ag_loss = log_loss(gt, probs)
sklearn_log_loss = sklearn.metrics.log_loss(gt, probs)
# In AutoGluon, the metrics will always return score that is higher the better.
# Thus, the true value should be the negation of the real log_loss
np.testing.assert_allclose(ag_loss, -sklearn_log_loss)
ag_loss_as_sklearn = log_loss.convert_score_to_original(ag_loss)
np.testing.assert_allclose(ag_loss_as_sklearn, sklearn_log_loss)
def test_roc_auc_score_with_sklearn():
"""
Ensure AutoGluon's custom fast roc_auc_score produces the same result as sklearn's roc_auc_score.
"""
y_true = np.array([0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1])
y_score = np.array([0, 1, 0, 1, 0.1, 0.81, 0.76, 0.1, 0.31, 0.32, 0.34, 0.9])
expected_score = sklearn.metrics.roc_auc_score(y_true, y_score)
actual_score = roc_auc(y_true, y_score)
assert np.isclose(actual_score, expected_score)
def METHOD_NAME():
y_true = np.array([1])
y_score = np.array([0.9])
with pytest.raises(ValueError):
sklearn.metrics.roc_auc_score(y_true, y_score)
with pytest.raises(ValueError):
roc_auc(y_true, y_score)
def test_roc_auc_score_with_sklearn_zero_raise():
y_true = np.array([])
y_score = np.array([])
with pytest.raises(ValueError):
sklearn.metrics.roc_auc_score(y_true, y_score)
with pytest.raises(ValueError):
roc_auc(y_true, y_score)
def test_quadratic_kappa():
actuals = np.array([4, 4, 3, 4, 4, 4, 1, 1, 2, 1])
preds = np.array([0, 2, 1, 0, 0, 0, 1, 1, 2, 1])
value = quadratic_kappa(actuals, preds)
assert round(value, 3) == -0.139
actuals = np.array([0, 1, 0, 1])
preds = np.array([[0.8, 0.1, 0.1], [0.7, 0.1, 0.2], [0.1, 0.8, 0.1], [0.1, 0.1, 0.8]])
value = quadratic_kappa(actuals, preds)
assert value == 0.25
|
2,112 | forward |
# Copyright (c) Alibaba, Inc. and its affiliates.
import math
import os.path as osp
from typing import Any, Dict, List, Union
import torch
import torch.nn.functional as F
from torch import autograd, nn
from modelscope.metainfo import Models
from modelscope.models.base import Tensor, TorchModel
from modelscope.models.builder import MODELS
from modelscope.utils.config import Config
from modelscope.utils.constant import ModelFile, Tasks
from modelscope.utils.logger import get_logger
from .gpen import Discriminator, FullGenerator
from .losses.losses import IDLoss, L1Loss
logger = get_logger()
__all__ = ['ImagePortraitEnhancement']
@MODELS.register_module(
Tasks.image_portrait_enhancement, module_name=Models.gpen)
class ImagePortraitEnhancement(TorchModel):
def __init__(self, model_dir: str, *args, **kwargs):
"""initialize the face enhancement model from the `model_dir` path.
Args:
model_dir (str): the model path.
"""
super().__init__(model_dir, *args, **kwargs)
self.size = 256
self.style_dim = 512
self.n_mlp = 8
self.mean_path_length = 0
self.accum = 0.5**(32 / (10 * 1000))
if torch.cuda.is_available():
self._device = torch.device('cuda')
else:
self._device = torch.device('cpu')
self.l1_loss = L1Loss()
self.id_loss = IDLoss(f'{model_dir}/arcface/model_ir_se50.pth',
self._device)
self.generator = FullGenerator(
self.size, self.style_dim, self.n_mlp,
isconcat=True).to(self._device)
self.g_ema = FullGenerator(
self.size, self.style_dim, self.n_mlp,
isconcat=True).to(self._device)
self.discriminator = Discriminator(self.size).to(self._device)
if self.size == 512:
self.load_pretrained(model_dir)
def load_pretrained(self, model_dir):
g_path = f'{model_dir}/{ModelFile.TORCH_MODEL_FILE}'
g_dict = torch.load(g_path, map_location=torch.device('cpu'))
self.generator.load_state_dict(g_dict)
self.g_ema.load_state_dict(g_dict)
d_path = f'{model_dir}/net_d.pt'
d_dict = torch.load(d_path, map_location=torch.device('cpu'))
self.discriminator.load_state_dict(d_dict)
logger.info('load model done.')
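    # Clarifying note (added): accumulate() below keeps an exponential moving average
    # of the generator weights in g_ema, i.e. ema_param = accum * ema_param
    # + (1 - accum) * param, with accum = 0.5 ** (32 / (10 * 1000)) ~= 0.9978.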
def accumulate(self):
par1 = dict(self.g_ema.named_parameters())
par2 = dict(self.generator.named_parameters())
for k in par1.keys():
par1[k].data.mul_(self.accum).add_(1 - self.accum, par2[k].data)
def requires_grad(self, model, flag=True):
for p in model.parameters():
p.requires_grad = flag
def d_logistic_loss(self, real_pred, fake_pred):
real_loss = F.softplus(-real_pred)
fake_loss = F.softplus(fake_pred)
return real_loss.mean() + fake_loss.mean()
def d_r1_loss(self, real_pred, real_img):
grad_real, = autograd.grad(
outputs=real_pred.sum(), inputs=real_img, create_graph=True)
grad_penalty = grad_real.pow(2).view(grad_real.shape[0],
-1).sum(1).mean()
return grad_penalty
def g_nonsaturating_loss(self,
fake_pred,
fake_img=None,
real_img=None,
input_img=None):
loss = F.softplus(-fake_pred).mean()
loss_l1 = self.l1_loss(fake_img, real_img)
loss_id, __, __ = self.id_loss(fake_img, real_img, input_img)
loss_id = 0
loss += 1.0 * loss_l1 + 1.0 * loss_id
return loss
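    # Clarifying sketch (added): g_path_regularize below is a StyleGAN2-style path
    # length penalty. It perturbs the fake image with noise scaled by 1/sqrt(H*W),
    # takes the gradient of the perturbed sum w.r.t. the latents, and penalizes the
    # squared deviation of that gradient norm from its running mean.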
def g_path_regularize(self,
fake_img,
latents,
mean_path_length,
decay=0.01):
noise = torch.randn_like(fake_img) / math.sqrt(
fake_img.shape[2] * fake_img.shape[3])
grad, = autograd.grad(
outputs=(fake_img * noise).sum(),
inputs=latents,
create_graph=True)
path_lengths = torch.sqrt(grad.pow(2).sum(2).mean(1))
path_mean = mean_path_length + decay * (
path_lengths.mean() - mean_path_length)
path_penalty = (path_lengths - path_mean).pow(2).mean()
return path_penalty, path_mean.detach(), path_lengths
@torch.no_grad()
def _evaluate_postprocess(self, input: Tensor,
target: Tensor) -> Dict[str, list]:
preds, _ = self.generator(input)
preds = list(torch.split(preds, 1, 0))
targets = list(torch.split(target, 1, 0))
preds = [((pred.data * 0.5 + 0.5) * 255.).squeeze(0).type(
torch.uint8).permute(1, 2, 0).cpu().numpy() for pred in preds]
targets = [((target.data * 0.5 + 0.5) * 255.).squeeze(0).type(
torch.uint8).permute(1, 2, 0).cpu().numpy() for target in targets]
return {'pred': preds, 'target': targets}
def _train_forward_d(self, input: Tensor, target: Tensor) -> Tensor:
self.requires_grad(self.generator, False)
self.requires_grad(self.discriminator, True)
preds, _ = self.generator(input)
fake_pred = self.discriminator(preds)
real_pred = self.discriminator(target)
d_loss = self.d_logistic_loss(real_pred, fake_pred)
return d_loss
def _train_forward_d_r1(self, input: Tensor, target: Tensor) -> Tensor:
input.requires_grad = True
target.requires_grad = True
real_pred = self.discriminator(target)
r1_loss = self.d_r1_loss(real_pred, target)
return r1_loss
def _train_forward_g(self, input: Tensor, target: Tensor) -> Tensor:
self.requires_grad(self.generator, True)
self.requires_grad(self.discriminator, False)
preds, _ = self.generator(input)
fake_pred = self.discriminator(preds)
g_loss = self.g_nonsaturating_loss(fake_pred, preds, target, input)
return g_loss
def _train_forward_g_path(self, input: Tensor, target: Tensor) -> Tensor:
fake_img, latents = self.generator(input, return_latents=True)
path_loss, self.mean_path_length, path_lengths = self.g_path_regularize(
fake_img, latents, self.mean_path_length)
return path_loss
@torch.no_grad()
def _inference_forward(self, input: Tensor) -> Dict[str, Tensor]:
return {'outputs': (self.generator(input)[0] * 0.5 + 0.5).clamp(0, 1)}
def METHOD_NAME(self, input: Dict[str,
Tensor]) -> Dict[str, Union[list, Tensor]]:
"""return the result by the model
Args:
input (Dict[str, Tensor]): the preprocessed data
Returns:
Dict[str, Union[list, Tensor]]: results
"""
for key, value in input.items():
input[key] = input[key].to(self._device)
if 'target' in input:
return self._evaluate_postprocess(**input)
else:
return self._inference_forward(**input)
|
2,113 |
run onlyif instance
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import builtins
import operator
from typing import Any, Callable, Type
import functools
import torch
from torch.fx import Node
# These need to run in global scope to handle nested calls correctly
_orig_module_call: Callable = torch.nn.Module.__call__
_orig_module_getattr: Callable = torch.nn.Module.__getattr__
_orig_module_getattribute: Callable = torch.nn.Module.__getattribute__
_orig_agfunc_apply: Callable = torch.autograd.function.Function.apply
_orig_torch_assert: Callable = torch._assert
_orig_type: Callable = builtins.type
_orig_isinstance: Callable = builtins.isinstance
_orig_issubclass: Callable = builtins.issubclass
_orig_getattr: Callable = builtins.getattr
_orig_range: Type[Any] = builtins.range
_orig_int: Type[Any] = builtins.int
_orig_bool: Type[Any] = builtins.bool
_orig_tuple: Type[Any] = builtins.tuple
_orig_list: Type[Any] = builtins.list
_orig_set: Type[Any] = builtins.set
_orig_frozenset: Type[Any] = builtins.frozenset
_orig_dict: Type[Any] = builtins.dict
_orig_map: Type[Any] = builtins.map
_orig_zip: Type[Any] = builtins.zip
_orig_enumerate: Type[Any] = builtins.enumerate
_orig_slice: Type[Any] = builtins.slice
_orig_reversed: Type[Any] = builtins.reversed
_orig_torch_size: Type[Any] = torch.Size
_orig_torch_finfo: Type[Any] = torch.finfo
_orig_len: Callable = builtins.len
_orig_not: Callable = operator.not_
_orig_is: Callable = operator.is_
_orig_is_not: Callable = operator.is_not
_orig_contains: Callable = operator.contains
_orig_index: Callable = operator.index
_orig_all: Callable = builtins.all
_orig_min: Callable = builtins.min
_orig_max: Callable = builtins.max
_orig_node_is_impure: Callable = Node.is_impure
def METHOD_NAME(cond_type: Type[Any], return_orig: bool = True, return_const: Any = None):
def helper(fn):
if return_orig:
@functools.wraps(fn)
def wrapper_orig(*args):
if _orig_isinstance(args[-1], cond_type):
return fn(*args)
return args[-1]
return wrapper_orig
else:
@functools.wraps(fn)
def wrapper_const(*args):
if _orig_isinstance(args[-1], cond_type):
return fn(*args)
return return_const
return wrapper_const
return helper
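# Illustrative note (added, not part of the original module): the factory above
# builds decorators that only dispatch to the wrapped function when the last
# positional argument is an instance of `cond_type`; otherwise the argument itself
# (or `return_const` when return_orig=False) is returned unchanged.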
def map_recursive(fn: Callable, arg) -> Any:
"""
Apply fn to each Node appearing arg. arg may be a list, tuple, slice, or dict with string keys.
"""
if _orig_type(arg) != torch.Size and _orig_isinstance(arg, _orig_tuple):
t = _orig_tuple(map_recursive(fn, elem) for elem in arg)
# Support NamedTuple (if it has `_fields`) by repacking into original type.
return t if not hasattr(arg, '_fields') else _orig_type(arg)(*t)
elif _orig_isinstance(arg, _orig_list):
return _orig_list(map_recursive(fn, elem) for elem in arg)
elif _orig_isinstance(arg, _orig_dict):
return {k: map_recursive(fn, v) for k, v in arg.items()}
else:
return fn(arg)
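# Hypothetical usage sketch (added for clarity only):
#   map_recursive(lambda v: v + 1, {'a': [1, (2, 3)]})  ->  {'a': [2, (3, 4)]}
# Note that torch.Size values are deliberately treated as leaves rather than tuples.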
def map_recursive_zip(fn: Callable, arg0, *args) -> Any:
"""
Apply fn to each Node appearing arg. arg may be a list, tuple, slice, or dict with string keys.
"""
if _orig_type(arg0) != torch.Size and _orig_isinstance(arg0, _orig_tuple):
for arg in args:
assert (not _orig_isinstance(arg, torch.Size)) and _orig_isinstance(arg, _orig_tuple)
assert len(arg0) == len(arg)
return _orig_tuple(map_recursive_zip(fn, *sub_args) for sub_args in _orig_zip(arg0, *args))
elif _orig_isinstance(arg0, _orig_list):
for arg in args:
assert _orig_isinstance(arg, _orig_list)
assert len(arg0) == len(arg)
return _orig_list(map_recursive_zip(fn, *sub_args) for sub_args in _orig_zip(arg0, *args))
elif _orig_isinstance(arg0, _orig_dict):
keys = _orig_set(arg0.keys())
for arg in args:
assert _orig_isinstance(arg, _orig_dict) and len(keys.symmetric_difference(arg.keys())) == 0
return {k: map_recursive_zip(fn, arg0[k], *(arg[k] for arg in args)) for k in keys}
else:
# assert not _orig_isinstance(arg0, slice)
return fn(arg0, *args)
|
2,114 |
get tasks
|
#
# -*- coding: utf-8 -*-
#
# Copyright (c) 2022 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
''' Script for downloading all GLUE data.
Note: for legal reasons, we are unable to host MRPC.
You can either use the version hosted by the SentEval team, which is already tokenized,
or you can download the original data from (https://download.microsoft.com/download/D/4/6/D46FF87A-F6B9-4252-AA8B-3604ED519838/MSRParaphraseCorpus.msi) and extract the data from it manually.
For Windows users, you can run the .msi file. For Mac and Linux users, consider an external library such as 'cabextract' (see below for an example).
You should then rename and place specific files in a folder (see below for an example).
mkdir MRPC
cabextract MSRParaphraseCorpus.msi -d MRPC
cat MRPC/_2DEC3DBE877E4DB192D17C0256E90F1D | tr -d $'\r' > MRPC/msr_paraphrase_train.txt
cat MRPC/_D7B391F9EAFF4B1B8BCE8F21B20B1B61 | tr -d $'\r' > MRPC/msr_paraphrase_test.txt
rm MRPC/_*
rm MSRParaphraseCorpus.msi
1/30/19: It looks like SentEval is no longer hosting their extracted and tokenized MRPC data, so you'll need to download the data from the original source for now.
2/11/19: It looks like SentEval actually *is* hosting the extracted data. Hooray!
'''
import io
import os
import sys
import shutil
import argparse
import tempfile
import urllib.request
import zipfile
TASKS = ["CoLA", "SST", "MRPC", "QQP", "STS", "MNLI", "QNLI", "RTE", "WNLI", "diagnostic"]
TASK2PATH = {"CoLA":'https://dl.fbaipublicfiles.com/glue/data/CoLA.zip',
"SST":'https://dl.fbaipublicfiles.com/glue/data/SST-2.zip',
"QQP":'https://dl.fbaipublicfiles.com/glue/data/STS-B.zip',
"STS":'https://dl.fbaipublicfiles.com/glue/data/QQP-clean.zip',
"MNLI":'https://dl.fbaipublicfiles.com/glue/data/MNLI.zip',
"QNLI":'https://dl.fbaipublicfiles.com/glue/data/QNLIv2.zip',
"RTE":'https://dl.fbaipublicfiles.com/glue/data/RTE.zip',
"WNLI":'https://dl.fbaipublicfiles.com/glue/data/WNLI.zip',
"MRPC":"https://raw.githubusercontent.com/MegEngine/Models/master/official/nlp/bert/glue_data/MRPC/dev_ids.tsv",
"diagnostic":'https://dl.fbaipublicfiles.com/glue/data/AX.tsv'}
MRPC_TRAIN = 'https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_train.txt'
MRPC_TEST = 'https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_test.txt'
def download_and_extract(task, output_dir):
print("Downloading and extracting %s..." % task)
if task == "MNLI":
print("\tNote (12/10/20): This script no longer downloads SNLI. You will need to manually download and format the data to use SNLI.")
data_file = "%s.zip" % task
urllib.request.urlretrieve(TASK2PATH[task], data_file)
with zipfile.ZipFile(data_file) as zip_ref:
zip_ref.extractall(output_dir)
os.remove(data_file)
print("\tCompleted!")
def format_mrpc(output_dir, path_to_data):
print("Processing MRPC...")
mrpc_dir = os.path.join(output_dir, "MRPC")
if not os.path.isdir(mrpc_dir):
os.mkdir(mrpc_dir)
if path_to_data:
mrpc_train_file = os.path.join(path_to_data, "msr_paraphrase_train.txt")
mrpc_test_file = os.path.join(path_to_data, "msr_paraphrase_test.txt")
else:
try:
mrpc_train_file = os.path.join(mrpc_dir, "msr_paraphrase_train.txt")
mrpc_test_file = os.path.join(mrpc_dir, "msr_paraphrase_test.txt")
urllib.request.urlretrieve(MRPC_TRAIN, mrpc_train_file)
urllib.request.urlretrieve(MRPC_TEST, mrpc_test_file)
except urllib.error.HTTPError:
print("Error downloading MRPC")
return
assert os.path.isfile(mrpc_train_file), "Train data not found at %s" % mrpc_train_file
assert os.path.isfile(mrpc_test_file), "Test data not found at %s" % mrpc_test_file
with io.open(mrpc_test_file, encoding='utf-8') as data_fh, \
io.open(os.path.join(mrpc_dir, "test.tsv"), 'w', encoding='utf-8') as test_fh:
header = data_fh.readline()
test_fh.write("index\t#1 ID\t#2 ID\t#1 String\t#2 String\n")
for idx, row in enumerate(data_fh):
label, id1, id2, s1, s2 = row.strip().split('\t')
test_fh.write("%d\t%s\t%s\t%s\t%s\n" % (idx, id1, id2, s1, s2))
try:
urllib.request.urlretrieve(TASK2PATH["MRPC"], os.path.join(mrpc_dir, "dev_ids.tsv"))
    except (KeyError, urllib.error.HTTPError):
print("\tError downloading standard development IDs for MRPC. You will need to manually split your data.")
return
dev_ids = []
with io.open(os.path.join(mrpc_dir, "dev_ids.tsv"), encoding='utf-8') as ids_fh:
for row in ids_fh:
dev_ids.append(row.strip().split('\t'))
with io.open(mrpc_train_file, encoding='utf-8') as data_fh, \
io.open(os.path.join(mrpc_dir, "train.tsv"), 'w', encoding='utf-8') as train_fh, \
io.open(os.path.join(mrpc_dir, "dev.tsv"), 'w', encoding='utf-8') as dev_fh:
header = data_fh.readline()
train_fh.write(header)
dev_fh.write(header)
for row in data_fh:
label, id1, id2, s1, s2 = row.strip().split('\t')
if [id1, id2] in dev_ids:
dev_fh.write("%s\t%s\t%s\t%s\t%s\n" % (label, id1, id2, s1, s2))
else:
train_fh.write("%s\t%s\t%s\t%s\t%s\n" % (label, id1, id2, s1, s2))
print("\tCompleted!")
def download_diagnostic(output_dir):
print("Downloading and extracting diagnostic...")
if not os.path.isdir(os.path.join(output_dir, "diagnostic")):
os.mkdir(os.path.join(output_dir, "diagnostic"))
data_file = os.path.join(output_dir, "diagnostic", "diagnostic.tsv")
urllib.request.urlretrieve(TASK2PATH["diagnostic"], data_file)
print("\tCompleted!")
return
def METHOD_NAME(task_names):
task_names = task_names.split(',')
if "all" in task_names:
tasks = TASKS
else:
tasks = []
for task_name in task_names:
assert task_name in TASKS, "Task %s not found!" % task_name
tasks.append(task_name)
return tasks
def main(arguments):
parser = argparse.ArgumentParser()
parser.add_argument('--output_dir', help='directory to save data to', type=str, default='data')
parser.add_argument('--tasks', help='tasks to download data for as a comma separated string',
type=str, default='all')
    parser.add_argument('--path_to_mrpc', help='path to directory containing extracted MRPC data, msr_paraphrase_train.txt and msr_paraphrase_test.txt',
type=str, default='')
args = parser.parse_args(arguments)
if not os.path.isdir(args.output_dir):
os.mkdir(args.output_dir)
tasks = METHOD_NAME(args.tasks)
for task in tasks:
if task == 'MRPC':
format_mrpc(args.output_dir, args.path_to_mrpc)
elif task == 'diagnostic':
download_diagnostic(args.output_dir)
else:
download_and_extract(task, args.output_dir)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
2,115 |
handle
|
from __future__ import annotations
from collections import defaultdict
from django.core.management.base import BaseCommand
from django.db.models import Sum, Q, OuterRef, Subquery
from onadata.apps.logger.models.attachment import Attachment
from onadata.apps.logger.models.xform import XForm
from onadata.apps.main.models.user_profile import UserProfile
from onadata.libs.utils.jsonbfield_helper import ReplaceValues
class Command(BaseCommand):
help = (
'Retroactively calculate the total attachment file storage '
'per xform and user profile'
)
def METHOD_NAME(self, *args, **kwargs):
self.verbosity = kwargs['verbosity']
# Release any locks on the users' profile from getting submissions
UserProfile.objects.all().update(
metadata=ReplaceValues(
'metadata',
updates={'submissions_suspended': False},
),
)
        # Get all profiles already updated, to exclude their forms from the list.
        # This is a lazy query, evaluated only when used inside the `xforms` queryset below.
subquery = UserProfile.objects.values_list('user_id', flat=True).filter(
metadata__attachments_counting_status='complete'
)
# Get only xforms whose users' storage counters have not been updated yet
xforms = (
XForm.objects.exclude(user_id__in=subquery)
.values('pk', 'user_id', 'user__username')
.order_by('user_id')
)
last_xform = None
for xform in xforms:
if not last_xform or (last_xform['user_id'] != xform['user_id']):
# All forms for the previous user are complete; update that user's profile
if last_xform:
self.update_user_profile(last_xform)
# Retrieve or create user's profile.
(
user_profile,
created,
) = UserProfile.objects.get_or_create(user_id=xform['user_id'])
# Some old profiles don't have metadata
if user_profile.metadata is None:
user_profile.metadata = {}
# Set the flag to true if it was never set.
if not user_profile.metadata.get('submissions_suspended'):
# We are using the flag `submissions_suspended` to prevent
# new submissions from coming in while the
# `attachment_storage_bytes` is being calculated.
user_profile.metadata['submissions_suspended'] = True
user_profile.save(update_fields=['metadata'])
# write out xform progress
if self.verbosity >= 1:
self.stdout.write(
f"Calculating attachments for xform_id #{xform['pk']}"
f" (user {xform['user__username']})"
)
# aggregate total media file size for all media per xform
form_attachments = Attachment.objects.filter(
instance__xform_id=xform['pk']
).aggregate(total=Sum('media_file_size'))
if form_attachments['total']:
if self.verbosity >= 1:
self.stdout.write(
f'\tUpdating xform attachment storage to '
f"{form_attachments['total']} bytes"
)
XForm.objects.filter(
pk=xform['pk']
).update(
attachment_storage_bytes=form_attachments['total']
)
elif self.verbosity >= 1:
self.stdout.write('\tNo attachments found')
last_xform = xform
# need to call `update_user_profile()` one more time outside the loop
# because the last user profile will not be up-to-date otherwise
if last_xform:
self.update_user_profile(last_xform)
if self.verbosity >= 1:
self.stdout.write('Done!')
def update_user_profile(self, xform: dict):
user_id = xform['user_id']
username = xform['user__username']
if self.verbosity >= 1:
self.stdout.write(
f'Updating attachment storage total on '
f'{username}’s profile'
)
# Update user's profile (and lock the related row)
updates = {
'submissions_suspended': False,
'attachments_counting_status': 'complete',
}
# We cannot use `.aggregate()` in a subquery because it's evaluated
# right away. See https://stackoverflow.com/a/56122354/1141214 for
# details.
subquery = (
XForm.objects.filter(user_id=user_id)
.values('user_id')
.annotate(total=Sum('attachment_storage_bytes'))
.values('total')
)
UserProfile.objects.filter(user_id=user_id).update(
attachment_storage_bytes=Subquery(subquery),
metadata=ReplaceValues(
'metadata',
updates=updates,
),
)
|
2,116 |
method
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"disk delete",
confirmation="Are you sure you want to perform this operation?",
)
class Delete(AAZCommand):
"""Delete a managed disk.
:example: Delete a managed disk.
az disk delete --name MyManagedDisk --resource-group MyResourceGroup
"""
_aaz_info = {
"version": "2019-07-01",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.compute/disks/{}", "2019-07-01"],
]
}
AZ_SUPPORT_NO_WAIT = True
def _handler(self, command_args):
super()._handler(command_args)
return self.build_lro_poller(self._execute_operations, None)
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.disk_name = AAZStrArg(
options=["-n", "--name", "--disk-name"],
help="The name of the managed disk that is being created. The name can't be changed after the disk is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters.",
required=True,
id_part="name",
)
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
yield self.DisksDelete(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
class DisksDelete(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [202]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_200,
self.on_error,
lro_options={"final-state-via": "azure-async-operation"},
path_format_arguments=self.url_parameters,
)
if session.http_response.status_code in [200]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_200,
self.on_error,
lro_options={"final-state-via": "azure-async-operation"},
path_format_arguments=self.url_parameters,
)
if session.http_response.status_code in [204]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_204,
self.on_error,
lro_options={"final-state-via": "azure-async-operation"},
path_format_arguments=self.url_parameters,
)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}",
**self.url_parameters
)
@property
def METHOD_NAME(self):
return "DELETE"
@property
def error_format(self):
return "MgmtErrorFormat"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"diskName", self.ctx.args.disk_name,
required=True,
),
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2019-07-01",
required=True,
),
}
return parameters
def on_200(self, session):
pass
def on_204(self, session):
pass
class _DeleteHelper:
"""Helper class for Delete"""
__all__ = ["Delete"]
|
2,117 |
rx
|
# Copyright (c) 2014-present PlatformIO <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import subprocess
import sys
from platformio.project.exception import PlatformioException
from platformio.public import (
DeviceMonitorFilterBase,
load_build_metadata,
)
# By design, __init__ is called inside miniterm and we can't pass context to it.
# pylint: disable=attribute-defined-outside-init
IS_WINDOWS = sys.platform.startswith("win")
class Esp32C3ExceptionDecoder(DeviceMonitorFilterBase):
NAME = "esp32_c3_exception_decoder"
PCADDR_PATTERN = re.compile(r'0x4[0-9a-f]{7}', re.IGNORECASE)
def __call__(self):
self.buffer = ""
self.pcaddr_re = self.PCADDR_PATTERN
self.firmware_path = None
self.addr2line_path = None
self.enabled = self.setup_paths()
if self.config.get("env:" + self.environment, "build_type") != "debug":
print(
"""
Please build project in debug configuration to get more details about an exception.
See https://docs.platformio.org/page/projectconf/build_configurations.html
"""
)
return self
def setup_paths(self):
self.project_dir = os.path.abspath(self.project_dir)
try:
data = load_build_metadata(self.project_dir, self.environment)
self.firmware_path = data["prog_path"]
if not os.path.isfile(self.firmware_path):
sys.stderr.write(
"%s: disabling, firmware at %s does not exist, rebuild the project?\n"
% (self.__class__.__name__, self.firmware_path)
)
return False
if self.addr2line_path is None:
cc_path = data.get("cc_path", "")
if "-gcc" in cc_path:
self.addr2line_path = cc_path.replace("-gcc", "-addr2line")
else:
sys.stderr.write(
"%s: disabling, failed to find addr2line.\n"
% self.__class__.__name__
)
return False
if not os.path.isfile(self.addr2line_path):
sys.stderr.write(
"%s: disabling, addr2line at %s does not exist\n"
% (self.__class__.__name__, self.addr2line_path)
)
return False
return True
except PlatformioException as e:
sys.stderr.write(
"%s: disabling, exception while looking for addr2line: %s\n"
% (self.__class__.__name__, e)
)
return False
def METHOD_NAME(self, text):
if not self.enabled:
return text
last = 0
while True:
idx = text.find("\n", last)
if idx == -1:
if len(self.buffer) < 4096:
self.buffer += text[last:]
break
line = text[last:idx]
if self.buffer:
line = self.buffer + line
self.buffer = ""
last = idx + 1
# Output each trace on a separate line below ours
# Logic identical to https://github.com/espressif/esp-idf/blob/master/tools/idf_monitor_base/logger.py#L131
for m in re.finditer(self.pcaddr_re, line):
if m is None:
continue
trace = self.get_backtrace(m)
if len(trace) != "":
text = text[: last] + trace + text[last :]
last += len(trace)
return text
def get_backtrace(self, match):
trace = "\n"
enc = "mbcs" if IS_WINDOWS else "utf-8"
args = [self.addr2line_path, u"-fipC", u"-e", self.firmware_path]
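        # The -fipC flags ask addr2line to print function names (-f), unwind
        # inlined frames (-i), pretty-print (-p) and demangle C++ symbols (-C).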
try:
addr = match.group()
output = (
subprocess.check_output(args + [addr])
.decode(enc)
.strip()
)
output = output.replace(
"\n", "\n "
) # newlines happen with inlined methods
output = self.strip_project_dir(output)
# Output the trace in yellow color so that it is easier to spot
trace += "\033[33m=> %s: %s\033[0m\n" % (addr, output)
except subprocess.CalledProcessError as e:
sys.stderr.write(
"%s: failed to call %s: %s\n"
% (self.__class__.__name__, self.addr2line_path, e)
)
return trace
def strip_project_dir(self, trace):
while True:
idx = trace.find(self.project_dir)
if idx == -1:
break
trace = trace[:idx] + trace[idx + len(self.project_dir) + 1 :]
return trace
|
2,118 |
measure thread
|
import json
import os
import rclpy
import cv2
import sys
import base64
import threading
import time
import numpy as np
from datetime import datetime
from websocket_server import WebsocketServer
import multiprocessing
from shared.image import SharedImage
from shared.value import SharedValue
# Graphical User Interface Class
class GUI:
# Initialization function
# The actual initialization
def __init__(self, host):
rclpy.init()
rclpy.create_node('GUI')
self.payload = {'image':'', 'v':'','w':''}
self.server = None
self.client = None
self.host = host
        # Image variables
        self.shared_image = SharedImage("guiimage")
        self.image_to_be_shown = None
        self.image_to_be_shown_updated = False
        self.image_show_lock = threading.Lock()
# Get HAL variables
self.shared_v = SharedValue("velocity")
self.shared_w = SharedValue("angular")
# Event objects for multiprocessing
self.ack_event = multiprocessing.Event()
self.cli_event = multiprocessing.Event()
# Start server thread
t = threading.Thread(target=self.run_server)
t.start()
# Function to prepare image payload
# Encodes the image as a JSON string and sends through the WS
def payloadImage(self):
image = self.shared_image.get()
payload = {'image': '', 'shape': ''}
shape = image.shape
frame = cv2.imencode('.JPEG', image)[1]
encoded_image = base64.b64encode(frame)
payload['image'] = encoded_image.decode('utf-8')
payload['shape'] = shape
return payload
# Function for student to call
def showImage(self, image):
self.image_show_lock.acquire()
self.image_to_be_shown = image
self.image_to_be_shown_updated = True
self.image_show_lock.release()
# Function to get the client
# Called when a new client is received
def get_client(self, client, server):
self.client = client
self.cli_event.set()
print(client, 'connected')
# Update the gui
def update_gui(self):
# Payload Image Message
payload = self.payloadImage()
self.payload["image"] = json.dumps(payload)
# Payload V Message
v_message = str(self.shared_v.get())
self.payload["v"] = v_message
# Payload W Message
w_message = str(self.shared_w.get())
self.payload["w"] = w_message
message = "#gui" + json.dumps(self.payload)
self.server.send_message(self.client, message)
# Function to read the message from websocket
# Gets called when there is an incoming message from the client
def get_message(self, client, server, message):
# Acknowledge Message for GUI Thread
if(message[:4] == "#ack"):
# Set acknowledgement flag
self.ack_event.set()
# Reset message
elif(message[:5] == "#rest"):
self.reset_gui()
# Function that gets called when the connected closes
def handle_close(self, client, server):
print(client, 'closed')
# Activate the server
def run_server(self):
self.server = WebsocketServer(port=2303, host=self.host)
self.server.set_fn_new_client(self.get_client)
self.server.set_fn_message_received(self.get_message)
self.server.set_fn_client_left(self.handle_close)
home_dir = os.path.expanduser('~')
logged = False
while not logged:
try:
f = open(f"{home_dir}/ws_gui.log", "w")
f.write("websocket_gui=ready")
f.close()
logged = True
except:
time.sleep(0.1)
self.server.run_forever()
# This class decouples the user thread
# and the GUI update thread
class ProcessGUI(multiprocessing.Process):
def __init__(self):
super(ProcessGUI, self).__init__()
self.host = sys.argv[1]
# Time variables
self.time_cycle = SharedValue("gui_time_cycle")
self.ideal_cycle = SharedValue("gui_ideal_cycle")
self.iteration_counter = 0
# Function to initialize events
def initialize_events(self):
# Events
self.ack_event = self.gui.ack_event
self.cli_event = self.gui.cli_event
self.exit_signal = multiprocessing.Event()
# Function to start the execution of threads
def run(self):
# Initialize GUI
self.gui = GUI(self.host)
self.initialize_events()
# Wait for client before starting
self.cli_event.wait()
self.METHOD_NAME = threading.Thread(target=self.METHOD_NAME)
self.thread = threading.Thread(target=self.run_gui)
self.METHOD_NAME.start()
self.thread.start()
print("GUI Process Started!")
self.exit_signal.wait()
# The measuring thread to measure frequency
def METHOD_NAME(self):
previous_time = datetime.now()
while(True):
# Sleep for 2 seconds
time.sleep(2)
# Measure the current time and subtract from previous time to get real time interval
current_time = datetime.now()
dt = current_time - previous_time
ms = (dt.days * 24 * 60 * 60 + dt.seconds) * 1000 + dt.microseconds / 1000.0
previous_time = current_time
# Get the time period
try:
                # Guard against division by zero when no iterations have run
                self.ideal_cycle.add(ms / self.iteration_counter)
            except ZeroDivisionError:
self.ideal_cycle.add(0)
# Reset the counter
self.iteration_counter = 0
# The main thread of execution
def run_gui(self):
while(True):
start_time = datetime.now()
# Send update signal
self.gui.update_gui()
            # Wait for acknowledge signal
self.ack_event.wait()
self.ack_event.clear()
finish_time = datetime.now()
self.iteration_counter = self.iteration_counter + 1
dt = finish_time - start_time
ms = (dt.days * 24 * 60 * 60 + dt.seconds) * 1000 + dt.microseconds / 1000.0
time_cycle = self.time_cycle.get()
if(ms < time_cycle):
time.sleep((time_cycle-ms) / 1000.0)
self.exit_signal.set()
    # Functions to handle auxiliary GUI functions
    def reset_gui(self):
self.gui.reset_gui()
if __name__ == "__main__":
gui = ProcessGUI()
gui.start()
|
2,119 |
test unknown keyword without messages
|
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/pylint-dev/pylint/blob/main/LICENSE
# Copyright (c) https://github.com/pylint-dev/pylint/blob/main/CONTRIBUTORS.txt
import pytest
from pylint.utils.pragma_parser import (
OPTION_PO,
InvalidPragmaError,
UnRecognizedOptionError,
parse_pragma,
)
def test_simple_pragma() -> None:
comment = "#pylint: disable = missing-docstring"
match = OPTION_PO.search(comment)
assert match
for pragma_repr in parse_pragma(match.group(2)):
assert pragma_repr.action == "disable"
assert pragma_repr.messages == ["missing-docstring"]
def test_disable_checker_with_number_in_name() -> None:
comment = "#pylint: disable = j3-custom-checker"
match = OPTION_PO.search(comment)
assert match
for pragma_repr in parse_pragma(match.group(2)):
assert pragma_repr.action == "disable"
assert pragma_repr.messages == ["j3-custom-checker"]
def test_simple_pragma_no_messages() -> None:
comment = "#pylint: skip-file"
match = OPTION_PO.search(comment)
assert match
for pragma_repr in parse_pragma(match.group(2)):
assert pragma_repr.action == "skip-file"
assert not pragma_repr.messages
def test_simple_pragma_multiple_messages() -> None:
comment = "#pylint: disable = missing-docstring, invalid-name"
match = OPTION_PO.search(comment)
assert match
for pragma_repr in parse_pragma(match.group(2)):
assert pragma_repr.action == "disable"
assert pragma_repr.messages == ["missing-docstring", "invalid-name"]
def test_multiple_pragma_multiple_messages() -> None:
comment = "#pylint: disable = missing-docstring, invalid-name, enable = R0202, no-staticmethod-decorator"
match = OPTION_PO.search(comment)
assert match
res = list(parse_pragma(match.group(2)))
assert res[0].action == "disable"
assert res[0].messages == ["missing-docstring", "invalid-name"]
assert res[1].action == "enable"
assert res[1].messages == ["R0202", "no-staticmethod-decorator"]
def test_missing_assignment() -> None:
comment = "#pylint: disable missing-docstring"
match = OPTION_PO.search(comment)
assert match
with pytest.raises(InvalidPragmaError):
list(parse_pragma(match.group(2)))
def test_missing_keyword() -> None:
comment = "#pylint: = missing-docstring"
match = OPTION_PO.search(comment)
assert match
with pytest.raises(InvalidPragmaError):
list(parse_pragma(match.group(2)))
def test_unsupported_assignment() -> None:
comment = "#pylint: disable-all = missing-docstring"
match = OPTION_PO.search(comment)
assert match
with pytest.raises(UnRecognizedOptionError):
list(parse_pragma(match.group(2)))
def test_unknown_keyword_with_messages() -> None:
comment = "#pylint: unknown-keyword = missing-docstring"
match = OPTION_PO.search(comment)
assert match
with pytest.raises(UnRecognizedOptionError):
list(parse_pragma(match.group(2)))
def test_unknown_keyword_with_missing_messages() -> None:
comment = "#pylint: unknown-keyword = "
match = OPTION_PO.search(comment)
assert match
with pytest.raises(UnRecognizedOptionError):
list(parse_pragma(match.group(2)))
def METHOD_NAME() -> None:
comment = "#pylint: unknown-keyword"
match = OPTION_PO.search(comment)
assert match
with pytest.raises(UnRecognizedOptionError):
list(parse_pragma(match.group(2)))
def test_missing_message() -> None:
comment = "#pylint: disable = "
match = OPTION_PO.search(comment)
assert match
with pytest.raises(InvalidPragmaError):
list(parse_pragma(match.group(2)))
def test_parse_message_with_dash() -> None:
comment = "#pylint: disable = raw_input-builtin"
match = OPTION_PO.search(comment)
assert match
res = list(parse_pragma(match.group(2)))
assert res[0].action == "disable"
assert res[0].messages == ["raw_input-builtin"]
|
2,120 |
polynomial regression
|
#!/usr/bin/env python
# This plan contains tests that demonstrate failures as well.
"""
This example shows how to display various data modelling techniques and their
associated statistics in Testplan. The models used are:
* linear regression
* classification
* clustering
"""
import os
import sys
from testplan import test_plan
from testplan.testing.multitest import MultiTest
from testplan.testing.multitest.suite import testsuite, testcase
from testplan.report.testing.styles import Style
from testplan.common.utils.timing import Timer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
import matplotlib
matplotlib.use("agg")
import matplotlib.pyplot as plot
import numpy as np
# Create a Matplotlib scatter plot.
def create_scatter_plot(title, x, y, label, c=None):
plot.scatter(x, y, c=c, label=label)
plot.grid()
plot.xlabel("x")
plot.ylabel("y")
plot.xlim((0, 1))
plot.ylim((-2, 2))
plot.title(title)
# Use the original docstring, formatting
# it using kwargs via string interpolation.
# e.g. `foo: {foo}, bar: {bar}`.format(foo=2, bar=5)` -> 'foo: 2, bar: 5'
def interpolate_docstring(docstring, kwargs):
return docstring.format(**kwargs)
@testsuite
class ModelExamplesSuite:
def setup(self, env, result):
"""
Load the raw data from the CSV file.
Log this data as a table in the report.
"""
# Load the raw cosine data from the CSV file.
self.x, self.y = np.loadtxt(
os.path.join(os.path.dirname(__file__), "cos_data.csv"),
delimiter=",",
unpack=True,
skiprows=1,
)
self.x_test = np.linspace(0, 1, 100)
# Log it to display in the report, this will show the first 5 and last 5
# rows if there are more than 10 rows.
data = [["X", "y"]] + [
[self.x[i], self.y[i]] for i in range(len(self.x))
]
result.table.log(data, description="Raw cosine data")
@testcase(
parameters={"degrees": [2, 3, 4, 5, 10, 15]},
docstring_func=interpolate_docstring,
)
def METHOD_NAME(self, env, result, degrees):
"""
Create and train a polynomial regression function with {degrees} degrees
of freedom. Check if the Mean Square Error (MSE) and time to train the
model are within their thresholds. Display the train data and the model
on a plot.
"""
# This example was based on
# http://scikit-learn.org/stable/auto_examples/model_selection/plot_underfitting_overfitting.html
# Create the pipeline to train a polynomial regression with varying
# degrees of freedom.
polynomial_features = PolynomialFeatures(
degree=degrees, include_bias=False
)
pipeline = Pipeline(
[
("polynomial_features", polynomial_features),
("linear_regression", LinearRegression()),
]
)
# Train the model and record how long this takes.
timer = Timer()
with timer.record("train_model"):
pipeline.fit(self.x[:, np.newaxis], self.y)
scores = cross_val_score(
pipeline,
self.x[:, np.newaxis],
self.y,
scoring="neg_mean_squared_error",
cv=10,
)
# Check the Mean Square Error (MSE) and time to train the model are
# within their thresholds.
result.less(
-scores.mean(),
0.05,
description="Mean Square Error threshold on test data",
)
result.less(
timer["train_model"].elapsed,
1,
description="How long did the model take to train?",
)
# Display the train data and the model on a plot.
create_scatter_plot(
title="{} degrees of freedom model & Train data".format(degrees),
x=self.x,
y=self.y,
label="Samples",
c="black",
)
y_test = pipeline.predict(self.x_test[:, np.newaxis])
plot.plot(self.x_test, y_test, label="Model")
plot.legend(loc="best")
result.matplot(plot)
# Hard-coding `pdf_path` and 'pdf_style' so that the downloadable example gives
# meaningful and presentable output. NOTE: this programmatic arguments passing
# approach will cause Testplan to ignore any command line arguments related to
# that functionality.
@test_plan(
name="Basic Data Modelling Example",
pdf_path=os.path.join(os.path.dirname(__file__), "report.pdf"),
pdf_style=Style(passing="assertion-detail", failing="assertion-detail"),
)
def main(plan):
"""
Testplan decorated main function to add and execute MultiTests.
:return: Testplan result object.
:rtype: :py:class:`~testplan.base.TestplanResult`
"""
model_examples = MultiTest(
name="Model Examples", suites=[ModelExamplesSuite()]
)
plan.add(model_examples)
if __name__ == "__main__":
sys.exit(not main())
|
2,121 |
move rel
|
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 13 16:54:15 2014
@author: David Baddeley
"""
import Pyro.core
import Pyro.naming
import threading
from PYME.misc.computerName import GetComputerName
from PYME.Acquire import eventLog
import time
from .base_piezo import PiezoBase
class piezoOffsetProxy(PiezoBase, Pyro.core.ObjBase):
def __init__(self, basePiezo):
Pyro.core.ObjBase.__init__(self)
self.basePiezo = basePiezo
self.offset = 0
@property
def units_um(self):
return self.basePiezo.units_um
def SetServo(self,val = 1):
return self.basePiezo.SetServo(val)
def MoveTo(self, iChannel, fPos, bTimeOut=True):
return self.basePiezo.MoveTo(iChannel, fPos + self.offset, bTimeOut)
def METHOD_NAME(self, iChannel, incr, bTimeOut=True):
return self.basePiezo.METHOD_NAME(iChannel, incr, bTimeOut)
def GetPos(self, iChannel=0):
return self.basePiezo.GetPos(iChannel) - self.offset
def GetTargetPos(self, iChannel=0):
return self.basePiezo.GetTargetPos(iChannel) - self.offset
def GetMin(self,iChan=1):
return self.basePiezo.GetMin(iChan)
def GetMax(self, iChan=1):
return self.basePiezo.GetMax(iChan)
def GetFirmwareVersion(self):
return self.basePiezo.GetFirmwareVersion()
def GetOffset(self):
return self.offset
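    # Clarifying note (added): changing the offset re-issues a move, so the logical,
    # offset-corrected target position seen by callers stays the same while the
    # physical stage shifts by the change in offset.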
def SetOffset(self, val):
p = self.GetTargetPos()
self.offset = val
self.MoveTo(0, p)
def LogShifts(self, dx, dy, dz, active=True):
import wx
#eventLog.logEvent('ShiftMeasure', '%3.4f, %3.4f, %3.4f' % (dx, dy, dz))
wx.CallAfter(eventLog.logEvent, 'ShiftMeasure', '%3.4f, %3.4f, %3.4f' % (dx, dy, dz))
wx.CallAfter(eventLog.logEvent, 'PiezoOffset', '%3.4f, %d' % (self.GetOffset(), active))
def OnTarget(self):
return self.basePiezo.OnTarget()
def LogFocusCorrection(self,offset):
import wx
wx.CallAfter(eventLog.logEvent, 'PiezoOffsetUpdate', '%3.4f' %offset)
# @property
# def lastPos(self):
# return self.basePiezo.lastPos - self.offset
# @lastPos.setter
# def lastPos(self,val):
# self.basePiezo.lastPos = val
class ServerThread(threading.Thread):
def __init__(self, basePiezo):
threading.Thread.__init__(self)
import socket
ip_addr = socket.gethostbyname(socket.gethostname())
compName = GetComputerName()
Pyro.core.initServer()
pname = "%s.Piezo" % compName
try:
from PYME.misc import pyme_zeroconf
ns = pyme_zeroconf.getNS()
except:
ns=Pyro.naming.NameServerLocator().getNS()
if not compName in [n[0] for n in ns.list('')]:
ns.createGroup(compName)
#get rid of any previous instance
try:
ns.unregister(pname)
except Pyro.errors.NamingError:
pass
self.daemon=Pyro.core.Daemon(host = ip_addr)
self.daemon.useNameServer(ns)
#self.piezo = piezoOffsetProxy(basePiezo)
self.piezo = basePiezo
#pname = "%s.Piezo" % compName
uri=self.daemon.connect(self.piezo,pname)
def run(self):
#print 'foo'
#try:
self.daemon.requestLoop()
#finally:
# daemon.shutdown(True)
def cleanup(self):
print('Shutting down Offset Piezo Server')
self.daemon.shutdown(True)
def getClient(compName = GetComputerName()):
#try:
from PYME.misc import pyme_zeroconf
ns = pyme_zeroconf.getNS()
time.sleep(3)
#print ns.list()
URI = ns.resolve('%s.Piezo' % compName)
#except:
# URI ='PYRONAME://%s.Piezo'%compName
#print URI
return Pyro.core.getProxyForURI(URI)
def main():
"""For testing only"""
from PYME.Acquire.Hardware.Simulator import fakePiezo
bp = fakePiezo.FakePiezo(100)
st = ServerThread(bp)
#print 'foo'
st.start()
st.join()
#st.run()
#st.daemon.requestLoop()
#print 'bar'
if __name__ == '__main__':
    main()
|
2,122 |
reset
|
from __future__ import annotations
import math
from river import base, stats
class HDDM_A(base.BinaryDriftAndWarningDetector):
"""Drift Detection Method based on Hoeffding's bounds with moving average-test.
    HDDM_A is a drift detection method based on Hoeffding's inequality, using
    the input average as the estimator.
**Input:** `x` is an entry in a stream of bits, where 1 indicates error/failure and 0
represents correct/normal values.
For example, if a classifier's prediction $y'$ is right or wrong w.r.t. the
true target label $y$:
- 0: Correct, $y=y'$
- 1: Error, $y \\neq y'$
*Implementation based on MOA.*
Parameters
----------
drift_confidence
Confidence to the drift
warning_confidence
Confidence to the warning
two_sided_test
If `True`, will monitor error increments and decrements (two-sided). By default will only
monitor increments (one-sided).
Examples
--------
>>> import random
>>> from river import drift
>>> rng = random.Random(42)
>>> hddm_a = drift.binary.HDDM_A()
>>> # Simulate a data stream where the first 1000 instances come from a uniform distribution
>>> # of 1's and 0's
>>> data_stream = rng.choices([0, 1], k=1000)
>>> # Increase the probability of 1's appearing in the next 1000 instances
>>> data_stream = data_stream + rng.choices([0, 1], k=1000, weights=[0.3, 0.7])
>>> print_warning = True
>>> # Update drift detector and verify if change is detected
>>> for i, x in enumerate(data_stream):
... _ = hddm_a.update(x)
... if hddm_a.warning_detected and print_warning:
... print(f"Warning detected at index {i}")
... print_warning = False
... if hddm_a.drift_detected:
... print(f"Change detected at index {i}")
... print_warning = True
Warning detected at index 451
Change detected at index 1206
References
----------
[^1]: Frías-Blanco I, del Campo-Ávila J, Ramos-Jimenez G, et al. Online and non-parametric drift detection
methods based on Hoeffding's bounds. IEEE Transactions on Knowledge and Data Engineering, 2014, 27(3): 810-823.
[^2]: Albert Bifet, Geoff Holmes, Richard Kirkby, Bernhard Pfahringer. MOA: Massive Online Analysis; Journal
of Machine Learning Research 11: 1601-1604, 2010.
"""
def __init__(self, drift_confidence=0.001, warning_confidence=0.005, two_sided_test=False):
super().__init__()
self.drift_confidence = drift_confidence
self.warning_confidence = warning_confidence
self.two_sided_test = two_sided_test
self.METHOD_NAME()
def METHOD_NAME(self):
super().METHOD_NAME()
# To check if the global mean increased
self._x_min = stats.Mean()
# To check if the global mean decreased
self._x_max = stats.Mean()
# Global mean
self._z = stats.Mean()
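    # Clarifying note (added): Hoeffding's inequality bounds the deviation of a mean
    # of n samples in [0, 1] by eps = sqrt(ln(1 / confidence) / (2 * n)), which is
    # exactly what the helper below computes.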
def _hoeffding_bound(self, n):
return math.sqrt(1.0 / (2 * n) * math.log(1.0 / self.drift_confidence))
def update(self, x):
"""Update the change detector with a single data point.
Parameters
----------
        x
            This parameter indicates whether the last sample analyzed was correctly
            classified or not. 1 indicates an error (misclassification).
Returns
-------
self
"""
if self.drift_detected:
self.METHOD_NAME()
self._z.update(x)
if self._x_min.n == 0:
self._x_min = self._z.clone(include_attributes=True)
if self._x_max.n == 0:
self._x_max = self._z.clone(include_attributes=True)
# Bound the data
eps_z = self._hoeffding_bound(self._z.n)
eps_x = self._hoeffding_bound(self._x_min.n)
# Update the cut point for tracking mean increase
if self._x_min.get() + eps_x >= self._z.get() + eps_z:
self._x_min = self._z.clone(include_attributes=True)
eps_x = self._hoeffding_bound(self._x_max.n)
# Update the cut point for tracking mean decrease
if self._x_max.get() - eps_x <= self._z.get() - eps_z:
self._x_max = self._z.clone(include_attributes=True)
if self._mean_incr(self.drift_confidence):
self._warning_detected = False
self._drift_detected = True
elif self._mean_incr(self.warning_confidence):
self._warning_detected = True
self._drift_detected = False
else:
self._warning_detected = False
self._drift_detected = False
if self.two_sided_test:
if self._mean_decr(self.drift_confidence):
self._drift_detected = True
elif self._mean_decr(self.warning_confidence):
self._warning_detected = True
return self
# Check if the global mean increased
def _mean_incr(self, confidence: float):
if self._x_min.n == self._z.n:
return False
m = (self._z.n - self._x_min.n) / self._x_min.n * (1.0 / self._z.n)
eps = math.sqrt(m / 2 * math.log(2.0 / confidence))
return self._z.get() - self._x_min.get() >= eps
# Check if the global mean decreased
def _mean_decr(self, confidence: float):
if self._x_max.n == self._z.n:
return False
m = (self._z.n - self._x_max.n) / self._x_max.n * (1.0 / self._z.n)
eps = math.sqrt(m / 2 * math.log(2.0 / confidence))
return self._x_max.get() - self._z.get() >= eps
|
2,123 |
username
|
"""Utilities for views"""
from __future__ import annotations
import json
import os
import subprocess
from flask import render_template, request, session, current_app
from flask_security.core import current_user
from flexmeasures import __version__ as flexmeasures_version
from flexmeasures.auth.policy import user_has_admin_access
from flexmeasures.utils import time_utils
from flexmeasures.ui import flexmeasures_ui
from flexmeasures.data.models.user import User, Account
from flexmeasures.ui.utils.chart_defaults import chart_options
def render_flexmeasures_template(html_filename: str, **variables):
"""Render template and add all expected template variables, plus the ones given as **variables."""
variables["flask_env"] = current_app.env
variables["documentation_exists"] = False
if os.path.exists(
"%s/static/documentation/html/index.html" % flexmeasures_ui.root_path
):
variables["documentation_exists"] = True
variables["show_queues"] = False
if current_user.is_authenticated:
if (
user_has_admin_access(current_user, "update")
or current_app.config.get("FLEXMEASURES_MODE", "") == "demo"
):
variables["show_queues"] = True
variables["event_starts_after"] = session.get("event_starts_after")
variables["event_ends_before"] = session.get("event_ends_before")
variables["chart_type"] = session.get("chart_type", "bar_chart")
variables["page"] = html_filename.split("/")[-1].replace(".html", "")
variables["resolution"] = session.get("resolution", "")
variables["resolution_human"] = time_utils.freq_label_to_human_readable_label(
session.get("resolution", "")
)
variables["horizon_human"] = time_utils.freq_label_to_human_readable_label(
session.get("forecast_horizon", "")
)
variables["flexmeasures_version"] = flexmeasures_version
(
variables["git_version"],
variables["git_commits_since"],
variables["git_hash"],
) = get_git_description()
app_start_time = current_app.config.get("START_TIME")
variables["app_running_since"] = time_utils.naturalized_datetime_str(app_start_time)
variables["loaded_plugins"] = ", ".join(
f"{p_name} (v{p_version})"
for p_name, p_version in current_app.config.get("LOADED_PLUGINS", {}).items()
)
variables["user_is_logged_in"] = current_user.is_authenticated
variables["user_is_admin"] = user_has_admin_access(current_user, "update")
variables["user_has_admin_reader_rights"] = user_has_admin_access(
current_user, "read"
)
variables[
"user_is_anonymous"
] = current_user.is_authenticated and current_user.has_role("anonymous")
variables["user_email"] = current_user.is_authenticated and current_user.email or ""
variables["user_name"] = (
current_user.is_authenticated and current_user.METHOD_NAME or ""
)
variables["js_versions"] = current_app.config.get("FLEXMEASURES_JS_VERSIONS")
variables["chart_options"] = json.dumps(chart_options)
variables["menu_logo"] = current_app.config.get("FLEXMEASURES_MENU_LOGO_PATH")
variables["extra_css"] = current_app.config.get("FLEXMEASURES_EXTRA_CSS_PATH")
return render_template(html_filename, **variables)
def clear_session():
for skey in [
k
for k in session.keys()
if k not in ("_fresh", "_id", "_user_id", "csrf_token", "fs_cc", "fs_paa")
]:
current_app.logger.info(
"Removing %s:%s from session ... " % (skey, session[skey])
)
del session[skey]
def set_session_variables(*var_names: str):
"""Store request values as session variables, for a consistent UX across UI page loads.
>>> set_session_variables("event_starts_after", "event_ends_before", "chart_type")
"""
for var_name in var_names:
var = request.values.get(var_name)
if var is not None:
session[var_name] = var
def get_git_description() -> tuple[str, int, str]:
"""
Get information about the SCM (git) state if possible (if a .git directory exists).
Returns the latest git version (tag) as a string, the number of commits since then as an int and the
current commit hash as string.
"""
def _minimal_ext_cmd(cmd: list):
# construct minimal environment
env = {}
for k in ["SYSTEMROOT", "PATH"]:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env["LANGUAGE"] = "C"
env["LANG"] = "C"
env["LC_ALL"] = "C"
return subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env).communicate()[0]
version = "Unknown"
commits_since = 0
sha = "Unknown"
path_to_flexmeasures_root = os.path.join(
os.path.dirname(__file__), "..", "..", ".."
)
if os.path.exists(os.path.join(path_to_flexmeasures_root, ".git")):
commands = ["git", "describe", "--always", "--long"]
try:
git_output = _minimal_ext_cmd(commands)
components = git_output.strip().decode("ascii").split("-")
if not (len(components) == 1 and components[0] == ""):
sha = components.pop()
if len(components) > 0:
commits_since = int(components.pop())
version = "-".join(components)
except OSError as ose:
current_app.logger.warning("Problem when reading git describe: %s" % ose)
return version, commits_since, sha
def asset_icon_name(asset_type_name: str) -> str:
"""Icon name for this asset type.
This can be used for UI html templates made with Jinja.
ui.__init__ makes this function available as the filter "asset_icon".
For example:
<i class={{ asset_type.name | asset_icon }}></i>
becomes (for a battery):
<i class="icon-battery"></i>
"""
# power asset exceptions
if "evse" in asset_type_name.lower():
return "icon-charging_station"
# weather exceptions
if asset_type_name == "irradiance":
return "wi wi-horizon-alt"
elif asset_type_name == "temperature":
return "wi wi-thermometer"
elif asset_type_name == "wind direction":
return "wi wi-wind-direction"
elif asset_type_name == "wind speed":
return "wi wi-strong-wind"
# aggregation exceptions
elif asset_type_name == "renewables":
return "icon-wind"
return f"icon-{asset_type_name}"
def METHOD_NAME(user_id) -> str:
user = User.query.get(user_id)
if user is None:
current_app.logger.warning(f"Could not find user with id {user_id}")
return ""
else:
return user.METHOD_NAME
def accountname(account_id) -> str:
account = Account.query.get(account_id)
if account is None:
current_app.logger.warning(f"Could not find account with id {account_id}")
return ""
else:
return account.name
|
2,124 |
test unknown browser default
|
#
# This file is part of pretix (Community Edition).
#
# Copyright (C) 2014-2020 Raphael Michel and contributors
# Copyright (C) 2020-2021 rami.io GmbH and contributors
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation in version 3 of the License.
#
# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are
# applicable granting you additional permissions and placing additional restrictions on your usage of this software.
# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive
# this file, see <https://pretix.eu/about/en/license>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <https://www.gnu.org/licenses/>.
#
from django.conf import settings
from django.test import Client, TestCase
from django.utils.timezone import now
from pretix.base.models import Event, Organizer, User
class LocaleDeterminationTest(TestCase):
"""
This test case tests various methods around the properties /
variations concept.
"""
def setUp(self):
o = Organizer.objects.create(name='Dummy', slug='dummy')
self.event = Event.objects.create(
organizer=o, name='Dummy', slug='dummy',
date_from=now(), live=True
)
self.TEST_LOCALE = 'de' if settings.LANGUAGE_CODE == 'en' else 'en'
self.TEST_LOCALE_LONG = 'de-AT' if settings.LANGUAGE_CODE == 'en' else 'en-NZ'
self.user = User.objects.create_user('[email protected]', 'dummy')
def test_global_default(self):
c = Client()
response = c.get('/control/login')
language = response['Content-Language']
self.assertEqual(language, settings.LANGUAGE_CODE)
def test_browser_default(self):
c = Client(HTTP_ACCEPT_LANGUAGE=self.TEST_LOCALE)
response = c.get('/control/login')
language = response['Content-Language']
self.assertEqual(language, self.TEST_LOCALE)
c = Client(HTTP_ACCEPT_LANGUAGE=self.TEST_LOCALE_LONG)
response = c.get('/control/login')
language = response['Content-Language']
self.assertEqual(language, self.TEST_LOCALE)
def METHOD_NAME(self):
c = Client(HTTP_ACCEPT_LANGUAGE='sjn')
response = c.get('/control/login')
language = response['Content-Language']
self.assertEqual(language, settings.LANGUAGE_CODE)
def test_cookie_settings(self):
c = Client()
cookies = c.cookies
cookies[settings.LANGUAGE_COOKIE_NAME] = self.TEST_LOCALE
response = c.get('/control/login')
language = response['Content-Language']
self.assertEqual(language, self.TEST_LOCALE)
cookies[settings.LANGUAGE_COOKIE_NAME] = self.TEST_LOCALE_LONG
response = c.get('/control/login')
language = response['Content-Language']
self.assertEqual(language, self.TEST_LOCALE)
def test_user_settings(self):
c = Client()
self.user.locale = self.TEST_LOCALE
self.user.save()
response = c.post('/control/login', {
'email': '[email protected]',
'password': 'dummy',
})
self.assertEqual(response.status_code, 302)
response = c.get('/control/login')
language = response['Content-Language']
self.assertEqual(language, self.TEST_LOCALE)
def test_event_allowed(self):
self.event.settings.set('locales', ['de', 'en'])
c = Client()
cookies = c.cookies
cookies[settings.LANGUAGE_COOKIE_NAME] = 'de'
response = c.get('/dummy/dummy/')
language = response['Content-Language']
self.assertEqual(language, 'de')
def test_event_fallback_to_short(self):
self.event.settings.set('locales', ['de'])
c = Client()
cookies = c.cookies
cookies[settings.LANGUAGE_COOKIE_NAME] = 'de-informal'
response = c.get('/dummy/dummy/')
language = response['Content-Language']
self.assertEqual(language, 'de')
def test_event_fallback_to_long(self):
self.event.settings.set('locales', ['de-informal'])
c = Client()
cookies = c.cookies
cookies[settings.LANGUAGE_COOKIE_NAME] = 'de'
response = c.get('/dummy/dummy/')
language = response['Content-Language']
self.assertEqual(language, 'de-informal')
def test_event_not_allowed(self):
self.event.settings.set('locales', ['en'])
c = Client()
cookies = c.cookies
cookies[settings.LANGUAGE_COOKIE_NAME] = 'de'
response = c.get('/dummy/dummy/')
language = response['Content-Language']
self.assertEqual(language, 'en')
|
2,125 |
step list skus
|
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .. import try_manual
# EXAMPLE: /DiskPools/put/Create or Update Disk pool
@try_manual
def step_create(test, checks=None):
if checks is None:
checks = []
test.cmd('az disk-pool create '
'--location "westus" '
'--availability-zones "1" '
'--disks "/subscriptions/{subscription_id}/resourceGroups/{rg}/providers/Microsoft.Compute/disks/vm-name_D'
'ataDisk_0" '
'--disks "/subscriptions/{subscription_id}/resourceGroups/{rg}/providers/Microsoft.Compute/disks/vm-name_D'
'ataDisk_1" '
'--subnet-id "/subscriptions/{subscription_id}/resourceGroups/{rg}/providers/Microsoft.Network/virtualNetw'
'orks/{vn}/subnets/{subnets}" '
'--sku name="Basic_V1" tier="Basic" '
'--tags key="value" '
'--name "{myDiskPool}" '
'--resource-group "{rg}"',
checks=[])
test.cmd('az disk-pool wait --created '
'--name "{myDiskPool}" '
'--resource-group "{rg}"',
checks=checks)
# EXAMPLE: /DiskPools/get/Get Disk pool
@try_manual
def step_show(test, checks=None):
if checks is None:
checks = []
test.cmd('az disk-pool show '
'--name "{myDiskPool}" '
'--resource-group "{rg}"',
checks=checks)
# EXAMPLE: /DiskPools/get/Get Disk Pool outbound network dependencies
@try_manual
def step_list_outbound_network_dependency_endpoint(test, checks=None):
if checks is None:
checks = []
test.cmd('az disk-pool list-outbound-network-dependency-endpoint '
'--name "{myDiskPool2}" '
'--resource-group "{rg_2}"',
checks=checks)
# EXAMPLE: /DiskPools/get/List Disk Pools
@try_manual
def step_list(test, checks=None):
if checks is None:
checks = []
test.cmd('az disk-pool list '
'--resource-group "{rg}"',
checks=checks)
# EXAMPLE: /DiskPools/get/List Disk Pools by subscription
@try_manual
def step_list2(test, checks=None):
if checks is None:
checks = []
test.cmd('az disk-pool list '
'-g ""',
checks=checks)
# EXAMPLE: /DiskPools/patch/Update Disk pool
@try_manual
def step_update(test, checks=None):
if checks is None:
checks = []
test.cmd('az disk-pool update '
'--name "{myDiskPool}" '
'--disks "/subscriptions/{subscription_id}/resourceGroups/{rg}/providers/Microsoft.Compute/disks/vm-name_D'
'ataDisk_0" '
'--disks "/subscriptions/{subscription_id}/resourceGroups/{rg}/providers/Microsoft.Compute/disks/vm-name_D'
'ataDisk_1" '
'--sku name="Basic_B1" tier="Basic" '
'--tags key="value" '
'--resource-group "{rg}"',
checks=checks)
# EXAMPLE: /DiskPools/post/Deallocate Disk Pool
@try_manual
def step_stop(test, checks=None):
if checks is None:
checks = []
test.cmd('az disk-pool stop '
'--name "{myDiskPool}" '
'--resource-group "{rg}"',
checks=checks)
# EXAMPLE: /DiskPools/post/Start Disk Pool
@try_manual
def step_start(test, checks=None):
if checks is None:
checks = []
test.cmd('az disk-pool start '
'--name "{myDiskPool}" '
'--resource-group "{rg}"',
checks=checks)
# EXAMPLE: /DiskPools/post/Upgrade Disk Pool
@try_manual
def step_redeploy(test, checks=None):
if checks is None:
checks = []
test.cmd('az disk-pool redeploy '
'--name "{myDiskPool}" '
'--resource-group "{rg}"',
checks=checks)
# EXAMPLE: /DiskPools/delete/Delete Disk pool
@try_manual
def step_delete(test, checks=None):
if checks is None:
checks = []
test.cmd('az disk-pool delete -y '
'--name "{myDiskPool}" '
'--resource-group "{rg}"',
checks=checks)
# EXAMPLE: /DiskPoolZones/get/List Disk Pool Skus
@try_manual
def METHOD_NAME(test, checks=None):
if checks is None:
checks = []
test.cmd('az disk-pool list-skus '
'--location "eastus"',
checks=checks)
# EXAMPLE: /DiskPoolZones/get/List Disk Pool Zones
@try_manual
def step_list_zones(test, checks=None):
if checks is None:
checks = []
test.cmd('az disk-pool list-zones '
'--location "eastus"',
checks=checks)
# EXAMPLE: /IscsiTargets/put/Create or Update iSCSI Target
@try_manual
def step_iscsi_target_create(test, checks=None):
if checks is None:
checks = []
test.cmd('az disk-pool iscsi-target create '
'--disk-pool-name "{myDiskPool}" '
'--acl-mode "Dynamic" '
'--luns name="lun0" managed-disk-azure-resource-id="/subscriptions/{subscription_id}/resourceGroups/{rg}/p'
'roviders/Microsoft.Compute/disks/vm-name_DataDisk_1" '
'--target-iqn "iqn.2005-03.org.iscsi:server1" '
'--name "{myIscsiTarget}" '
'--resource-group "{rg}"',
checks=[])
test.cmd('az disk-pool iscsi-target wait --created '
'--disk-pool-name "{myDiskPool}" '
'--name "{myIscsiTarget}" '
'--resource-group "{rg}"',
checks=checks)
# EXAMPLE: /IscsiTargets/get/Get iSCSI Target
@try_manual
def step_iscsi_target_show(test, checks=None):
if checks is None:
checks = []
test.cmd('az disk-pool iscsi-target show '
'--disk-pool-name "{myDiskPool}" '
'--name "{myIscsiTarget}" '
'--resource-group "{rg}"',
checks=checks)
# EXAMPLE: /IscsiTargets/get/List Disk Pools by Resource Group
@try_manual
def step_iscsi_target_list(test, checks=None):
if checks is None:
checks = []
test.cmd('az disk-pool iscsi-target list '
'--disk-pool-name "{myDiskPool}" '
'--resource-group "{rg}"',
checks=checks)
# EXAMPLE: /IscsiTargets/patch/Update iSCSI Target
@try_manual
def step_iscsi_target_update(test, checks=None):
if checks is None:
checks = []
test.cmd('az disk-pool iscsi-target update '
'--disk-pool-name "{myDiskPool}" '
'--name "{myIscsiTarget}" '
'--luns name="lun0" managed-disk-azure-resource-id="/subscriptions/{subscription_id}/resourceGroups/{rg}/p'
'roviders/Microsoft.Compute/disks/vm-name_DataDisk_1" '
'--static-acls initiator-iqn="iqn.2005-03.org.iscsi:client" mapped-luns="lun0" '
'--resource-group "{rg}"',
checks=checks)
# EXAMPLE: /IscsiTargets/delete/Delete iSCSI Target
@try_manual
def step_iscsi_target_delete(test, checks=None):
if checks is None:
checks = []
test.cmd('az disk-pool iscsi-target delete -y '
'--disk-pool-name "{myDiskPool}" '
'--name "{myIscsiTarget}" '
'--resource-group "{rg}"',
checks=checks)
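# A hedged sketch of how the steps above are usually chained by the generated
# scenario driver; the function name call_scenario and the exact ordering are
# assumptions and not part of this generated file.
@try_manual
def call_scenario(test):
    step_create(test)
    step_show(test)
    step_list(test)
    step_list2(test)
    METHOD_NAME(test)  # list SKUs for the region
    step_list_zones(test)
    step_update(test)
    step_iscsi_target_create(test)
    step_iscsi_target_show(test)
    step_iscsi_target_list(test)
    step_iscsi_target_update(test)
    step_iscsi_target_delete(test)
    step_stop(test)
    step_start(test)
    step_redeploy(test)
    step_delete(test)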
|
2,126 |
make model
|
# Owner(s): ["module: unknown"]
from torch import nn
from torch.ao.pruning import WeightNormSparsifier
from torch.ao.pruning import BaseScheduler, LambdaSL, CubicSL
from torch.testing._internal.common_utils import TestCase
import warnings
class ImplementedScheduler(BaseScheduler):
def get_sl(self):
if self.last_epoch > 0:
return [group['sparsity_level'] * 0.5
for group in self.sparsifier.groups]
else:
return list(self.base_sl)
class TestScheduler(TestCase):
def test_constructor(self):
model = nn.Sequential(
nn.Linear(16, 16)
)
sparsifier = WeightNormSparsifier()
sparsifier.prepare(model, config=None)
scheduler = ImplementedScheduler(sparsifier)
assert scheduler.sparsifier is sparsifier
assert scheduler._step_count == 1
assert scheduler.base_sl == [sparsifier.groups[0]['sparsity_level']]
def test_order_of_steps(self):
"""Checks if the warning is thrown if the scheduler step is called
before the sparsifier step"""
model = nn.Sequential(
nn.Linear(16, 16)
)
sparsifier = WeightNormSparsifier()
sparsifier.prepare(model, config=None)
scheduler = ImplementedScheduler(sparsifier)
# Sparsifier step is not called
with self.assertWarns(UserWarning):
scheduler.step()
# Correct order has no warnings
# Note: This will trigger if other warnings are present.
with warnings.catch_warnings(record=True) as w:
sparsifier.step()
scheduler.step()
# Make sure there is no warning related to the base_scheduler
for warning in w:
fname = warning.filename
fname = '/'.join(fname.split('/')[-5:])
assert fname != 'torch/ao/sparsity/scheduler/base_scheduler.py'
def test_step(self):
model = nn.Sequential(
nn.Linear(16, 16)
)
sparsifier = WeightNormSparsifier()
sparsifier.prepare(model, config=None)
assert sparsifier.groups[0]['sparsity_level'] == 0.5
scheduler = ImplementedScheduler(sparsifier)
assert sparsifier.groups[0]['sparsity_level'] == 0.5
sparsifier.step()
scheduler.step()
assert sparsifier.groups[0]['sparsity_level'] == 0.25
def test_lambda_scheduler(self):
model = nn.Sequential(
nn.Linear(16, 16)
)
sparsifier = WeightNormSparsifier()
sparsifier.prepare(model, config=None)
assert sparsifier.groups[0]['sparsity_level'] == 0.5
scheduler = LambdaSL(sparsifier, lambda epoch: epoch * 10)
assert sparsifier.groups[0]['sparsity_level'] == 0.0 # Epoch 0
scheduler.step()
assert sparsifier.groups[0]['sparsity_level'] == 5.0 # Epoch 1
class TestCubicScheduler(TestCase):
def setUp(self):
self.model_sparse_config = [
{'tensor_fqn': '0.weight', 'sparsity_level': 0.8},
{'tensor_fqn': '2.weight', 'sparsity_level': 0.4},
]
self.sorted_sparse_levels = [conf['sparsity_level'] for conf in self.model_sparse_config]
self.initial_sparsity = 0.1
self.initial_step = 3
def METHOD_NAME(self, **kwargs):
model = nn.Sequential(
nn.Linear(13, 17),
nn.Dropout(0.5),
nn.Linear(17, 3),
)
return model
def _make_scheduler(self, model, **kwargs):
sparsifier = WeightNormSparsifier()
sparsifier.prepare(model, config=self.model_sparse_config)
scheduler_args = {
'init_sl': self.initial_sparsity,
'init_t': self.initial_step,
}
scheduler_args.update(kwargs)
scheduler = CubicSL(sparsifier, **scheduler_args)
return sparsifier, scheduler
@staticmethod
def _get_sparsity_levels(sparsifier, precision=32):
r"""Gets the current levels of sparsity in a sparsifier."""
return [round(group['sparsity_level'], precision) for group in sparsifier.groups]
def test_constructor(self):
model = self.METHOD_NAME()
sparsifier, scheduler = self._make_scheduler(model=model, initially_zero=True)
self.assertIs(
scheduler.sparsifier, sparsifier,
msg="Sparsifier is not properly attached")
self.assertEqual(
scheduler._step_count, 1,
msg="Scheduler is initialized with incorrect step count")
self.assertEqual(
scheduler.base_sl, self.sorted_sparse_levels,
msg="Scheduler did not store the target sparsity levels correctly")
# Value before t_0 is 0
self.assertEqual(
self._get_sparsity_levels(sparsifier), scheduler._make_sure_a_list(0.0),
msg="Sparsifier is not reset correctly after attaching to the Scheduler")
# Value before t_0 is s_0
model = self.METHOD_NAME()
sparsifier, scheduler = self._make_scheduler(model=model, initially_zero=False)
self.assertEqual(
self._get_sparsity_levels(sparsifier),
scheduler._make_sure_a_list(self.initial_sparsity),
msg="Sparsifier is not reset correctly after attaching to the Scheduler")
def test_step(self):
        # For n=5 (total_t) and dt=2 (delta_t), there will be 10 steps in total between s_0 and s_f, starting from t_0
model = self.METHOD_NAME()
sparsifier, scheduler = self._make_scheduler(
model=model, initially_zero=True, init_t=3, delta_t=2, total_t=5)
scheduler.step()
scheduler.step()
self.assertEqual(scheduler._step_count, 3, msg="Scheduler step_count is expected to increment")
# Value before t_0 is supposed to be 0
self.assertEqual(
self._get_sparsity_levels(sparsifier), scheduler._make_sure_a_list(0.0),
msg="Scheduler step updating the sparsity level before t_0")
scheduler.step() # Step = 3 => sparsity = initial_sparsity
self.assertEqual(
self._get_sparsity_levels(sparsifier), scheduler._make_sure_a_list(self.initial_sparsity),
msg="Sparsifier is not reset to initial sparsity at the first step")
scheduler.step() # Step = 4 => sparsity ~ [0.3, 0.2]
self.assertEqual(
self._get_sparsity_levels(sparsifier, 1), [0.3, 0.2],
msg="Sparsity level is not set correctly after the first step")
current_step = scheduler._step_count - scheduler.init_t[0] - 1
more_steps_needed = scheduler.delta_t[0] * scheduler.total_t[0] - current_step
for _ in range(more_steps_needed): # More steps needed to final sparsity level
scheduler.step()
self.assertEqual(
self._get_sparsity_levels(sparsifier), self.sorted_sparse_levels,
msg="Sparsity level is not reaching the target level afer delta_t * n steps ")
|
2,127 |
test get enum
|
# -*- coding: utf-8 -*-
"""Tests for the :class:`aiida.orm.nodes.data.enum.Enum` data plugin."""
import enum
import pytest
from aiida.common import links
from aiida.orm import load_node
from aiida.orm.nodes.data.enum import EnumData
class DummyEnum(enum.Enum):
"""Dummy enum for testing."""
OPTION_A = 'a'
OPTION_B = 'b'
def test_construct():
"""Test the ``EnumData`` constructor."""
instance = DummyEnum.OPTION_A
node = EnumData(instance)
assert isinstance(node, EnumData)
assert not node.is_stored
@pytest.mark.parametrize('value', (None, 'string'))
def test_construct_invalid_type(value):
"""Test the ``EnumData`` constructor raises if object is ``None``."""
with pytest.raises(TypeError, match=r'Got object of type .*, expecting .*.'):
EnumData(value)
def test_load_node():
"""Test loading a stored ``EnumData`` node."""
member = DummyEnum.OPTION_A
node = EnumData(member)
node.store()
loaded = load_node(node.pk)
assert isinstance(loaded, EnumData)
assert loaded.is_stored
def test_name():
"""Test the ``name`` property."""
member = DummyEnum.OPTION_A
node = EnumData(member)
assert node.name == member.name
node.store()
assert node.name == member.name
loaded = load_node(node.pk)
assert loaded.name == member.name
def test_value():
"""Test the ``value`` property."""
member = DummyEnum.OPTION_A
node = EnumData(member)
assert node.value == member.value
node.store()
assert node.value == member.value
loaded = load_node(node.pk)
assert loaded.value == member.value
def METHOD_NAME():
"""Test the ``get_enum`` method."""
member = DummyEnum.OPTION_A
node = EnumData(member)
assert node.get_enum() == DummyEnum
node.store()
assert node.get_enum() == DummyEnum
loaded = load_node(node.pk)
assert loaded.get_enum() == DummyEnum
def test_get_member():
"""Test the ``get_member`` method."""
member = DummyEnum.OPTION_A
node = EnumData(member)
assert node.get_member() == member
node.store()
assert node.get_member() == member
loaded = load_node(node.pk)
assert loaded.get_member() == member
def test_get_member_module_not_importable():
"""Test the ``get_member`` property when the enum cannot be imported from the identifier."""
member = DummyEnum.OPTION_A
node = EnumData(member)
node.base.attributes.set(EnumData.KEY_IDENTIFIER, 'aiida.common.links:NonExistingEnum')
node.store()
loaded = load_node(node.pk)
with pytest.raises(ImportError):
loaded.get_member() # pylint: disable=pointless-statement
def test_get_member_invalid_value(monkeypatch):
"""Test the ``get_member`` method when stored value is no longer valid for the class loaded from the identifier."""
member = links.LinkType.RETURN
node = EnumData(member).store()
class ChangedLinkType(enum.Enum):
"""Change the definition of the :class:`aiida.common.links.LinkType`"""
RETURN = 'different_return'
# And then monkeypatch the :mod:`aiida.common.links` module with the mock enum.
monkeypatch.setattr(links, 'LinkType', ChangedLinkType)
loaded = load_node(node.pk)
with pytest.raises(ValueError, match=r'The stored value `return` is no longer a valid value for the enum `.*`'):
loaded.get_member() # pylint: disable=pointless-statement
def test_eq():
"""Test the ``__eq__`` implementation."""
node_a = EnumData(DummyEnum.OPTION_A)
node_b = EnumData(DummyEnum.OPTION_B)
assert node_a == DummyEnum.OPTION_A
assert node_a != DummyEnum.OPTION_B
assert node_a == node_a # pylint: disable=comparison-with-itself
assert node_a != node_b
assert node_a != DummyEnum.OPTION_A.value
# If the identifier cannot be resolved, the equality should not raise but simply return ``False``.
node_a.base.attributes.set(EnumData.KEY_IDENTIFIER, 'aiida.common.links:NonExistingEnum')
assert node_a != DummyEnum.OPTION_A
# If the value is incorrect for the resolved identifier, the equality should not raise but simply return ``False``.
node_b.base.attributes.set(EnumData.KEY_VALUE, 'c')
assert node_b != DummyEnum.OPTION_B
|
2,128 |
test user can be fetched
|
#!/usr/bin/env python
"""Tests for root API user management calls."""
from absl import app
from grr_api_client import errors as grr_api_errors
from grr_api_client import root as grr_api_root
from grr_response_server import data_store
from grr_response_server.gui import api_integration_test_lib
from grr.test_lib import test_lib
class RootApiUserManagementTest(api_integration_test_lib.RootApiIntegrationTest
):
"""E2E test for root API user management calls."""
def _GetPassword(self, username):
user = data_store.REL_DB.ReadGRRUser(username)
return user.password if user.HasField("password") else None
def testStandardUserIsCorrectlyAdded(self):
user = self.api.root.CreateGrrUser(username="user_foo")
self.assertEqual(user.username, "user_foo")
self.assertEqual(user.data.username, "user_foo")
self.assertEqual(user.data.user_type, user.USER_TYPE_STANDARD)
def testAdminUserIsCorrectlyAdded(self):
user = self.api.root.CreateGrrUser(
username="user_foo", user_type=grr_api_root.GrrUser.USER_TYPE_ADMIN)
self.assertEqual(user.username, "user_foo")
self.assertEqual(user.data.username, "user_foo")
self.assertEqual(user.data.user_type, user.USER_TYPE_ADMIN)
self.assertIsNone(self._GetPassword("user_foo"))
def testStandardUserWithPasswordIsCorrectlyAdded(self):
user = self.api.root.CreateGrrUser(username="user_foo", password="blah")
self.assertEqual(user.username, "user_foo")
self.assertEqual(user.data.username, "user_foo")
self.assertEqual(user.data.user_type, user.USER_TYPE_STANDARD)
password = self._GetPassword("user_foo")
self.assertTrue(password.CheckPassword("blah"))
def testUserModificationWorksCorrectly(self):
user = self.api.root.CreateGrrUser(username="user_foo")
self.assertEqual(user.data.user_type, user.USER_TYPE_STANDARD)
user = user.Modify(user_type=user.USER_TYPE_ADMIN)
self.assertEqual(user.data.user_type, user.USER_TYPE_ADMIN)
user = user.Modify(user_type=user.USER_TYPE_STANDARD)
self.assertEqual(user.data.user_type, user.USER_TYPE_STANDARD)
def testUserPasswordCanBeModified(self):
user = self.api.root.CreateGrrUser(username="user_foo", password="blah")
password = self._GetPassword("user_foo")
self.assertTrue(password.CheckPassword("blah"))
user.Modify(password="ohno")
password = self._GetPassword("user_foo")
self.assertTrue(password.CheckPassword("ohno"))
def testUsersAreCorrectlyListed(self):
for i in range(10):
self.api.root.CreateGrrUser(username="user_%d" % i)
users = sorted(self.api.root.ListGrrUsers(), key=lambda u: u.username)
# skip user that issues the request, which is implicitly created
users = [u for u in users if u.username != self.test_username]
self.assertLen(users, 10)
for i, u in enumerate(users):
self.assertEqual(u.username, "user_%d" % i)
self.assertEqual(u.username, u.data.username)
def METHOD_NAME(self):
self.api.root.CreateGrrUser(
username="user_foo", user_type=grr_api_root.GrrUser.USER_TYPE_ADMIN)
user = self.api.root.GrrUser("user_foo").Get()
self.assertEqual(user.username, "user_foo")
self.assertEqual(user.data.user_type, grr_api_root.GrrUser.USER_TYPE_ADMIN)
def testUserCanBeDeleted(self):
self.api.root.CreateGrrUser(
username="user_foo", user_type=grr_api_root.GrrUser.USER_TYPE_ADMIN)
user = self.api.root.GrrUser("user_foo").Get()
user.Delete()
with self.assertRaises(grr_api_errors.ResourceNotFoundError):
self.api.root.GrrUser("user_foo").Get()
def testCreateUserWithEmail_configOff(self):
with self.assertRaises(grr_api_errors.InvalidArgumentError):
self.api.root.CreateGrrUser(username="user_foo", email="[email protected]")
def testModifyUserSetEmail_configOff(self):
user = self.api.root.CreateGrrUser(username="user_foo")
with self.assertRaises(grr_api_errors.InvalidArgumentError):
user = user.Modify(email="[email protected]")
def testCreateUserWithEmail_configOn(self):
with test_lib.ConfigOverrider({"Email.enable_custom_email_address": True}):
user = self.api.root.CreateGrrUser(
username="user_foo", email="[email protected]")
self.assertEqual(user.data.email, "[email protected]")
def testModifyUserSetEmail_configOn(self):
user = self.api.root.CreateGrrUser(username="user_foo")
with test_lib.ConfigOverrider({"Email.enable_custom_email_address": True}):
user = user.Modify(email="[email protected]")
self.assertEqual(user.data.email, "[email protected]")
def testGetUser_configOff(self):
with test_lib.ConfigOverrider({"Email.enable_custom_email_address": True}):
self.api.root.CreateGrrUser(username="user_foo", email="[email protected]")
user = self.api.root.GrrUser("user_foo").Get()
self.assertEqual(user.data.email, "")
def testGetUser_configOn(self):
with test_lib.ConfigOverrider({"Email.enable_custom_email_address": True}):
self.api.root.CreateGrrUser(username="user_foo", email="[email protected]")
user = self.api.root.GrrUser("user_foo").Get()
self.assertEqual(user.data.email, "[email protected]")
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
app.run(main)
|
2,129 |
test chrono different resolutions
|
import datetime
import pytest
import env # noqa: F401
from pybind11_tests import chrono as m
def test_chrono_system_clock():
# Get the time from both c++ and datetime
date0 = datetime.datetime.today()
date1 = m.test_chrono1()
date2 = datetime.datetime.today()
# The returned value should be a datetime
assert isinstance(date1, datetime.datetime)
# The numbers should vary by a very small amount (time it took to execute)
diff_python = abs(date2 - date0)
diff = abs(date1 - date2)
    # There should never be a difference of days
assert diff.days == 0
# Since datetime.datetime.today() calls time.time(), and on some platforms
# that has 1 second accuracy, we compare this way
assert diff.seconds <= diff_python.seconds
def test_chrono_system_clock_roundtrip():
date1 = datetime.datetime.today()
# Roundtrip the time
date2 = m.test_chrono2(date1)
# The returned value should be a datetime
assert isinstance(date2, datetime.datetime)
# They should be identical (no information lost on roundtrip)
diff = abs(date1 - date2)
assert diff == datetime.timedelta(0)
def test_chrono_system_clock_roundtrip_date():
date1 = datetime.date.today()
# Roundtrip the time
datetime2 = m.test_chrono2(date1)
date2 = datetime2.date()
time2 = datetime2.time()
# The returned value should be a datetime
assert isinstance(datetime2, datetime.datetime)
assert isinstance(date2, datetime.date)
assert isinstance(time2, datetime.time)
# They should be identical (no information lost on roundtrip)
diff = abs(date1 - date2)
assert diff.days == 0
assert diff.seconds == 0
assert diff.microseconds == 0
# Year, Month & Day should be the same after the round trip
assert date1 == date2
# There should be no time information
assert time2.hour == 0
assert time2.minute == 0
assert time2.second == 0
assert time2.microsecond == 0
SKIP_TZ_ENV_ON_WIN = pytest.mark.skipif(
"env.WIN", reason="TZ environment variable only supported on POSIX"
)
@pytest.mark.parametrize(
"time1",
[
datetime.datetime.today().time(),
datetime.time(0, 0, 0),
datetime.time(0, 0, 0, 1),
datetime.time(0, 28, 45, 109827),
datetime.time(0, 59, 59, 999999),
datetime.time(1, 0, 0),
datetime.time(5, 59, 59, 0),
datetime.time(5, 59, 59, 1),
],
)
@pytest.mark.parametrize(
"tz",
[
None,
pytest.param("Europe/Brussels", marks=SKIP_TZ_ENV_ON_WIN),
pytest.param("Asia/Pyongyang", marks=SKIP_TZ_ENV_ON_WIN),
pytest.param("America/New_York", marks=SKIP_TZ_ENV_ON_WIN),
],
)
def test_chrono_system_clock_roundtrip_time(time1, tz, monkeypatch):
if tz is not None:
monkeypatch.setenv("TZ", f"/usr/share/zoneinfo/{tz}")
# Roundtrip the time
datetime2 = m.test_chrono2(time1)
date2 = datetime2.date()
time2 = datetime2.time()
# The returned value should be a datetime
assert isinstance(datetime2, datetime.datetime)
assert isinstance(date2, datetime.date)
assert isinstance(time2, datetime.time)
# Hour, Minute, Second & Microsecond should be the same after the round trip
assert time1 == time2
# There should be no date information (i.e. date = python base date)
assert date2.year == 1970
assert date2.month == 1
assert date2.day == 1
def test_chrono_duration_roundtrip():
# Get the difference between two times (a timedelta)
date1 = datetime.datetime.today()
date2 = datetime.datetime.today()
diff = date2 - date1
# Make sure this is a timedelta
assert isinstance(diff, datetime.timedelta)
cpp_diff = m.test_chrono3(diff)
assert cpp_diff == diff
# Negative timedelta roundtrip
diff = datetime.timedelta(microseconds=-1)
cpp_diff = m.test_chrono3(diff)
assert cpp_diff == diff
def test_chrono_duration_subtraction_equivalence():
date1 = datetime.datetime.today()
date2 = datetime.datetime.today()
diff = date2 - date1
cpp_diff = m.test_chrono4(date2, date1)
assert cpp_diff == diff
def test_chrono_duration_subtraction_equivalence_date():
date1 = datetime.date.today()
date2 = datetime.date.today()
diff = date2 - date1
cpp_diff = m.test_chrono4(date2, date1)
assert cpp_diff == diff
def test_chrono_steady_clock():
time1 = m.test_chrono5()
assert isinstance(time1, datetime.timedelta)
def test_chrono_steady_clock_roundtrip():
time1 = datetime.timedelta(days=10, seconds=10, microseconds=100)
time2 = m.test_chrono6(time1)
assert isinstance(time2, datetime.timedelta)
# They should be identical (no information lost on roundtrip)
assert time1 == time2
def test_floating_point_duration():
# Test using a floating point number in seconds
time = m.test_chrono7(35.525123)
assert isinstance(time, datetime.timedelta)
assert time.seconds == 35
assert 525122 <= time.microseconds <= 525123
diff = m.test_chrono_float_diff(43.789012, 1.123456)
assert diff.seconds == 42
assert 665556 <= diff.microseconds <= 665557
def test_nano_timepoint():
time = datetime.datetime.now()
time1 = m.test_nano_timepoint(time, datetime.timedelta(seconds=60))
assert time1 == time + datetime.timedelta(seconds=60)
def METHOD_NAME():
resolutions = m.different_resolutions()
time = datetime.datetime.now()
resolutions.timestamp_h = time
resolutions.timestamp_m = time
resolutions.timestamp_s = time
resolutions.timestamp_ms = time
resolutions.timestamp_us = time
|
2,130 |
setup
|
# -*- coding: utf-8 -*-
from future import standard_library
standard_library.install_aliases()
from builtins import object
import sys
from requests import Session
import requests
import urllib.request, urllib.parse, urllib.error
from sqlalchemy import Column, Integer, String, ForeignKey
from monitorrent.db import Base, DBSession
from monitorrent.plugins import Topic
from monitorrent.plugin_managers import register_plugin
from monitorrent.utils.soup import get_soup
from monitorrent.utils.bittorrent_ex import Torrent
from monitorrent.plugins.trackers import TrackerPluginBase, WithCredentialsMixin, ExecuteWithHashChangeMixin, LoginResult
from urllib.parse import urlparse, unquote
from phpserialize import loads
PLUGIN_NAME = 'nnmclub.to'
class NnmClubCredentials(Base):
__tablename__ = "nnmclub_credentials"
username = Column(String, primary_key=True)
password = Column(String, primary_key=True)
user_id = Column(String, nullable=True)
sid = Column(String, nullable=True)
class NnmClubTopic(Topic):
__tablename__ = "nnmclub_topics"
id = Column(Integer, ForeignKey('topics.id'), primary_key=True)
hash = Column(String, nullable=True)
__mapper_args__ = {
'polymorphic_identity': PLUGIN_NAME
}
class NnmClubLoginFailedException(Exception):
def __init__(self, code, message):
self.code = code
self.message = message
class NnmClubTracker(object):
tracker_settings = None
tracker_domains = [u'nnmclub.to']
title_headers = [u'torrent :: nnm-club', ' :: nnm-club']
_login_url = u'https://nnmclub.to/forum/login.php'
_profile_page = u"https://nnmclub.to/forum/profile.php?mode=viewprofile&u={}"
def __init__(self, user_id=None, sid=None):
self.user_id = user_id
self.sid = sid
def METHOD_NAME(self, user_id=None, sid=None):
self.user_id = user_id
self.sid = sid
def can_parse_url(self, url):
parsed_url = urlparse(url)
return any([parsed_url.netloc == tracker_domain for tracker_domain in self.tracker_domains])
def parse_url(self, url):
url = self.get_url(url)
if not url or not self.can_parse_url(url):
return None
parsed_url = urlparse(url)
if not parsed_url.path == '/forum/viewtopic.php':
return None
r = requests.get(url, allow_redirects=False, **self.tracker_settings.get_requests_kwargs())
if r.status_code != 200:
return None
soup = get_soup(r.text)
title = soup.title.string.strip()
for title_header in self.title_headers:
if title.lower().endswith(title_header):
title = title[:-len(title_header)].strip()
break
return self._get_title(title)
def login(self, username, password):
s = Session()
data = {"username": username, "password": password, "autologin": "on", "login": "%C2%F5%EE%E4"}
login_result = s.post(self._login_url, data, **self.tracker_settings.get_requests_kwargs())
if login_result.url.startswith(self._login_url):
            # TODO get error info (although it shouldn't contain anything useful)
raise NnmClubLoginFailedException(1, "Invalid login or password")
else:
sid = s.cookies[u'phpbb2mysql_4_sid']
data = s.cookies[u'phpbb2mysql_4_data']
parsed_data = loads(unquote(data).encode('utf-8'))
self.user_id = parsed_data[u'userid'.encode('utf-8')].decode('utf-8')
self.sid = sid
def verify(self):
cookies = self.get_cookies()
if not cookies:
return False
profile_page_url = self._profile_page.format(self.user_id)
profile_page_result = requests.get(profile_page_url, cookies=cookies,
**self.tracker_settings.get_requests_kwargs())
return profile_page_result.url == profile_page_url
def get_cookies(self):
if not self.sid:
return False
return {'phpbb2mysql_4_sid': self.sid}
def get_download_url(self, url):
cookies = self.get_cookies()
page = requests.get(url, cookies=cookies, **self.tracker_settings.get_requests_kwargs())
page_soup = get_soup(page.text, 'html5lib' if sys.platform == 'win32' else None)
anchors = page_soup.find_all("a")
da = list(filter(lambda tag: tag.has_attr('href') and tag.attrs['href'].startswith("download.php?id="),
anchors))
# not a free torrent
if len(da) == 0:
return None
download_url = 'https://' + self.tracker_domains[0] + '/forum/' + da[0].attrs['href']
return download_url
def get_url(self, url):
if not self.can_parse_url(url):
return False
parsed_url = urlparse(url)
parsed_url = parsed_url._replace(netloc=self.tracker_domains[0])
return parsed_url.geturl()
@staticmethod
def _get_title(title):
return {'original_name': title}
class NnmClubPlugin(WithCredentialsMixin, ExecuteWithHashChangeMixin, TrackerPluginBase):
tracker = NnmClubTracker()
topic_class = NnmClubTopic
credentials_class = NnmClubCredentials
topic_form = [{
'type': 'row',
'content': [{
'type': 'text',
'model': 'display_name',
'label': 'Name',
'flex': 100
}]
}]
def login(self):
with DBSession() as db:
cred = db.query(self.credentials_class).first()
if not cred:
return LoginResult.CredentialsNotSpecified
username = cred.username
password = cred.password
if not username or not password:
return LoginResult.CredentialsNotSpecified
try:
self.tracker.login(username, password)
with DBSession() as db:
cred = db.query(self.credentials_class).first()
cred.user_id = self.tracker.user_id
cred.sid = self.tracker.sid
return LoginResult.Ok
except NnmClubLoginFailedException as e:
if e.code == 1:
return LoginResult.IncorrentLoginPassword
return LoginResult.Unknown
except Exception as e:
            # TODO: Log unexpected exception
return LoginResult.Unknown
def verify(self):
with DBSession() as db:
cred = db.query(self.credentials_class).first()
if not cred:
return False
username = cred.username
password = cred.password
if not username or not password or not cred.user_id or not cred.sid:
return False
self.tracker.METHOD_NAME(cred.user_id, cred.sid)
return self.tracker.verify()
def can_parse_url(self, url):
return self.tracker.can_parse_url(url)
def parse_url(self, url):
return self.tracker.parse_url(url)
def _prepare_request(self, topic):
cookies = self.tracker.get_cookies()
request = requests.Request('GET', self.tracker.get_download_url(topic.url), cookies=cookies)
return request.prepare()
register_plugin('tracker', PLUGIN_NAME, NnmClubPlugin())
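# A hedged sketch of driving the tracker directly, outside monitorrent's plugin
# machinery; the topic URL below is a placeholder. Note that parse_url() and
# get_download_url() additionally require tracker.tracker_settings to be set,
# since they forward request kwargs from it.
if __name__ == "__main__":
    tracker = NnmClubTracker()
    url = "https://nnmclub.to/forum/viewtopic.php?t=12345"
    print(tracker.can_parse_url(url))  # True for nnmclub.to URLs
    print(tracker.get_url(url))        # normalized URL on the primary tracker domain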
|
2,131 |
prepare data
|
"""Run as `python un_wpp.py`"""
import glob
import os
import shutil
import tempfile
import time
import zipfile
from pathlib import Path
import requests
from structlog import get_logger
from owid.walden import add_to_catalog
from owid.walden.catalog import Dataset
log = get_logger()
URLS = {
"fertility": [
"https://population.un.org/wpp/Download/Files/1_Indicators%20(Standard)/CSV_FILES/WPP2022_Fertility_by_Age5.zip",
],
"demographics": [
"https://population.un.org/wpp/Download/Files/1_Indicators%20(Standard)/CSV_FILES/WPP2022_Demographic_Indicators_Medium.zip",
"https://population.un.org/wpp/Download/Files/1_Indicators%20(Standard)/CSV_FILES/WPP2022_Demographic_Indicators_OtherVariants.zip",
],
"population": [
"https://population.un.org/wpp/Download/Files/1_Indicators%20(Standard)/CSV_FILES/WPP2022_PopulationBySingleAgeSex_Medium_1950-2021.zip",
"https://population.un.org/wpp/Download/Files/1_Indicators%20(Standard)/CSV_FILES/WPP2022_PopulationBySingleAgeSex_Medium_2022-2100.zip",
"https://population.un.org/wpp/Download/Files/1_Indicators%20(Standard)/CSV_FILES/WPP2022_PopulationBySingleAgeSex_High_2022-2100.zip",
"https://population.un.org/wpp/Download/Files/1_Indicators%20(Standard)/CSV_FILES/WPP2022_PopulationBySingleAgeSex_Low_2022-2100.zip",
"https://population.un.org/wpp/Download/Files/1_Indicators%20(Standard)/CSV_FILES/WPP2022_PopulationBySingleAgeSex_Constant%20fertility_2022-2100.zip",
],
"deaths": [
"https://population.un.org/wpp/Download/Files/1_Indicators%20(Standard)/EXCEL_FILES/4_Mortality/WPP2022_MORT_F01_1_DEATHS_SINGLE_AGE_BOTH_SEXES.xlsx",
"https://population.un.org/wpp/Download/Files/1_Indicators%20(Standard)/EXCEL_FILES/4_Mortality/WPP2022_MORT_F01_2_DEATHS_SINGLE_AGE_MALE.xlsx",
"https://population.un.org/wpp/Download/Files/1_Indicators%20(Standard)/EXCEL_FILES/4_Mortality/WPP2022_MORT_F01_3_DEATHS_SINGLE_AGE_FEMALE.xlsx",
],
"dependency_ratio": [
"https://population.un.org/wpp/Download/Files/1_Indicators%20(Standard)/EXCEL_FILES/2_Population/WPP2022_POP_F07_1_DEPENDENCY_RATIOS_BOTH_SEXES.xlsx",
"https://population.un.org/wpp/Download/Files/1_Indicators%20(Standard)/EXCEL_FILES/2_Population/WPP2022_POP_F07_2_DEPENDENCY_RATIOS_MALE.xlsx",
"https://population.un.org/wpp/Download/Files/1_Indicators%20(Standard)/EXCEL_FILES/2_Population/WPP2022_POP_F07_3_DEPENDENCY_RATIOS_FEMALE.xlsx",
],
}
def download_data(output_dir):
"""Download all data."""
log.info("Downloading data...")
for category, urls in URLS.items():
t0 = time.time()
log.info(category)
for url in urls:
filename = os.path.basename(url)
log.info(f"\t {filename}")
output_path = os.path.join(output_dir, filename)
_download_file(url, output_path)
t = time.time() - t0
log.info(f"{t} seconds")
log.info("---")
def unzip_data(output_dir):
"""Unzip downloaded files (only compressed files)."""
log.info("Unzipping data...")
files = [os.path.join(output_dir, f) for f in os.listdir(output_dir)]
for f in files:
log.info(f)
if f.endswith(".zip"):
_unzip_file(f)
def _download_file(url, output_path):
"""Download individual file."""
response = requests.get(url, stream=True)
with open(output_path, "wb") as f:
for chunk in response.iter_content(chunk_size=1024 * 1024 * 10):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
def _unzip_file(f):
"""Unzip individual file."""
output_dir = os.path.dirname(f)
z = zipfile.ZipFile(f)
z.extractall(output_dir)
def clean_directory(directory):
"""Remove all zip files.
This should be applied after uncompressing files.
"""
log.info("Removing zipped data...")
files = glob.glob(os.path.join(directory, "*.zip"))
for f in files:
os.remove(f)
def compress_directory(directory, output_zip):
"""Compress directory."""
log.info("Zipping data...")
shutil.make_archive("un_wpp", "zip", directory)
return f"{output_zip}.zip"
def METHOD_NAME(directory):
"""Download, unzip, clean and compress all data files.
    Accesses the UN WPP data portal, downloads all necessary files (see `URLS`), and creates a zip archive
    named 'un_wpp.zip' containing all of them.
"""
output_zip = "un_wpp"
download_data(directory)
unzip_data(directory)
clean_directory(directory)
output_file = compress_directory(directory, output_zip)
return output_file
def prepare_metadata():
log.info("Preparing metadata...")
path = Path(__file__).parent / f"{Path(__file__).stem}.meta.yml"
return Dataset.from_yaml(path)
def main():
with tempfile.TemporaryDirectory() as tmp_dir:
metadata = prepare_metadata()
output_file = METHOD_NAME(tmp_dir)
add_to_catalog(metadata, output_file, upload=True, public=True)
if __name__ == "__main__":
main()
|
2,132 |
type
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetMonitoredResourceResult',
'AwaitableGetMonitoredResourceResult',
'get_monitored_resource',
'get_monitored_resource_output',
]
@pulumi.output_type
class GetMonitoredResourceResult:
"""
A monitored resource.
"""
def __init__(__self__, id=None, name=None, properties=None, system_data=None, METHOD_NAME=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", METHOD_NAME)
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. E.g. "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}"
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> 'outputs.MonitoredResourcePropertiesResponse':
"""
The properties of a monitored resource.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Azure Resource Manager metadata containing createdBy and modifiedBy information.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetMonitoredResourceResult(GetMonitoredResourceResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetMonitoredResourceResult(
id=self.id,
name=self.name,
properties=self.properties,
system_data=self.system_data,
METHOD_NAME=self.METHOD_NAME)
def get_monitored_resource(instance_name: Optional[str] = None,
monitored_resource_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetMonitoredResourceResult:
"""
Retrieve the details of the monitored resource.
Azure REST API version: 2023-07-07-preview.
:param str instance_name: Name of the SCOM managed instance.
:param str monitored_resource_name: The monitored resource name.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
"""
__args__ = dict()
__args__['instanceName'] = instance_name
__args__['monitoredResourceName'] = monitored_resource_name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:scom:getMonitoredResource', __args__, opts=opts, typ=GetMonitoredResourceResult).value
return AwaitableGetMonitoredResourceResult(
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
properties=pulumi.get(__ret__, 'properties'),
system_data=pulumi.get(__ret__, 'system_data'),
METHOD_NAME=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_monitored_resource)
def get_monitored_resource_output(instance_name: Optional[pulumi.Input[str]] = None,
monitored_resource_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetMonitoredResourceResult]:
"""
Retrieve the details of the monitored resource.
Azure REST API version: 2023-07-07-preview.
:param str instance_name: Name of the SCOM managed instance.
:param str monitored_resource_name: The monitored resource name.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
"""
...
|
2,133 |
raw dir
|
import os.path as osp
from typing import Callable, List, Optional
import numpy as np
import torch
from torch_geometric.data import InMemoryDataset, download_url
from torch_geometric.io import read_planetoid_data
class Planetoid(InMemoryDataset):
r"""The citation network datasets :obj:`"Cora"`, :obj:`"CiteSeer"` and
:obj:`"PubMed"` from the `"Revisiting Semi-Supervised Learning with Graph
Embeddings" <https://arxiv.org/abs/1603.08861>`_ paper.
Nodes represent documents and edges represent citation links.
Training, validation and test splits are given by binary masks.
Args:
root (str): Root directory where the dataset should be saved.
name (str): The name of the dataset (:obj:`"Cora"`, :obj:`"CiteSeer"`,
:obj:`"PubMed"`).
split (str, optional): The type of dataset split (:obj:`"public"`,
:obj:`"full"`, :obj:`"geom-gcn"`, :obj:`"random"`).
If set to :obj:`"public"`, the split will be the public fixed split
from the `"Revisiting Semi-Supervised Learning with Graph
Embeddings" <https://arxiv.org/abs/1603.08861>`_ paper.
If set to :obj:`"full"`, all nodes except those in the validation
and test sets will be used for training (as in the
`"FastGCN: Fast Learning with Graph Convolutional Networks via
Importance Sampling" <https://arxiv.org/abs/1801.10247>`_ paper).
If set to :obj:`"geom-gcn"`, the 10 public fixed splits from the
`"Geom-GCN: Geometric Graph Convolutional Networks"
<https://openreview.net/forum?id=S1e2agrFvS>`_ paper are given.
If set to :obj:`"random"`, train, validation, and test sets will be
randomly generated, according to :obj:`num_train_per_class`,
:obj:`num_val` and :obj:`num_test`. (default: :obj:`"public"`)
num_train_per_class (int, optional): The number of training samples
per class in case of :obj:`"random"` split. (default: :obj:`20`)
num_val (int, optional): The number of validation samples in case of
:obj:`"random"` split. (default: :obj:`500`)
num_test (int, optional): The number of test samples in case of
:obj:`"random"` split. (default: :obj:`1000`)
transform (callable, optional): A function/transform that takes in an
:obj:`torch_geometric.data.Data` object and returns a transformed
version. The data object will be transformed before every access.
(default: :obj:`None`)
pre_transform (callable, optional): A function/transform that takes in
an :obj:`torch_geometric.data.Data` object and returns a
transformed version. The data object will be transformed before
being saved to disk. (default: :obj:`None`)
**STATS:**
.. list-table::
:widths: 10 10 10 10 10
:header-rows: 1
* - Name
- #nodes
- #edges
- #features
- #classes
* - Cora
- 2,708
- 10,556
- 1,433
- 7
* - CiteSeer
- 3,327
- 9,104
- 3,703
- 6
* - PubMed
- 19,717
- 88,648
- 500
- 3
"""
url = 'https://github.com/kimiyoung/planetoid/raw/master/data'
geom_gcn_url = ('https://raw.githubusercontent.com/graphdml-uiuc-jlu/'
'geom-gcn/master')
def __init__(self, root: str, name: str, split: str = "public",
num_train_per_class: int = 20, num_val: int = 500,
num_test: int = 1000, transform: Optional[Callable] = None,
pre_transform: Optional[Callable] = None):
self.name = name
self.split = split.lower()
assert self.split in ['public', 'full', 'geom-gcn', 'random']
super().__init__(root, transform, pre_transform)
self.load(self.processed_paths[0])
if split == 'full':
data = self.get(0)
data.train_mask.fill_(True)
data.train_mask[data.val_mask | data.test_mask] = False
self.data, self.slices = self.collate([data])
elif split == 'random':
data = self.get(0)
data.train_mask.fill_(False)
for c in range(self.num_classes):
idx = (data.y == c).nonzero(as_tuple=False).view(-1)
idx = idx[torch.randperm(idx.size(0))[:num_train_per_class]]
data.train_mask[idx] = True
remaining = (~data.train_mask).nonzero(as_tuple=False).view(-1)
remaining = remaining[torch.randperm(remaining.size(0))]
data.val_mask.fill_(False)
data.val_mask[remaining[:num_val]] = True
data.test_mask.fill_(False)
data.test_mask[remaining[num_val:num_val + num_test]] = True
self.data, self.slices = self.collate([data])
@property
def METHOD_NAME(self) -> str:
if self.split == 'geom-gcn':
return osp.join(self.root, self.name, 'geom-gcn', 'raw')
return osp.join(self.root, self.name, 'raw')
@property
def processed_dir(self) -> str:
if self.split == 'geom-gcn':
return osp.join(self.root, self.name, 'geom-gcn', 'processed')
return osp.join(self.root, self.name, 'processed')
@property
def raw_file_names(self) -> List[str]:
names = ['x', 'tx', 'allx', 'y', 'ty', 'ally', 'graph', 'test.index']
return [f'ind.{self.name.lower()}.{name}' for name in names]
@property
def processed_file_names(self) -> str:
return 'data.pt'
def download(self):
for name in self.raw_file_names:
download_url(f'{self.url}/{name}', self.METHOD_NAME)
if self.split == 'geom-gcn':
for i in range(10):
url = f'{self.geom_gcn_url}/splits/{self.name.lower()}'
download_url(f'{url}_split_0.6_0.2_{i}.npz', self.METHOD_NAME)
def process(self):
data = read_planetoid_data(self.METHOD_NAME, self.name)
if self.split == 'geom-gcn':
train_masks, val_masks, test_masks = [], [], []
for i in range(10):
name = f'{self.name.lower()}_split_0.6_0.2_{i}.npz'
splits = np.load(osp.join(self.METHOD_NAME, name))
train_masks.append(torch.from_numpy(splits['train_mask']))
val_masks.append(torch.from_numpy(splits['val_mask']))
test_masks.append(torch.from_numpy(splits['test_mask']))
data.train_mask = torch.stack(train_masks, dim=1)
data.val_mask = torch.stack(val_masks, dim=1)
data.test_mask = torch.stack(test_masks, dim=1)
data = data if self.pre_transform is None else self.pre_transform(data)
self.save([data], self.processed_paths[0])
def __repr__(self) -> str:
return f'{self.name}()'
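# A short usage sketch; the cache directory below is a placeholder and any
# writable path works. It loads Cora with the public split and inspects the
# single Data object the dataset wraps.
if __name__ == '__main__':
    dataset = Planetoid(root='/tmp/Planetoid', name='Cora', split='public')
    data = dataset[0]
    print(dataset, data.num_nodes, data.num_edges, dataset.num_classes)
    print(int(data.train_mask.sum()), int(data.val_mask.sum()), int(data.test_mask.sum()))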
|
2,134 |
location
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetConnectionResult',
'AwaitableGetConnectionResult',
'get_connection',
'get_connection_output',
]
@pulumi.output_type
class GetConnectionResult:
"""
API connection
"""
def __init__(__self__, etag=None, id=None, METHOD_NAME=None, name=None, properties=None, tags=None, type=None):
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", METHOD_NAME)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
Resource ETag
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource id
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def METHOD_NAME(self) -> Optional[str]:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> 'outputs.ApiConnectionDefinitionResponseProperties':
return pulumi.get(self, "properties")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type
"""
return pulumi.get(self, "type")
class AwaitableGetConnectionResult(GetConnectionResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetConnectionResult(
etag=self.etag,
id=self.id,
METHOD_NAME=self.METHOD_NAME,
name=self.name,
properties=self.properties,
tags=self.tags,
type=self.type)
def get_connection(connection_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
subscription_id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetConnectionResult:
"""
Get a specific connection
:param str connection_name: Connection name
:param str resource_group_name: The resource group
:param str subscription_id: Subscription Id
"""
__args__ = dict()
__args__['connectionName'] = connection_name
__args__['resourceGroupName'] = resource_group_name
__args__['subscriptionId'] = subscription_id
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:web/v20160601:getConnection', __args__, opts=opts, typ=GetConnectionResult).value
return AwaitableGetConnectionResult(
etag=pulumi.get(__ret__, 'etag'),
id=pulumi.get(__ret__, 'id'),
METHOD_NAME=pulumi.get(__ret__, 'location'),
name=pulumi.get(__ret__, 'name'),
properties=pulumi.get(__ret__, 'properties'),
tags=pulumi.get(__ret__, 'tags'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_connection)
def get_connection_output(connection_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
subscription_id: Optional[pulumi.Input[Optional[str]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetConnectionResult]:
"""
Get a specific connection
:param str connection_name: Connection name
:param str resource_group_name: The resource group
:param str subscription_id: Subscription Id
"""
...
|
2,135 |
samples
|
# Copyright (C) 2022-2023 CVAT.ai Corporation
#
# SPDX-License-Identifier: MIT
from __future__ import annotations
import zipfile
from concurrent.futures import ThreadPoolExecutor
from typing import Sequence
import PIL.Image
import cvat_sdk.core
import cvat_sdk.core.exceptions
import cvat_sdk.models as models
from cvat_sdk.datasets.caching import UpdatePolicy, make_cache_manager
from cvat_sdk.datasets.common import FrameAnnotations, MediaElement, Sample, UnsupportedDatasetError
_NUM_DOWNLOAD_THREADS = 4
class TaskDataset:
"""
Represents a task on a CVAT server as a collection of samples.
Each sample corresponds to one frame in the task, and provides access to
the corresponding annotations and media data. Deleted frames are omitted.
This class caches all data and annotations for the task on the local file system
during construction.
Limitations:
* Only tasks with image (not video) data are supported at the moment.
* Track annotations are currently not accessible.
"""
class _TaskMediaElement(MediaElement):
def __init__(self, dataset: TaskDataset, frame_index: int) -> None:
self._dataset = dataset
self._frame_index = frame_index
def load_image(self) -> PIL.Image.Image:
return self._dataset._load_frame_image(self._frame_index)
def __init__(
self,
client: cvat_sdk.core.Client,
task_id: int,
*,
update_policy: UpdatePolicy = UpdatePolicy.IF_MISSING_OR_STALE,
) -> None:
"""
Creates a dataset corresponding to the task with ID `task_id` on the
server that `client` is connected to.
`update_policy` determines when and if the local cache will be updated.
"""
self._logger = client.logger
cache_manager = make_cache_manager(client, update_policy)
self._task = cache_manager.retrieve_task(task_id)
if not self._task.size or not self._task.data_chunk_size:
raise UnsupportedDatasetError("The task has no data")
if self._task.data_original_chunk_type != "imageset":
raise UnsupportedDatasetError(
f"{self.__class__.__name__} only supports tasks with image chunks;"
f" current chunk type is {self._task.data_original_chunk_type!r}"
)
self._logger.info("Fetching labels...")
self._labels = tuple(self._task.get_labels())
data_meta = cache_manager.ensure_task_model(
self._task.id,
"data_meta.json",
models.DataMetaRead,
self._task.get_meta,
"data metadata",
)
active_frame_indexes = set(range(self._task.size)) - set(data_meta.deleted_frames)
self._logger.info("Downloading chunks...")
self._chunk_dir = cache_manager.chunk_dir(task_id)
self._chunk_dir.mkdir(exist_ok=True, parents=True)
needed_chunks = {index // self._task.data_chunk_size for index in active_frame_indexes}
with ThreadPoolExecutor(_NUM_DOWNLOAD_THREADS) as pool:
def ensure_chunk(chunk_index):
cache_manager.ensure_chunk(self._task, chunk_index)
for _ in pool.map(ensure_chunk, sorted(needed_chunks)):
# just need to loop through all results so that any exceptions are propagated
pass
self._logger.info("All chunks downloaded")
annotations = cache_manager.ensure_task_model(
self._task.id,
"annotations.json",
models.LabeledData,
self._task.get_annotations,
"annotations",
)
self._frame_annotations = {
frame_index: FrameAnnotations() for frame_index in sorted(active_frame_indexes)
}
for tag in annotations.tags:
# Some annotations may belong to deleted frames; skip those.
if tag.frame in self._frame_annotations:
self._frame_annotations[tag.frame].tags.append(tag)
for shape in annotations.shapes:
if shape.frame in self._frame_annotations:
self._frame_annotations[shape.frame].shapes.append(shape)
# TODO: tracks?
self._samples = [
Sample(
frame_index=k,
frame_name=data_meta.frames[k].name,
annotations=v,
media=self._TaskMediaElement(self, k),
)
for k, v in self._frame_annotations.items()
]
@property
def labels(self) -> Sequence[models.ILabel]:
"""
Returns the labels configured in the task.
Clients must not modify the object returned by this property or its components.
"""
return self._labels
@property
def METHOD_NAME(self) -> Sequence[Sample]:
"""
Returns a sequence of all samples, in order of their frame indices.
Note that the frame indices may not be contiguous, as deleted frames will not be included.
Clients must not modify the object returned by this property or its components.
"""
return self._samples
    def _load_frame_image(self, frame_index: int) -> PIL.Image.Image:
assert frame_index in self._frame_annotations
chunk_index = frame_index // self._task.data_chunk_size
member_index = frame_index % self._task.data_chunk_size
with zipfile.ZipFile(self._chunk_dir / f"{chunk_index}.zip", "r") as chunk_zip:
with chunk_zip.open(chunk_zip.infolist()[member_index]) as chunk_member:
image = PIL.Image.open(chunk_member)
image.load()
return image
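# --- Usage sketch (illustrative, not part of the SDK) ---
# Assumes a reachable CVAT server, valid credentials, and an existing image
# task with ID 42; all three values are placeholders.
if __name__ == "__main__":
    from cvat_sdk import make_client

    with make_client(host="https://app.cvat.ai", credentials=("user", "password")) as client:
        dataset = TaskDataset(client, 42)
        print("labels:", [label.name for label in dataset.labels])
        for sample in dataset.METHOD_NAME:  # the sample collection defined above
            image = sample.media.load_image()
            print(sample.frame_index, sample.frame_name, image.size, len(sample.annotations.shapes))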
|
2,136 |
as expr
|
"""Implementation of :class:`ExpressionDomain` class."""
from ..core.sympify import SympifyError, sympify
from .characteristiczero import CharacteristicZero
from .field import Field
from .simpledomain import SimpleDomain
class ExpressionDomain(CharacteristicZero, SimpleDomain, Field):
"""A class for arbitrary expressions."""
is_ExpressionDomain = True
class Expression:
"""A class for elements of :class:`ExpressionDomain`."""
def __init__(self, ex):
"""Initialize self."""
if not isinstance(ex, self.__class__):
self.ex = sympify(ex)
else:
self.ex = ex.ex
def __str__(self):
return f'EX({self.ex})'
def __hash__(self):
return hash((self.__class__.__name__, self.ex))
def METHOD_NAME(self):
return self.ex
@property
def numerator(self):
return self.__class__(self.ex.as_numer_denom()[0])
@property
def denominator(self):
return self.__class__(self.ex.as_numer_denom()[1])
def simplify(self, ex):
return self.__class__(ex.cancel().expand())
def __abs__(self):
return self.__class__(abs(self.ex))
def __neg__(self):
return self.__class__(-self.ex)
def _to_ex(self, other):
try:
return self.__class__(other)
except SympifyError:
return
def __add__(self, other):
other = self._to_ex(other)
if other is not None:
return self.simplify(self.ex + other.ex)
return NotImplemented
def __radd__(self, other):
return self.simplify(self.__class__(other).ex + self.ex)
def __sub__(self, other):
other = self._to_ex(other)
if other is not None:
return self.simplify(self.ex - other.ex)
return NotImplemented
def __rsub__(self, other):
return self.simplify(self.__class__(other).ex - self.ex)
def __mul__(self, other):
other = self._to_ex(other)
if other is not None:
return self.simplify(self.ex*other.ex)
return NotImplemented
def __rmul__(self, other):
return self.simplify(self.__class__(other).ex*self.ex)
def __pow__(self, n):
n = self._to_ex(n)
if n is not None:
return self.simplify(self.ex**n.ex)
return NotImplemented
def __truediv__(self, other):
other = self._to_ex(other)
if other is not None:
return self.simplify(self.ex/other.ex)
return NotImplemented
def __rtruediv__(self, other):
return self.simplify(self.__class__(other).ex/self.ex)
def __eq__(self, other):
return self.ex == self.__class__(other).ex
def __bool__(self):
return self.ex != 0
def gcd(self, other):
from ..polys import gcd
return self.__class__(gcd(self.ex, self.__class__(other).ex))
def lcm(self, other):
from ..polys import lcm
return self.__class__(lcm(self.ex, self.__class__(other).ex))
dtype = Expression
zero = Expression(0)
one = Expression(1)
rep = 'EX'
def to_expr(self, element):
return element.METHOD_NAME()
def from_expr(self, expr):
return self.dtype(expr)
def _from_PythonIntegerRing(self, a, K0):
return self(K0.to_expr(a))
def _from_PythonRationalField(self, a, K0):
return self(K0.to_expr(a))
def _from_GMPYIntegerRing(self, a, K0):
return self(K0.to_expr(a))
def _from_GMPYRationalField(self, a, K0):
return self(K0.to_expr(a))
def _from_RealField(self, a, K0):
return self(K0.to_expr(a))
def _from_PolynomialRing(self, a, K0):
return self(K0.to_expr(a))
def _from_FractionField(self, a, K0):
return self(K0.to_expr(a))
def _from_AlgebraicField(self, a, K0):
return self(K0.to_expr(a))
@property
def ring(self):
return self # XXX: EX is not a ring but we don't have much choice here.
def is_normal(self, a):
return a.ex.as_coeff_mul()[0].is_nonnegative
def gcd(self, a, b):
return a.gcd(b)
def lcm(self, a, b):
return a.lcm(b)
EX = ExpressionDomain()
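# --- Usage sketch (illustrative; this module is normally imported as part of its package) ---
# Element arithmetic goes through simplify(), i.e. cancel().expand(), so the
# product below collapses to x**2 - 1.
if __name__ == "__main__":
    a = EX.from_expr("x + 1")
    b = EX.from_expr("x - 1")
    product = a*b
    print(product)              # EX(x**2 - 1)
    print(EX.to_expr(product))  # the underlying expression, x**2 - 1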
|
2,137 |
get type docs
|
# Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import re
from robot.errors import DataError
from robot.running import (ArgInfo, ResourceFileBuilder, TestLibrary, TestSuiteBuilder,
TypeInfo, UserLibrary, UserErrorHandler)
from robot.utils import is_string, split_tags_from_doc, unescape
from robot.variables import search_variable
from .datatypes import TypeDoc
from .model import LibraryDoc, KeywordDoc
class LibraryDocBuilder:
_argument_separator = '::'
def build(self, library):
name, args = self._split_library_name_and_args(library)
lib = TestLibrary(name, args)
libdoc = LibraryDoc(name=lib.name,
doc=self._get_doc(lib),
version=lib.version,
scope=str(lib.scope),
doc_format=lib.doc_format,
source=lib.source,
lineno=lib.lineno)
libdoc.inits = self._get_initializers(lib)
libdoc.keywords = KeywordDocBuilder().build_keywords(lib)
libdoc.type_docs = self.METHOD_NAME(libdoc.inits + libdoc.keywords,
lib.converters)
return libdoc
def _split_library_name_and_args(self, library):
args = library.split(self._argument_separator)
name = args.pop(0)
return self._normalize_library_path(name), args
def _normalize_library_path(self, library):
path = library.replace('/', os.sep)
if os.path.exists(path):
return os.path.abspath(path)
return library
def _get_doc(self, lib):
return lib.doc or f"Documentation for library ``{lib.name}``."
def _get_initializers(self, lib):
if lib.init.arguments:
return [KeywordDocBuilder().build_keyword(lib.init)]
return []
def METHOD_NAME(self, keywords, custom_converters):
type_docs = {}
for kw in keywords:
for arg in kw.args:
kw.type_docs[arg.name] = {}
for type_info in self._yield_type_info(arg.type):
type_doc = TypeDoc.for_type(type_info.type, custom_converters)
if type_doc:
kw.type_docs[arg.name][type_info.name] = type_doc.name
type_docs.setdefault(type_doc, set()).add(kw.name)
for type_doc, usages in type_docs.items():
type_doc.usages = sorted(usages, key=str.lower)
return set(type_docs)
def _yield_type_info(self, info: TypeInfo):
if not info.is_union:
yield info
for nested in info.nested:
yield from self._yield_type_info(nested)
class ResourceDocBuilder:
type = 'RESOURCE'
def build(self, path):
path = self._find_resource_file(path)
res, name = self._import_resource(path)
libdoc = LibraryDoc(name=name,
doc=self._get_doc(res, name),
type=self.type,
scope='GLOBAL',
source=res.source,
lineno=1)
libdoc.keywords = KeywordDocBuilder(resource=True).build_keywords(res)
return libdoc
def _import_resource(self, path):
model = ResourceFileBuilder(process_curdir=False).build(path)
resource = UserLibrary(model)
return resource, resource.name
def _find_resource_file(self, path):
if os.path.isfile(path):
return os.path.normpath(os.path.abspath(path))
for dire in [item for item in sys.path if os.path.isdir(item)]:
candidate = os.path.normpath(os.path.join(dire, path))
if os.path.isfile(candidate):
return os.path.abspath(candidate)
raise DataError(f"Resource file '{path}' does not exist.")
def _get_doc(self, resource, name):
if resource.doc:
return unescape(resource.doc)
return f"Documentation for resource file ``{name}``."
class SuiteDocBuilder(ResourceDocBuilder):
type = 'SUITE'
def _import_resource(self, path):
builder = TestSuiteBuilder(process_curdir=False)
if os.path.basename(path).lower() == '__init__.robot':
path = os.path.dirname(path)
builder.included_suites = ()
builder.allow_empty_suite = True
suite = builder.build(path)
return UserLibrary(suite.resource), suite.name
def _get_doc(self, resource, name):
return f"Documentation for keywords in suite ``{name}``."
class KeywordDocBuilder:
def __init__(self, resource=False):
self._resource = resource
def build_keywords(self, lib):
return [self.build_keyword(kw) for kw in lib.handlers]
def build_keyword(self, kw):
doc, tags = self._get_doc_and_tags(kw)
if not self._resource:
self._escape_strings_in_defaults(kw.arguments.defaults)
return KeywordDoc(name=kw.name,
args=kw.arguments,
doc=doc,
tags=tags,
private=tags.robot('private'),
deprecated=doc.startswith('*DEPRECATED') and '*' in doc[1:],
source=kw.source,
lineno=kw.lineno)
def _escape_strings_in_defaults(self, defaults):
for name, value in defaults.items():
if is_string(value):
value = re.sub(r'[\\\r\n\t]', lambda x: repr(str(x.group()))[1:-1], value)
value = self._escape_variables(value)
defaults[name] = re.sub('^(?= )|(?<= )$|(?<= )(?= )', r'\\', value)
def _escape_variables(self, value):
result = ''
match = search_variable(value)
while match:
result += r'%s\%s{%s}' % (match.before, match.identifier,
self._escape_variables(match.base))
for item in match.items:
result += '[%s]' % self._escape_variables(item)
match = search_variable(match.after)
return result + match.string
def _get_doc_and_tags(self, kw):
doc = self._get_doc(kw)
doc, tags = split_tags_from_doc(doc)
return doc, kw.tags + tags
def _get_doc(self, kw):
if self._resource and not isinstance(kw, UserErrorHandler):
return unescape(kw.doc)
return kw.doc
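# --- Usage sketch (illustrative; this module is normally imported as part of the libdoc package) ---
# Builds documentation for the standard Collections library, assuming a normal
# Robot Framework installation where that library is importable.
if __name__ == "__main__":
    libdoc = LibraryDocBuilder().build("Collections")
    print(libdoc.name, libdoc.version)
    for kw in libdoc.keywords:
        print(" ", kw.name)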
|
2,138 |
xy z to os a ucs
|
"""
Optical Society of America Uniform Colour Scales (OSA UCS)
==========================================================
Defines the *OSA UCS* colourspace:
- :func:`colour.XYZ_to_OSA_UCS`
- :func:`colour.OSA_UCS_to_XYZ`
References
----------
- :cite:`Cao2013` : Cao, R., Trussell, H. J., & Shamey, R. (2013). Comparison
of the performance of inverse transformation methods from OSA-UCS to
CIEXYZ. Journal of the Optical Society of America A, 30(8), 1508.
doi:10.1364/JOSAA.30.001508
- :cite:`Moroney2003` : Moroney, N. (2003). A Radial Sampling of the OSA
Uniform Color Scales. Color and Imaging Conference, 2003(1), 175-180.
ISSN:2166-9635
"""
from __future__ import annotations
import numpy as np
from scipy.optimize import fmin
from colour.algebra import sdiv, sdiv_mode, spow, vector_dot
from colour.hints import ArrayLike, NDArrayFloat
from colour.models import XYZ_to_xyY
from colour.utilities import (
as_float,
as_float_array,
domain_range_scale,
from_range_100,
to_domain_100,
tsplit,
tstack,
)
__author__ = "Colour Developers"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "BSD-3-Clause - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "[email protected]"
__status__ = "Production"
__all__ = [
"XYZ_to_OSA_UCS",
"OSA_UCS_to_XYZ",
]
MATRIX_XYZ_TO_RGB_OSA_UCS: NDArrayFloat = np.array(
[
[0.799, 0.4194, -0.1648],
[-0.4493, 1.3265, 0.0927],
[-0.1149, 0.3394, 0.717],
]
)
"""
*OSA UCS* matrix converting from *CIE XYZ* tristimulus values to *RGB*
colourspace.
"""
def METHOD_NAME(XYZ: ArrayLike) -> NDArrayFloat:
"""
Convert from *CIE XYZ* tristimulus values under the
*CIE 1964 10 Degree Standard Observer* to *OSA UCS* colourspace.
    The lightness axis, *L*, is usually in range [-9, 5] and centered around
    middle gray (Munsell N/6). The yellow-blue axis, *j*, is usually in range
    [-15, 15]. The red-green axis, *g*, is usually in range [-20, 15].
Parameters
----------
XYZ
*CIE XYZ* tristimulus values under the
*CIE 1964 10 Degree Standard Observer*.
Returns
-------
:class:`numpy.ndarray`
*OSA UCS* :math:`Ljg` lightness, jaune (yellowness), and greenness.
Notes
-----
+------------+-----------------------+--------------------+
| **Domain** | **Scale - Reference** | **Scale - 1** |
+============+=======================+====================+
| ``XYZ`` | [0, 100] | [0, 1] |
+------------+-----------------------+--------------------+
+------------+-----------------------+--------------------+
| **Range** | **Scale - Reference** | **Scale - 1** |
+============+=======================+====================+
| ``Ljg`` | ``L`` : [-100, 100] | ``L`` : [-1, 1] |
| | | |
| | ``j`` : [-100, 100] | ``j`` : [-1, 1] |
| | | |
| | ``g`` : [-100, 100] | ``g`` : [-1, 1] |
+------------+-----------------------+--------------------+
- *OSA UCS* uses the *CIE 1964 10 Degree Standard Observer*.
References
----------
:cite:`Cao2013`, :cite:`Moroney2003`
Examples
--------
>>> import numpy as np
>>> XYZ = np.array([0.20654008, 0.12197225, 0.05136952]) * 100
>>> XYZ_to_OSA_UCS(XYZ) # doctest: +ELLIPSIS
array([-3.0049979..., 2.9971369..., -9.6678423...])
"""
XYZ = to_domain_100(XYZ)
x, y, Y = tsplit(XYZ_to_xyY(XYZ))
Y_0 = Y * (
4.4934 * x**2
+ 4.3034 * y**2
- 4.276 * x * y
- 1.3744 * x
- 2.5643 * y
+ 1.8103
)
o_3 = 1 / 3
Y_0_es = spow(Y_0, o_3) - 2 / 3
# Gracefully handles Y_0 < 30.
Y_0_s = Y_0 - 30
Lambda = 5.9 * (Y_0_es + 0.042 * spow(Y_0_s, o_3))
RGB = vector_dot(MATRIX_XYZ_TO_RGB_OSA_UCS, XYZ)
RGB_3 = spow(RGB, 1 / 3)
with sdiv_mode():
C = sdiv(Lambda, 5.9 * Y_0_es)
L = (Lambda - 14.4) / spow(2, 1 / 2)
j = C * np.dot(RGB_3, np.array([1.7, 8, -9.7]))
g = C * np.dot(RGB_3, np.array([-13.7, 17.7, -4]))
Ljg = tstack([L, j, g])
return from_range_100(Ljg)
def OSA_UCS_to_XYZ(
Ljg: ArrayLike, optimisation_kwargs: dict | None = None
) -> NDArrayFloat:
"""
Convert from *OSA UCS* colourspace to *CIE XYZ* tristimulus values under
the *CIE 1964 10 Degree Standard Observer*.
Parameters
----------
Ljg
*OSA UCS* :math:`Ljg` lightness, jaune (yellowness), and greenness.
optimisation_kwargs
Parameters for :func:`scipy.optimize.fmin` definition.
Returns
-------
:class:`numpy.ndarray`
*CIE XYZ* tristimulus values under the
*CIE 1964 10 Degree Standard Observer*.
Warnings
--------
There is no analytical inverse transformation from *OSA UCS* to :math:`Ljg`
lightness, jaune (yellowness), and greenness to *CIE XYZ* tristimulus
values, the current implementation relies on optimisation using
:func:`scipy.optimize.fmin` definition and thus has reduced precision and
poor performance.
Notes
-----
+------------+-----------------------+--------------------+
| **Domain** | **Scale - Reference** | **Scale - 1** |
+============+=======================+====================+
| ``Ljg`` | ``L`` : [-100, 100] | ``L`` : [-1, 1] |
| | | |
| | ``j`` : [-100, 100] | ``j`` : [-1, 1] |
| | | |
| | ``g`` : [-100, 100] | ``g`` : [-1, 1] |
+------------+-----------------------+--------------------+
+------------+-----------------------+--------------------+
| **Range** | **Scale - Reference** | **Scale - 1** |
+============+=======================+====================+
| ``XYZ`` | [0, 100] | [0, 1] |
+------------+-----------------------+--------------------+
- *OSA UCS* uses the *CIE 1964 10 Degree Standard Observer*.
References
----------
:cite:`Cao2013`, :cite:`Moroney2003`
Examples
--------
>>> import numpy as np
>>> Ljg = np.array([-3.00499790, 2.99713697, -9.66784231])
>>> OSA_UCS_to_XYZ(Ljg) # doctest: +ELLIPSIS
array([ 20.6540240..., 12.1972369..., 5.1369372...])
"""
Ljg = to_domain_100(Ljg)
shape = Ljg.shape
Ljg = np.atleast_1d(Ljg.reshape([-1, 3]))
optimisation_settings = {"disp": False}
if optimisation_kwargs is not None:
optimisation_settings.update(optimisation_kwargs)
def error_function(XYZ: NDArrayFloat, Ljg: NDArrayFloat) -> NDArrayFloat:
"""Error function."""
# Error must be computed in "reference" domain and range.
with domain_range_scale("ignore"):
error = np.linalg.norm(METHOD_NAME(XYZ) - Ljg)
return as_float(error)
x_0 = np.array([30, 30, 30])
XYZ = as_float_array(
[
fmin(error_function, x_0, (Ljg_i,), **optimisation_settings)
for Ljg_i in Ljg
]
)
return from_range_100(np.reshape(XYZ, shape))
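# --- Round-trip sketch (illustrative) ---
# The forward transform is the function defined as METHOD_NAME above
# (exported as ``XYZ_to_OSA_UCS`` in ``__all__``); the inverse is optimisation
# based, so only loose tolerances can be expected.
if __name__ == "__main__":
    XYZ = np.array([0.20654008, 0.12197225, 0.05136952]) * 100
    Ljg = METHOD_NAME(XYZ)
    XYZ_round_trip = OSA_UCS_to_XYZ(Ljg)
    print(np.allclose(XYZ, XYZ_round_trip, atol=1e-2))  # True within tolerance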
|
2,139 |
create vendor account
|
# Copyright 2020 DSR Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import random
import string
import subprocess
import tempfile
import time
from pathlib import Path
DCLCLI = "dcld"
DCL_CHAIN_ID = "dclchain"
def METHOD_NAME(vendor_name, vid, trustee_account_name):
try:
keys_delete(vendor_name)
except Exception:
print("We don't remove that user, because that user does not exist in dcld")
keys_add(vendor_name)
# Get a Vendor address and pubkey
vendor_address = keys_show_address(vendor_name)
vendor_pubkey = keys_show_pubkey(vendor_name)
# Send to request to another node to propose
cmd = [
DCLCLI,
"tx",
"auth",
"propose-add-account",
"--address=" + vendor_address,
"--pubkey=" + vendor_pubkey,
"--roles=Vendor",
"--vid=" + str(vid),
"--from=" + trustee_account_name,
"--yes",
]
result = run_shell_cmd(cmd)
while "account sequence mismatch" in str(result):
time.sleep(random.randint(1, 20))
print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
print("[WARNING! READDING VENDOR ACCOUNT!]")
result = run_shell_cmd(cmd)
print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
print("[SUCCESS! VENDOR ACCOUNT HAS ADDED!]")
print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
def generate_txns(model_id, model_sequence, vendor_address, vendor_id, account_number):
with tempfile.TemporaryDirectory() as tmpdirname:
random_file_name = generate_random_name()
tmp_file = (Path(tmpdirname) / random_file_name).resolve()
tmp_file.write_text(create_model(vendor_address, model_id, vendor_id))
tmp_file.write_text(
txn_sign(vendor_address, account_number, model_sequence, str(tmp_file))
)
return txn_encode(str(tmp_file))
def keys_delete(key_name):
cmd = [DCLCLI, "keys", "delete", key_name, "--yes"]
return run_shell_cmd(cmd).stdout
def keys_add(key_name):
cmd = [DCLCLI, "keys", "add", key_name]
return run_shell_cmd(cmd).stdout
def keys_show_address(key_name):
cmd = [DCLCLI, "keys", "show", key_name, "-a"]
return run_shell_cmd(cmd).stdout.rstrip("\n")
def keys_show_pubkey(key_name):
cmd = [DCLCLI, "keys", "show", key_name, "-p"]
return run_shell_cmd(cmd).stdout.rstrip("\n")
def get_account_number(vendor_address):
cmd = [DCLCLI, "query", "auth", "account", "--address", vendor_address]
result = run_shell_cmd(cmd).stdout
json_result = json.loads(result)
return int(json_result["base_account"]["account_number"])
def create_model(vendor_address, current_model_id, vendor_id):
cmd = [
DCLCLI,
"tx",
"model",
"add-model",
"--vid=" + str(vendor_id),
"--pid=" + str(current_model_id),
"--deviceTypeID=" + str(current_model_id),
"--productName=ProductName" + str(current_model_id),
"--productLabel=ProductLabel" + str(current_model_id),
"--partNumber=PartNumber" + str(current_model_id),
"--from=" + vendor_address,
"--yes",
"--generate-only",
]
return run_shell_cmd(cmd).stdout
def txn_sign(vendor_address, account, sequence_n, f_path):
cmd = [DCLCLI, "tx", "sign", "--chain-id", DCL_CHAIN_ID]
params = {"from": vendor_address}
cmd += to_cli_args(
account_number=account, sequence=sequence_n, gas="auto", **params
)
cmd.extend(["--offline", f_path])
return run_shell_cmd(cmd).stdout
def txn_encode(f_path):
cmd = [DCLCLI, "tx", "encode", f_path]
return run_shell_cmd(cmd).stdout
def generate_random_name():
letters = string.ascii_lowercase
return "".join(random.choice(letters) for i in range(10))
def generate_random_number():
return random.randint(1000, 65000)
def run_shell_cmd(cmd, **kwargs):
_kwargs = dict(
check=True,
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
_kwargs.update(kwargs)
if not _kwargs.get("shell") and type(cmd) is str:
cmd = cmd.split()
try:
return subprocess.run(cmd, **_kwargs)
except (subprocess.CalledProcessError, FileNotFoundError) as exc:
raise RuntimeError(f"command '{cmd}' failed: {exc.stderr}") from exc
def to_cli_args(**kwargs):
res = []
for k, v in kwargs.items():
k = "--{}".format(k.replace("_", "-"))
res.extend([k, str(v)])
return res
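# --- Usage sketch (illustrative) ---
# Assumes the `dcld` binary is on PATH, a local DCL test network is running,
# and a trustee key named "jack" exists and can approve the proposal on its
# own; names and IDs below are random placeholders.
if __name__ == "__main__":
    vendor_name = "vendor-" + generate_random_name()
    vid = generate_random_number()
    METHOD_NAME(vendor_name, vid, trustee_account_name="jack")
    vendor_address = keys_show_address(vendor_name)
    account_number = get_account_number(vendor_address)
    encoded_txn = generate_txns(
        model_id=generate_random_number(),
        model_sequence=0,
        vendor_address=vendor_address,
        vendor_id=vid,
        account_number=account_number,
    )
    print(encoded_txn)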
|
2,140 |
test move child after itself
|
from __future__ import annotations
import pytest
from textual.app import App
from textual.widget import Widget, WidgetError
async def test_move_child_no_direction() -> None:
"""Test moving a widget in a child list."""
async with App().run_test() as pilot:
child = Widget(Widget())
await pilot.app.mount(child)
with pytest.raises(WidgetError):
pilot.app.screen.move_child(child)
async def test_move_child_both_directions() -> None:
"""Test calling move_child with more than one direction."""
async with App().run_test() as pilot:
child = Widget(Widget())
await pilot.app.mount(child)
with pytest.raises(WidgetError):
pilot.app.screen.move_child(child, before=1, after=2)
async def test_move_child_not_our_child() -> None:
"""Test attempting to move a child that isn't ours."""
async with App().run_test() as pilot:
child = Widget(Widget())
await pilot.app.mount(child)
with pytest.raises(WidgetError):
pilot.app.screen.move_child(Widget(), before=child)
async def test_move_child_to_outside() -> None:
"""Test attempting to move relative to a widget that isn't a child."""
async with App().run_test() as pilot:
child = Widget(Widget())
await pilot.app.mount(child)
with pytest.raises(WidgetError):
pilot.app.screen.move_child(child, before=Widget())
async def test_move_child_before_itself() -> None:
"""Test moving a widget before itself."""
async with App().run_test() as pilot:
child = Widget(Widget())
await pilot.app.mount(child)
pilot.app.screen.move_child(child, before=child)
async def METHOD_NAME() -> None:
"""Test moving a widget after itself."""
# Regression test for https://github.com/Textualize/textual/issues/1743
async with App().run_test() as pilot:
child = Widget(Widget())
await pilot.app.mount(child)
pilot.app.screen.move_child(child, after=child)
async def test_move_past_end_of_child_list() -> None:
"""Test attempting to move past the end of the child list."""
async with App().run_test() as pilot:
widgets = [Widget(id=f"widget-{n}") for n in range(10)]
container = Widget(*widgets)
await pilot.app.mount(container)
with pytest.raises(WidgetError):
container.move_child(widgets[0], before=len(widgets) + 10)
async def test_move_before_end_of_child_list() -> None:
"""Test attempting to move before the end of the child list."""
async with App().run_test() as pilot:
widgets = [Widget(id=f"widget-{n}") for n in range(10)]
container = Widget(*widgets)
await pilot.app.mount(container)
with pytest.raises(WidgetError):
container.move_child(widgets[0], before=-(len(widgets) + 10))
async def test_move_before_permutations() -> None:
"""Test the different permutations of moving one widget before another."""
widgets = [Widget(id=f"widget-{n}") for n in range(10)]
perms = ((1, 0), (widgets[1], 0), (1, widgets[0]), (widgets[1], widgets[0]))
for child, target in perms:
async with App().run_test() as pilot:
container = Widget(*widgets)
await pilot.app.mount(container)
container.move_child(child, before=target)
assert container._nodes[0].id == "widget-1"
assert container._nodes[1].id == "widget-0"
assert container._nodes[2].id == "widget-2"
async def test_move_after_permutations() -> None:
"""Test the different permutations of moving one widget after another."""
widgets = [Widget(id=f"widget-{n}") for n in range(10)]
perms = ((0, 1), (widgets[0], 1), (0, widgets[1]), (widgets[0], widgets[1]))
for child, target in perms:
async with App().run_test() as pilot:
container = Widget(*widgets)
await pilot.app.mount(container)
container.move_child(child, after=target)
assert container._nodes[0].id == "widget-1"
assert container._nodes[1].id == "widget-0"
assert container._nodes[2].id == "widget-2"
async def test_move_child_after_last_child() -> None:
"""Test moving after a child after the last child."""
async with App().run_test() as pilot:
widgets = [Widget(id=f"widget-{n}") for n in range(10)]
container = Widget(*widgets)
await pilot.app.mount(container)
container.move_child(widgets[0], after=widgets[-1])
assert container._nodes[0].id == "widget-1"
assert container._nodes[-1].id == "widget-0"
async def test_move_child_after_last_numeric_location() -> None:
"""Test moving after a child after the last child's numeric position."""
async with App().run_test() as pilot:
widgets = [Widget(id=f"widget-{n}") for n in range(10)]
container = Widget(*widgets)
await pilot.app.mount(container)
container.move_child(widgets[0], after=widgets[9])
assert container._nodes[0].id == "widget-1"
assert container._nodes[-1].id == "widget-0"
|
2,141 |
to generator
|
"""
This example shows how you could use Keras `Sequence`s and multiprocessing/multithreading for Keras
models in Determined. Information for how this can be configured can be found in
`make_data_loaders()`.
Tutorial based on this example:
https://docs.determined.ai/latest/tutorials/tf-cifar-tutorial.html
Useful References:
https://docs.determined.ai/latest/reference/api/keras.html
https://www.tensorflow.org/guide/keras
Based on: https://github.com/fchollet/keras/blob/master/examples/cifar10_cnn.py
"""
from typing import Generator, List, Tuple
import numpy as np
import tensorflow as tf
from cifar_model import build_model, build_optimizer, compile_model
from tensorflow.keras.models import Sequential
import determined as det
from determined import keras
def load_numpy_data(
context: det.core.Context,
) -> Tuple[Tuple[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray]]:
# When running distributed, we don't want multiple ranks on the same node to download the
# data simultaneously, since they'll overwrite each other. So we only download on
# local rank 0.
if context.distributed.get_local_rank() == 0:
tf.keras.datasets.cifar10.load_data()
# Wait until local rank 0 is done downloading.
context.distributed.allgather_local(None)
# Now that the data is downloaded, each rank can load it.
(X_train, Y_train), (X_test, Y_test) = tf.keras.datasets.cifar10.load_data()
# Convert from pixel values to [0, 1] range floats, and one-hot encode labels.
X_train = X_train.astype("float32") / 255
X_test = X_test.astype("float32") / 255
Y_train = tf.keras.utils.to_categorical(Y_train, num_classes=10)
Y_test = tf.keras.utils.to_categorical(Y_test, num_classes=10)
return (X_train, Y_train), (X_test, Y_test)
def METHOD_NAME(
xs: np.ndarray, ys: np.ndarray
) -> Generator[Tuple[np.ndarray, np.ndarray], None, None]:
n = xs.shape[0]
for i in range(n):
yield xs[i], ys[i]
class CIFARTrial(keras.TFKerasTrial):
def __init__(self, context: keras.TFKerasTrialContext) -> None:
self.context = context
self.train_np, self.test_np = load_numpy_data(self.context)
def session_config(self) -> tf.compat.v1.ConfigProto:
if self.context.get_hparams().get("disable_CPU_parallelism", False):
return tf.compat.v1.ConfigProto(
intra_op_parallelism_threads=1, inter_op_parallelism_threads=1
)
else:
return tf.compat.v1.ConfigProto()
def build_model(self) -> Sequential:
# Create model.
model = build_model(
layer1_dropout=self.context.get_hparam("layer1_dropout"),
layer2_dropout=self.context.get_hparam("layer2_dropout"),
layer3_dropout=self.context.get_hparam("layer3_dropout"),
)
# Wrap the model.
model = self.context.wrap_model(model)
# Create and wrap optimizer.
optimizer = build_optimizer(
learning_rate=self.context.get_hparam("learning_rate"),
learning_rate_decay=self.context.get_hparam("learning_rate_decay"),
)
optimizer = self.context.wrap_optimizer(optimizer)
# Compile model.
compile_model(model=model, optimizer=optimizer)
return model
def keras_callbacks(self) -> List[tf.keras.callbacks.Callback]:
return [keras.callbacks.TensorBoard(update_freq="batch", profile_batch=0, histogram_freq=1)]
def build_training_data_loader(self) -> keras.InputData:
hparams = self.context.get_hparams()
train_ds = self.context.wrap_dataset(
tf.data.Dataset.from_generator(
lambda: METHOD_NAME(*self.train_np),
output_signature=(
tf.TensorSpec(shape=(32, 32, 3), dtype=tf.float32),
tf.TensorSpec(shape=(10,), dtype=tf.float32),
),
)
)
augmentation = tf.keras.Sequential(
[
tf.keras.layers.RandomFlip(mode="horizontal"),
tf.keras.layers.RandomTranslation(
height_factor=hparams.get("height_factor", 0.0),
width_factor=hparams.get("width_factor", 0.0),
),
]
)
train_ds = train_ds.batch(self.context.get_per_slot_batch_size())
train_ds = train_ds.map(
lambda x, y: (augmentation(x), y), num_parallel_calls=tf.data.experimental.AUTOTUNE
)
train_ds = train_ds.prefetch(tf.data.experimental.AUTOTUNE)
return train_ds
def build_validation_data_loader(self) -> keras.InputData:
test_ds = self.context.wrap_dataset(
tf.data.Dataset.from_generator(
lambda: METHOD_NAME(*self.test_np),
output_signature=(
tf.TensorSpec(shape=(32, 32, 3), dtype=tf.float32),
tf.TensorSpec(shape=(10,), dtype=tf.float32),
),
)
)
test_ds = test_ds.batch(self.context.get_per_slot_batch_size())
return test_ds
|
2,142 |
run on dataset
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from typing import List, Optional, Tuple
import numpy as np
import pandas as pd
from tqdm.auto import tqdm
from tsbench.analysis.utils import (
loocv_split,
num_fitting_processes,
run_parallel,
)
from tsbench.config import Config, ModelConfig
from tsbench.evaluations.metrics import Performance
from tsbench.evaluations.tracking import ModelTracker
from tsbench.surrogate import AutoGluonSurrogate, Surrogate
from .metrics import mrr, ndcg, nrmse, precision_k, smape
class SurrogateAnalyzer:
"""
The surrogate analyzer evaluates the performance of a surrogate model with
respect to ranking and regression metrics.
    The analysis is run in parallel and should therefore not be run from a Jupyter
    notebook. Instead, consider using the `tsbench` CLI.
"""
def __init__(
self,
surrogate: Surrogate[ModelConfig],
tracker: ModelTracker,
metrics: Optional[List[str]] = None,
):
"""
Args:
surrogate: The surrogate model to evaluate.
tracker: The collector from which to obtain the data for evaluation.
metrics: The metrics to evaluate. If not provided, evaluates all metrics.
"""
self.surrogate = surrogate
self.tracker = tracker
self.metrics = metrics
def run(self) -> pd.DataFrame:
"""
Runs the evaluation on the surrogate by applying LOOCV on the datasets
being trained on. Metrics are then provided per test dataset.
Returns:
A data frame with the results for each fold, the metrics being the columns. The rows
are indexed by the dataset which was left out.
"""
if isinstance(self.surrogate, AutoGluonSurrogate):
metrics = [
self.METHOD_NAME(x)
for x in tqdm(list(loocv_split(self.tracker)))
]
else:
data = list(loocv_split(self.tracker))
metrics = run_parallel(
self.METHOD_NAME,
data=data,
num_processes=min(
num_fitting_processes(
cpus_per_process=self.surrogate.required_cpus,
memory_per_process=self.surrogate.required_memory,
),
len(data),
),
)
return pd.concat(metrics).set_index("test_dataset")
def METHOD_NAME(
self,
data: Tuple[
Tuple[List[Config[ModelConfig]], List[Performance]],
Tuple[List[Config[ModelConfig]], List[Performance]],
],
) -> pd.DataFrame:
(X_train, y_train), (X_test, y_test) = data
# Fit model and predict
self.surrogate.fit(X_train, y_train)
y_pred = self.surrogate.predict(X_test)
# Compute metrics
scores = self._score(y_pred, y_test)
return scores.assign(test_dataset=X_test[0].dataset.name())
def _score(
self, y_pred: List[Performance], y_true: List[Performance]
) -> pd.DataFrame:
df_pred = Performance.to_dataframe(y_pred)
df_true = Performance.to_dataframe(y_true)
if self.metrics is not None:
df_pred = df_pred[self.metrics]
df_true = df_true[self.metrics]
# We extract the NumPy arrays so that indexing is easier. Each metric is computed such that
# it results in an array of shape [D] where D is the number of metrics.
columns = df_pred.columns
y_pred_min = df_pred.to_numpy() # type: ignore
y_true_min = df_true.to_numpy() # type: ignore
# Return all results
metrics = {
"nrmse": nrmse(y_pred_min, y_true_min),
"smape": smape(y_pred_min, y_true_min),
"mrr": mrr(y_pred_min, y_true_min),
**{
f"precision_{k}": precision_k(k, y_pred_min, y_true_min)
for k in (5, 10, 20)
},
"ndcg": ndcg(y_pred_min, y_true_min),
}
column_index = pd.MultiIndex.from_tuples(
[(c, m) for m in sorted(metrics) for c in columns]
)
values = np.concatenate([metrics[m] for m in sorted(metrics)])
return pd.DataFrame(np.reshape(values, (1, -1)), columns=column_index)
|
2,143 |
has enddate
|
"""
Mixins to be inherited by AbstractEvent
They are split up in order to make them easier to read,
and because there was (once upon a time) an idea to split up the information
about an event and the registration info into different models.
"""
from datetime import datetime
from django.db import models
from six.moves.urllib.parse import urlparse
from nablapps.accounts.models import FysmatClass
from ..exceptions import (
RegistrationNotAllowed,
RegistrationNotOpen,
RegistrationNotRequiredException,
)
class EventInfoMixin(models.Model):
"""Abstract model defining info about an event, excluding registration info"""
short_name = models.CharField(
verbose_name="kort navn",
max_length=20,
blank=True,
null=True,
help_text="Brukes på steder hvor det ikke er plass til å skrive hele overskriften, "
"for eksempel kalenderen.",
)
organizer = models.CharField(
verbose_name="organisert av",
max_length=100,
blank=True,
help_text="Den som står bak arrangementet",
)
location = models.CharField(verbose_name="sted", max_length=100, blank=False)
event_start = models.DateTimeField(verbose_name="start", null=True, blank=False)
event_end = models.DateTimeField(verbose_name="slutt", null=True, blank=True)
facebook_url = models.CharField(
verbose_name="facebook-url",
blank=True,
max_length=100,
help_text="URL-en til det tilsvarende arrangementet på Facebook",
)
class Meta:
abstract = True
def has_started(self):
"""Has the event started?"""
return self.event_start < datetime.now()
def has_finished(self):
"""Is the event finished?"""
return self.event_end and self.event_end < datetime.now()
def METHOD_NAME(self):
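        """Does the event have an end date?"""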
return self.event_end is not None
def clean(self):
self.clean_facebook_url()
super().clean()
def clean_facebook_url(self):
"""Verifiserer formen på facebook-urlen, og endrer den hvis den er feil."""
parsed = urlparse(self.facebook_url)
noscheme = parsed.netloc + parsed.path
self.facebook_url = (
"http" + "://" + noscheme.replace("http://", "").replace("https://", "")
)
if self.facebook_url == "http://":
self.facebook_url = ""
class RegistrationInfoMixin(models.Model):
"""Abstract model containing info about the registration.
Most of these fields don't make any sense unless registration_required is set.
"""
registration_required = models.BooleanField(
verbose_name="påmelding", default=False, null=False, blank=False
)
registration_deadline = models.DateTimeField(
verbose_name="påmeldingsfrist", null=True, blank=True
)
registration_start = models.DateTimeField(
verbose_name="påmelding åpner", null=True, blank=True
)
deregistration_deadline = models.DateTimeField(
verbose_name="avmeldingsfrist", null=True, blank=True
)
places = models.PositiveIntegerField(
verbose_name="antall plasser", null=True, blank=True
)
has_queue = models.BooleanField(
verbose_name="har venteliste",
null=True,
blank=True,
help_text=(
"Om ventelisten er på, vil det være mulig å melde seg på "
"selv om arrangementet er fullt. "
"De som er i ventelisten vil automatisk bli påmeldt "
"etter hvert som plasser blir ledige."
),
)
open_for = models.ManyToManyField(
FysmatClass,
verbose_name="Åpen for",
blank=True,
help_text=(
"Hvilke grupper som får lov til å melde seg på arrangementet. "
"Hvis ingen grupper er valgt er det åpent for alle."
),
)
class Meta:
abstract = True
def allowed_to_attend(self, user):
"""Indikerer om en bruker har lov til å melde seg på arrangementet"""
return (not self.open_for.exists()) or self.open_for.filter(user=user).exists()
def registration_has_started(self):
"""Return whether registration has started"""
return self.registration_required and self.registration_start < datetime.now()
def registration_open(self):
"""Return whether it is possible to register for the event"""
return (
self.registration_has_started()
and datetime.now() < self.registration_deadline
)
def deregistration_closed(self):
"""Return whether the event is closed for deregistration."""
return self.deregistration_deadline and (
self.deregistration_deadline < datetime.now()
)
def user_penalty_limit(self, user):
"""Counts the users penalties this term, used in _asser_user_allowed_to_register"""
MAX_PENALTY = 4 # This is the limit at which one is not allowed to register
# user.get_penalties returns EventRegistrations where the user has penalties
penalty_count = sum([reg.penalty for reg in user.get_penalties()])
        return penalty_count < MAX_PENALTY
def _assert_user_allowed_to_register(self, user):
if not self.registration_required:
raise RegistrationNotRequiredException(event=self, user=user)
elif not self.registration_open():
raise RegistrationNotOpen(event=self, user=user)
elif not self.allowed_to_attend(user):
raise RegistrationNotAllowed(
"Arrangementet er ikke åpent for ditt kull.", event=self, user=user
)
elif not self.user_penalty_limit(user):
raise RegistrationNotAllowed(
"Du har for mange prikker!", event=self, user=user
)
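# --- Usage sketch (illustrative) ---
# The real project combines these mixins into an AbstractEvent model (see the
# module docstring). A hypothetical concrete model would look roughly like:
#
#     class ExampleEvent(EventInfoMixin, RegistrationInfoMixin):
#         headline = models.CharField(max_length=100)
#
# Registration attempts are then validated through
# _assert_user_allowed_to_register(), which raises one of the imported
# registration exceptions when the user cannot attend.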
|
2,144 |
testfile
|
import types
import unittest
from _typeshed import ExcInfo
from collections.abc import Callable
from typing import Any, NamedTuple
from typing_extensions import TypeAlias
__all__ = [
"register_optionflag",
"DONT_ACCEPT_TRUE_FOR_1",
"DONT_ACCEPT_BLANKLINE",
"NORMALIZE_WHITESPACE",
"ELLIPSIS",
"SKIP",
"IGNORE_EXCEPTION_DETAIL",
"COMPARISON_FLAGS",
"REPORT_UDIFF",
"REPORT_CDIFF",
"REPORT_NDIFF",
"REPORT_ONLY_FIRST_FAILURE",
"REPORTING_FLAGS",
"FAIL_FAST",
"Example",
"DocTest",
"DocTestParser",
"DocTestFinder",
"DocTestRunner",
"OutputChecker",
"DocTestFailure",
"UnexpectedException",
"DebugRunner",
"testmod",
"testfile",
"run_docstring_examples",
"DocTestSuite",
"DocFileSuite",
"set_unittest_reportflags",
"script_from_examples",
"testsource",
"debug_src",
"debug",
]
class TestResults(NamedTuple):
failed: int
attempted: int
OPTIONFLAGS_BY_NAME: dict[str, int]
def register_optionflag(name: str) -> int: ...
DONT_ACCEPT_TRUE_FOR_1: int
DONT_ACCEPT_BLANKLINE: int
NORMALIZE_WHITESPACE: int
ELLIPSIS: int
SKIP: int
IGNORE_EXCEPTION_DETAIL: int
COMPARISON_FLAGS: int
REPORT_UDIFF: int
REPORT_CDIFF: int
REPORT_NDIFF: int
REPORT_ONLY_FIRST_FAILURE: int
FAIL_FAST: int
REPORTING_FLAGS: int
BLANKLINE_MARKER: str
ELLIPSIS_MARKER: str
class Example:
source: str
want: str
exc_msg: str | None
lineno: int
indent: int
options: dict[int, bool]
def __init__(
self,
source: str,
want: str,
exc_msg: str | None = None,
lineno: int = 0,
indent: int = 0,
options: dict[int, bool] | None = None,
) -> None: ...
def __hash__(self) -> int: ...
def __eq__(self, other: object) -> bool: ...
class DocTest:
examples: list[Example]
globs: dict[str, Any]
name: str
filename: str | None
lineno: int | None
docstring: str | None
def __init__(
self,
examples: list[Example],
globs: dict[str, Any],
name: str,
filename: str | None,
lineno: int | None,
docstring: str | None,
) -> None: ...
def __hash__(self) -> int: ...
def __lt__(self, other: DocTest) -> bool: ...
def __eq__(self, other: object) -> bool: ...
class DocTestParser:
def parse(self, string: str, name: str = "<string>") -> list[str | Example]: ...
def get_doctest(self, string: str, globs: dict[str, Any], name: str, filename: str | None, lineno: int | None) -> DocTest: ...
def get_examples(self, string: str, name: str = "<string>") -> list[Example]: ...
class DocTestFinder:
def __init__(
self, verbose: bool = False, parser: DocTestParser = ..., recurse: bool = True, exclude_empty: bool = True
) -> None: ...
def find(
self,
obj: object,
name: str | None = None,
module: None | bool | types.ModuleType = None,
globs: dict[str, Any] | None = None,
extraglobs: dict[str, Any] | None = None,
) -> list[DocTest]: ...
_Out: TypeAlias = Callable[[str], object]
class DocTestRunner:
DIVIDER: str
optionflags: int
original_optionflags: int
tries: int
failures: int
test: DocTest
def __init__(self, checker: OutputChecker | None = None, verbose: bool | None = None, optionflags: int = 0) -> None: ...
def report_start(self, out: _Out, test: DocTest, example: Example) -> None: ...
def report_success(self, out: _Out, test: DocTest, example: Example, got: str) -> None: ...
def report_failure(self, out: _Out, test: DocTest, example: Example, got: str) -> None: ...
def report_unexpected_exception(self, out: _Out, test: DocTest, example: Example, exc_info: ExcInfo) -> None: ...
def run(
self, test: DocTest, compileflags: int | None = None, out: _Out | None = None, clear_globs: bool = True
) -> TestResults: ...
def summarize(self, verbose: bool | None = None) -> TestResults: ...
def merge(self, other: DocTestRunner) -> None: ...
class OutputChecker:
def check_output(self, want: str, got: str, optionflags: int) -> bool: ...
def output_difference(self, example: Example, got: str, optionflags: int) -> str: ...
class DocTestFailure(Exception):
test: DocTest
example: Example
got: str
def __init__(self, test: DocTest, example: Example, got: str) -> None: ...
class UnexpectedException(Exception):
test: DocTest
example: Example
exc_info: ExcInfo
def __init__(self, test: DocTest, example: Example, exc_info: ExcInfo) -> None: ...
class DebugRunner(DocTestRunner): ...
master: DocTestRunner | None
def testmod(
m: types.ModuleType | None = None,
name: str | None = None,
globs: dict[str, Any] | None = None,
verbose: bool | None = None,
report: bool = True,
optionflags: int = 0,
extraglobs: dict[str, Any] | None = None,
raise_on_error: bool = False,
exclude_empty: bool = False,
) -> TestResults: ...
def METHOD_NAME(
filename: str,
module_relative: bool = True,
name: str | None = None,
package: None | str | types.ModuleType = None,
globs: dict[str, Any] | None = None,
verbose: bool | None = None,
report: bool = True,
optionflags: int = 0,
extraglobs: dict[str, Any] | None = None,
raise_on_error: bool = False,
parser: DocTestParser = ...,
encoding: str | None = None,
) -> TestResults: ...
def run_docstring_examples(
f: object,
globs: dict[str, Any],
verbose: bool = False,
name: str = "NoName",
compileflags: int | None = None,
optionflags: int = 0,
) -> None: ...
def set_unittest_reportflags(flags: int) -> int: ...
class DocTestCase(unittest.TestCase):
def __init__(
self,
test: DocTest,
optionflags: int = 0,
setUp: Callable[[DocTest], Any] | None = None,
tearDown: Callable[[DocTest], Any] | None = None,
checker: OutputChecker | None = None,
) -> None: ...
def runTest(self) -> None: ...
def format_failure(self, err: str) -> str: ...
def __hash__(self) -> int: ...
def __eq__(self, other: object) -> bool: ...
class SkipDocTestCase(DocTestCase):
def __init__(self, module: types.ModuleType) -> None: ...
def test_skip(self) -> None: ...
class _DocTestSuite(unittest.TestSuite): ...
def DocTestSuite(
module: None | str | types.ModuleType = None,
globs: dict[str, Any] | None = None,
extraglobs: dict[str, Any] | None = None,
test_finder: DocTestFinder | None = None,
**options: Any,
) -> _DocTestSuite: ...
class DocFileCase(DocTestCase): ...
def DocFileTest(
path: str,
module_relative: bool = True,
package: None | str | types.ModuleType = None,
globs: dict[str, Any] | None = None,
parser: DocTestParser = ...,
encoding: str | None = None,
**options: Any,
) -> DocFileCase: ...
def DocFileSuite(*paths: str, **kw: Any) -> _DocTestSuite: ...
def script_from_examples(s: str) -> str: ...
def testsource(module: None | str | types.ModuleType, name: str) -> str: ...
def debug_src(src: str, pm: bool = False, globs: dict[str, Any] | None = None) -> None: ...
def debug_script(src: str, pm: bool = False, globs: dict[str, Any] | None = None) -> None: ...
def debug(module: None | str | types.ModuleType, name: str, pm: bool = False) -> None: ...
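# --- Usage sketch (illustrative, not part of the stub) ---
# Typical use of the runner API declared above, against the real doctest module:
#
#     import doctest
#
#     def add(a, b):
#         """
#         >>> add(2, 3)
#         5
#         """
#         return a + b
#
#     finder = doctest.DocTestFinder()
#     runner = doctest.DocTestRunner()
#     for test in finder.find(add, name="add"):
#         runner.run(test)
#     print(runner.summarize())  # TestResults(failed=0, attempted=1)
#
# The function masked as METHOD_NAME above is doctest.testfile(), which runs
# the examples found in a separate text file instead of a module's docstrings.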
|
2,145 |
test interp dfs1
|
import numpy as np
import pytest
import pandas as pd
import mikeio
from mikeio import Dfs1
from mikeio import EUMType, EUMUnit
def test_filenotexist():
with pytest.raises(FileNotFoundError):
mikeio.open("file_that_does_not_exist.dfs1")
def test_repr():
filename = r"tests/testdata/random.dfs1"
dfs = mikeio.open(filename)
text = repr(dfs)
assert "Dfs1" in text
assert "items" in text
assert "dx" in text
def test_repr_empty():
dfs = Dfs1()
text = repr(dfs)
assert "Dfs1" in text
def test_properties():
filename = r"tests/testdata/tide1.dfs1"
dfs = mikeio.open(filename)
assert dfs.dx == 0.06666692346334457
assert dfs.x0 == 0.0
assert dfs.nx == 10
assert dfs.projection_string == "LONG/LAT"
assert dfs.longitude == -5.0
assert dfs.latitude == 51.20000076293945
assert dfs.orientation == 180
g = dfs.geometry
assert isinstance(g, mikeio.Grid1D)
assert g.dx == 0.06666692346334457
assert g._x0 == 0.0
assert g.nx == 10
assert g.projection == "LONG/LAT"
assert g.origin == (-5.0, 51.20000076293945)
assert g.orientation == 180
def test_read_write_properties(tmp_path):
# test that properties are the same after read-write
filename = r"tests/testdata/tide1.dfs1"
ds1 = mikeio.read(filename)
fp = tmp_path / "tide1.dfs1"
ds1.to_dfs(fp)
ds2 = mikeio.read(fp)
assert ds1.geometry == ds2.geometry
def test_read():
filename = r"tests/testdata/random.dfs1"
dfs = mikeio.open(filename)
ds = dfs.read(items=[0])
data = ds[0].to_numpy()
assert data.shape == (100, 3) # time, x
def test_read_item_names():
filename = r"tests/testdata/random.dfs1"
dfs = mikeio.open(filename)
ds = dfs.read(items=["testing water level"])
data = ds[0].to_numpy()
assert data.shape == (100, 3) # time, x
def test_read_time_steps():
filename = r"tests/testdata/random.dfs1"
dfs = mikeio.open(filename)
ds = dfs.read(time=[3, 5])
data = ds[0].to_numpy()
assert data.shape == (2, 3) # time, x
def test_write_some_time_steps_new_file(tmp_path):
fp = tmp_path / "random.dfs1"
dfs = mikeio.open("tests/testdata/random.dfs1")
ds = dfs.read(time=[0, 1, 2, 3, 4, 5])
data = ds[0].to_numpy()
assert data.shape == (6, 3) # time, x
dfs.write(fp, ds)
dfsnew = mikeio.open(fp)
dsnew = dfsnew.read()
assert dsnew["testing water level"].shape == (6, 3)
def test_read_item_names_not_in_dataset_fails():
filename = r"tests/testdata/random.dfs1"
dfs = mikeio.open(filename)
with pytest.raises(Exception):
dfs.read(["NOTAREALVARIABLE"])
def test_read_names_access():
filename = r"tests/testdata/random.dfs1"
dfs = mikeio.open(filename)
res = dfs.read(items=[0])
item_data = res[0].to_numpy()
time = res.time
assert item_data.shape == (100, 3) # time, x
assert len(time) == 100
assert res.items[0].name == "testing water level"
assert res.items[0].type == EUMType.Water_Level
assert res.items[0].unit == EUMUnit.meter
def test_read_start_end_time():
dfs = mikeio.open("tests/testdata/random.dfs1")
ds = dfs.read()
assert dfs.start_time == ds.start_time
assert dfs.end_time == ds.end_time
def test_read_start_end_time_relative_time():
dfs = mikeio.open("tests/testdata/physical_basin_wave_maker_signal.dfs1")
ds = dfs.read()
assert dfs.start_time == ds.start_time
assert dfs.end_time == ds.end_time
def test_get_time_axis_without_reading_data():
dfs0file = r"tests/testdata/random.dfs1"
dfs = mikeio.open(dfs0file)
assert isinstance(dfs.time, pd.DatetimeIndex)
assert len(dfs.time) == 100
def test_get_time_axis_without_reading_data_relative():
dfs0file = r"tests/testdata/physical_basin_wave_maker_signal.dfs1"
dfs = mikeio.open(dfs0file)
assert isinstance(dfs.time, pd.DatetimeIndex) # start time is not correct !
assert len(dfs.time) == 200
def test_select_point_dfs1_to_dfs0(tmp_path):
outfilename = tmp_path / "vu_tide_hourly_p0.dfs0"
ds = mikeio.read("tests/testdata/vu_tide_hourly.dfs1")
assert ds.n_elements > 1
ds_0 = ds.isel(0, axis="space")
assert ds_0.n_elements == 1
ds_0.to_dfs(outfilename)
dsnew = mikeio.read(outfilename)
assert dsnew.n_timesteps == ds.n_timesteps
def test_select_point_and_single_step_dfs1_to_dfs0(tmp_path):
outfilename = tmp_path / "vu_tide_hourly_p0.dfs0"
ds = mikeio.read("tests/testdata/vu_tide_hourly.dfs1")
assert ds.n_elements > 1
ds_0 = ds.isel(0, axis="space")
assert ds_0.n_elements == 1
ds_0_0 = ds_0.isel(0)
assert ds_0_0.n_timesteps == 1
ds_0_0.to_dfs(outfilename)
dsnew = mikeio.read(outfilename)
assert dsnew.n_timesteps == 1
def test_select_point_dfs1_to_dfs0_double(tmp_path):
outfilename = tmp_path / "vu_tide_hourly_p0_dbl.dfs0"
ds = mikeio.read("tests/testdata/vu_tide_hourly.dfs1")
assert ds.n_elements > 1
ds_0 = ds.isel(0, axis="space")
assert ds_0.n_elements == 1
ds_0.to_dfs(outfilename, dtype=np.float64)
dsnew = mikeio.read(outfilename)
assert dsnew.n_timesteps == ds.n_timesteps
def METHOD_NAME():
ds = mikeio.read("tests/testdata/waterlevel_north.dfs1")
da: mikeio.DataArray = ds.North_WL
assert da.geometry.x[-1] == 8800
dai = da.interp(x=0)
assert dai[0].values == pytest.approx(-0.33)
dai = da.interp(x=4000)
assert dai[0].values == pytest.approx(-0.3022272830659693)
dai = da.interp(x=8800)
assert dai[-1].values == pytest.approx(-0.0814)
dai = da.interp(x=8900) # outside the domain
assert np.isnan(dai[-1].values)
dai = da.interp(x=-10) # outside the domain
assert np.isnan(dai[-1].values)
def test_interp_onepoint_dfs1():
ds = mikeio.read("tests/testdata/nx1.dfs1")
assert ds.geometry.nx == 1
with pytest.raises(AssertionError, match="not possible for Grid1D with one point"):
ds[0].interp(x=0)
|
2,146 |
internal paging
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class ExpressRouteServiceProvidersOperations(object):
"""ExpressRouteServiceProvidersOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2018-04-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2018-04-01"
self.config = config
def list(
self, custom_headers=None, raw=False, **operation_config):
"""Gets all the available express route service providers.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of ExpressRouteServiceProvider
:rtype:
~azure.mgmt.network.v2018_04_01.models.ExpressRouteServiceProviderPaged[~azure.mgmt.network.v2018_04_01.models.ExpressRouteServiceProvider]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def METHOD_NAME(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.ExpressRouteServiceProviderPaged(METHOD_NAME, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.ExpressRouteServiceProviderPaged(METHOD_NAME, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/expressRouteServiceProviders'}
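# --- Usage sketch (illustrative) ---
# These operations are normally reached through the generated management
# client rather than instantiated directly; the credentials and subscription
# ID below are placeholders for the msrestazure-era SDK this code targets.
#
#     from azure.common.credentials import ServicePrincipalCredentials
#     from azure.mgmt.network import NetworkManagementClient
#
#     credentials = ServicePrincipalCredentials(client_id="...", secret="...", tenant="...")
#     client = NetworkManagementClient(credentials, "<subscription-id>")
#     for provider in client.express_route_service_providers.list():
#         print(provider.name)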
|
2,147 |
preprocess grid
|
# Copyright 2023 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for pallas-core functionality."""
from __future__ import annotations
from collections.abc import Sequence
import contextlib
import dataclasses
import functools
from typing import Any, Callable, Iterator
from jax._src import core as jax_core
from jax._src import linear_util as lu
from jax._src import state
from jax._src import tree_util
from jax._src import util
from jax._src.interpreters import partial_eval as pe
from jax._src.state import discharge as state_discharge
import jax.numpy as jnp
# TODO(sharadmv): enable type checking
# mypy: ignore-errors
partial = functools.partial
Grid = tuple[int, ...]
split_list = util.split_list
map, unsafe_map = util.safe_map, map
zip, unsafe_zip = util.safe_zip, zip
@dataclasses.dataclass
class GridEnv:
axis_index: Any
axis_size: int
_grid_env_stack: list[tuple[GridEnv, ...]] = []
@contextlib.contextmanager
def grid_env(env: tuple[tuple[Any, int], ...]) -> Iterator[None]:
_grid_env_stack.append(tuple(GridEnv(axis_index, axis_size)
for axis_index, axis_size in env))
try:
yield
finally:
_grid_env_stack.pop()
def current_grid_env() -> tuple[GridEnv, ...] | None:
if not _grid_env_stack:
return None
return _grid_env_stack[-1]
class Mapped:
pass
mapped = Mapped()
@dataclasses.dataclass(frozen=True)
class BlockSpec:
index_map: Callable[..., Any]
block_shape: tuple[int | None, ...]
def compute_index(self, *args):
out = self.index_map(*args)
if not isinstance(out, tuple):
out = (out,)
return out
@dataclasses.dataclass(frozen=True)
class BlockMapping:
block_shape: tuple[Mapped | int, ...]
index_map_jaxpr: jax_core.ClosedJaxpr
def compute_start_indices(self, loop_idx, *args):
discharged_jaxpr, discharged_consts = state_discharge.discharge_state(
self.index_map_jaxpr.jaxpr, self.index_map_jaxpr.consts
)
jaxpr = jax_core.ClosedJaxpr(discharged_jaxpr, discharged_consts)
block_indices_and_rest = jax_core.jaxpr_as_fun(jaxpr)(*loop_idx, *args)
# Since we're passing in `Ref`s potentially, we need to split out their
# updated values since we only care about the return values.
block_indices, _ = split_list(block_indices_and_rest,
[len(self.block_shape)])
return tuple(i if b is mapped else b * i
for b, i in zip(self.block_shape, block_indices))
replace = dataclasses.replace
@dataclasses.dataclass(frozen=True)
class GridMapping:
grid: tuple[int, ...]
block_mappings: tuple[BlockMapping | None, ...]
mapped_dims: tuple[int, ...]
num_index_operands: int
replace = dataclasses.replace
def METHOD_NAME(grid: Grid | int | None) -> Grid:
if grid is None:
return ()
if isinstance(grid, int):
return (grid,)
return grid
def _convert_block_spec_to_block_mapping(
in_avals: list[jax_core.ShapedArray], block_spec: BlockSpec | None,
) -> BlockMapping | None:
if block_spec is _no_block_spec:
return None
block_shape = tuple(
mapped if s is None else s for s in block_spec.block_shape)
jaxpr, _, consts = pe.trace_to_jaxpr_dynamic(
lu.wrap_init(block_spec.compute_index), in_avals)
return BlockMapping(block_shape, jax_core.ClosedJaxpr(jaxpr, consts))
def _compute_shape_from_block_spec(block_spec: BlockSpec | None,
arg_shape: tuple[int, ...]
) -> tuple[int, ...]:
if block_spec is _no_block_spec:
return arg_shape
return tuple(s for s in block_spec.block_shape if s is not None)
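# Worked example (editorial note): with block_shape=(None, 128), the None
# dimension is squeezed out, so an argument of shape (8, 1024) yields a ref
# shape of (128,); when the spec is _no_block_spec the argument shape is
# returned unchanged.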
def _get_ref_avals(grid, in_avals, in_specs, out_avals, out_specs):
if grid is None:
in_specs = [None] * len(in_avals)
out_specs = [None] * len(out_avals)
in_ref_avals = [state.shaped_array_ref(arg.shape, arg.dtype)
for arg in in_avals]
out_ref_avals = [state.shaped_array_ref(arg.shape, arg.dtype)
for arg in out_avals]
else:
in_ref_avals = [
state.shaped_array_ref(
_compute_shape_from_block_spec(
block_spec, arg.shape), arg.dtype)
for block_spec, arg in zip(in_specs, in_avals)]
out_ref_avals = [
state.shaped_array_ref(
_compute_shape_from_block_spec(
block_spec, arg.shape), arg.dtype)
for block_spec, arg in zip(out_specs, out_avals)]
return in_specs, in_ref_avals, out_specs, out_ref_avals
_no_block_spec = object()
@dataclasses.dataclass(init=False)
class GridSpec:
grid: Grid
in_specs: Sequence[BlockSpec | None] | None
out_specs: tuple[BlockSpec | None, ...] | None
def __init__(
self,
grid: Grid | None = None,
in_specs: Sequence[BlockSpec | None] | None = None,
out_specs: BlockSpec | Sequence[BlockSpec | None] | None = None,
):
if grid is None:
if in_specs is not None:
raise ValueError("Cannot specify `in_specs` with a `None` grid.")
if out_specs is not None:
raise ValueError("Cannot specify `out_specs` with a `None` grid.")
self.grid = METHOD_NAME(grid)
self.in_specs = in_specs
if out_specs is not None and not isinstance(out_specs, (tuple, list)):
out_specs = (out_specs,)
if out_specs is not None and not isinstance(out_specs, tuple):
out_specs = tuple(out_specs)
self.out_specs = out_specs
def get_grid_mapping(
self, in_avals, in_tree, out_avals, out_tree
) -> tuple[tuple[jax_core.AbstractValue, ...], GridMapping]:
if self.in_specs is not None:
in_specs = self.in_specs
in_spec_tree = tree_util.tree_structure(tuple(in_specs))
if in_spec_tree != in_tree:
raise ValueError(
"Pytree specs for arguments and `in_specs` must match: "
f"{in_tree} vs. {in_spec_tree}")
else:
in_specs = [_no_block_spec] * len(in_avals)
if self.out_specs is not None:
out_specs = self.out_specs
out_spec_tree = tree_util.tree_structure(out_specs)
if out_spec_tree != out_tree:
raise ValueError(
"Pytree specs for `out_shape` and `out_specs` must match: "
f"{out_tree} vs. {out_spec_tree}")
else:
out_specs = [_no_block_spec] * len(out_avals)
flat_in_specs = tree_util.tree_leaves(in_specs)
flat_out_specs = tree_util.tree_leaves(out_specs)
in_specs, in_ref_avals, out_specs, out_ref_avals = _get_ref_avals(
self.grid, in_avals, flat_in_specs, out_avals,
flat_out_specs)
grid_avals = [jax_core.ShapedArray((), jnp.dtype("int32"))] * len(self.grid)
in_block_mappings = map(
partial(_convert_block_spec_to_block_mapping, grid_avals), in_specs)
out_block_mappings = map(
partial(_convert_block_spec_to_block_mapping, grid_avals), out_specs)
grid_mapping = GridMapping(
self.grid, (*in_block_mappings, *out_block_mappings), (),
num_index_operands=0)
jaxpr_in_avals = tree_util.tree_unflatten(in_tree, in_ref_avals)
jaxpr_out_avals = tree_util.tree_unflatten(out_tree, out_ref_avals)
return (*jaxpr_in_avals, *jaxpr_out_avals), grid_mapping
|
2,148 |
test sparse sample
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for NovoGrad Optimizer."""
import numpy as np
import pytest
import tensorflow as tf
from tensorflow_addons.optimizers import NovoGrad
def run_dense_sample(iterations, expected, optimizer, dtype):
var_0 = tf.Variable([1.0, 2.0], dtype=dtype)
var_1 = tf.Variable([3.0, 4.0], dtype=dtype)
grad_0 = tf.constant([0.1, 0.2], dtype=dtype)
grad_1 = tf.constant([0.3, 0.4], dtype=dtype)
grads_and_vars = list(zip([grad_0, grad_1], [var_0, var_1]))
for _ in range(iterations):
optimizer.apply_gradients(grads_and_vars)
np.testing.assert_allclose(var_0.read_value(), expected[0], atol=2e-4)
np.testing.assert_allclose(var_1.read_value(), expected[1], atol=2e-4)
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
@pytest.mark.parametrize("dtype", ["float32", "float64"])
def test_dense_sample(dtype):
run_dense_sample(
iterations=1,
expected=[[0.9552786425, 1.9105572849], [2.9400000012, 3.9200000016]],
optimizer=NovoGrad(lr=0.1, epsilon=1e-8),
dtype=dtype,
)
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
@pytest.mark.parametrize("dtype", ["float32", "float64"])
def test_dense_sample_with_weight_decay(dtype):
run_dense_sample(
iterations=1,
expected=[[0.945278642, 1.8905572849], [2.9100000012, 3.8800000016]],
optimizer=NovoGrad(lr=0.1, weight_decay=0.1, epsilon=1e-8),
dtype=dtype,
)
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
@pytest.mark.parametrize("dtype", ["float32", "float64"])
def test_dense_sample_with_grad_averaging(dtype):
run_dense_sample(
iterations=2,
expected=[[0.9105572849, 1.8211145698], [2.8800000024, 3.8400000032]],
optimizer=NovoGrad(lr=0.1, grad_averaging=True, epsilon=1e-8),
dtype=dtype,
)
def run_sparse_sample(iterations, expected, optimizer, dtype):
var_0 = tf.Variable([1.0, 2.0], dtype=dtype)
var_1 = tf.Variable([3.0, 4.0], dtype=dtype)
grad_0 = tf.IndexedSlices(
tf.constant([0.1], dtype=dtype), tf.constant([0]), tf.constant([2])
)
grad_1 = tf.IndexedSlices(
tf.constant([0.4], dtype=dtype), tf.constant([1]), tf.constant([2])
)
grads_and_vars = list(zip([grad_0, grad_1], [var_0, var_1]))
for _ in range(iterations):
optimizer.apply_gradients(grads_and_vars)
np.testing.assert_allclose(var_0.read_value(), expected[0])
np.testing.assert_allclose(var_1.read_value(), expected[1])
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
@pytest.mark.parametrize("dtype", ["float32", "float64"])
def METHOD_NAME(dtype):
run_sparse_sample(
iterations=2,
expected=[[0.71, 2.0], [3.0, 3.71]],
optimizer=NovoGrad(lr=0.1, epsilon=1e-8),
dtype=dtype,
)
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
@pytest.mark.parametrize("dtype", ["float32", "float64"])
def test_sparse_sample_with_weight_decay(dtype):
run_sparse_sample(
iterations=2,
expected=[[0.6821, 2.0], [3.0, 3.5954]],
optimizer=NovoGrad(lr=0.1, weight_decay=0.1, epsilon=1e-8),
dtype=dtype,
)
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
@pytest.mark.parametrize("dtype", ["float32", "float64"])
def test_sparse_sample_with_grad_averaging(dtype):
run_sparse_sample(
iterations=2,
expected=[[0.8, 2.0], [3.0, 3.8]],
optimizer=NovoGrad(lr=0.1, grad_averaging=True, epsilon=1e-8),
dtype=dtype,
)
def test_fit_simple_linear_model():
np.random.seed(0x2020)
tf.random.set_seed(0x2020)
x = np.random.standard_normal((100000, 3))
w = np.random.standard_normal((3, 1))
y = np.dot(x, w) + np.random.standard_normal((100000, 1)) * 1e-5
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Dense(input_shape=(3,), units=1))
model.compile(NovoGrad(), loss="mse")
model.fit(x, y, epochs=2)
x = np.random.standard_normal((100, 3))
y = np.dot(x, w)
predicted = model.predict(x)
max_abs_diff = np.max(np.abs(predicted - y))
assert max_abs_diff < 1e-2
def test_get_config():
opt = NovoGrad(lr=1e-4, weight_decay=0.0, grad_averaging=False)
config = opt.get_config()
assert config["learning_rate"] == 1e-4
assert config["weight_decay"] == 0.0
assert config["grad_averaging"] is False
def test_serialization():
optimizer = NovoGrad(lr=1e-4, weight_decay=0.0, grad_averaging=False)
config = tf.keras.optimizers.serialize(optimizer)
new_optimizer = tf.keras.optimizers.deserialize(config)
assert new_optimizer.get_config() == optimizer.get_config()
|
2,149 |
image verify
|
#!/usr/bin/env python3
#
# Tests for shrinking images
#
# Copyright (c) 2016-2017 Parallels International GmbH
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os, random, iotests, struct, qcow2, sys
from iotests import qemu_img, qemu_io, image_size
test_img = os.path.join(iotests.test_dir, 'test.img')
check_img = os.path.join(iotests.test_dir, 'check.img')
def size_to_int(str):
suff = ['B', 'K', 'M', 'G', 'T']
return int(str[:-1]) * 1024**suff.index(str[-1:])
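# Worked example (editorial note): size_to_int('16M') == 16 * 1024**2 and
# size_to_int('512B') == 512, since the suffix position selects the power of 1024.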
class ShrinkBaseClass(iotests.QMPTestCase):
image_len = '128M'
shrink_size = '10M'
chunk_size = '16M'
refcount_bits = '16'
def __qcow2_check(self, filename):
entry_bits = 3
entry_size = 1 << entry_bits
l1_mask = 0x00fffffffffffe00
div_roundup = lambda n, d: (n + d - 1) // d
def split_by_n(data, n):
for x in range(0, len(data), n):
yield struct.unpack('>Q', data[x:x + n])[0] & l1_mask
def check_l1_table(h, l1_data):
l1_list = list(split_by_n(l1_data, entry_size))
real_l1_size = div_roundup(h.size,
1 << (h.cluster_bits*2 - entry_size))
used, unused = l1_list[:real_l1_size], l1_list[real_l1_size:]
self.assertTrue(len(used) != 0, "Verifying l1 table content")
self.assertFalse(any(unused), "Verifying l1 table content")
def check_reftable(fd, h, reftable):
for offset in split_by_n(reftable, entry_size):
if offset != 0:
fd.seek(offset)
cluster = fd.read(1 << h.cluster_bits)
self.assertTrue(any(cluster), "Verifying reftable content")
with open(filename, "rb") as fd:
h = qcow2.QcowHeader(fd)
fd.seek(h.l1_table_offset)
l1_table = fd.read(h.l1_size << entry_bits)
fd.seek(h.refcount_table_offset)
reftable = fd.read(h.refcount_table_clusters << h.cluster_bits)
check_l1_table(h, l1_table)
check_reftable(fd, h, reftable)
def __raw_check(self, filename):
pass
image_check = {
'qcow2' : __qcow2_check,
'raw' : __raw_check
}
def setUp(self):
if iotests.imgfmt == 'raw':
qemu_img('create', '-f', iotests.imgfmt, test_img, self.image_len)
qemu_img('create', '-f', iotests.imgfmt, check_img,
self.shrink_size)
else:
qemu_img('create', '-f', iotests.imgfmt,
'-o', 'cluster_size=' + self.cluster_size +
',refcount_bits=' + self.refcount_bits,
test_img, self.image_len)
qemu_img('create', '-f', iotests.imgfmt,
'-o', 'cluster_size=%s'% self.cluster_size,
check_img, self.shrink_size)
qemu_io('-c', 'write -P 0xff 0 ' + self.shrink_size, check_img)
def tearDown(self):
os.remove(test_img)
os.remove(check_img)
def METHOD_NAME(self):
self.assertEqual(image_size(test_img), image_size(check_img),
"Verifying image size")
self.image_check[iotests.imgfmt](self, test_img)
if iotests.imgfmt == 'raw':
return
self.assertEqual(qemu_img('check', test_img), 0,
"Verifying image corruption")
def test_empty_image(self):
qemu_img('resize', '-f', iotests.imgfmt, '--shrink', test_img,
self.shrink_size)
self.assertEqual(
qemu_io('-c', 'read -P 0x00 %s'%self.shrink_size, test_img),
qemu_io('-c', 'read -P 0x00 %s'%self.shrink_size, check_img),
"Verifying image content")
self.METHOD_NAME()
def test_sequential_write(self):
for offs in range(0, size_to_int(self.image_len),
size_to_int(self.chunk_size)):
qemu_io('-c', 'write -P 0xff %d %s' % (offs, self.chunk_size),
test_img)
qemu_img('resize', '-f', iotests.imgfmt, '--shrink', test_img,
self.shrink_size)
self.assertEqual(qemu_img("compare", test_img, check_img), 0,
"Verifying image content")
self.METHOD_NAME()
def test_random_write(self):
offs_list = list(range(0, size_to_int(self.image_len),
size_to_int(self.chunk_size)))
random.shuffle(offs_list)
for offs in offs_list:
qemu_io('-c', 'write -P 0xff %d %s' % (offs, self.chunk_size),
test_img)
qemu_img('resize', '-f', iotests.imgfmt, '--shrink', test_img,
self.shrink_size)
self.assertEqual(qemu_img("compare", test_img, check_img), 0,
"Verifying image content")
self.METHOD_NAME()
class TestShrink512(ShrinkBaseClass):
image_len = '3M'
shrink_size = '1M'
chunk_size = '256K'
cluster_size = '512'
refcount_bits = '64'
class TestShrink64K(ShrinkBaseClass):
cluster_size = '64K'
class TestShrink1M(ShrinkBaseClass):
cluster_size = '1M'
refcount_bits = '1'
ShrinkBaseClass = None
if __name__ == '__main__':
iotests.main(supported_fmts=['raw', 'qcow2'],
supported_protocols=['file'])
|
2,150 |
get file
|
import struct
from dataclasses import dataclass
from enum import IntEnum
from pathlib import Path
from typing import Optional
import numpy as np
import numpy.typing as npt
from ..utils import Buffer, FileBuffer
def make_texture(indices, palette, use_alpha: bool = False) -> npt.NDArray[np.float32]:
new_palette = np.full((len(palette), 4), 255, dtype=np.uint8)
new_palette[:, :3] = palette
colors: np.ndarray = new_palette[indices]
if use_alpha:
transparency_key = new_palette[-1]
alpha = np.where((colors == transparency_key).all(axis=1))[0]
colors[alpha] = [0, 0, 0, 0]
return np.divide(colors.astype(np.float32), 255)
def flip_texture(pixels: npt.NDArray, width: int, height: int) -> npt.NDArray[np.float32]:
pixels = pixels.reshape((height, width, 4))
pixels = np.flip(pixels, 0)
pixels = pixels.reshape((-1, 4))
return pixels
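# Usage sketch (editorial note; variable names are illustrative): the two
# helpers are chained the same way MipTex.load_texture does below:
#
#   rgba = make_texture(indices, palette, use_alpha=name.startswith('{'))
#   rgba = flip_texture(rgba, width, height)   # bottom-up rows -> top-down
#
# where `indices` is a flat uint8 array of width*height palette indices and
# `palette` is a (256, 3) uint8 array.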
class WadEntryType(IntEnum):
PALETTE = 64
COLORMAP = 65
QPIC = 66
MIPTEX = 67
RAW = 68
COLORMAP2 = 69
FONT = 70
class WadLump:
def __init__(self, buffer: Buffer):
self.buffer = buffer
self._entry_offset = buffer.tell()
class MipTex(WadLump):
def __init__(self, buffer: Buffer):
super().__init__(buffer)
self.name = ''
self.width, self.height = 0, 0
self.offsets = []
self.read(buffer)
def read(self, handle):
self.name = handle.read(16)
self.name = self.name[:self.name.index(b'\x00')].decode().upper()
self.width, self.height = struct.unpack('II', handle.read(8))
self.offsets = struct.unpack('4I', handle.read(16))
def load_texture(self, texture_mip: int = 0) -> npt.NDArray:
handle = self.buffer
has_alpha = self.name.startswith('{')
handle.seek(self._entry_offset + self.offsets[texture_mip])
texture_size = (self.width * self.height) >> (texture_mip * 2)
texture_indices = np.frombuffer(handle.read(texture_size), np.uint8)
handle.seek(self._entry_offset + self.offsets[-1] + ((self.width * self.height) >> (3 * 2)))
assert handle.read(2) == b'\x00\x01', 'Invalid palette start anchor'
texture_palette = np.frombuffer(handle.read(256 * 3), np.uint8).reshape((-1, 3))
assert handle.read(2) == b'\x00\x00', 'Invalid palette end anchor'
texture_data = make_texture(texture_indices, texture_palette, has_alpha)
texture_data = flip_texture(texture_data, self.width >> texture_mip, self.height >> texture_mip)
return texture_data
class Font(MipTex):
def __init__(self, buffer: Buffer):
super().__init__(buffer)
self.row_count = 0
self.row_height = 0
self.char_info = []
def read(self, handle):
self.width, self.height = struct.unpack('II', handle.read(8))
self.row_count, self.row_height = struct.unpack('II', handle.read(8))
self.char_info = [struct.unpack('HH', handle.read(4)) for _ in range(256)]
self.offsets = [handle.tell() - self._entry_offset]
self.load_texture(0)
def load_texture(self, texture_mip=0):
handle = self.buffer
has_alpha = self.name.startswith('{')
offset = self.offsets[0]
handle.seek(self._entry_offset + offset)
texture_size = (256 * self.height)
texture_indices = np.frombuffer(handle.read(texture_size), np.uint8)
flags = struct.unpack('H', handle.read(2))[0]
texture_palette = np.frombuffer(handle.read(256 * 3), np.uint8).reshape((-1, 3))
# assert handle.read(2) == b'\x00\x00', 'Invalid palette end anchor'
texture_data = make_texture(texture_indices, texture_palette, has_alpha)
texture_data = flip_texture(texture_data, 256, self.height)
return texture_data
@dataclass(slots=True)
class WadEntry:
offset: int
size: int
uncompressed_size: int
type: WadEntryType
compression: int
name: str
@classmethod
def from_buffer(cls, buffer: Buffer) -> 'WadEntry':
(offset,
size,
uncompressed_size,
entry_type,
compression,
name) = buffer.read_fmt("IIIBBxx16s")
name = name[:name.index(b'\x00')].decode().upper()
return cls(offset, size, uncompressed_size, WadEntryType(entry_type), compression, name)
def __repr__(self):
return f'<WadEntry "{self.name}" type:{self.type.name} size:{self.size}>'
class WadFile:
def __init__(self, file: Path):
self.buffer = FileBuffer(file)
self.version = self.buffer.read(4)
self.count, self.offset = struct.unpack('II', self.buffer.read(8))
assert self.version in (b'WAD3', b'WAD4')
self.buffer.seek(self.offset)
self.entries = {}
for _ in range(self.count):
entry = WadEntry.from_buffer(self.buffer)
self.entries[entry.name] = entry
self._entry_cache = {}
def METHOD_NAME(self, name: str) -> Optional[WadLump]:
name = name.upper()
if name in self._entry_cache:
return self._entry_cache[name]
if name in self.entries:
entry = self.entries[name]
self.buffer.seek(entry.offset)
if entry.type == WadEntryType.MIPTEX:
entry = self._entry_cache[entry.name] = MipTex(self.buffer)
elif entry.type == WadEntryType.FONT:
entry = self._entry_cache[entry.name] = Font(self.buffer)
return entry
return None
|
2,151 |
tear down
|
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import ctypes
import time
import unittest
import s1ap_types
from integ_tests.s1aptests import s1ap_wrapper
class TestAttachEsmInformationWrongApn(unittest.TestCase):
def setUp(self):
self._s1ap_wrapper = s1ap_wrapper.TestWrapper()
def METHOD_NAME(self):
self._s1ap_wrapper.cleanup()
def test_attach_esm_information_wrong_apn(self):
""" Testing of sending wrong APN in Esm Information procedure """
num_ues = 1
self._s1ap_wrapper.configUEDevice_ues_same_imsi(num_ues)
print("************************* sending Attach Request for ue-id : 1")
attach_req = s1ap_types.ueAttachRequest_t()
attach_req.ue_Id = 1
sec_ctxt = s1ap_types.TFW_CREATE_NEW_SECURITY_CONTEXT
id_type = s1ap_types.TFW_MID_TYPE_IMSI
eps_type = s1ap_types.TFW_EPS_ATTACH_TYPE_EPS_ATTACH
attach_req.mIdType = id_type
attach_req.epsAttachType = eps_type
attach_req.useOldSecCtxt = sec_ctxt
# enabling ESM Information transfer flag
attach_req.eti.pres = 1
attach_req.eti.esm_info_transfer_flag = 1
print("Sending Attach Request ue-id", attach_req.ue_Id)
self._s1ap_wrapper._s1_util.issue_cmd(
s1ap_types.tfwCmd.UE_ATTACH_REQUEST, attach_req,
)
response = self._s1ap_wrapper.s1_util.get_response()
assert response.msg_type == s1ap_types.tfwCmd.UE_AUTH_REQ_IND.value
print("Received auth req ind ")
auth_res = s1ap_types.ueAuthResp_t()
auth_res.ue_Id = 1
sqn_recvd = s1ap_types.ueSqnRcvd_t()
sqn_recvd.pres = 0
auth_res.sqnRcvd = sqn_recvd
print("Sending Auth Response ue-id", auth_res.ue_Id)
self._s1ap_wrapper._s1_util.issue_cmd(
s1ap_types.tfwCmd.UE_AUTH_RESP, auth_res,
)
response = self._s1ap_wrapper.s1_util.get_response()
assert response.msg_type == s1ap_types.tfwCmd.UE_SEC_MOD_CMD_IND.value
print("Received Security Mode Command ue-id", auth_res.ue_Id)
time.sleep(1)
sec_mode_complete = s1ap_types.ueSecModeComplete_t()
sec_mode_complete.ue_Id = 1
self._s1ap_wrapper._s1_util.issue_cmd(
s1ap_types.tfwCmd.UE_SEC_MOD_COMPLETE, sec_mode_complete,
)
# Esm Information Request indication
print(
"Received Esm Information Request ue-id", sec_mode_complete.ue_Id,
)
response = self._s1ap_wrapper.s1_util.get_response()
assert response.msg_type == s1ap_types.tfwCmd.UE_ESM_INFORMATION_REQ.value
esm_info_req = response.cast(s1ap_types.ueEsmInformationReq_t)
# Sending Esm Information Response
print(
"Sending Esm Information Response ue-id", sec_mode_complete.ue_Id,
)
esm_info_response = s1ap_types.ueEsmInformationRsp_t()
esm_info_response.ue_Id = 1
esm_info_response.tId = esm_info_req.tId
esm_info_response.pdnAPN_pr.pres = 1
s = "oai"
esm_info_response.pdnAPN_pr.len = len(s)
esm_info_response.pdnAPN_pr.pdn_apn = (ctypes.c_ubyte * 100)(
*[ctypes.c_ubyte(ord(c)) for c in s[:100]],
)
self._s1ap_wrapper._s1_util.issue_cmd(
s1ap_types.tfwCmd.UE_ESM_INFORMATION_RSP, esm_info_response,
)
response = self._s1ap_wrapper.s1_util.get_response()
assert response.msg_type == s1ap_types.tfwCmd.UE_ATTACH_REJECT_IND.value
print("*** Running UE detach ***")
# Now detach the UE
detach_req = s1ap_types.uedetachReq_t()
detach_req.ue_Id = 1
detach_req.ueDetType = (
s1ap_types.ueDetachType_t.UE_SWITCHOFF_DETACH.value
)
self._s1ap_wrapper._s1_util.issue_cmd(
s1ap_types.tfwCmd.UE_DETACH_REQUEST, detach_req,
)
# Wait for UE context release command
response = self._s1ap_wrapper.s1_util.get_response()
assert response.msg_type == s1ap_types.tfwCmd.UE_CTX_REL_IND.value
if __name__ == "__main__":
unittest.main()
|
2,152 |
exchange bind
|
# twisted is optional and self-contained in this module.
# We don't want to force it as a dependency but that means we also can't test it with type-checkers given the current setup.
from _typeshed import Incomplete
from typing import Generic, NamedTuple, TypeVar
import pika.connection
from pika.adapters.utils import nbio_interface
from twisted.internet.base import DelayedCall # type: ignore[import] # pyright: ignore[reportMissingImports]
from twisted.internet.defer import Deferred, DeferredQueue # type: ignore[import] # pyright: ignore[reportMissingImports]
from twisted.internet.interfaces import ITransport # type: ignore[import] # pyright: ignore[reportMissingImports]
from twisted.internet.protocol import Protocol # type: ignore[import] # pyright: ignore[reportMissingImports]
from twisted.python.failure import Failure # type: ignore[import] # pyright: ignore[reportMissingImports]
_T = TypeVar("_T")
LOGGER: Incomplete
class ClosableDeferredQueue(DeferredQueue[_T], Generic[_T]): # pyright: ignore[reportUntypedBaseClass]
closed: Failure | BaseException | None
def __init__(self, size: Incomplete | None = ..., backlog: Incomplete | None = ...) -> None: ...
# Returns a Deferred with an error if fails. None if success
def put(self, obj: _T) -> Deferred[Failure | BaseException] | None: ... # type: ignore[override]
def get(self) -> Deferred[Failure | BaseException | _T]: ... # type: ignore[override]
pending: Incomplete
def close(self, reason: BaseException | None) -> None: ...
class ReceivedMessage(NamedTuple):
channel: Incomplete
method: Incomplete
properties: Incomplete
body: Incomplete
class TwistedChannel:
on_closed: Deferred[Incomplete | Failure | BaseException | None]
def __init__(self, channel) -> None: ...
@property
def channel_number(self): ...
@property
def connection(self): ...
@property
def is_closed(self): ...
@property
def is_closing(self): ...
@property
def is_open(self): ...
@property
def flow_active(self): ...
@property
def consumer_tags(self): ...
def callback_deferred(self, deferred, replies) -> None: ...
def add_on_return_callback(self, callback): ...
def basic_ack(self, delivery_tag: int = ..., multiple: bool = ...): ...
def basic_cancel(self, consumer_tag: str = ...) -> Deferred[Incomplete | Failure | BaseException | None]: ...
def basic_consume(
self,
queue,
auto_ack: bool = ...,
exclusive: bool = ...,
consumer_tag: Incomplete | None = ...,
arguments: Incomplete | None = ...,
) -> Deferred[Incomplete | Failure | BaseException]: ...
def basic_get(self, queue, auto_ack: bool = ...) -> Deferred[Incomplete | Failure | BaseException | None]: ...
def basic_nack(self, delivery_tag: Incomplete | None = ..., multiple: bool = ..., requeue: bool = ...): ...
def basic_publish(
self, exchange, routing_key, body, properties: Incomplete | None = ..., mandatory: bool = ...
) -> Deferred[Incomplete | Failure | BaseException]: ...
def basic_qos(
self, prefetch_size: int = ..., prefetch_count: int = ..., global_qos: bool = ...
) -> Deferred[Incomplete | Failure | BaseException | None]: ...
def basic_reject(self, delivery_tag, requeue: bool = ...): ...
def basic_recover(self, requeue: bool = ...) -> Deferred[Incomplete | Failure | BaseException | None]: ...
def close(self, reply_code: int = ..., reply_text: str = ...): ...
def confirm_delivery(self) -> Deferred[Incomplete | None]: ...
def METHOD_NAME(
self, destination, source, routing_key: str = ..., arguments: Incomplete | None = ...
) -> Deferred[Incomplete | Failure | BaseException | None]: ...
def exchange_declare(
self,
exchange,
exchange_type=...,
passive: bool = ...,
durable: bool = ...,
auto_delete: bool = ...,
internal: bool = ...,
arguments: Incomplete | None = ...,
) -> Deferred[Incomplete | Failure | BaseException | None]: ...
def exchange_delete(
self, exchange: Incomplete | None = ..., if_unused: bool = ...
) -> Deferred[Incomplete | Failure | BaseException | None]: ...
def exchange_unbind(
self,
destination: Incomplete | None = ...,
source: Incomplete | None = ...,
routing_key: str = ...,
arguments: Incomplete | None = ...,
) -> Deferred[Incomplete | Failure | BaseException | None]: ...
def flow(self, active) -> Deferred[Incomplete | Failure | BaseException | None]: ...
def open(self): ...
def queue_bind(
self, queue, exchange, routing_key: Incomplete | None = ..., arguments: Incomplete | None = ...
) -> Deferred[Incomplete | Failure | BaseException | None]: ...
def queue_declare(
self,
queue,
passive: bool = ...,
durable: bool = ...,
exclusive: bool = ...,
auto_delete: bool = ...,
arguments: Incomplete | None = ...,
) -> Deferred[Incomplete | Failure | BaseException | None]: ...
def queue_delete(
self, queue, if_unused: bool = ..., if_empty: bool = ...
) -> Deferred[Incomplete | Failure | BaseException | None]: ...
def queue_purge(self, queue) -> Deferred[Incomplete | Failure | BaseException | None]: ...
def queue_unbind(
self, queue, exchange: Incomplete | None = ..., routing_key: Incomplete | None = ..., arguments: Incomplete | None = ...
) -> Deferred[Incomplete | Failure | BaseException | None]: ...
def tx_commit(self) -> Deferred[Incomplete | Failure | BaseException | None]: ...
def tx_rollback(self) -> Deferred[Incomplete | Failure | BaseException | None]: ...
def tx_select(self) -> Deferred[Incomplete | Failure | BaseException | None]: ...
class _TwistedConnectionAdapter(pika.connection.Connection):
def __init__(self, parameters, on_open_callback, on_open_error_callback, on_close_callback, custom_reactor) -> None: ...
def connection_made(self, transport: ITransport) -> None: ...
def connection_lost(self, error: Exception) -> None: ...
def data_received(self, data) -> None: ...
class TwistedProtocolConnection(Protocol): # pyright: ignore[reportUntypedBaseClass]
ready: Deferred[None] | None
closed: Deferred[None] | Failure | BaseException | None
def __init__(self, parameters: Incomplete | None = ..., custom_reactor: Incomplete | None = ...) -> None: ...
def channel(self, channel_number: Incomplete | None = ...): ...
@property
def is_open(self): ...
@property
def is_closed(self): ...
def close(self, reply_code: int = ..., reply_text: str = ...) -> Deferred[None] | Failure | BaseException | None: ...
def dataReceived(self, data) -> None: ...
def connectionLost(self, reason: Failure | BaseException = ...) -> None: ...
def makeConnection(self, transport: ITransport) -> None: ...
def connectionReady(self): ...
class _TimerHandle(nbio_interface.AbstractTimerReference):
def __init__(self, handle: DelayedCall) -> None: ...
def cancel(self) -> None: ...
|
2,153 |
test condition fails on timeout
|
# A part of NonVisual Desktop Access (NVDA)
# This file is covered by the GNU General Public License.
# See the file COPYING for more details.
# Copyright (C) 2022 NV Access Limited.
"""Unit tests for the blockUntilConditionMet submodule.
"""
from typing import (
Any,
Callable,
Type,
)
import unittest
from unittest.mock import patch
from utils.blockUntilConditionMet import blockUntilConditionMet
class _FakeTimer():
"""
Used to simulate the passage of time.
Patches sleeping and getting the current time,
so that the module under test is not dependent on real world time.
"""
POLL_INTERVAL = 0.1
def __init__(self) -> None:
self._fakeTime: float = 0.0
def sleep(self, secs: float) -> None:
"""Patch for utils.blockUntilConditionMet.sleep"""
self._fakeTime += secs
def time(self) -> float:
"""Patch for utils.blockUntilConditionMet.timer"""
return self._fakeTime
def getValue(self) -> float:
"""Used to test the getValue parameter of utils.blockUntilConditionMet.blockUntilConditionMet"""
return self.time()
def createShouldStopEvaluator(self, succeedAfterSeconds: float) -> Callable[[Any], bool]:
"""Used to test the shouldStopEvaluator parameter of utils.blockUntilConditionMet.blockUntilConditionMet"""
def _shouldStopEvaluator(_value: Any) -> bool:
return self._fakeTime >= succeedAfterSeconds
return _shouldStopEvaluator
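# Editorial sketch: once Test_blockUntilConditionMet_Timer.setUp patches sleep
# and timer with this fake, blockUntilConditionMet is expected to poll roughly as
#
#   while not shouldStopEvaluator(getValue()) and timer() < deadline:
#       sleep(intervalBetweenSeconds)   # advances _fakeTime, not real time
#
# so the fake timer alone decides how much "time" each poll consumes.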
class _Timer_SlowSleep(_FakeTimer):
"""
Adds an extra amount of sleep when sleep is called to simulate
the device taking longer than expected.
"""
def sleep(self, secs: float) -> None:
return super().sleep(secs + 2 * self.POLL_INTERVAL)
class _Timer_SlowGetValue(_FakeTimer):
"""
Adds an extra amount of sleep when getValue is called to simulate
the function taking a significant amount of time.
"""
def getValue(self) -> float:
self._fakeTime += 2 * self.POLL_INTERVAL
return super().getValue()
class _Timer_SlowShouldStop(_FakeTimer):
"""
Adds an extra amount of sleep when shouldStopEvaluator is called to simulate
the function taking a significant amount of time.
"""
def createShouldStopEvaluator(self, succeedAfterSeconds: float) -> Callable[[Any], bool]:
"""Used to test the shouldStopEvaluator parameter of utils.blockUntilConditionMet.blockUntilConditionMet"""
def _shouldStopEvaluator(_value: Any):
self._fakeTime += 2 * self.POLL_INTERVAL
return self._fakeTime >= succeedAfterSeconds
return _shouldStopEvaluator
class Test_blockUntilConditionMet_Timer(unittest.TestCase):
"""
Tests blockUntilConditionMet against a timer, which simulates the passage of time.
Ensures that blockUntilConditionMet succeeds just before timeout, and fails just after timeout.
"""
_TimerClass: Type[_FakeTimer] = _FakeTimer
"""Test suites which inherit from Test_blockUntilConditionMet_Timer will override the TimerClass"""
def setUp(self) -> None:
self._timer = self._TimerClass()
self._sleepPatch = patch("utils.blockUntilConditionMet.sleep", new_callable=lambda: self._timer.sleep)
self._sleepPatch.start()
self._timerPatch = patch("utils.blockUntilConditionMet.timer", new_callable=lambda: self._timer.time)
self._timerPatch.start()
def tearDown(self) -> None:
self._sleepPatch.stop()
self._timerPatch.stop()
def test_condition_succeeds_before_timeout(self):
giveUpAfterSeconds = 5
success, _endTimeOrNone = blockUntilConditionMet(
getValue=self._timer.getValue,
giveUpAfterSeconds=giveUpAfterSeconds,
shouldStopEvaluator=self._timer.createShouldStopEvaluator(
succeedAfterSeconds=giveUpAfterSeconds - _FakeTimer.POLL_INTERVAL
),
intervalBetweenSeconds=_FakeTimer.POLL_INTERVAL,
)
timeElapsed = self._timer.time()
self.assertTrue(
success,
msg=f"Test condition failed unexpectedly due to timeout. Elapsed time: {timeElapsed:.2f}s"
)
self.assertGreater(giveUpAfterSeconds, timeElapsed)
def METHOD_NAME(self):
giveUpAfterSeconds = 5
success, _endTimeOrNone = blockUntilConditionMet(
getValue=self._timer.getValue,
giveUpAfterSeconds=giveUpAfterSeconds,
shouldStopEvaluator=self._timer.createShouldStopEvaluator(
succeedAfterSeconds=giveUpAfterSeconds + _FakeTimer.POLL_INTERVAL
),
intervalBetweenSeconds=_FakeTimer.POLL_INTERVAL,
)
timeElapsed = self._timer.time()
self.assertFalse(
success,
msg=f"Test condition succeeded unexpectedly before timeout. Elapsed time: {timeElapsed:.2f}s"
)
self.assertGreaterEqual(timeElapsed, giveUpAfterSeconds)
class Test_blockUntilConditionMet_Timer_SlowShouldStop(Test_blockUntilConditionMet_Timer):
_TimerClass = _Timer_SlowShouldStop
class Test_blockUntilConditionMet_Timer_SlowGetValue(Test_blockUntilConditionMet_Timer):
_TimerClass = _Timer_SlowGetValue
class Test_blockUntilConditionMet_Timer_SlowSleep(Test_blockUntilConditionMet_Timer):
_TimerClass = _Timer_SlowSleep
class Test_blockUntilConditionMet_general(unittest.TestCase):
def test_lowPollRate_Raises(self):
with self.assertRaises(AssertionError):
blockUntilConditionMet(
getValue=lambda: None,
giveUpAfterSeconds=1,
intervalBetweenSeconds=1 / 1000,
)
|
2,154 |
farm block
|
from __future__ import annotations
from secrets import token_bytes
from typing import Dict, List
from chia.rpc.full_node_rpc_api import FullNodeRpcApi
from chia.rpc.rpc_server import Endpoint, EndpointResult
from chia.simulator.full_node_simulator import FullNodeSimulator
from chia.simulator.simulator_protocol import FarmNewBlockProtocol, GetAllCoinsProtocol, ReorgProtocol
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.types.coin_record import CoinRecord
from chia.types.full_block import FullBlock
from chia.util.bech32m import decode_puzzle_hash
from chia.util.ints import uint32
class SimulatorFullNodeRpcApi(FullNodeRpcApi):
@property
def simulator_api(self) -> FullNodeSimulator:
assert isinstance(self.service.server.api, FullNodeSimulator)
return self.service.server.api
def get_routes(self) -> Dict[str, Endpoint]:
routes = super().get_routes()
routes["/get_all_blocks"] = self.get_all_blocks
routes["/farm_block"] = self.METHOD_NAME
routes["/set_auto_farming"] = self.set_auto_farming
routes["/get_auto_farming"] = self.get_auto_farming
routes["/get_farming_ph"] = self.get_farming_ph
routes["/get_all_coins"] = self.get_all_coins
routes["/get_all_puzzle_hashes"] = self.get_all_puzzle_hashes
routes["/revert_blocks"] = self.revert_blocks
routes["/reorg_blocks"] = self.reorg_blocks
return routes
async def get_all_blocks(self, _request: Dict[str, object]) -> EndpointResult:
all_blocks: List[FullBlock] = await self.simulator_api.get_all_full_blocks()
return {"blocks": [block.to_json_dict() for block in all_blocks]}
async def METHOD_NAME(self, _request: Dict[str, object]) -> EndpointResult:
request_address = str(_request["address"])
guarantee_tx_block = bool(_request.get("guarantee_tx_block", False))
blocks = int(str(_request.get("blocks", 1))) # mypy made me do this
ph = decode_puzzle_hash(request_address)
req = FarmNewBlockProtocol(ph)
cur_height = self.service.blockchain.get_peak_height()
if guarantee_tx_block:
for i in range(blocks): # these can only be tx blocks
await self.simulator_api.farm_new_transaction_block(req)
else:
for i in range(blocks): # these can either be full blocks or tx blocks
await self.simulator_api.farm_new_block(req)
return {"new_peak_height": (cur_height if cur_height is not None else 0) + blocks}
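# Example request handled above (editorial note; the address is a placeholder):
#   {"address": "txch1...", "blocks": 2, "guarantee_tx_block": True}
# farms two transaction blocks to that puzzle hash and returns the new peak height.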
async def set_auto_farming(self, _request: Dict[str, object]) -> EndpointResult:
auto_farm = bool(_request["auto_farm"])
result = await self.simulator_api.update_autofarm_config(auto_farm)
return {"auto_farm_enabled": result}
async def get_auto_farming(self, _request: Dict[str, object]) -> EndpointResult:
return {"auto_farm_enabled": self.simulator_api.auto_farm}
async def get_farming_ph(self, _request: Dict[str, object]) -> EndpointResult:
return {"puzzle_hash": self.simulator_api.bt.farmer_ph.hex()}
async def get_all_coins(self, _request: Dict[str, object]) -> EndpointResult:
p_request = GetAllCoinsProtocol(bool((_request.get("include_spent_coins", False))))
result: List[CoinRecord] = await self.simulator_api.get_all_coins(p_request)
return {"coin_records": [coin_record.to_json_dict() for coin_record in result]}
async def get_all_puzzle_hashes(self, _request: Dict[str, object]) -> EndpointResult:
result = await self.simulator_api.get_all_puzzle_hashes()
return {
"puzzle_hashes": {puzzle_hash.hex(): (amount, num_tx) for (puzzle_hash, (amount, num_tx)) in result.items()}
}
async def revert_blocks(self, _request: Dict[str, object]) -> EndpointResult:
blocks = int(str(_request.get("num_of_blocks", 1))) # number of blocks to revert
all_blocks = bool(_request.get("delete_all_blocks", False)) # revert all blocks
height = self.service.blockchain.get_peak_height()
if height is None:
raise ValueError("No blocks to revert")
new_height = (height - blocks) if not all_blocks else 1
assert new_height >= 1
await self.simulator_api.revert_block_height(uint32(new_height))
return {"new_peak_height": new_height}
async def reorg_blocks(self, _request: Dict[str, object]) -> EndpointResult:
fork_blocks = int(str(_request.get("num_of_blocks_to_rev", 1))) # number of blocks to go back
new_blocks = int(str(_request.get("num_of_new_blocks", 1))) # how many extra blocks should we add
all_blocks = bool(_request.get("revert_all_blocks", False)) # fork all blocks
use_random_seed = bool(_request.get("random_seed", True)) # randomize the seed to differentiate reorgs
random_seed = bytes32(token_bytes(32)) if use_random_seed else None
cur_height = self.service.blockchain.get_peak_height()
if cur_height is None:
raise ValueError("No blocks to revert")
fork_height = (cur_height - fork_blocks) if not all_blocks else 1
new_height = cur_height + new_blocks # any number works as long as its not 0
assert fork_height >= 1 and new_height - 1 >= cur_height
request = ReorgProtocol(uint32(fork_height), uint32(new_height), self.simulator_api.bt.farmer_ph, random_seed)
await self.simulator_api.reorg_from_index_to_new_index(request)
return {"new_peak_height": new_height}
|
2,155 |
transform
|
# (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from datadog_checks.base import OpenMetricsBaseCheckV2
from datadog_checks.base.constants import ServiceCheck
from datadog_checks.dev.testing import requires_py3
from .utils import get_check
pytestmark = [requires_py3]
def test_default_config(aggregator, dd_run_check, mock_http_response):
class Check(OpenMetricsBaseCheckV2):
__NAMESPACE__ = 'test'
def get_default_config(self):
return {'metrics': ['.+'], 'rename_labels': {'foo': 'bar'}}
mock_http_response(
"""
# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.
# TYPE go_memstats_alloc_bytes gauge
go_memstats_alloc_bytes{foo="baz"} 6.396288e+06
"""
)
check = Check('test', {}, [{'openmetrics_endpoint': 'test'}])
dd_run_check(check)
aggregator.assert_metric(
'test.go_memstats_alloc_bytes', 6396288, metric_type=aggregator.GAUGE, tags=['endpoint:test', 'bar:baz']
)
aggregator.assert_all_metrics_covered()
def test_tag_by_endpoint(aggregator, dd_run_check, mock_http_response):
mock_http_response(
"""
# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.
# TYPE go_memstats_alloc_bytes gauge
go_memstats_alloc_bytes{foo="baz"} 6.396288e+06
"""
)
check = get_check({'metrics': ['.+'], 'tag_by_endpoint': False})
dd_run_check(check)
aggregator.assert_metric('test.go_memstats_alloc_bytes', 6396288, metric_type=aggregator.GAUGE, tags=['foo:baz'])
def test_service_check_dynamic_tags(aggregator, dd_run_check, mock_http_response):
mock_http_response(
"""
# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.
# TYPE go_memstats_alloc_bytes gauge
go_memstats_alloc_bytes{foo="baz"} 6.396288e+06
# HELP state Node state
# TYPE state gauge
state{bar="baz"} 3
"""
)
check = get_check(
{'metrics': ['.+', {'state': {'type': 'service_check', 'status_map': {'3': 'ok'}}}], 'tags': ['foo:bar']}
)
dd_run_check(check)
aggregator.assert_metric(
'test.go_memstats_alloc_bytes',
6396288,
metric_type=aggregator.GAUGE,
tags=['endpoint:test', 'foo:bar', 'foo:baz'],
)
aggregator.assert_service_check('test.state', ServiceCheck.OK, tags=['endpoint:test', 'foo:bar'])
aggregator.assert_service_check('test.openmetrics.health', ServiceCheck.OK, tags=['endpoint:test', 'foo:bar'])
aggregator.assert_all_metrics_covered()
assert len(aggregator.service_check_names) == 2
aggregator.reset()
check.set_dynamic_tags('baz:foo')
dd_run_check(check)
aggregator.assert_metric(
'test.go_memstats_alloc_bytes',
6396288,
metric_type=aggregator.GAUGE,
tags=['endpoint:test', 'foo:bar', 'foo:baz', 'baz:foo'],
)
aggregator.assert_service_check('test.state', ServiceCheck.OK, tags=['endpoint:test', 'foo:bar'])
aggregator.assert_service_check('test.openmetrics.health', ServiceCheck.OK, tags=['endpoint:test', 'foo:bar'])
aggregator.assert_all_metrics_covered()
assert len(aggregator.service_check_names) == 2
def test_custom_transformer(aggregator, dd_run_check, mock_http_response):
class Check(OpenMetricsBaseCheckV2):
__NAMESPACE__ = 'test'
def __init__(self, name, init_config, instances):
super().__init__(name, init_config, instances)
self.check_initializations.append(self.configure_additional_transformers)
def configure_transformer_watchdog_mega_miss(self):
method = self.gauge
def METHOD_NAME(metric, sample_data, runtime_data):
for sample, tags, hostname in sample_data:
method('server.watchdog_mega_miss', sample.value, tags=tags, hostname=hostname)
return METHOD_NAME
def configure_additional_transformers(self):
metric = r"^envoy_server_(.+)_watchdog_mega_miss$"
self.scrapers[self.instance['openmetrics_endpoint']].metric_transformer.add_custom_transformer(
metric, self.configure_transformer_watchdog_mega_miss(), pattern=True
)
mock_http_response(
"""
# TYPE envoy_server_worker_0_watchdog_mega_miss counter
envoy_server_worker_0_watchdog_mega_miss{} 1
# TYPE envoy_server_worker_1_watchdog_mega_miss counter
envoy_server_worker_1_watchdog_mega_miss{} 0
"""
)
check = Check('test', {}, [{'openmetrics_endpoint': 'test'}])
dd_run_check(check)
aggregator.assert_metric('test.server.watchdog_mega_miss', metric_type=aggregator.GAUGE, count=2)
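# Editorial note on the custom transformer above: both raw counters matching
# ^envoy_server_(.+)_watchdog_mega_miss$ are re-emitted via self.gauge under the
# single name test.server.watchdog_mega_miss, which is why count=2 is expected.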
|
2,156 |
test context
|
# pylint: disable=redefined-outer-name
from __future__ import annotations
import asyncio
import time
from pathlib import Path
import pytest
import bentoml
from bentoml.testing.utils import async_request
@pytest.mark.asyncio
async def test_api_server_load(host: str):
for _ in range(20):
status, _, _ = await async_request(
"POST",
f"http://{host}/echo_json",
headers={"Content-Type": "application/json"},
data='"hi"',
)
@pytest.mark.asyncio
async def test_api_server_meta(host: str) -> None:
status, _, _ = await async_request("GET", f"http://{host}/")
assert status == 200
status, _, _ = await async_request("GET", f"http://{host}/healthz")
assert status == 200
status, _, _ = await async_request("GET", f"http://{host}/livez")
assert status == 200
status, _, _ = await async_request("GET", f"http://{host}/ping")
assert status == 200
status, _, body = await async_request("GET", f"http://{host}/hello")
assert status == 200
assert b'{"Hello":"World"}' == body
status, _, _ = await async_request("GET", f"http://{host}/docs.json")
assert status == 200
status, _, body = await async_request("GET", f"http://{host}/metrics")
assert status == 200
assert body
status, _, body = await async_request("POST", f"http://{host}//api/v1/with_prefix")
assert status == 404
@pytest.mark.asyncio
async def METHOD_NAME(host: str):
status, _, body = await async_request(
"POST", f"http://{host}/use_context?error=yes"
)
assert status == 400
assert body == b"yes"
@pytest.mark.asyncio
async def test_runner_readiness(host: str) -> None:
timeout = 20
start_time = time.time()
status = ""
while (time.time() - start_time) < timeout:
status, _, _ = await async_request("GET", f"http://{host}/readyz")
await asyncio.sleep(5)
if status == 200:
break
assert status == 200
@pytest.mark.asyncio
async def test_cors(host: str, server_config_file: str) -> None:
ORIGIN = "http://bentoml.ai:8080"
status, headers, body = await async_request(
"OPTIONS",
f"http://{host}/echo_json",
headers={
"Content-Type": "application/json",
"Origin": ORIGIN,
"Access-Control-Request-Method": "POST",
"Access-Control-Request-Headers": "Content-Type",
},
)
# All test configs live under ../configs, but we are only interested in the file name.
fname = Path(server_config_file).name
if fname == "cors_enabled.yml":
assert status == 200
else:
assert status != 200
status, headers, body = await async_request(
"POST",
f"http://{host}/echo_json",
headers={"Content-Type": "application/json", "Origin": ORIGIN},
data='"hi"',
)
if fname == "cors_enabled.yml":
assert status == 200
assert body == b'"hi"'
assert headers["Access-Control-Allow-Origin"] in ("*", ORIGIN)
assert "Content-Length" in headers.get("Access-Control-Expose-Headers", [])
assert "Server" not in headers.get("Access-Control-Expect-Headers", [])
else:
assert status == 200
assert body == b'"hi"'
assert headers.get("Access-Control-Allow-Origin") not in ("*", ORIGIN)
assert "Content-Length" not in headers.get("Access-Control-Expose-Headers", [])
# an origin-mismatch test
if fname == "cors_enabled.yml":
ORIGIN2 = "http://bentoml.ai"
status, headers, body = await async_request(
"OPTIONS",
f"http://{host}/echo_json",
headers={
"Content-Type": "application/json",
"Origin": ORIGIN2,
"Access-Control-Request-Method": "POST",
"Access-Control-Request-Headers": "Content-Type",
},
)
assert status != 200
status, headers, body = await async_request(
"POST",
f"http://{host}/echo_json",
headers={"Content-Type": "application/json", "Origin": ORIGIN2},
data='"hi"',
)
assert status == 200
assert body == b'"hi"'
assert headers.get("Access-Control-Allow-Origin") not in ("*", ORIGIN2)
def test_service_init_checks():
py_model1 = bentoml.picklable_model.get("py_model.case-1.http.e2e").to_runner(
name="invalid"
)
py_model2 = bentoml.picklable_model.get("py_model.case-1.http.e2e").to_runner(
name="invalid"
)
with pytest.raises(ValueError) as excinfo:
_ = bentoml.Service(name="duplicates_runners", runners=[py_model1, py_model2])
assert "Found duplicate name" in str(excinfo.value)
with pytest.raises(AssertionError) as excinfo:
_ = bentoml.Service(name="invalid_model_type", models=[1])
assert "Service models list can only" in str(excinfo.value)
def test_dunder_string():
runner = bentoml.picklable_model.get("py_model.case-1.http.e2e").to_runner()
svc = bentoml.Service(name="dunder_string", runners=[runner])
assert (
str(svc)
== 'bentoml.Service(name="dunder_string", runners=[py_model.case-1.http.e2e])'
)
@pytest.mark.asyncio
async def test_metrics_type(host: str, deployment_mode: str):
await async_request(
"POST",
f"http://{host}/echo_data_metric",
headers={"Content-Type": "application/json"},
data="input_string",
)
# We hit this extra endpoint because there is no way to access the metrics
# inside a running container; calling it ensures the metrics are registered
# so the test can pass.
await async_request(
"POST",
f"http://{host}/ensure_metrics_are_registered",
headers={"Content-Type": "application/json"},
data="input_string",
assert_status=200,
)
|
2,157 |
dmatrix from cupy
|
import json
import sys
import numpy as np
import pytest
import xgboost as xgb
sys.path.append("tests/python")
from test_dmatrix import set_base_margin_info
from xgboost import testing as tm
cupy = pytest.importorskip("cupy")
def test_array_interface() -> None:
arr = cupy.array([[1, 2, 3, 4], [1, 2, 3, 4]])
i_arr = arr.__cuda_array_interface__
i_arr = json.loads(json.dumps(i_arr))
ret = xgb.core.from_array_interface(i_arr)
np.testing.assert_equal(cupy.asnumpy(arr), cupy.asnumpy(ret))
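# Editorial note: the json round trip above mimics the interface dict crossing a
# serialization boundary; from_array_interface is expected to rebuild a device
# array equal to the original once copied back to host with cupy.asnumpy.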
def METHOD_NAME(input_type, DMatrixT, missing=np.NAN):
'''Test constructing DMatrix from cupy'''
import cupy as cp
kRows = 80
kCols = 3
np_X = np.random.randn(kRows, kCols).astype(dtype=input_type)
X = cp.array(np_X)
X[5, 0] = missing
X[3, 1] = missing
y = cp.random.randn(kRows).astype(dtype=input_type)
dtrain = DMatrixT(X, missing=missing, label=y)
assert dtrain.num_col() == kCols
assert dtrain.num_row() == kRows
if DMatrixT is xgb.QuantileDMatrix:
# Slice is not supported by QuantileDMatrix
with pytest.raises(xgb.core.XGBoostError):
dtrain.slice(rindex=[0, 1, 2])
dtrain.slice(rindex=[0, 1, 2])
else:
dtrain.slice(rindex=[0, 1, 2])
dtrain.slice(rindex=[0, 1, 2])
return dtrain
def _test_from_cupy(DMatrixT):
'''Test constructing DMatrix from cupy'''
import cupy as cp
METHOD_NAME(np.float16, DMatrixT, np.NAN)
METHOD_NAME(np.float32, DMatrixT, np.NAN)
METHOD_NAME(np.float64, DMatrixT, np.NAN)
METHOD_NAME(np.uint8, DMatrixT, 2)
METHOD_NAME(np.uint32, DMatrixT, 3)
METHOD_NAME(np.uint64, DMatrixT, 4)
METHOD_NAME(np.int8, DMatrixT, 2)
METHOD_NAME(np.int32, DMatrixT, -2)
METHOD_NAME(np.int64, DMatrixT, -3)
with pytest.raises(ValueError):
X = cp.random.randn(2, 2, dtype="float32")
y = cp.random.randn(2, 2, 3, dtype="float32")
DMatrixT(X, label=y)
def _test_cupy_training(DMatrixT):
import cupy as cp
np.random.seed(1)
cp.random.seed(1)
X = cp.random.randn(50, 10, dtype="float32")
y = cp.random.randn(50, dtype="float32")
weights = np.random.random(50) + 1
cupy_weights = cp.array(weights)
base_margin = np.random.random(50)
cupy_base_margin = cp.array(base_margin)
evals_result_cupy = {}
dtrain_cp = DMatrixT(X, y, weight=cupy_weights, base_margin=cupy_base_margin)
params = {'gpu_id': 0, 'nthread': 1, 'tree_method': 'gpu_hist'}
xgb.train(params, dtrain_cp, evals=[(dtrain_cp, "train")],
evals_result=evals_result_cupy)
evals_result_np = {}
dtrain_np = xgb.DMatrix(cp.asnumpy(X), cp.asnumpy(y), weight=weights,
base_margin=base_margin)
xgb.train(params, dtrain_np, evals=[(dtrain_np, "train")],
evals_result=evals_result_np)
assert np.array_equal(evals_result_cupy["train"]["rmse"], evals_result_np["train"]["rmse"])
def _test_cupy_metainfo(DMatrixT):
import cupy as cp
n = 100
X = np.random.random((n, 2))
dmat_cupy = DMatrixT(cp.array(X))
dmat = xgb.DMatrix(X)
floats = np.random.random(n)
uints = np.array([4, 2, 8]).astype("uint32")
cupy_floats = cp.array(floats)
cupy_uints = cp.array(uints)
dmat.set_float_info('weight', floats)
dmat.set_float_info('label', floats)
dmat.set_float_info('base_margin', floats)
dmat.set_uint_info('group', uints)
dmat_cupy.set_info(weight=cupy_floats)
dmat_cupy.set_info(label=cupy_floats)
dmat_cupy.set_info(base_margin=cupy_floats)
dmat_cupy.set_info(group=cupy_uints)
# Test setting info with cupy
assert np.array_equal(dmat.get_float_info('weight'),
dmat_cupy.get_float_info('weight'))
assert np.array_equal(dmat.get_float_info('label'),
dmat_cupy.get_float_info('label'))
assert np.array_equal(dmat.get_float_info('base_margin'),
dmat_cupy.get_float_info('base_margin'))
assert np.array_equal(dmat.get_uint_info('group_ptr'),
dmat_cupy.get_uint_info('group_ptr'))
set_base_margin_info(cp.asarray, DMatrixT, "gpu_hist")
@pytest.mark.skipif(**tm.no_cupy())
@pytest.mark.skipif(**tm.no_sklearn())
def test_cupy_training_with_sklearn():
import cupy as cp
np.random.seed(1)
cp.random.seed(1)
X = cp.random.randn(50, 10, dtype="float32")
y = (cp.random.randn(50, dtype="float32") > 0).astype("int8")
weights = np.random.random(50) + 1
cupy_weights = cp.array(weights)
base_margin = np.random.random(50)
cupy_base_margin = cp.array(base_margin)
clf = xgb.XGBClassifier(gpu_id=0, tree_method="gpu_hist")
clf.fit(
X,
y,
sample_weight=cupy_weights,
base_margin=cupy_base_margin,
eval_set=[(X, y)],
)
pred = clf.predict(X)
assert np.array_equal(np.unique(pred), np.array([0, 1]))
class TestFromCupy:
'''Tests for constructing DMatrix from cupy arrays and other data
structures implementing the __cuda_array_interface__ protocol.'''
@pytest.mark.skipif(**tm.no_cupy())
def test_simple_dmat_from_cupy(self):
_test_from_cupy(xgb.DMatrix)
@pytest.mark.skipif(**tm.no_cupy())
def test_device_dmat_from_cupy(self):
_test_from_cupy(xgb.QuantileDMatrix)
@pytest.mark.skipif(**tm.no_cupy())
def test_cupy_training_device_dmat(self):
_test_cupy_training(xgb.QuantileDMatrix)
@pytest.mark.skipif(**tm.no_cupy())
def test_cupy_training_simple_dmat(self):
_test_cupy_training(xgb.DMatrix)
@pytest.mark.skipif(**tm.no_cupy())
def test_cupy_metainfo_simple_dmat(self):
_test_cupy_metainfo(xgb.DMatrix)
@pytest.mark.skipif(**tm.no_cupy())
def test_cupy_metainfo_device_dmat(self):
_test_cupy_metainfo(xgb.QuantileDMatrix)
@pytest.mark.skipif(**tm.no_cupy())
def test_dlpack_simple_dmat(self):
import cupy as cp
n = 100
X = cp.random.random((n, 2))
xgb.DMatrix(X.toDlpack())
@pytest.mark.skipif(**tm.no_cupy())
def test_cupy_categorical(self):
import cupy as cp
n_features = 10
X, y = tm.make_categorical(10, n_features, n_categories=4, onehot=False)
X = cp.asarray(X.values.astype(cp.float32))
y = cp.array(y)
feature_types = ['c'] * n_features
assert isinstance(X, cp.ndarray)
Xy = xgb.DMatrix(X, y, feature_types=feature_types)
np.testing.assert_equal(np.array(Xy.feature_types), np.array(feature_types))
@pytest.mark.skipif(**tm.no_cupy())
def test_dlpack_device_dmat(self):
import cupy as cp
n = 100
X = cp.random.random((n, 2))
m = xgb.QuantileDMatrix(X.toDlpack())
with pytest.raises(xgb.core.XGBoostError):
m.slice(rindex=[0, 1, 2])
@pytest.mark.skipif(**tm.no_cupy())
def test_qid(self):
import cupy as cp
rng = cp.random.RandomState(1994)
rows = 100
cols = 10
X, y = rng.randn(rows, cols), rng.randn(rows)
qid = rng.randint(low=0, high=10, size=rows, dtype=np.uint32)
qid = cp.sort(qid)
Xy = xgb.DMatrix(X, y)
Xy.set_info(qid=qid)
group_ptr = Xy.get_uint_info('group_ptr')
assert group_ptr[0] == 0
assert group_ptr[-1] == rows
@pytest.mark.skipif(**tm.no_cupy())
@pytest.mark.mgpu
def test_specified_device(self):
import cupy as cp
cp.cuda.runtime.setDevice(0)
dtrain = METHOD_NAME(np.float32, xgb.QuantileDMatrix, np.nan)
with pytest.raises(
xgb.core.XGBoostError, match="Invalid device ordinal"
):
xgb.train(
{'tree_method': 'gpu_hist', 'gpu_id': 1}, dtrain, num_boost_round=10
)
|
2,158 |
test empty
|
from typing import List
import pytest
from beet import GenericPipeline, PluginError, PluginImportError
TestPipeline = GenericPipeline[List[str]]
def METHOD_NAME():
pipeline = TestPipeline([])
pipeline.run()
assert pipeline.ctx == []
def test_basic():
pipeline = TestPipeline([])
def p1(ctx: List[str]):
ctx.append("p1")
def p2(ctx: List[str]):
ctx.append("p2")
pipeline.run([p1, p2])
assert pipeline.ctx == ["p1", "p2"]
def test_with_yield():
pipeline = TestPipeline([])
def p1(ctx: List[str]):
ctx.append("p1")
yield
ctx.append("p1-bis")
def p2(ctx: List[str]):
ctx.append("p2")
yield
ctx.append("p2-bis")
pipeline.run([p1, p2])
assert pipeline.ctx == ["p1", "p2", "p2-bis", "p1-bis"]
def test_with_multiple_yield():
pipeline = TestPipeline([])
def p1(ctx: List[str]):
ctx.append("p1")
yield
ctx.append("p1-bis")
yield
ctx.append("p1-bis-bis")
def p2(ctx: List[str]):
ctx.append("p2")
yield
ctx.append("p2-bis")
yield
ctx.append("p2-bis-bis")
pipeline.run([p1, p2])
assert pipeline.ctx == ["p1", "p2", "p2-bis", "p2-bis-bis", "p1-bis", "p1-bis-bis"]
def test_with_multiple_yield_and_nested_require():
pipeline = TestPipeline([])
def p1(ctx: List[str]):
ctx.append("p1")
yield
pipeline.require(p3)
ctx.append("p1-bis")
yield
ctx.append("p1-bis-bis")
def p2(ctx: List[str]):
ctx.append("p2")
yield
ctx.append("p2-bis")
yield
ctx.append("p2-bis-bis")
def p3(ctx: List[str]):
ctx.append("p3")
yield
ctx.append("p3-bis")
pipeline.run([p1, p2])
assert pipeline.ctx == [
"p1",
"p2",
"p2-bis",
"p2-bis-bis",
"p3",
"p1-bis",
"p1-bis-bis",
"p3-bis",
]
def test_self_require():
pipeline = TestPipeline([])
def p1(ctx: List[str]):
pipeline.require(p1)
ctx.append("p1")
pipeline.run([p1])
assert pipeline.ctx == ["p1"]
def test_error():
pipeline = TestPipeline([])
def p1(ctx: List[str]):
ctx.append("p1")
yield
ctx.append("p1-bis")
def p2(ctx: List[str]):
raise ValueError("nope")
with pytest.raises(PluginError):
pipeline.run([p1, p2])
assert pipeline.ctx == ["p1"]
def test_error_finally():
pipeline = TestPipeline([])
def p1(ctx: List[str]):
ctx.append("p1")
try:
yield
finally:
ctx.append("p1-bis")
def p2(ctx: List[str]):
ctx.append("p2")
try:
yield
finally:
ctx.append("p2-bis")
def p3(ctx: List[str]):
raise ValueError("nope")
with pytest.raises(PluginError):
pipeline.run([p1, p2, p3])
assert pipeline.ctx == ["p1", "p2", "p2-bis", "p1-bis"]
def test_error_recover():
pipeline = TestPipeline([])
def p1(ctx: List[str]):
ctx.append("p1")
try:
yield
except PluginError as exc:
ctx.append(str(exc.__cause__))
def p2(ctx: List[str]):
raise ValueError("nope")
pipeline.run([p1, p2])
assert pipeline.ctx == ["p1", "nope"]
def some_plugin(ctx: List[str]):
ctx.append("hello")
def test_import_require():
pipeline = TestPipeline([])
pipeline.run([f"{__name__}.some_plugin"])
assert pipeline.ctx == ["hello"]
def test_import_require_not_found():
pipeline = TestPipeline([])
dotted_path = f"{__name__}.does_not_exist"
with pytest.raises(PluginImportError, match=dotted_path):
pipeline.run([dotted_path])
def test_import_require_whitelist():
pipeline = TestPipeline([], whitelist=["thing"])
dotted_path = f"{__name__}.some_plugin"
with pytest.raises(PluginImportError, match=dotted_path):
pipeline.run([dotted_path])
def test_import_require_whitelist_match():
dotted_path = f"{__name__}.some_plugin"
pipeline = TestPipeline([], whitelist=[dotted_path])
pipeline.run([dotted_path])
assert pipeline.ctx == ["hello"]
|
2,159 |
load
|
# Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
import json
import logging
import shutil
import tempfile
import unittest
from typing import Any
from mock import patch
from pyhocon import ConfigFactory, ConfigTree
from databuilder.extractor.base_extractor import Extractor
from databuilder.job.job import DefaultJob
from databuilder.loader.base_loader import Loader
from databuilder.task.task import DefaultTask
from databuilder.transformer.base_transformer import Transformer
LOGGER = logging.getLogger(__name__)
class TestJob(unittest.TestCase):
def setUp(self) -> None:
self.temp_dir_path = tempfile.mkdtemp()
self.dest_file_name = f'{self.temp_dir_path}/superhero.json'
self.conf = ConfigFactory.from_dict({'loader.superhero.dest_file': self.dest_file_name})
def tearDown(self) -> None:
shutil.rmtree(self.temp_dir_path)
def test_job(self) -> None:
with patch("databuilder.job.job.StatsClient") as mock_statsd:
task = DefaultTask(SuperHeroExtractor(),
SuperHeroLoader(),
transformer=SuperHeroReverseNameTransformer())
job = DefaultJob(self.conf, task)
job.launch()
expected_list = ['{"hero": "Super man", "name": "tneK kralC"}',
'{"hero": "Bat man", "name": "enyaW ecurB"}']
with open(self.dest_file_name, 'r') as file:
for expected in expected_list:
actual = file.readline().rstrip('\n')
self.assertEqual(expected, actual)
self.assertFalse(file.readline())
self.assertEqual(mock_statsd.call_count, 0)
class TestJobNoTransform(unittest.TestCase):
def setUp(self) -> None:
self.temp_dir_path = tempfile.mkdtemp()
self.dest_file_name = f'{self.temp_dir_path}/superhero.json'
self.conf = ConfigFactory.from_dict(
{'loader.superhero.dest_file': self.dest_file_name})
def tearDown(self) -> None:
shutil.rmtree(self.temp_dir_path)
def test_job(self) -> None:
task = DefaultTask(SuperHeroExtractor(), SuperHeroLoader())
job = DefaultJob(self.conf, task)
job.launch()
expected_list = ['{"hero": "Super man", "name": "Clark Kent"}',
'{"hero": "Bat man", "name": "Bruce Wayne"}']
with open(self.dest_file_name, 'r') as file:
for expected in expected_list:
actual = file.readline().rstrip('\n')
self.assertEqual(expected, actual)
self.assertFalse(file.readline())
class TestJobStatsd(unittest.TestCase):
def setUp(self) -> None:
self.temp_dir_path = tempfile.mkdtemp()
self.dest_file_name = f'{self.temp_dir_path}/superhero.json'
self.conf = ConfigFactory.from_dict(
{'loader.superhero.dest_file': self.dest_file_name,
'job.is_statsd_enabled': True,
'job.identifier': 'foobar'})
def tearDown(self) -> None:
shutil.rmtree(self.temp_dir_path)
def test_job(self) -> None:
with patch("databuilder.job.job.StatsClient") as mock_statsd:
task = DefaultTask(SuperHeroExtractor(), SuperHeroLoader())
job = DefaultJob(self.conf, task)
job.launch()
expected_list = ['{"hero": "Super man", "name": "Clark Kent"}',
'{"hero": "Bat man", "name": "Bruce Wayne"}']
with open(self.dest_file_name, 'r') as file:
for expected in expected_list:
actual = file.readline().rstrip('\n')
self.assertEqual(expected, actual)
self.assertFalse(file.readline())
self.assertEqual(mock_statsd.return_value.incr.call_count, 1)
class SuperHeroExtractor(Extractor):
def __init__(self) -> None:
pass
def init(self, conf: ConfigTree) -> None:
self.records = [SuperHero(hero='Super man', name='Clark Kent'),
SuperHero(hero='Bat man', name='Bruce Wayne')]
self.iter = iter(self.records)
def extract(self) -> Any:
try:
return next(self.iter)
except StopIteration:
return None
def get_scope(self) -> str:
return 'extractor.superhero'
class SuperHero:
def __init__(self,
hero: str,
name: str) -> None:
self.hero = hero
self.name = name
def __repr__(self) -> str:
return f'SuperHero(hero={self.hero}, name={self.name})'
class SuperHeroReverseNameTransformer(Transformer):
def __init__(self) -> None:
pass
def init(self, conf: ConfigTree) -> None:
pass
def transform(self, record: Any) -> Any:
record.name = record.name[::-1]
return record
def get_scope(self) -> str:
return 'transformer.superhero'
class SuperHeroLoader(Loader):
def init(self, conf: ConfigTree) -> None:
self.conf = conf
dest_file_path = self.conf.get_string('dest_file')
LOGGER.info('Loading to %s', dest_file_path)
self.dest_file_obj = open(self.conf.get_string('dest_file'), 'w')
def METHOD_NAME(self, record: Any) -> None:
rec = json.dumps(record.__dict__, sort_keys=True)
LOGGER.info('Writing record: %s', rec)
self.dest_file_obj.write(f'{rec}\n')
self.dest_file_obj.flush()
def get_scope(self) -> str:
return 'loader.superhero'
if __name__ == '__main__':
unittest.main()
|
2,160 |
aestat rs
|
import numpy as np
from pyNastran.bdf import read_bdf, BDF
def partition_matrix(Maa):
str(Maa)
MLL = MLR = MRR = MRL = np.zeros((3, 3))
return MLL, MLR, MRR, MRL
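# partition_matrix above is a zero-block placeholder. A full version would slice the
# a-set matrix by analysis (l-set) and support (r-set) DOF indices, e.g. (hypothetical
# sketch; l_idx and r_idx would be integer index arrays):
#     MLL = Maa[np.ix_(l_idx, l_idx)]
#     MLR = Maa[np.ix_(l_idx, r_idx)]
#     MRL = Maa[np.ix_(r_idx, l_idx)]
#     MRR = Maa[np.ix_(r_idx, r_idx)]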
def pfaero(model: BDF):
"""performs aero calcs that are independent of the structural model"""
str(model)
# ???
AJJ = np.zeros((3, 3))
SKL = np.zeros((3, 3))
SKJ = np.zeros((3, 3))
SKJF = np.zeros((3, 3))
#KAA = np.zeros((3, 3))
DJK = np.zeros((3, 3))
WTFACT = np.zeros((3, 3))
QKJ = np.zeros((3, 3))
WKK = np.zeros((3, 3))
D1JK = np.zeros((3, 3))
D2JK = np.zeros((3, 3))
DJKB = np.zeros((3, 3))
K2JK = np.zeros((3, 3))
DKJB = np.zeros((3, 3))
#q_inf = 1.0
# GI - spline matrix
unused_GI = np.zeros((3, 3))
    # AIC matrix
AJJ = np.zeros((3, 3))
AJJT = AJJ.T
AJJi = np.linalg.inv(AJJ)
WSKJ = WKK @ SKL
unused_KKSJ = WSKJ @ AJJi @ DJK
unused_KX = WSKJ @ AJJi
SKJ1 = WTFACT @ SKJF
unused_KDJB = D1JK + 1j * K2JK
# SKJ - integration matrix
unused_QKK = SKJ @ AJJi @ (D1JK + 1j * D2JK)
is_strip_theory_or_mach_box = False
if not is_strip_theory_or_mach_box:
unused_KJ = AJJi @ SKJ1
unused_QKK = QKJ @ DJKB
unused_KK = SKJ1 @ AJJT @ DKJB
return SKJ, D1JK, AJJi
def METHOD_NAME(model: BDF, Kaa, Maa, TR, TRX):
"""static aero"""
SKJ, D1JK, AJJi = pfaero(model)
DJX = np.zeros((3, 3))
KAA = np.zeros((3, 3))
unused_KAAX = np.zeros((3, 3))
KSALL = np.zeros((3, 3))
KALX = np.zeros((3, 3))
KRXA = np.zeros((3, 3))
KLXA = np.zeros((3, 3))
GKA = np.zeros((3, 3))
GTKL = np.zeros((3, 3))
QLL = np.zeros((3, 3))
QKKS = np.zeros((3, 3))
QKX = np.zeros((3, 3))
WKK = np.zeros((3, 3))
SRKT = np.zeros((3, 3))
KALLA = np.zeros((3, 3))
fi_q = np.zeros(3)
u_l = np.zeros(3)
u_x = np.zeros(3)
PL = np.zeros(3)
WGJ = np.zeros(3)
FAJE = np.zeros(3)
q_inf = 1.0
KLL, KLR, unused_KRR, unused_KRL = partition_matrix(Kaa)
MLL, MLR, MRR, MRL = partition_matrix(Maa)
KLLi = np.linalg.inv(KLL)
D = -KLLi @ KLR
mr = D.T @ MLL @ D + MRL @ D + D.T @ MLR + MRR
#ZZX = mr @ TR.T @ TRX
MSLR = MLL @ D + MLR
unused_MSRR = MRL @ D + D.T @ MSLR + MRR
unused_M1RR = D.T @ MRL + MRR
unused_M1RL = D.T @ MLL + MRL
unused_RFSOP = TR.T @ TRX
QAA = GKA.T @ QKKS @ GKA
QAX = GKA.T @ QKX
KAAA = -q_inf * QAA
unused_KAAX = -q_inf * QAX
unused_KSAA1 = KAA + KAAA
# partition SUPORTs using KSAA1, KALX, KAAA
#KLLA, KLRA, KRRA, KRLA = partition_matrix(KAAA)
KALL = KLL - q_inf * QLL
# TRX - boolean matrix that selects accelerations from the
# aerodynamic extra points
    # TR - transforms accelerations from aero ref point to supported
# DOFs form ALX
KSALLi = np.linalg.inv(KSALL)
unused_ALX = KSALLi @ KALX
# WGJ - user input downwash vector
# FAJE - user input pressure coefficient vector
# PSA - external load vector
# PZ - loads for trim calculation
# IPZ - restrained elastic dimensional intercepts
# IPZF2 - Unrestrained elastic dimensional intercepts
# RINT - Rigid, unsplined dimensional intercepts
# INTZ - Rigid, splined dimensional intercepts
# HP0 - Perturbation in the support point deformations relative to mean axes due to external loads
u_k = GTKL.T @ u_l
# total downwash velocity
w_j = D1JK @ u_k + DJX @ u_x + WGJ
# pressure on aero elements
unused_FFAJ = q_inf * AJJi @ w_j + q_inf * FAJE
    # stability
QKX = WKK @ SKJ @ AJJi @ DJX
unused_RSTAB = q_inf * SRKT.T @ QKX
RINT = q_inf * SRKT.T @ (WKK @ SKJ @ AJJi @ w_j + SKJ @ fi_q)
unused_KSAZX = D.T @ KLXA + KRXA
unused_INTZ = GKA.T @ RINT
mri = np.linalg.inv(mr)
KALLi = np.linalg.inv(KALL)
HP = mri @ (D.T @ MLL + MRL) @ KALLA @ ((MLL @ D + MLR) @ TR.T @ TRX + KALX)
HP0 = -mri @ (D.T @ MLL + MRL) @ KALLi @ PL
return HP, HP0
|
2,161 |
empty image
|
# MIT License
#
# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2022
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from art.config import ART_NUMPY_DTYPE
from art.defences.preprocessor import CutoutTensorFlowV2
from tests.utils import ARTTestException
logger = logging.getLogger(__name__)
@pytest.fixture(params=[1, 3], ids=["grayscale", "RGB"])
def image_batch(request, channels_first):
"""
Image fixtures of shape NHWC and NCHW.
"""
channels = request.param
if channels_first:
data_shape = (2, channels, 12, 8)
else:
data_shape = (2, 12, 8, channels)
return (255 * np.ones(data_shape)).astype(ART_NUMPY_DTYPE)
@pytest.fixture(params=[1, 3], ids=["grayscale", "RGB"])
def video_batch(request, channels_first):
"""
Video fixtures of shape NFHWC and NCFHW.
"""
channels = request.param
if channels_first:
data_shape = (2, 2, channels, 12, 8)
else:
data_shape = (2, 2, 12, 8, channels)
return (255 * np.ones(data_shape)).astype(ART_NUMPY_DTYPE)
@pytest.fixture(params=[1, 3], ids=["grayscale", "RGB"])
def METHOD_NAME(request, channels_first):
"""
Empty image fixtures of shape NHWC and NCHW.
"""
channels = request.param
if channels_first:
data_shape = (2, channels, 12, 8)
else:
data_shape = (2, 12, 8, channels)
return np.zeros(data_shape).astype(ART_NUMPY_DTYPE)
@pytest.mark.only_with_platform("tensorflow2")
@pytest.mark.parametrize("length", [2, 4])
@pytest.mark.parametrize("channels_first", [True, False])
def test_cutout_image_data(art_warning, image_batch, length, channels_first):
try:
cutout = CutoutTensorFlowV2(length=length, channels_first=channels_first)
count = np.not_equal(cutout(image_batch)[0], image_batch).sum()
n = image_batch.shape[0]
if channels_first:
channels = image_batch.shape[1]
else:
channels = image_batch.shape[-1]
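        # Cutout picks one length x length patch per sample and applies it across all
        # channels, so at most n * channels * length * length entries can differ.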
assert count <= n * channels * length * length
except ARTTestException as e:
art_warning(e)
@pytest.mark.only_with_platform("tensorflow2")
@pytest.mark.parametrize("length", [4])
@pytest.mark.parametrize("channels_first", [True, False])
def test_cutout_video_data(art_warning, video_batch, length, channels_first):
try:
cutout = CutoutTensorFlowV2(length=length, channels_first=channels_first)
count = np.not_equal(cutout(video_batch)[0], video_batch).sum()
n = video_batch.shape[0]
frames = video_batch.shape[1]
if channels_first:
channels = video_batch.shape[2]
else:
channels = video_batch.shape[-1]
assert count <= n * frames * channels * length * length
except ARTTestException as e:
art_warning(e)
@pytest.mark.only_with_platform("tensorflow2")
@pytest.mark.parametrize("length", [4])
@pytest.mark.parametrize("channels_first", [True])
def test_cutout_empty_data(art_warning, METHOD_NAME, length, channels_first):
try:
cutout = CutoutTensorFlowV2(length=length, channels_first=channels_first)
assert_array_equal(cutout(METHOD_NAME)[0], METHOD_NAME)
except ARTTestException as e:
art_warning(e)
@pytest.mark.only_with_platform("tensorflow2")
def test_non_image_data_error(art_warning, tabular_batch):
try:
test_input = tabular_batch
cutout = CutoutTensorFlowV2(length=8, channels_first=True)
exc_msg = "Unrecognized input dimension. Cutout can only be applied to image and video data."
with pytest.raises(ValueError, match=exc_msg):
cutout(test_input)
except ARTTestException as e:
art_warning(e)
@pytest.mark.only_with_platform("tensorflow2")
def test_check_params(art_warning):
try:
with pytest.raises(ValueError):
_ = CutoutTensorFlowV2(length=-1)
with pytest.raises(ValueError):
_ = CutoutTensorFlowV2(length=0)
except ARTTestException as e:
art_warning(e)
|
2,162 |
type description
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from abc import abstractmethod
from typing import (
Optional,
Any,
Dict,
TYPE_CHECKING,
List,
Generic,
TypeVar,
Union,
cast,
)
from .imports import FileImport, ImportType, TypingSection
from .base import BaseType
if TYPE_CHECKING:
from .code_model import CodeModel
class _CredentialPolicyBaseType:
"""Base class for our different credential policy types.
Inherited by our BearerTokenCredentialPolicy and AzureKeyCredentialPolicy types.
"""
def __init__(self, yaml_data: Dict[str, Any], code_model: "CodeModel") -> None:
self.yaml_data = yaml_data
self.code_model = code_model
@abstractmethod
def call(self, async_mode: bool) -> str:
"""
How to call this credential policy. Used to initialize the credential policy in the config file.
"""
class BearerTokenCredentialPolicyType(_CredentialPolicyBaseType):
"""Credential policy type representing BearerTokenCredentialPolicy"""
def __init__(
self,
yaml_data: Dict[str, Any],
code_model: "CodeModel",
credential_scopes: List[str],
) -> None:
super().__init__(yaml_data, code_model)
self.credential_scopes = credential_scopes
def call(self, async_mode: bool) -> str:
policy_name = f"{'Async' if async_mode else ''}BearerTokenCredentialPolicy"
return f"policies.{policy_name}(self.credential, *self.credential_scopes, **kwargs)"
@classmethod
def from_yaml(
cls, yaml_data: Dict[str, Any], code_model: "CodeModel"
) -> "BearerTokenCredentialPolicyType":
return cls(yaml_data, code_model, yaml_data["credentialScopes"])
class ARMChallengeAuthenticationPolicyType(BearerTokenCredentialPolicyType):
"""Credential policy type representing ARMChallengeAuthenticationPolicy"""
def call(self, async_mode: bool) -> str:
policy_name = f"{'Async' if async_mode else ''}ARMChallengeAuthenticationPolicy"
return f"{policy_name}(self.credential, *self.credential_scopes, **kwargs)"
class AzureKeyCredentialPolicyType(_CredentialPolicyBaseType):
def __init__(
self,
yaml_data: Dict[str, Any],
code_model: "CodeModel",
key: str,
scheme: Optional[str] = None,
) -> None:
super().__init__(yaml_data, code_model)
self.key = key
self.scheme = scheme
def call(self, async_mode: bool) -> str:
params = f'"{self.key}", '
if self.scheme:
params += f'prefix="{self.scheme}", '
return f"policies.AzureKeyCredentialPolicy(self.credential, {params}**kwargs)"
@classmethod
def from_yaml(
cls, yaml_data: Dict[str, Any], code_model: "CodeModel"
) -> "AzureKeyCredentialPolicyType":
return cls(
yaml_data, code_model, yaml_data["key"], yaml_data.get("scheme", None)
)
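# Illustrative note (not generated code): with key="api-key" and scheme="SharedKey",
# AzureKeyCredentialPolicyType.call() would render
#     policies.AzureKeyCredentialPolicy(self.credential, "api-key", prefix="SharedKey", **kwargs)
# which is the snippet the emitted config file uses to initialize the credential policy.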
CredentialPolicyType = TypeVar(
"CredentialPolicyType",
bound=Union[
BearerTokenCredentialPolicyType,
ARMChallengeAuthenticationPolicyType,
AzureKeyCredentialPolicyType,
],
)
class CredentialType(
Generic[CredentialPolicyType], BaseType
): # pylint:disable=abstract-method
"""Store info about the type of the credential. Can be either an AzureKeyCredential or a TokenCredential"""
def __init__(
self,
yaml_data: Dict[str, Any],
code_model: "CodeModel",
policy: CredentialPolicyType,
) -> None:
super().__init__(yaml_data, code_model)
self.policy = policy
def description(
self, *, is_operation_file: bool # pylint: disable=unused-argument
) -> str:
return ""
def get_json_template_representation(
self,
*,
optional: bool = True,
client_default_value_declaration: Optional[str] = None,
description: Optional[str] = None,
) -> Any:
raise TypeError(
"You should not try to get a JSON template representation of a CredentialSchema"
)
def docstring_text(self, **kwargs: Any) -> str:
return "credential"
@property
def serialization_type(self) -> str:
return self.docstring_type()
@classmethod
def from_yaml(
cls, yaml_data: Dict[str, Any], code_model: "CodeModel"
) -> "CredentialType":
from . import build_type
return cls(
yaml_data,
code_model,
policy=cast(
CredentialPolicyType, build_type(yaml_data["policy"], code_model)
),
)
class TokenCredentialType(
CredentialType[ # pylint: disable=unsubscriptable-object
Union[BearerTokenCredentialPolicyType, ARMChallengeAuthenticationPolicyType]
]
):
"""Type of a token credential. Used by BearerAuth and ARMChallenge policies"""
def type_annotation(self, **kwargs: Any) -> str:
if kwargs.get("async_mode"):
return '"AsyncTokenCredential"'
return '"TokenCredential"'
@property
def METHOD_NAME(self) -> str:
return "TokenCredential"
def docstring_type(self, **kwargs: Any) -> str:
if kwargs.get("async_mode"):
return "~azure.core.credentials_async.AsyncTokenCredential"
return "~azure.core.credentials.TokenCredential"
def imports(self, **kwargs: Any) -> FileImport:
file_import = FileImport()
if kwargs.get("async_mode"):
file_import.add_submodule_import(
"azure.core.credentials_async",
"AsyncTokenCredential",
ImportType.AZURECORE,
typing_section=TypingSection.TYPING,
)
else:
file_import.add_submodule_import(
"azure.core.credentials",
"TokenCredential",
ImportType.AZURECORE,
typing_section=TypingSection.TYPING,
)
return file_import
@property
def instance_check_template(self) -> str:
return "hasattr({}, 'get_token')"
class AzureKeyCredentialType(
# pylint: disable=unsubscriptable-object
CredentialType[AzureKeyCredentialPolicyType]
):
"""Type for an AzureKeyCredential"""
def docstring_type(self, **kwargs: Any) -> str: # pylint: disable=unused-argument
return "~azure.core.credentials.AzureKeyCredential"
def type_annotation(self, **kwargs: Any) -> str: # pylint: disable=unused-argument
return "AzureKeyCredential"
@property
def instance_check_template(self) -> str:
return "isinstance({}, AzureKeyCredential)"
def imports(self, **kwargs: Any) -> FileImport: # pylint: disable=unused-argument
file_import = FileImport()
file_import.add_submodule_import(
"azure.core.credentials",
"AzureKeyCredential",
ImportType.AZURECORE,
typing_section=TypingSection.CONDITIONAL,
)
return file_import
|
2,163 |
test zoom
|
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QDropEvent
from PyQt5.QtTest import QTest
from PyQt5.QtWidgets import QApplication
from tests.QtTestCase import QtTestCase
from urh.controller.dialogs.ModulatorDialog import ModulatorDialog
from urh.util.Logger import logger
class TestModulatorGUI(QtTestCase):
def setUp(self):
super().setUp()
self.form.ui.tabWidget.setCurrentIndex(2)
logger.debug("Preparing Modulation dialog")
self.dialog, _ = self.form.generator_tab_controller.prepare_modulation_dialog()
if self.SHOW:
self.dialog.show()
logger.debug("Initializing Modulation dialog")
self.dialog.initialize("1111")
logger.debug("Preparation success")
def test_add_remove_modulator(self):
self.assertEqual(len(self.dialog.modulators), 1)
self.dialog.ui.btnAddModulation.click()
self.assertEqual(len(self.dialog.modulators), 2)
self.dialog.ui.btnAddModulation.click()
self.assertEqual(len(self.dialog.modulators), 3)
self.app.processEvents()
self.dialog.ui.btnRemoveModulation.click()
self.assertEqual(len(self.dialog.modulators), 2)
self.dialog.ui.btnRemoveModulation.click()
self.assertEqual(len(self.dialog.modulators), 1)
self.assertFalse(self.dialog.ui.btnRemoveModulation.isEnabled())
def test_edit_carrier(self):
self.dialog.ui.doubleSpinBoxCarrierFreq.setValue(1e9)
self.dialog.ui.doubleSpinBoxCarrierFreq.editingFinished.emit()
self.assertEqual(self.dialog.current_modulator.carrier_freq_hz, 1e9)
self.dialog.ui.doubleSpinBoxCarrierPhase.setValue(100)
self.dialog.ui.doubleSpinBoxCarrierPhase.editingFinished.emit()
self.assertEqual(self.dialog.current_modulator.carrier_phase_deg, 100)
def test_edit_data(self):
bits = self.dialog.current_modulator.display_bits
self.dialog.ui.linEdDataBits.setText("10101010")
self.dialog.ui.linEdDataBits.editingFinished.emit()
self.assertEqual(self.dialog.current_modulator.display_bits, "10101010")
assert isinstance(self.dialog, ModulatorDialog)
self.dialog.restore_bits_action.trigger()
self.dialog.ui.linEdDataBits.editingFinished.emit()
self.assertEqual(self.dialog.current_modulator.display_bits, bits)
self.dialog.ui.spinBoxSamplesPerSymbol.setValue(1337)
self.dialog.ui.spinBoxSamplesPerSymbol.editingFinished.emit()
self.assertEqual(self.dialog.current_modulator.samples_per_symbol, 1337)
self.dialog.ui.spinBoxSampleRate.setValue(5e6)
self.dialog.ui.spinBoxSampleRate.editingFinished.emit()
self.assertEqual(self.dialog.current_modulator.sample_rate, 5e6)
def METHOD_NAME(self):
self.dialog.ui.gVModulated.zoom(1.1)
self.assertIn(int(self.dialog.ui.gVModulated.view_rect().width()),
[int(self.dialog.ui.gVCarrier.view_rect().width())-1,
int(self.dialog.ui.gVCarrier.view_rect().width()),
int(self.dialog.ui.gVCarrier.view_rect().width()+1)])
self.assertIn(int(self.dialog.ui.gVModulated.view_rect().width()),
[int(self.dialog.ui.gVData.view_rect().width())-1,
int(self.dialog.ui.gVData.view_rect().width()),
int(self.dialog.ui.gVData.view_rect().width()+1)])
self.dialog.ui.gVModulated.zoom(1.01)
self.assertIn(int(self.dialog.ui.gVModulated.view_rect().width()),
[int(self.dialog.ui.gVCarrier.view_rect().width())-1,
int(self.dialog.ui.gVCarrier.view_rect().width()),
int(self.dialog.ui.gVCarrier.view_rect().width()+1)])
self.assertIn(int(self.dialog.ui.gVModulated.view_rect().width()),
[int(self.dialog.ui.gVData.view_rect().width())-1,
int(self.dialog.ui.gVData.view_rect().width()),
int(self.dialog.ui.gVData.view_rect().width()+1)])
def test_edit_modulation(self):
self.dialog.ui.comboBoxModulationType.setCurrentText("Amplitude Shift Keying (ASK)")
self.assertEqual(self.dialog.ui.labelParameters.text(), "Amplitudes in %:")
self.dialog.ui.comboBoxModulationType.setCurrentText("Frequency Shift Keying (FSK)")
self.assertEqual(self.dialog.ui.labelParameters.text(), "Frequencies in Hz:")
self.dialog.ui.comboBoxModulationType.setCurrentText("Gaussian Frequency Shift Keying (GFSK)")
self.assertEqual(self.dialog.ui.labelParameters.text(), "Frequencies in Hz:")
self.dialog.ui.spinBoxGaussBT.setValue(0.5)
self.dialog.ui.spinBoxGaussBT.editingFinished.emit()
self.assertEqual(self.dialog.current_modulator.gauss_bt, 0.5)
self.dialog.ui.spinBoxGaussFilterWidth.setValue(5)
self.dialog.ui.spinBoxGaussFilterWidth.editingFinished.emit()
self.assertEqual(self.dialog.current_modulator.gauss_filter_width, 5)
self.dialog.ui.comboBoxModulationType.setCurrentText("Phase Shift Keying (PSK)")
self.assertEqual(self.dialog.ui.labelParameters.text(), "Phases in degree:")
self.dialog.ui.comboBoxModulationType.setCurrentText("Amplitude Shift Keying (ASK)")
self.assertEqual(self.dialog.ui.labelParameters.text(), "Amplitudes in %:")
self.assertEqual(int(self.dialog.ui.lSamplesInViewModulated.text()),
int(self.dialog.ui.gVModulated.view_rect().width()))
def test_signal_view(self):
self.add_signal_to_form("esaver.complex16s")
signal = self.form.signal_tab_controller.signal_frames[0].signal
tree_view = self.dialog.ui.treeViewSignals
tree_model = tree_view.model()
item = tree_model.rootItem.children[0].children[0]
index = tree_model.createIndex(0, 0, item)
rect = tree_view.visualRect(index)
QTest.mousePress(tree_view.viewport(), Qt.LeftButton, pos=rect.center())
mime_data = tree_model.mimeData([index])
drag_drop = QDropEvent(rect.center(), Qt.CopyAction | Qt.MoveAction, mime_data, Qt.LeftButton, Qt.NoModifier)
drag_drop.acceptProposedAction()
self.dialog.ui.gVOriginalSignal.dropEvent(drag_drop)
self.assertEqual(self.dialog.ui.gVOriginalSignal.sceneRect().width(), signal.num_samples)
self.dialog.ui.cbShowDataBitsOnly.click()
self.dialog.ui.chkBoxLockSIV.click()
self.assertEqual(int(self.dialog.ui.gVOriginalSignal.view_rect().width()),
int(self.dialog.ui.gVModulated.view_rect().width()))
freq = self.dialog.ui.doubleSpinBoxCarrierFreq.value()
self.dialog.ui.btnAutoDetect.click()
self.assertNotEqual(freq, self.dialog.ui.doubleSpinBoxCarrierFreq.value())
self.dialog.ui.comboBoxModulationType.setCurrentText("Frequency Shift Keying (FSK)")
self.dialog.ui.btnAutoDetect.click()
self.assertEqual(self.dialog.ui.lCurrentSearchResult.text(), "1")
self.dialog.ui.btnSearchNext.click()
self.assertEqual(self.dialog.ui.lCurrentSearchResult.text(), "2")
self.dialog.ui.btnSearchPrev.click()
self.assertEqual(self.dialog.ui.lCurrentSearchResult.text(), "1")
|
2,164 |
meshes container
|
"""
forward_meshes_container
========================
Autogenerated DPF operator classes.
"""
from warnings import warn
from ansys.dpf.core.dpf_operator import Operator
from ansys.dpf.core.inputs import Input, _Inputs
from ansys.dpf.core.outputs import Output, _Outputs
from ansys.dpf.core.operators.specification import PinSpecification, Specification
class forward_meshes_container(Operator):
"""Returns the input mesh or meshes container into a meshes container.
Parameters
----------
meshes : MeshesContainer or MeshedRegion
default_label : str, optional
This default label is used if a new meshes
container needs to be created
(default is unknown)
Examples
--------
>>> from ansys.dpf import core as dpf
>>> # Instantiate operator
>>> op = dpf.operators.utility.forward_meshes_container()
>>> # Make input connections
>>> my_meshes = dpf.MeshesContainer()
>>> op.inputs.meshes.connect(my_meshes)
>>> my_default_label = str()
>>> op.inputs.default_label.connect(my_default_label)
>>> # Instantiate operator and connect inputs in one line
>>> op = dpf.operators.utility.forward_meshes_container(
... meshes=my_meshes,
... default_label=my_default_label,
... )
>>> # Get output data
>>> result_meshes_container = op.outputs.meshes_container()
"""
def __init__(self, meshes=None, default_label=None, config=None, server=None):
super().__init__(name="forward_meshes_container", config=config, server=server)
self._inputs = InputsForwardMeshesContainer(self)
self._outputs = OutputsForwardMeshesContainer(self)
if meshes is not None:
self.inputs.meshes.connect(meshes)
if default_label is not None:
self.inputs.default_label.connect(default_label)
@staticmethod
def _spec():
description = (
"""Returns the input mesh or meshes container into a meshes container."""
)
spec = Specification(
description=description,
map_input_pin_spec={
0: PinSpecification(
name="meshes",
type_names=["meshes_container", "abstract_meshed_region"],
optional=False,
document="""""",
),
1: PinSpecification(
name="default_label",
type_names=["string"],
optional=True,
document="""This default label is used if a new meshes
container needs to be created
(default is unknown)""",
),
},
map_output_pin_spec={
0: PinSpecification(
name="meshes_container",
type_names=["meshes_container"],
optional=False,
document="""""",
),
},
)
return spec
@staticmethod
def default_config(server=None):
"""Returns the default config of the operator.
This config can then be changed to the user needs and be used to
instantiate the operator. The Configuration allows to customize
how the operation will be processed by the operator.
Parameters
----------
server : server.DPFServer, optional
Server with channel connected to the remote or local instance. When
``None``, attempts to use the global server.
"""
return Operator.default_config(name="forward_meshes_container", server=server)
@property
def inputs(self):
"""Enables to connect inputs to the operator
Returns
--------
inputs : InputsForwardMeshesContainer
"""
return super().inputs
@property
def outputs(self):
"""Enables to get outputs of the operator by evaluating it
Returns
--------
outputs : OutputsForwardMeshesContainer
"""
return super().outputs
class InputsForwardMeshesContainer(_Inputs):
"""Intermediate class used to connect user inputs to
forward_meshes_container operator.
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.utility.forward_meshes_container()
>>> my_meshes = dpf.MeshesContainer()
>>> op.inputs.meshes.connect(my_meshes)
>>> my_default_label = str()
>>> op.inputs.default_label.connect(my_default_label)
"""
def __init__(self, op: Operator):
super().__init__(forward_meshes_container._spec().inputs, op)
self._meshes = Input(forward_meshes_container._spec().input_pin(0), 0, op, -1)
self._inputs.append(self._meshes)
self._default_label = Input(
forward_meshes_container._spec().input_pin(1), 1, op, -1
)
self._inputs.append(self._default_label)
@property
def meshes(self):
"""Allows to connect meshes input to the operator.
Parameters
----------
my_meshes : MeshesContainer or MeshedRegion
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.utility.forward_meshes_container()
>>> op.inputs.meshes.connect(my_meshes)
>>> # or
>>> op.inputs.meshes(my_meshes)
"""
return self._meshes
@property
def default_label(self):
"""Allows to connect default_label input to the operator.
This default label is used if a new meshes
container needs to be created
(default is unknown)
Parameters
----------
my_default_label : str
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.utility.forward_meshes_container()
>>> op.inputs.default_label.connect(my_default_label)
>>> # or
>>> op.inputs.default_label(my_default_label)
"""
return self._default_label
class OutputsForwardMeshesContainer(_Outputs):
"""Intermediate class used to get outputs from
forward_meshes_container operator.
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.utility.forward_meshes_container()
>>> # Connect inputs : op.inputs. ...
>>> result_meshes_container = op.outputs.meshes_container()
"""
def __init__(self, op: Operator):
super().__init__(forward_meshes_container._spec().outputs, op)
self._meshes_container = Output(
forward_meshes_container._spec().output_pin(0), 0, op
)
self._outputs.append(self._meshes_container)
@property
def METHOD_NAME(self):
"""Allows to get meshes_container output of the operator
Returns
----------
my_meshes_container : MeshesContainer
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.utility.forward_meshes_container()
>>> # Connect inputs : op.inputs. ...
>>> result_meshes_container = op.outputs.meshes_container()
""" # noqa: E501
return self._meshes_container
|
2,165 |
difference
|
# Access WeakSet through the weakref module.
# This code is separated-out because it is needed
# by abc.py to load everything else at startup.
from _weakref import ref
from types import GenericAlias
__all__ = ['WeakSet']
class _IterationGuard:
# This context manager registers itself in the current iterators of the
# weak container, such as to delay all removals until the context manager
# exits.
# This technique should be relatively thread-safe (since sets are).
def __init__(self, weakcontainer):
# Don't create cycles
self.weakcontainer = ref(weakcontainer)
def __enter__(self):
w = self.weakcontainer()
if w is not None:
w._iterating.add(self)
return self
def __exit__(self, e, t, b):
w = self.weakcontainer()
if w is not None:
s = w._iterating
s.remove(self)
if not s:
w._commit_removals()
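# Sketch of the behaviour this guard enables: while any iteration over a WeakSet is
# active, weakref callbacks append dead entries to _pending_removals instead of
# discarding them from the underlying set, so a loop such as
#     for item in ws:
#         del somewhere[item]   # may drop the last strong reference
# never mutates the set it is iterating over; removals are committed afterwards.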
class WeakSet:
def __init__(self, data=None):
self.data = set()
def _remove(item, selfref=ref(self)):
self = selfref()
if self is not None:
if self._iterating:
self._pending_removals.append(item)
else:
self.data.discard(item)
self._remove = _remove
# A list of keys to be removed
self._pending_removals = []
self._iterating = set()
if data is not None:
self.update(data)
def _commit_removals(self):
pop = self._pending_removals.pop
discard = self.data.discard
while True:
try:
item = pop()
except IndexError:
return
discard(item)
def __iter__(self):
with _IterationGuard(self):
for itemref in self.data:
item = itemref()
if item is not None:
# Caveat: the iterator will keep a strong reference to
# `item` until it is resumed or closed.
yield item
def __len__(self):
return len(self.data) - len(self._pending_removals)
def __contains__(self, item):
try:
wr = ref(item)
except TypeError:
return False
return wr in self.data
def __reduce__(self):
return self.__class__, (list(self),), self.__getstate__()
def add(self, item):
if self._pending_removals:
self._commit_removals()
self.data.add(ref(item, self._remove))
def clear(self):
if self._pending_removals:
self._commit_removals()
self.data.clear()
def copy(self):
return self.__class__(self)
def pop(self):
if self._pending_removals:
self._commit_removals()
while True:
try:
itemref = self.data.pop()
except KeyError:
raise KeyError('pop from empty WeakSet') from None
item = itemref()
if item is not None:
return item
def remove(self, item):
if self._pending_removals:
self._commit_removals()
self.data.remove(ref(item))
def discard(self, item):
if self._pending_removals:
self._commit_removals()
self.data.discard(ref(item))
def update(self, other):
if self._pending_removals:
self._commit_removals()
for element in other:
self.add(element)
def __ior__(self, other):
self.update(other)
return self
def METHOD_NAME(self, other):
newset = self.copy()
newset.difference_update(other)
return newset
__sub__ = METHOD_NAME
def difference_update(self, other):
self.__isub__(other)
def __isub__(self, other):
if self._pending_removals:
self._commit_removals()
if self is other:
self.data.clear()
else:
self.data.difference_update(ref(item) for item in other)
return self
def intersection(self, other):
return self.__class__(item for item in other if item in self)
__and__ = intersection
def intersection_update(self, other):
self.__iand__(other)
def __iand__(self, other):
if self._pending_removals:
self._commit_removals()
self.data.intersection_update(ref(item) for item in other)
return self
def issubset(self, other):
return self.data.issubset(ref(item) for item in other)
__le__ = issubset
def __lt__(self, other):
return self.data < set(map(ref, other))
def issuperset(self, other):
return self.data.issuperset(ref(item) for item in other)
__ge__ = issuperset
def __gt__(self, other):
return self.data > set(map(ref, other))
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self.data == set(map(ref, other))
def symmetric_difference(self, other):
newset = self.copy()
newset.symmetric_difference_update(other)
return newset
__xor__ = symmetric_difference
def symmetric_difference_update(self, other):
self.__ixor__(other)
def __ixor__(self, other):
if self._pending_removals:
self._commit_removals()
if self is other:
self.data.clear()
else:
self.data.symmetric_difference_update(ref(item, self._remove) for item in other)
return self
def union(self, other):
return self.__class__(e for s in (self, other) for e in s)
__or__ = union
def isdisjoint(self, other):
return len(self.intersection(other)) == 0
def __repr__(self):
return repr(self.data)
__class_getitem__ = classmethod(GenericAlias)
|
2,166 |
get description
|
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
Profiler Plugin.
"""
# Standard library imports
from typing import List
# Third party imports
from qtpy.QtCore import Signal
# Local imports
from spyder.api.plugins import Plugins, SpyderDockablePlugin
from spyder.api.plugin_registration.decorators import (
on_plugin_available, on_plugin_teardown)
from spyder.api.translations import _
from spyder.plugins.editor.api.run import FileRun
from spyder.plugins.mainmenu.api import ApplicationMenus, RunMenuSections
from spyder.plugins.profiler.api import ProfilerPyConfiguration
from spyder.plugins.profiler.confpage import ProfilerConfigPage
from spyder.plugins.profiler.widgets.main_widget import (
ProfilerWidget, is_profiler_installed)
from spyder.plugins.profiler.widgets.run_conf import (
ProfilerPyConfigurationGroup)
from spyder.plugins.run.api import (
RunExecutor, run_execute, RunContext, RunConfiguration,
ExtendedRunExecutionParameters, PossibleRunResult)
class Profiler(SpyderDockablePlugin, RunExecutor):
"""
Profiler (after python's profile and pstats).
"""
NAME = 'profiler'
REQUIRES = [Plugins.Preferences, Plugins.Editor, Plugins.Run]
OPTIONAL = []
TABIFY = [Plugins.Help]
WIDGET_CLASS = ProfilerWidget
CONF_SECTION = NAME
CONF_WIDGET_CLASS = ProfilerConfigPage
CONF_FILE = False
# ---- Signals
# -------------------------------------------------------------------------
sig_started = Signal()
"""This signal is emitted to inform the profiling process has started."""
sig_finished = Signal()
"""This signal is emitted to inform the profile profiling has finished."""
# ---- SpyderDockablePlugin API
# -------------------------------------------------------------------------
@staticmethod
def get_name():
return _("Profiler")
@staticmethod
def METHOD_NAME():
return _("Profile Python files to find execution bottlenecks.")
@classmethod
def get_icon(cls):
return cls.create_icon('profiler')
def on_initialize(self):
widget = self.get_widget()
widget.sig_started.connect(self.sig_started)
widget.sig_finished.connect(self.sig_finished)
self.executor_configuration = [
{
'input_extension': ['py', 'ipy'],
'context': {
'name': 'File'
},
'output_formats': [],
'configuration_widget': ProfilerPyConfigurationGroup,
'requires_cwd': True,
'priority': 3
},
]
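        # This single entry registers the profiler with the Run plugin as an executor
        # for .py/.ipy files, with ProfilerPyConfigurationGroup as its options widget
        # (see on_run_available below).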
@on_plugin_available(plugin=Plugins.Editor)
def on_editor_available(self):
widget = self.get_widget()
editor = self.get_plugin(Plugins.Editor)
widget.sig_edit_goto_requested.connect(editor.load)
@on_plugin_available(plugin=Plugins.Preferences)
def on_preferences_available(self):
preferences = self.get_plugin(Plugins.Preferences)
preferences.register_plugin_preferences(self)
@on_plugin_available(plugin=Plugins.Run)
def on_run_available(self):
run = self.get_plugin(Plugins.Run)
run.register_executor_configuration(self, self.executor_configuration)
if is_profiler_installed():
run.create_run_in_executor_button(
RunContext.File,
self.NAME,
text=_("Run profiler"),
tip=_("Run profiler"),
icon=self.create_icon('profiler'),
shortcut_context='profiler',
register_shortcut=True,
add_to_menu={
"menu": ApplicationMenus.Run,
"section": RunMenuSections.RunInExecutors
}
)
@on_plugin_teardown(plugin=Plugins.Editor)
def on_editor_teardown(self):
widget = self.get_widget()
editor = self.get_plugin(Plugins.Editor)
widget.sig_edit_goto_requested.disconnect(editor.load)
@on_plugin_teardown(plugin=Plugins.Preferences)
def on_preferences_teardown(self):
preferences = self.get_plugin(Plugins.Preferences)
preferences.deregister_plugin_preferences(self)
@on_plugin_teardown(plugin=Plugins.Run)
def on_run_teardown(self):
run = self.get_plugin(Plugins.Run)
run.deregister_executor_configuration(
self, self.executor_configuration)
run.destroy_run_in_executor_button(
RunContext.File, self.NAME)
# ---- Public API
# -------------------------------------------------------------------------
def run_profiler(self):
"""
Run profiler.
Notes
-----
This method will check if the file on the editor can be saved first.
"""
editor = self.get_plugin(Plugins.Editor)
if editor.save():
self.switch_to_plugin()
self.analyze(editor.get_current_filename())
def stop_profiler(self):
"""
Stop profiler.
"""
self.get_widget().stop()
@run_execute(context=RunContext.File)
def run_file(
self,
input: RunConfiguration,
conf: ExtendedRunExecutionParameters
) -> List[PossibleRunResult]:
self.switch_to_plugin()
exec_params = conf['params']
cwd_opts = exec_params['working_dir']
params: ProfilerPyConfiguration = exec_params['executor_params']
run_input: FileRun = input['run_input']
filename = run_input['path']
wdir = cwd_opts['path']
args = params['args']
self.get_widget().analyze(
filename,
wdir=wdir,
args=args
)
|
2,167 |
qnn subtract driver
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import numpy as np
from tvm import relay
def METHOD_NAME(x_datas, y_datas, golden_outputs, scale_and_zp, data_dtype="uint8"):
# all x, y and golden outputs should be of the same length
assert len(x_datas) == len(y_datas)
assert len(y_datas) == len(golden_outputs)
x = relay.var("x", shape=(1, 4), dtype=data_dtype)
y = relay.var("y", shape=(1, 4), dtype=data_dtype)
lhs_scale = relay.const(scale_and_zp["lhs_scale"], "float32")
lhs_zp = relay.const(scale_and_zp["lhs_zp"], "int32")
rhs_scale = relay.const(scale_and_zp["rhs_scale"], "float32")
rhs_zp = relay.const(scale_and_zp["rhs_zp"], "int32")
output_scale = relay.const(scale_and_zp["output_scale"], "float32")
output_zp = relay.const(scale_and_zp["output_zp"], "int32")
z = relay.qnn.op.subtract(
lhs=x,
rhs=y,
lhs_scale=lhs_scale,
lhs_zero_point=lhs_zp,
rhs_scale=rhs_scale,
rhs_zero_point=rhs_zp,
output_scale=output_scale,
output_zero_point=output_zp,
)
func = relay.Function([x, y], z)
mod = tvm.IRModule.from_expr(func)
mod = relay.transform.InferType()(mod)
mod = relay.qnn.transform.CanonicalizeOps()(mod)
func = mod["main"]
for i in range(0, len(x_datas)):
x_data = x_datas[i]
y_data = y_datas[i]
golden_output = golden_outputs[i]
op_res = relay.create_executor("graph", device=tvm.cpu(0), target="llvm").evaluate(func)(
x_data, y_data
)
np.testing.assert_equal(op_res.numpy(), golden_output)
def test_tflite_same_io_qnn_params():
scale_and_zp = {
"lhs_scale": 0.00784314,
"lhs_zp": 127,
"rhs_scale": 0.00784314,
"rhs_zp": 127,
"output_scale": 0.00784314,
"output_zp": 127,
}
x_datas = [
np.array((140, 153, 165, 178)).reshape((1, 4)),
np.array((25, 153, 178, 216)).reshape((1, 4)),
np.array((25, 153, 216, 165)).reshape((1, 4)),
]
y_datas = [
np.array((204, 178, 165, 140)).reshape((1, 4)),
np.array((204, 178, 191, 25)).reshape((1, 4)),
np.array((204, 178, 25, 191)).reshape((1, 4)),
]
golden_outputs = [
np.array((63, 102, 127, 165)).reshape((1, 4)),
np.array((0, 102, 114, 255)).reshape((1, 4)),
np.array((0, 102, 255, 101)).reshape((1, 4)),
]
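    # Sanity check of the first element (shared quantization params): x = 0.00784314 *
    # (140 - 127) = 0.1020, y = 0.00784314 * (204 - 127) = 0.6039, x - y = -0.5020,
    # requantized: round(-0.5020 / 0.00784314) + 127 = -64 + 127 = 63 = golden_outputs[0][0].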
METHOD_NAME(x_datas, y_datas, golden_outputs, scale_and_zp)
def test_tflite_different_io_qnn_params():
scale_and_zp = {
"lhs_scale": 0.0156863,
"lhs_zp": 127,
"rhs_scale": 0.0117647,
"rhs_zp": 85,
"output_scale": 0.0235294,
"output_zp": 128,
}
x_datas = [
np.array((76, 140, 153, 172)).reshape((1, 4)),
np.array((133, 140, 146, 153)).reshape((1, 4)),
np.array((76, 140, 172, 146)).reshape((1, 4)),
]
y_datas = [
np.array((136, 119, 128, 17)).reshape((1, 4)),
np.array((136, 119, 111, 94)).reshape((1, 4)),
np.array((136, 119, 17, 128)).reshape((1, 4)),
]
golden_outputs = [
np.array((68, 120, 123, 192)).reshape((1, 4)),
np.array((106, 120, 128, 140)).reshape((1, 4)),
np.array((68, 120, 192, 119)).reshape((1, 4)),
]
METHOD_NAME(x_datas, y_datas, golden_outputs, scale_and_zp)
def test_saturation():
# Same params
scale_and_zp = {
"lhs_scale": 0.125,
"lhs_zp": 0,
"rhs_scale": 0.125,
"rhs_zp": 0,
"output_scale": 0.125,
"output_zp": 0,
}
x_data = [np.array((255, 1, 1, 0)).reshape((1, 4))]
y_data = [np.array((255, 255, 128, 0)).reshape((1, 4))]
golden_output = [np.array((0, 0, 0, 0)).reshape((1, 4))]
METHOD_NAME(x_data, y_data, golden_output, scale_and_zp)
# Same params, different scale
scale_and_zp = {
"lhs_scale": 0.125,
"lhs_zp": 0,
"rhs_scale": 0.125,
"rhs_zp": 0,
"output_scale": 0.25,
"output_zp": 0,
}
x_data = [np.array((255, 1, 200, 0)).reshape((1, 4))]
y_data = [np.array((255, 255, 127, 0)).reshape((1, 4))]
golden_output = [np.array((0, 0, 36, 0)).reshape((1, 4))]
METHOD_NAME(x_data, y_data, golden_output, scale_and_zp)
# All params different
scale_and_zp = {
"lhs_scale": 0.5,
"lhs_zp": 0,
"rhs_scale": 0.25,
"rhs_zp": 0,
"output_scale": 0.125,
"output_zp": 0,
}
x_data = [np.array((255, 0, 1, 0)).reshape((1, 4))]
y_data = [np.array((0, 128, 64, 0)).reshape((1, 4))]
golden_output = [np.array((255, 0, 0, 0)).reshape((1, 4))]
METHOD_NAME(x_data, y_data, golden_output, scale_and_zp)
if __name__ == "__main__":
test_tflite_same_io_qnn_params()
test_tflite_different_io_qnn_params()
test_saturation()
|
2,168 |
register
|
# This file is part of the Open Data Cube, see https://opendatacube.org for more information
#
# Copyright (c) 2015-2023 ODC Contributors
# SPDX-License-Identifier: Apache-2.0
"""
Tracking spatial indexes
"""
import logging
from threading import Lock
from typing import Mapping, Optional, Type, Union
from sqlalchemy import ForeignKey, select
from sqlalchemy.dialects import postgresql as postgres
from geoalchemy2 import Geometry
from sqlalchemy.engine import Connectable
from sqlalchemy import Column
from sqlalchemy.orm import Session
from datacube.utils.geometry import CRS, Geometry as Geom, multipolygon, polygon
from ._core import METADATA
from .sql import SCHEMA_NAME
from ._schema import orm_registry, Dataset, SpatialIndex, SpatialIndexRecord
_LOG = logging.getLogger(__name__)
# In theory we could just use the SQLAlchemy registry for this, but it is not indexed
# in a useful way.
class SpatialIndexORMRegistry:
"""Threadsafe global registry of SpatialIndex ORM classes, indexed by EPSG/SRID code."""
_registry: Mapping[int, Type[SpatialIndex]] = {}
_lock = Lock()
def __init__(self):
self._registry = self.__class__._registry
self._lock = self.__class__._lock
def _to_epsg(self, epsg_or_crs: Union[CRS, int]) -> int:
"""Utility method to convert a epsg_or_crs to an epsg."""
if isinstance(epsg_or_crs, CRS):
return epsg_or_crs.epsg
else:
return epsg_or_crs
def METHOD_NAME(self, epsg_or_crs: Union[CRS, int]) -> bool:
"""Ensure that SpatialIndex ORM clss is registered for this EPSG/SRID"""
epsg = self._to_epsg(epsg_or_crs)
added = False
with self._lock:
if epsg not in self._registry:
self._registry[epsg] = self._mint_new_spindex(epsg)
added = True
return added
def get(self, epsg_or_crs: Union[CRS, int]) -> Optional[Type[SpatialIndex]]:
"""Retrieve the registered SpatialIndex ORM class"""
epsg = self._to_epsg(epsg_or_crs)
return self._registry.get(epsg)
def _mint_new_spindex(self, epsg: int):
"""
Dynamically create a new ORM class for a EPSG/SRID.
Note: Called within registry lock.
"""
table_name = f"spatial_{epsg}"
attributes = {
'__tablename__': table_name,
'__table_args__': (
METADATA,
{
"schema": SCHEMA_NAME,
"comment": "A product or dataset type, family of related datasets."
}
),
"dataset_ref": Column(postgres.UUID(as_uuid=True), ForeignKey(Dataset.id),
primary_key=True,
nullable=False,
comment="The dataset being indexed")
}
# Add geometry column
attributes["extent"] = Column(Geometry('MULTIPOLYGON', srid=epsg),
nullable=False,
comment="The extent of the dataset")
return orm_registry.mapped(type(f'SpatialIdx{epsg}', (SpatialIndex,), attributes))
def spindex_for_epsg(epsg: int) -> Type[SpatialIndex]:
"""Return ORM class of a SpatialIndex for EPSG/SRID - dynamically creating if necessary"""
sir = SpatialIndexORMRegistry()
spindex = sir.get(epsg)
if spindex is None:
sir.METHOD_NAME(epsg)
spindex = sir.get(epsg)
return spindex
def spindex_for_crs(crs: CRS) -> Type[SpatialIndex]:
"""Return ORM class of a SpatialIndex for CRS - dynamically creating if necessary"""
if not str(crs).startswith("EPSG:") and crs.epsg is None:
# Postgis identifies CRSs by a numeric "SRID" which is equivalent to EPSG number.
_LOG.error("Cannot create a postgis spatial index for a non-EPSG-style CRS.")
return None
return spindex_for_epsg(crs.epsg)
def spindex_for_record(rec: SpatialIndexRecord) -> Type[SpatialIndex]:
"""Convert a Record of a SpatialIndex created in a particular database to an ORM class"""
return spindex_for_crs(rec.crs)
def ensure_spindex(engine: Connectable, sp_idx: Type[SpatialIndex]) -> None:
"""Ensure a Spatial Index exists in a particular database."""
with Session(engine) as session:
results = session.execute(
select(SpatialIndexRecord.srid).where(SpatialIndexRecord.srid == sp_idx.__tablename__[8:])
)
for result in results:
# SpatialIndexRecord exists - actual index assumed to exist too.
return
# SpatialIndexRecord doesn't exist - create the index table...
orm_registry.metadata.create_all(engine, [sp_idx.__table__])
# ... and add a SpatialIndexRecord
session.add(SpatialIndexRecord.from_spindex(sp_idx))
session.commit()
session.flush()
return
def spindexes(engine: Connectable) -> Mapping[CRS, Type[SpatialIndex]]:
"""
Return a CRS-to-Spatial Index ORM class mapping for indexes that exist in a particular database.
"""
out = {}
with Session(engine) as session:
results = session.execute(select(SpatialIndexRecord.srid))
for result in results:
epsg = int(result[0])
spindex = spindex_for_epsg(epsg)
crs = CRS(f'EPSG:{epsg}')
out[crs] = spindex
return out
def promote_to_multipolygon(geom: Geom) -> Geom:
# Assumes input is a polygon or multipolygon - does not work on lines or points
if geom.geom_type == "Multipolygon":
return geom
elif geom.geom_type == "Polygon":
        # Promote to multipolygon (is there a more elegant way to do this??)
polycoords = [list(geom.geom.exterior.coords)]
for interior in geom.geom.interiors:
polycoords.append(list(interior.coords))
geom = multipolygon([polycoords], crs=geom.crs)
return geom
else:
raise ValueError(f"Cannot promote geometry type {geom.geom_type} to multi-polygon")
def geom_alchemy(geom: Geom) -> str:
geom = promote_to_multipolygon(geom)
return f"SRID={geom.crs.epsg};{geom.wkt}"
def sanitise_extent(extent, crs, geo_extent=None):
if not crs.valid_region:
# No valid region on CRS, just reproject
return extent.to_crs(crs)
if geo_extent is None:
geo_extent = extent.to_crs(CRS("EPSG:4326"))
if crs.epsg == 4326:
# geo_extent is what we want anyway - shortcut
return geo_extent
if crs.valid_region.contains(geo_extent):
# Valid region contains extent, just reproject
return extent.to_crs(crs)
if not crs.valid_region.intersects(geo_extent):
# Extent is entirely outside of valid region - return None
return None
# Clip to valid region and reproject
valid_extent = geo_extent & crs.valid_region
if valid_extent.wkt == "POLYGON EMPTY":
# Extent is entirely outside of valid region - return None
return None
return valid_extent.to_crs(crs)
def generate_dataset_spatial_values(dataset_id, crs, extent, geo_extent=None):
extent = sanitise_extent(extent, crs, geo_extent=geo_extent)
if extent is None:
return None
geom_alch = geom_alchemy(extent)
return {"dataset_ref": dataset_id, "extent": geom_alch}
def extract_geometry_from_eo3_projection(eo3_gs_doc):
native_crs = CRS(eo3_gs_doc["spatial_reference"])
valid_data = eo3_gs_doc.get("valid_data")
if valid_data:
return Geom(valid_data, crs=native_crs)
else:
geo_ref_points = eo3_gs_doc.get('geo_ref_points')
if geo_ref_points:
return polygon(
[(geo_ref_points[key]['x'], geo_ref_points[key]['y']) for key in ('ll', 'ul', 'ur', 'lr', 'll')],
crs=native_crs
)
else:
return None
|
2,169 |
test roundtripable types
|
from __future__ import annotations
import hypothesis as h
import hypothesis.strategies as st
import pyarrow as pa
import pyarrow.tests.strategies as past
import pytest
import ibis.expr.datatypes as dt
from ibis.common.exceptions import IntegrityError
from ibis.formats.pyarrow import PyArrowSchema, PyArrowType
def assert_dtype_roundtrip(arrow_type, ibis_type=None, restored_type=None):
dtype = PyArrowType.to_ibis(arrow_type, nullable=False)
if ibis_type is not None:
assert dtype == ibis_type
patyp = PyArrowType.from_ibis(dtype)
if restored_type is None:
restored_type = arrow_type
assert patyp == restored_type
roundtripable_types = st.deferred(
lambda: (
past.null_type
| past.bool_type
| past.integer_types
| past.floating_types
| past.duration_types
| past.string_type
| past.binary_type
| past.timestamp_types
| st.builds(pa.list_, roundtripable_types)
| past.struct_types(roundtripable_types)
| past.map_types(roundtripable_types, roundtripable_types)
)
)
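# st.deferred lets the strategy refer to itself, so the list_/struct/map builders above
# can generate arbitrarily nested combinations of the primitive Arrow types.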
@h.given(roundtripable_types)
def METHOD_NAME(arrow_type):
assert_dtype_roundtrip(arrow_type)
@pytest.mark.parametrize(
("arrow_type", "ibis_type", "restored_type"),
[
(pa.decimal128(1, 1), dt.Decimal(1, 1, nullable=False), pa.decimal128(1, 1)),
(pa.decimal128(10, 3), dt.Decimal(10, 3, nullable=False), pa.decimal128(10, 3)),
(pa.decimal128(38, 3), dt.Decimal(38, 3, nullable=False), pa.decimal128(38, 3)),
(pa.decimal256(1, 1), dt.Decimal(1, 1, nullable=False), pa.decimal128(1, 1)),
(pa.decimal256(38, 5), dt.Decimal(38, 5, nullable=False), pa.decimal128(38, 5)),
(pa.decimal256(39, 6), dt.Decimal(39, 6, nullable=False), pa.decimal256(39, 6)),
(pa.decimal256(76, 6), dt.Decimal(76, 6, nullable=False), pa.decimal256(76, 6)),
(pa.date32(), dt.Date(nullable=False), pa.date64()),
(pa.date64(), dt.Date(nullable=False), pa.date64()),
(pa.time32("s"), dt.Time(nullable=False), pa.time64("ns")),
(pa.time32("ms"), dt.Time(nullable=False), pa.time64("ns")),
(pa.time64("us"), dt.Time(nullable=False), pa.time64("ns")),
(pa.time64("ns"), dt.Time(nullable=False), pa.time64("ns")),
(pa.large_binary(), dt.Binary(nullable=False), pa.binary()),
(pa.large_string(), dt.String(nullable=False), pa.string()),
(
pa.large_list(pa.int64()),
dt.Array(dt.Int64(nullable=True), nullable=False),
pa.list_(pa.int64()),
),
(
pa.list_(pa.int64(), list_size=3),
dt.Array(dt.Int64(nullable=True), nullable=False),
pa.list_(pa.int64()),
),
],
)
def test_non_roundtripable_types(arrow_type, ibis_type, restored_type):
assert_dtype_roundtrip(arrow_type, ibis_type, restored_type)
@pytest.mark.parametrize("timezone", [None, "UTC"])
@pytest.mark.parametrize("nullable", [True, False])
def test_timestamp_no_scale(timezone, nullable):
dtype = dt.Timestamp(scale=None, timezone=timezone, nullable=nullable)
assert dtype.to_pyarrow() == pa.timestamp("us", tz=timezone)
def test_month_day_nano_type_unsupported():
with pytest.raises(ValueError, match="Arrow interval type is not supported"):
PyArrowType.to_ibis(pa.month_day_nano_interval())
@pytest.mark.parametrize("value_nullable", [True, False])
def test_dtype_from_nullable_map_type(value_nullable):
# the key field cannot be nullable
pyarrow_type = pa.map_(
pa.int64(), pa.field("value", pa.int64(), nullable=value_nullable)
)
ibis_type = PyArrowType.to_ibis(pyarrow_type)
restored_type = PyArrowType.from_ibis(ibis_type)
assert ibis_type == dt.Map(
dt.Int64(nullable=False), dt.Int64(nullable=value_nullable)
)
assert restored_type.key_field.type == pa.int64()
assert restored_type.key_field.nullable is False
assert restored_type.item_field.type == pa.int64()
assert restored_type.item_field.nullable is value_nullable
@pytest.mark.parametrize("value_nullable", [True, False])
@pytest.mark.parametrize("list_nullable", [True, False])
def test_dtype_from_nullable_list_type(value_nullable, list_nullable):
pyarrow_type = pa.list_(pa.field("value", pa.int64(), nullable=value_nullable))
ibis_type = PyArrowType.to_ibis(pyarrow_type, nullable=list_nullable)
restored_type = PyArrowType.from_ibis(ibis_type)
assert ibis_type == dt.Array(
dt.Int64(nullable=value_nullable), nullable=list_nullable
)
assert restored_type.value_field.type == pa.int64()
assert restored_type.value_field.nullable is value_nullable
@pytest.mark.parametrize(
("ibis_type", "arrow_type"),
[
(dt.Set(dt.String(nullable=True)), pa.list_(pa.string())),
(
dt.Set(dt.String(nullable=False)),
pa.list_(pa.field("item", pa.string(), nullable=False)),
),
],
)
def test_ibis_exclusive_types(ibis_type, arrow_type):
assert PyArrowType.from_ibis(ibis_type) == arrow_type
def test_schema_from_pyarrow_checks_duplicate_column_names():
arrow_schema = pa.schema(
[
pa.field("a", pa.int64()),
pa.field("a", pa.int64()),
]
)
with pytest.raises(IntegrityError, match="Duplicate column name"):
PyArrowSchema.to_ibis(arrow_schema)
@h.given(past.schemas(roundtripable_types))
def test_schema_roundtrip(pyarrow_schema):
unique_column_names = set(pyarrow_schema.names)
h.assume(len(unique_column_names) == len(pyarrow_schema.names))
ibis_schema = PyArrowSchema.to_ibis(pyarrow_schema)
restored = PyArrowSchema.from_ibis(ibis_schema)
assert pyarrow_schema.equals(restored)
def test_unknown_dtype_gets_converted_to_string():
assert PyArrowType.from_ibis(dt.unknown) == pa.string()
|
2,170 |
test argmax memleak
|
# ***************************************************************
# Copyright (c) 2023 Jittor. All Rights Reserved.
# Maintainers: Dun Liang <[email protected]>.
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************
import unittest
import jittor as jt
import os
import numpy as np
import sys
class TestMiscIssue(unittest.TestCase):
def test_issue4(self):
try:
jt.dirty_fix_pytorch_runtime_error()
import torch
except:
return
# importing together with pytorch causes a segfault
src = """N = 100
import jittor as jt
a = jt.random([N, N])
b = a.broadcast([N,N,N], dims=[0]) * a.broadcast([N,N,N], dims=[2])
b = b.sum(1)
b.sync()
import torch
A = torch.rand(N, N)
torch.matmul(A, A)
"""
assert os.system(f"{sys.executable} -c '{src}'")==0
src = """N = 100
import torch
A = torch.rand(N, N)
torch.matmul(A, A)
import jittor as jt
a = jt.random([N, N])
b = a.broadcast([N,N,N], dims=[0]) * a.broadcast([N,N,N], dims=[2])
b = b.sum(1)
b.sync()
"""
assert os.system(f"{sys.executable} -c '{src}'")==0
def test_mkl_conflict1(self):
try:
jt.dirty_fix_pytorch_runtime_error()
import torch
except:
return
if jt.mkl_ops is None:
return
# importing together with pytorch causes a segfault
src = """
nchw = [2, 3, 100, 100]
oihw = [4, 3, 5, 5]
import jittor as jt
x = jt.random(nchw)
w = jt.random(oihw)
jt.mkl_ops.mkl_conv(x, w, 1, 1, 2, 2).sync()
jt.dirty_fix_pytorch_runtime_error()
import torch
m = torch.nn.Conv2d(3, 4, 5, 1, 2)
m(torch.rand(*nchw))
"""
assert os.system(f"{sys.executable} -c '{src}'")==0
def test_mkl_conflict2(self):
try:
jt.dirty_fix_pytorch_runtime_error()
import torch
except:
return
if jt.mkl_ops is None:
return
# importing together with pytorch causes a segfault
src = """
nchw = [2, 3, 100, 100]
oihw = [4, 3, 5, 5]
import torch
m = torch.nn.Conv2d(3, 4, 5, 1, 2)
m(torch.rand(*nchw))
import jittor as jt
x = jt.random(nchw)
w = jt.random(oihw)
jt.mkl_ops.mkl_conv(x, w, 1, 1, 2, 2).sync()
"""
assert os.system(f"{sys.executable} -c '{src}'")==0
def test_cuda_lowsm(self):
if not jt.has_cuda: return
src = """
import jittor
from jittor.nn import matmul_transpose
a = jittor.ones((3,4,2), dtype="float32")
b = jittor.ones((5, 2), dtype="float32")
print(matmul_transpose(a, b))
jittor.flags.use_cuda = 1
a = jittor.ones((3,4,2), dtype="float32")
b = jittor.ones((5, 2), dtype="float32")
print(matmul_transpose(a, b))
"""
assert os.system(f"cuda_archs=52 {sys.executable} -c '{src}'")==0
def test_parallel(self):
a = jt.code([4], "int", cpu_src="""
#pragma omp parallel num_threads(4)
@out(omp_get_thread_num()) = 456;
""", cpu_header='#include <omp.h>').data
assert (a==[456]*4).all(), a
@unittest.skipIf(not jt.compiler.has_cuda, "No CUDA found")
@jt.flag_scope(use_cuda=1)
def test_reduce_opt(self):
a = jt.random((16,512,38,38))
b = jt.random((16,512,38,38))
jt.sync([a, b])
with jt.profile_scope(rerun=10, warmup=10) as rep:
norm = a.sqr().sum(1, keepdims=True).sqrt()
c = a / norm
da = jt.grad(c*b, a)
jt.sync([c, da])
gpu_c = c.numpy()
gpu_da = da.numpy()
with jt.flag_scope(use_cuda=0):
norm = a.sqr().sum(1, keepdims=True).sqrt()
c = a / norm
da = jt.grad(c*b, a)
assert np.allclose(gpu_c, c.data, 1e-3)
assert (np.abs(gpu_da-da.data).max() < 1e-6)
assert float(rep[1][3]) < 15e6, float(rep[1][3]) # 15ms(about 8ms)
@unittest.skipIf(not jt.compiler.has_cuda, "No CUDA found")
@jt.flag_scope(use_cuda=1)
def test_cuda_min_max(self):
a = jt.random((10,)) - 2
assert a.min().data == a.data.min(), (a.min(), a.data.min())
assert a.max().data == a.data.max(), (a.max(), a.data.max())
a = jt.random((10,)) + 2
assert a.min().data == a.data.min(), (a.min(), a.data.min())
assert a.max().data == a.data.max(), (a.max(), a.data.max())
a = jt.random((10,)).float64() - 2
assert a.min().data == a.data.min(), (a.min(), a.data.min())
assert a.max().data == a.data.max(), (a.max(), a.data.max())
a = jt.random((10,)).float64() + 2
assert a.min().data == a.data.min(), (a.min(), a.data.min())
assert a.max().data == a.data.max(), (a.max(), a.data.max())
@unittest.skipIf(not jt.compiler.has_cuda, "No CUDA found")
@jt.flag_scope(use_cuda=1)
def test_cuda_pow_grad_nan(self):
a = jt.float32([1,-1, -1000.1])
da = jt.grad(a**2, a)
assert np.isnan(da.data).sum()==0, da.data
def test_tanh_nan(self):
m=jt.nn.Tanh()
a = m(jt.array([1000]))
assert np.isnan(a.data).sum()==0, a
def test_sigmoid_nan(self):
a = jt.float32([1,-1, -1000.1])
da = jt.grad(a.sigmoid(), a)
assert np.isnan(da.data).sum()==0, da.data
def test_sequential(self):
x = jt.nn.Sequential(lambda x:x, lambda x:x)
n = 0
for a in x:
n += 1
assert n == 2
assert list(x.keys()) == [0,1]
p = x.parameters()
assert len(p)==0
def test_self_update(self):
from jittor.models import resnet18
m = resnet18()
x = m.state_dict()
m.load_state_dict(x)
def test_res2net(self):
import jittor.models
net = jittor.models.res2net50(True)
img = jt.random((2,3,224,224))
out = net(img)
print(out.shape, out.sum())
jt.display_memory_info()
jt.display_memory_info()
assert out.shape == [2,1000]
def METHOD_NAME(self):
a = jt.random([10])
_, m = jt.argmax(a, 0)
del _
m.sync()
g = jt.grad(m*10, a)
g.sync()
del a, g, m
jt.display_memory_info()
assert jt.liveness_info()["lived_ops"] == 0
if __name__ == "__main__":
unittest.main()
|
2,171 |
test accessor returns a node
|
import os
import sys
import unittest
import listener.psapi
import listener.server
import listener.nodes
class EmptyNode(object):
def __init__(self, name):
self.name = name
class TestParentNode(unittest.TestCase):
def setUp(self):
listener.server.__INTERNAL__ = True
self.n = listener.nodes.ParentNode('testing')
def test_init(self):
test_nodes = [EmptyNode(x) for x in ('bingo', 'bongo')]
p = listener.nodes.ParentNode('parent_testing', test_nodes)
for node in test_nodes:
self.assertTrue(node.name in p.children)
self.assertEqual(node, p.children[node.name])
def test_add_child(self):
self.assertEqual(self.n.children, {})
new_node_name = 'testing'
new_node = EmptyNode(new_node_name)
self.n.add_child(new_node)
self.assertIn(new_node_name, self.n.children)
def METHOD_NAME(self):
test_node = listener.nodes.ParentNode('testing')
self.n.add_child(test_node)
self.assertIsInstance(self.n.accessor(['testing'], None, None, None), listener.nodes.ParentNode)
self.assertIsInstance(self.n.accessor(['nonexistent'], None, None, None), listener.nodes.DoesNotExistNode)
def test_accessor_returns_a_copy(self):
test_node = listener.nodes.ParentNode('testing')
self.n.add_child(test_node)
self.assertIsNot(test_node, self.n.accessor(['testing'], None, None, None))
def test_walk_returns_dict(self):
self.assertIsInstance(self.n.walk(), dict)
def test_run_check_returns_dict(self):
self.assertIsInstance(self.n.run_check(), dict)
def test_run_check_returns_valid_result(self):
result = self.n.run_check()
self.assertIn('stdout', result)
self.assertIn('returncode', result)
class TestRunnableNode(unittest.TestCase):
def setUp(self):
self.node_name = 'testing'
self.n = listener.nodes.RunnableNode(self.node_name, lambda: ('Ok', 1))
def test_accessor_returns_copy(self):
self.assertIsNot(self.n, self.n.accessor([], None, None, None))
def test_walk_returns_dict(self):
self.assertIsInstance(self.n.walk(), dict)
def test_walk_returns_valid_response(self):
response = self.n.walk()
self.assertIn(self.node_name, response)
self.assertEqual(len(response[self.node_name]), 2)
def test_walk_passes_units(self):
response = self.n.walk(unit='t')
self.assertEqual(response[self.node_name][1], 't')
def test_set_unit(self):
self.n.set_unit('b', {})
self.assertEqual(self.n.unit, 'b')
def test_set_unit_with_kwargs(self):
self.n.set_unit('b', {'unit': 'k'})
self.assertEqual(self.n.unit, 'k')
def test_get_adjusted_scale(self):
values = self.n.get_adjusted_scale([0], {})
self.assertEqual(values, [0])
def test_get_adjusted_scale_with_unit(self):
self.n.adjust_scale = lambda x, y: ([z+1 for z in x], 'b')
values = self.n.get_adjusted_scale([0], {'units': 'k'})
self.assertEqual(values, [0])
self.assertEqual(self.n.unit, '')
def test_set_warning(self):
self.n.set_warning({'warning': [0]})
self.assertEqual([0], self.n.warning)
self.n.set_warning({})
self.assertEqual('', self.n.warning)
def test_set_critical(self):
self.n.set_critical({'critical': [0]})
self.assertEqual([0], self.n.critical)
self.n.set_critical({})
self.assertEqual('', self.n.critical)
def test_set_title(self):
self.n.set_title({})
self.assertEqual(self.n.title, self.node_name)
self.n.set_title({'title': ['title']})
self.assertEqual(self.n.title, 'title')
def test_set_perfdata_label(self):
self.n.set_perfdata_label({'perfdata_label': [0]})
self.assertEqual(0, self.n.perfdata_label)
self.n.set_perfdata_label({})
self.assertEqual(None, self.n.perfdata_label)
def test_run_check(self):
result = self.n.run_check()
self.assertIsInstance(result, dict)
if __name__ == '__main__':
unittest.main()
|
2,172 |
inner product
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility methods related to kernelized layers."""
import tensorflow.compat.v2 as tf
def _to_matrix(u):
"""If input tensor is a vector (i.e., has rank 1), converts it to matrix."""
u_rank = len(u.shape)
if u_rank not in [1, 2]:
raise ValueError(
f"The input tensor should have rank 1 or 2. Received rank: {u_rank}"
)
if u_rank == 1:
return tf.expand_dims(u, 0)
return u
def _align_matrices(x, y):
"""Aligns x and y tensors to allow computations over pairs of their rows."""
x_matrix = _to_matrix(x)
y_matrix = _to_matrix(y)
x_shape = x_matrix.shape
y_shape = y_matrix.shape
if y_shape[1] != x_shape[1]: # dimensions do not match.
raise ValueError(
"The outermost dimensions of the input tensors should match. "
f"Received y = {y_shape[1]} vs x = {x_shape[1]}."
)
x_tile = tf.tile(tf.expand_dims(x_matrix, 1), [1, y_shape[0], 1])
y_tile = tf.tile(tf.expand_dims(y_matrix, 0), [x_shape[0], 1, 1])
return x_tile, y_tile
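# Illustrative shapes (an assumption, not stated in the original module): for
# x of shape (m, d) and y of shape (n, d), _align_matrices returns two tensors
# of shape (m, n, d) so that every row of x can be compared with every row of y.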
def METHOD_NAME(u, v):
u = _to_matrix(u)
v = _to_matrix(v)
return tf.matmul(u, v, transpose_b=True)
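# Illustrative usage (assumed values, not part of the original code): for
# u = [[1., 2.]] and v = [[1., 0.], [0., 1.]], the pairwise inner products are
# tf.matmul(u, v, transpose_b=True) == [[1., 2.]].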
def exact_gaussian_kernel(x, y, stddev):
r"""Computes exact Gaussian kernel value(s) for tensors x and y and stddev.
The Gaussian kernel for vectors u, v is defined as follows:
K(u, v) = exp(-||u-v||^2 / (2* stddev^2))
where the norm is the l2-norm. x, y can be either vectors or matrices. If
they are vectors, they must have the same dimension. If they are matrices,
they must have the same number of columns. In the latter case, the method
returns (as a matrix) K(u, v) values for all pairs (u, v) where u is a row
from x and v is a row from y.
Args:
x: a tensor of rank 1 or 2. Its shape should be either [dim] or [m, dim].
y: a tensor of rank 1 or 2. Its shape should be either [dim] or [n, dim].
stddev: The width of the Gaussian kernel.
Returns:
A single value (scalar) with shape (1, 1) (if x, y are vectors) or a
matrix of shape (m, n) with entries K(u, v) (where K is the Gaussian
kernel) for all (u,v) pairs where u, v are rows from x and y respectively.
Raises:
ValueError: if the shapes of x, y are not compatible.
"""
x_aligned, y_aligned = _align_matrices(x, y)
diff_squared_l2_norm = tf.reduce_sum(
tf.math.squared_difference(x_aligned, y_aligned), 2
)
return tf.exp(-diff_squared_l2_norm / (2 * stddev * stddev))
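# Worked example (illustrative values, not from the original code): for
# u = [0., 0.], v = [3., 4.] and stddev = 5, ||u - v||^2 = 25, so
# K(u, v) = exp(-25 / (2 * 25)) = exp(-0.5) ~= 0.6065, returned with shape
# (1, 1) because vector inputs are promoted to matrices.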
def exact_laplacian_kernel(x, y, stddev):
r"""Computes exact Laplacian kernel value(s) for tensors x & y using stddev.
The Laplacian kernel for vectors u, v is defined as follows:
K(u, v) = exp(-||u-v|| / stddev)
where the norm is the l1-norm. x, y can be either vectors or matrices. If
they are vectors, they must have the same dimension. If they are matrices,
they must have the same number of columns. In the latter case, the method
returns (as a matrix) K(u, v) values for all pairs (u, v) where u is a row
from x and v is a row from y.
Args:
x: a tensor of rank 1 or 2. Its shape should be either [dim] or [m, dim].
y: a tensor of rank 1 or 2. Its shape should be either [dim] or [n, dim].
stddev: The width of the Laplacian kernel.
Returns:
A single value (scalar) with shape (1, 1) if x, y are vectors or a matrix
of shape (m, n) with entries K(u, v) (where K is the Laplacian kernel) for
all (u,v) pairs where u, v are rows from x and y respectively.
Raises:
ValueError: if the shapes of x, y are not compatible.
"""
x_aligned, y_aligned = _align_matrices(x, y)
diff_l1_norm = tf.reduce_sum(tf.abs(tf.subtract(x_aligned, y_aligned)), 2)
return tf.exp(-diff_l1_norm / stddev)
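# Worked example (illustrative values, not from the original code): for
# u = [0., 0.], v = [3., 4.] and stddev = 7, the l1 norm ||u - v|| = 3 + 4 = 7,
# so K(u, v) = exp(-7 / 7) = exp(-1) ~= 0.3679.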
|
2,173 |
get next
|
#!/usr/bin/env python
#
# Urwid example lazy text editor suitable for tabbed and format=flowed text
# Copyright (C) 2004-2009 Ian Ward
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Urwid web site: https://urwid.org/
"""
Urwid example lazy text editor suitable for tabbed and flowing text
Features:
- custom list walker for lazily loading text file
Usage:
edit.py <filename>
"""
from __future__ import annotations
import sys
import urwid
class LineWalker(urwid.ListWalker):
"""ListWalker-compatible class for lazily reading file contents."""
def __init__(self, name):
self.file = open(name)
self.lines = []
self.focus = 0
def get_focus(self):
return self._get_at_pos(self.focus)
def set_focus(self, focus):
self.focus = focus
self._modified()
def METHOD_NAME(self, start_from):
return self._get_at_pos(start_from + 1)
def get_prev(self, start_from):
return self._get_at_pos(start_from - 1)
def read_next_line(self):
"""Read another line from the file."""
next_line = self.file.readline()
if not next_line or next_line[-1:] != '\n':
# no newline on last line of file
self.file = None
else:
# trim newline characters
next_line = next_line[:-1]
expanded = next_line.expandtabs()
edit = urwid.Edit("", expanded, allow_tab=True)
edit.set_edit_pos(0)
edit.original_text = next_line
self.lines.append(edit)
return next_line
def _get_at_pos(self, pos):
"""Return a widget for the line number passed."""
if pos < 0:
# line 0 is the start of the file, no more above
return None, None
if len(self.lines) > pos:
# we have that line so return it
return self.lines[pos], pos
if self.file is None:
# file is closed, so there are no more lines
return None, None
assert pos == len(self.lines), "out of order request?"
self.read_next_line()
return self.lines[-1], pos
def split_focus(self):
"""Divide the focus edit widget at the cursor location."""
focus = self.lines[self.focus]
pos = focus.edit_pos
edit = urwid.Edit("",focus.edit_text[pos:], allow_tab=True)
edit.original_text = ""
focus.set_edit_text(focus.edit_text[:pos])
edit.set_edit_pos(0)
self.lines.insert(self.focus+1, edit)
def combine_focus_with_prev(self):
"""Combine the focus edit widget with the one above."""
above, ignore = self.get_prev(self.focus)
if above is None:
# already at the top
return
focus = self.lines[self.focus]
above.set_edit_pos(len(above.edit_text))
above.set_edit_text(above.edit_text + focus.edit_text)
del self.lines[self.focus]
self.focus -= 1
def combine_focus_with_next(self):
"""Combine the focus edit widget with the one below."""
below, ignore = self.METHOD_NAME(self.focus)
if below is None:
# already at bottom
return
focus = self.lines[self.focus]
focus.set_edit_text(focus.edit_text + below.edit_text)
del self.lines[self.focus+1]
class EditDisplay:
palette = [
('body','default', 'default'),
('foot','dark cyan', 'dark blue', 'bold'),
('key','light cyan', 'dark blue', 'underline'),
]
footer_text = ('foot', [
"Text Editor ",
('key', "F5"), " save ",
('key', "F8"), " quit",
])
def __init__(self, name):
self.save_name = name
self.walker = LineWalker(name)
self.listbox = urwid.ListBox(self.walker)
self.footer = urwid.AttrWrap(urwid.Text(self.footer_text),
"foot")
self.view = urwid.Frame(urwid.AttrWrap(self.listbox, 'body'),
footer=self.footer)
def main(self):
self.loop = urwid.MainLoop(self.view, self.palette,
unhandled_input=self.unhandled_keypress)
self.loop.run()
def unhandled_keypress(self, k):
"""Last resort for keypresses."""
if k == "f5":
self.save_file()
elif k == "f8":
raise urwid.ExitMainLoop()
elif k == "delete":
# delete at end of line
self.walker.combine_focus_with_next()
elif k == "backspace":
# backspace at beginning of line
self.walker.combine_focus_with_prev()
elif k == "enter":
# start new line
self.walker.split_focus()
# move the cursor to the new line and reset pref_col
self.loop.process_input(["down", "home"])
elif k == "right":
w, pos = self.walker.get_focus()
w, pos = self.walker.METHOD_NAME(pos)
if w:
self.listbox.set_focus(pos, 'above')
self.loop.process_input(["home"])
elif k == "left":
w, pos = self.walker.get_focus()
w, pos = self.walker.get_prev(pos)
if w:
self.listbox.set_focus(pos, 'below')
self.loop.process_input(["end"])
else:
return
return True
def save_file(self):
"""Write the file out to disk."""
l = []
walk = self.walker
for edit in walk.lines:
# collect the text already stored in edit widgets
if edit.original_text.expandtabs() == edit.edit_text:
l.append(edit.original_text)
else:
l.append(re_tab(edit.edit_text))
# then the rest
while walk.file is not None:
l.append(walk.read_next_line())
# write back to disk
outfile = open(self.save_name, "w")
prefix = ""
for line in l:
outfile.write(prefix + line)
prefix = "\n"
def re_tab(s):
"""Return a tabbed string from an expanded one."""
l = []
p = 0
for i in range(8, len(s), 8):
if s[i-2:i] == " ":
# collapse two or more spaces into a tab
l.append(f"{s[p:i].rstrip()}\t")
p = i
if p == 0:
return s
else:
l.append(s[p:])
return "".join(l)
def main():
try:
name = sys.argv[1]
assert open(name, "a")
except:
sys.stderr.write(__doc__)
return
EditDisplay(name).main()
if __name__=="__main__":
main()
|
2,174 |
set cb
|
from PikaObj import *
def __init__(): ...
class EVENT:
ALL: int
PRESSED: int
PRESSING: int
PRESS_LOST: int
SHORT_CLICKED: int
LONG_PRESSED: int
LONG_PRESSED_REPEAT: int
CLICKED: int
RELEASED: int
SCROLL_BEGIN: int
SCROLL_END: int
SCROLL: int
GESTURE: int
KEY: int
FOCUSED: int
DEFOCUSED: int
LEAVE: int
HIT_TEST: int
COVER_CHECK: int
REFR_EXT_DRAW_SIZE: int
DRAW_MAIN_BEGIN: int
DRAW_MAIN: int
DRAW_MAIN_END: int
DRAW_POST_BEGIN: int
DRAW_POST: int
DRAW_POST_END: int
DRAW_PART_BEGIN: int
DRAW_PART_END: int
VALUE_CHANGED: int
INSERT: int
REFRESH: int
READY: int
CANCEL: int
DELETE: int
CHILD_CHANGED: int
CHILD_CREATED: int
CHILD_DELETED: int
SCREEN_UNLOAD_START: int
SCREEN_LOAD_START: int
SCREEN_LOADED: int
SCREEN_UNLOADED: int
SIZE_CHANGED: int
STYLE_CHANGED: int
LAYOUT_CHANGED: int
GET_SELF_SIZE: int
PREPROCESS: int
def __init__(self): ...
class ALIGN:
DEFAULT: int
TOP_LEFT: int
TOP_MID: int
TOP_RIGHT: int
BOTTOM_LEFT: int
BOTTOM_MID: int
BOTTOM_RIGHT: int
LEFT_MID: int
RIGHT_MID: int
CENTER: int
OUT_TOP_LEFT: int
OUT_TOP_MID: int
OUT_TOP_RIGHT: int
OUT_BOTTOM_LEFT: int
OUT_BOTTOM_MID: int
OUT_BOTTOM_RIGHT: int
OUT_LEFT_TOP: int
OUT_LEFT_MID: int
OUT_LEFT_BOTTOM: int
OUT_RIGHT_TOP: int
OUT_RIGHT_MID: int
OUT_RIGHT_BOTTOM: int
def __init__(self): ...
class PALETTE:
RED: int
PINK: int
PURPLE: int
DEEP_PURPLE: int
INDIGO: int
BLUE: int
LIGHT_BLUE: int
CYAN: int
TEAL: int
GREEN: int
LIGHT_GREEN: int
LIME: int
YELLOW: int
AMBER: int
ORANGE: int
DEEP_ORANGE: int
BROWN: int
BLUE_GREY: int
GREY: int
NONE: int
def __init__(self): ...
class OPA:
TRANSP: int
COVER: int
def __init__(self): ...
class ANIM:
OFF: int
ON: int
def __init__(self): ...
class STATE:
def __init__(self): ...
class lv_event:
def get_code(self) -> int: ...
def get_target(self) -> lv_obj: ...
class lv_color_t: ...
class lv_timer_t:
def set_period(period: int): ...
def METHOD_NAME(cb: any): ...
def _del(self): ...
def palette_lighten(p: int, lvl: int) -> lv_color_t: ...
def palette_main(p: int) -> lv_color_t: ...
class style_t:
def __init__(self): ...
def init(self): ...
def set_radius(self, radius: int): ...
def set_bg_opa(self, opa: int): ...
def set_bg_color(self, color: lv_color_t): ...
def set_outline_width(self, w: int): ...
def set_outline_color(self, color: lv_color_t): ...
def set_outline_pad(self, pad: int): ...
def set_shadow_width(self, w: int): ...
def set_shadow_spread(self, s: int): ...
def set_shadow_color(self, color: lv_color_t): ...
class lv_obj:
def center(self): ...
def set_size(self, size_x: int, size_y: int): ...
def align(self, align: int, x_ofs: int, y_ofs: int): ...
def set_hight(self, h: int): ...
def update_layout(self): ...
def set_width(self, w: int): ...
def add_state(self, state: int): ...
def add_event(self, event_cb: any, filter: int, user_data: pointer): ...
def add_style(self, style: style_t, selector: int): ...
def get_x(self) -> int: ...
def get_y(self) -> int: ...
def set_pos(self, x: int, y: int): ...
class indev_t:
def get_vect(self, point: point_t): ...
def obj(parent: lv_obj) -> lv_obj: ...
def indev_get_act() -> indev_t: ...
class point_t:
def __init__(self): ...
class arc(lv_obj):
MODE_NORMAL: int
MODE_SYMMETRICAL: int
MODE_REVERSE: int
def __init__(self, parent: lv_obj): ...
def set_start_angle(self, start: int): ...
def set_end_angle(self, angle: int): ...
def set_angles(self, start: int, end: int): ...
def set_bg_start_angle(self, start: int): ...
def set_bg_end_angle(self, angle: int): ...
def set_bg_angles(self, start: int, end: int): ...
def set_rotation(self, rotation: int): ...
def set_mode(self, mode: int): ...
def set_value(self, value: int): ...
def set_range(self, min: int, max: int): ...
def set_change_rate(self, rate: int): ...
def get_angle_start(self) -> int: ...
def get_angle_end(self) -> int: ...
def get_bg_angle_start(self) -> int: ...
def get_bg_angle_end(self) -> int: ...
def get_value(self) -> int: ...
def get_min_value(self) -> int: ...
def get_max_value(self) -> int: ...
def get_mode(self) -> int: ...
# def get_rotation(self) -> int: ...
class bar(lv_obj):
def __init__(self, parent: lv_obj): ...
def set_value(self, value: int, anim: int): ...
def set_start_value(self, start_value: int, anim: int): ...
def set_range(self, min: int, max: int): ...
def set_mode(self, mode: int): ...
def get_value(self) -> int: ...
def get_start_value(self) -> int: ...
def get_min_value(self) -> int: ...
def get_max_value(self) -> int: ...
def get_mode(self) -> int: ...
class btn(lv_obj):
def __init__(self, parent: lv_obj): ...
class checkbox(lv_obj):
def __init__(self, parent: lv_obj): ...
def set_text(self, txt: str): ...
def set_text_static(self, txt: str): ...
def get_text(self) -> str: ...
class dropdown(lv_obj):
def __init__(self, parent: lv_obj): ...
def set_text(self, txt: str): ...
def set_options(self, options: str): ...
def add_option(self, option: str, pos:int): ...
def clear_options(self): ...
def set_selected(self, sel_opt: int): ...
def set_dir(self, dir: int): ...
def set_symbol(self, symbol: str): ...
def set_selected_hightlight(self, en: int): ...
# def get_list(self) -> lv_obj: ...
def get_text(self) -> str: ...
def get_options(self) -> str: ...
def get_selected(self) -> int: ...
def get_option_cnt(self) -> int: ...
def get_selected_str(self) -> str: ...
def get_option_index(self, option: str) -> int: ...
def get_symbol(self) -> str: ...
def get_selected_highlight(self) -> int: ...
def get_dir(self) -> int: ...
def open(self): ...
def close(self): ...
def is_open(self) -> int: ...
class label(lv_obj):
def __init__(self, parent: lv_obj): ...
def set_text(self, txt: str): ...
def set_long_mode(self, mode: int): ...
def set_recolor(self, en: int): ...
def set_style_text_align(self, value: int, selector: int): ...
class roller(lv_obj):
def __init__(self, parent: lv_obj): ...
def set_options(self, options: str, mode: int): ...
def set_visible_row_count(self, row_cnt: int): ...
class slider(lv_obj):
def __init__(self, parent: lv_obj): ...
class switch(lv_obj):
def __init__(self, parent: lv_obj): ...
class table(lv_obj):
def __init__(self, parent: lv_obj): ...
def set_cell_value(self, row: int, col: int, txt: str): ...
class textarea(lv_obj):
def __init__(self, parent: lv_obj): ...
def set_one_line(en: int): ...
def scr_act() -> lv_obj: ...
def timer_create_basic() -> lv_timer_t: ...
|
2,175 |
test check is bool non boolean attr
|
# -*- coding: utf-8 -*-
from unittest import mock
from django.db import models
from django.test import TestCase
from tests.testapp.models import (
DummyRelationModel, InheritedFromPostWithUniqFieldCompat, PostWithUniqFieldCompat,
ReverseModelCompat, SecondDummyRelationModel, ThirdDummyRelationModel,
)
from django_extensions.db.fields import UniqueFieldMixin
class UniqFieldMixinCompatTestCase(TestCase):
def setUp(self):
class MockField(UniqueFieldMixin):
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
self.uniq_field = MockField(
attname='uniq_field',
max_length=255,
boolean_attr=True,
non_boolean_attr='non_boolean_attr'
)
f_dummy = DummyRelationModel.objects.create()
s_dummy = SecondDummyRelationModel.objects.create()
t_dummy = ThirdDummyRelationModel.objects.create()
post = PostWithUniqFieldCompat.objects.create(
uniq_field='test_uniq',
common_field='first',
another_common_field='second',
many_to_one_field=f_dummy,
one_to_one_field=s_dummy,
)
post.many_to_many_field.add(t_dummy)
post.save()
ReverseModelCompat.objects.create(post_field=post)
self.post = post
def tearDown(self):
PostWithUniqFieldCompat.objects.all().delete()
DummyRelationModel.objects.all().delete()
SecondDummyRelationModel.objects.all().delete()
ThirdDummyRelationModel.objects.all().delete()
ReverseModelCompat.objects.all().delete()
def test_check_is_bool_boolean_attr(self):
self.assertIsNone(self.uniq_field.check_is_bool('boolean_attr'))
def METHOD_NAME(self):
with self.assertRaisesMessage(
ValueError,
"'non_boolean_attr' argument must be True or False",
):
self.uniq_field.check_is_bool('non_boolean_attr')
def test__get_fields_returns_list_of_tulpes(self):
uniq_mixin_fields = UniqueFieldMixin._get_fields(PostWithUniqFieldCompat)
self.assertIsInstance(uniq_mixin_fields, list)
for field in uniq_mixin_fields:
self.assertIsInstance(field, tuple)
def test__get_fields_returns_correct_fields(self):
option_fields = PostWithUniqFieldCompat._meta.get_fields()
uniq_mixin_fields = [i[0] for i in UniqueFieldMixin._get_fields(PostWithUniqFieldCompat)]
self.assertEqual(len(option_fields), 9)
self.assertEqual(len(uniq_mixin_fields), 7)
fields_to_be_excluded_from_uniq_mixin_fields = [
f for f in option_fields
if f.is_relation and not f.one_to_one and not (f.many_to_one and f.related_model)
]
for field in fields_to_be_excluded_from_uniq_mixin_fields:
self.assertNotIn(field, uniq_mixin_fields)
def test__get_fields_returns_correct_model(self):
post_models = [i[1] for i in UniqueFieldMixin._get_fields(PostWithUniqFieldCompat)]
self.assertTrue(all(model is None for model in post_models))
inherited_post_models = [
i[1] for i
in UniqueFieldMixin._get_fields(InheritedFromPostWithUniqFieldCompat)
if i[1]
]
self.assertEqual(len(inherited_post_models), 6)
self.assertTrue(all(model is PostWithUniqFieldCompat for model in inherited_post_models))
def test_get_queryset(self):
mocked_get_fields = (
(models.CharField, PostWithUniqFieldCompat),
)
with mock.patch(
'django_extensions.db.fields.UniqueFieldMixin._get_fields',
return_value=mocked_get_fields
), mock.patch(
'tests.testapp.models.PostWithUniqFieldCompat._default_manager.all'
) as mocked_qs_all:
self.uniq_field.get_queryset(PostWithUniqFieldCompat, models.CharField)
mocked_qs_all.assert_called_with()
mocked_get_fields = (
(models.CharField, None),
)
with mock.patch(
'django_extensions.db.fields.UniqueFieldMixin._get_fields',
return_value=mocked_get_fields
), mock.patch(
'tests.testapp.models.InheritedFromPostWithUniqFieldCompat._default_manager.all'
) as mocked_qs_all:
self.uniq_field.get_queryset(InheritedFromPostWithUniqFieldCompat, models.CharField)
mocked_qs_all.assert_called_with()
def test_find_unique(self):
def filter_func(*args, **kwargs):
uniq_field = kwargs.get('uniq_field')
if uniq_field == 'a':
return mocked_qs
return None
mocked_qs = mock.Mock(spec=PostWithUniqFieldCompat.objects)
mocked_qs.filter.side_effect = filter_func
mocked_qs.exclude.return_value = mocked_qs
field = models.CharField
with mock.patch(
'django_extensions.db.fields.UniqueFieldMixin.get_queryset',
return_value=mocked_qs
) as get_qs:
res = self.uniq_field.find_unique(self.post, field, iter('abcde'))
get_qs.assert_called_with(PostWithUniqFieldCompat, field)
mocked_qs.exclude.assert_called_with(pk=self.post.pk)
self.assertEqual(res, 'b')
self.assertTrue(hasattr(self.post, 'uniq_field'))
self.assertEqual(self.post.uniq_field, 'b')
|
2,176 |
set file mode
|
# This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2008 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
Widgets to edit a list of items in a flexible way.
"""
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtWidgets import (
QFileDialog, QGridLayout, QListWidget, QListWidgetItem, QPushButton,
QWidget)
import app
import icons
class ListEdit(QWidget):
"""A widget to edit a list of items (e.g. a list of directories)."""
# emitted when anything changed in the listbox.
changed = pyqtSignal()
def __init__(self, *args, **kwargs):
QWidget.__init__(self, *args, **kwargs)
layout = QGridLayout(self)
self.setLayout(layout)
self.addButton = QPushButton(icons.get('list-add'), '')
self.editButton = QPushButton(icons.get('document-edit'), '')
self.removeButton = QPushButton(icons.get('list-remove'), '')
self.listBox = QListWidget()
layout.setContentsMargins(1, 1, 1, 1)
layout.setSpacing(0)
layout.addWidget(self.listBox, 0, 0, 8, 1)
layout.addWidget(self.addButton, 0, 1)
layout.addWidget(self.editButton, 1, 1)
layout.addWidget(self.removeButton, 2, 1)
self.changed.connect(self.updateSelection)
self.listBox.itemSelectionChanged.connect(self.updateSelection)
self.updateSelection()
self.connectSlots()
app.translateUI(self)
def connectSlots(self):
self.addButton.clicked.connect(self.addClicked)
self.editButton.clicked.connect(self.editClicked)
self.removeButton.clicked.connect(self.removeClicked)
self.listBox.itemDoubleClicked.connect(self.itemDoubleClicked)
self.listBox.model().layoutChanged.connect(self.changed)
def translateUI(self):
self.addButton.setText(_("&Add..."))
self.editButton.setText(_("&Edit..."))
self.removeButton.setText(_("&Remove"))
def addClicked(self, button):
item = self.createItem()
if self.openEditor(item):
self.addItem(item)
def editClicked(self, button):
item = self.listBox.currentItem()
item and self.editItem(item)
def removeClicked(self, button):
item = self.listBox.currentItem()
if item:
self.removeItem(item)
def updateSelection(self):
selected = bool(self.listBox.currentItem())
self.editButton.setEnabled(selected)
self.removeButton.setEnabled(selected)
def itemDoubleClicked(self, item):
item and self.editItem(item)
def createItem(self):
return QListWidgetItem()
def addItem(self, item):
self.listBox.addItem(item)
self.itemChanged(item)
self.changed.emit()
def removeItem(self, item):
self.listBox.takeItem(self.listBox.row(item))
self.itemRemoved(item)
self.changed.emit()
def editItem(self, item):
if self.openEditor(item):
self.itemChanged(item)
self.changed.emit()
def setCurrentItem(self, item):
self.listBox.setCurrentItem(item)
def setCurrentRow(self, row):
self.listBox.setCurrentRow(row)
def openEditor(self, item):
"""Opens an editor (dialog) for the item.
Returns True if the dialog was accepted and the item edited.
Returns False if the dialog was cancelled (the item must be left
unedited).
"""
pass
def itemChanged(self, item):
"""Called after an item has been added or edited.
Re-implement to do something at this moment if needed, e.g. alter the
text or display of other items.
"""
pass
def itemRemoved(self, item):
"""Called after an item has been removed.
Re-implement to do something at this moment if needed.
"""
pass
def setValue(self, strings):
"""Sets the listbox to a list of strings."""
self.listBox.clear()
self.listBox.addItems(strings)
self.changed.emit()
def value(self):
"""Returns the list of paths in the listbox."""
return [self.listBox.item(i).text()
for i in range(self.listBox.count())]
def setItems(self, items):
"""Sets the listbox to a list of items."""
self.listBox.clear()
for item in items:
self.listBox.addItem(item)
self.itemChanged(item)
self.changed.emit()
def items(self):
"""Returns the list of items in the listbox."""
return [self.listBox.item(i)
for i in range(self.listBox.count())]
def clear(self):
"""Clears the listbox."""
self.listBox.clear()
self.changed.emit()
class FilePathEdit(ListEdit):
"""
A widget to edit a list of directories (e.g. a file path).
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def fileDialog(self):
"""The QFileDialog this widget is using."""
try:
return self._filedialog
except AttributeError:
self._filedialog = d = QFileDialog(self)
d.METHOD_NAME(QFileDialog.Directory)
return d
def openEditor(self, item):
"""Asks the user for an (existing) directory."""
directory = item.text()
dlg = self.fileDialog()
dlg.selectFile(directory)
if dlg.exec_():
item.setText(dlg.selectedFiles()[0])
return True
return False
def METHOD_NAME(self, mode):
modes = {
'directory': QFileDialog.Directory,
QFileDialog.Directory: QFileDialog.Directory,
'file': QFileDialog.ExistingFile,
QFileDialog.ExistingFile: QFileDialog.ExistingFile,
'anyfile': QFileDialog.AnyFile,
QFileDialog.AnyFile: QFileDialog.AnyFile
}
self.fileDialog().METHOD_NAME(modes[mode])
|
2,177 |
test example secret list operations
|
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# -------------------------------------
from __future__ import print_function
import time
import pytest
from devtools_testutils import recorded_by_proxy
from _shared.test_case import KeyVaultTestCase
from _test_case import SecretsClientPreparer, get_decorator
all_api_versions = get_decorator()
def print(*args):
assert all(arg is not None for arg in args)
def test_create_secret_client():
vault_url = "vault_url"
# pylint:disable=unused-variable
# [START create_secret_client]
from azure.identity import DefaultAzureCredential
from azure.keyvault.secrets import SecretClient
# Create a SecretClient using default Azure credentials
credential = DefaultAzureCredential()
secret_client = SecretClient(vault_url, credential)
# [END create_secret_client]
class TestExamplesKeyVault(KeyVaultTestCase):
@pytest.mark.parametrize("api_version", all_api_versions, ids=all_api_versions)
@SecretsClientPreparer()
@recorded_by_proxy
def test_example_secret_crud_operations(self, client, **kwargs):
secret_client = client
secret_name = self.get_resource_name("secret-name")
# [START set_secret]
from dateutil import parser as date_parse
expires_on = date_parse.parse("2050-02-02T08:00:00.000Z")
# create a secret, setting optional arguments
secret = secret_client.set_secret(secret_name, "secret-value", expires_on=expires_on)
print(secret.name)
print(secret.properties.version)
print(secret.properties.expires_on)
# [END set_secret]
# [START get_secret]
# get the latest version of a secret
secret = secret_client.get_secret(secret_name)
# alternatively, specify a version
secret = secret_client.get_secret(secret_name, secret.properties.version)
print(secret.id)
print(secret.name)
print(secret.properties.version)
print(secret.properties.vault_url)
# [END get_secret]
# [START update_secret]
# update attributes of an existing secret
content_type = "text/plain"
tags = {"foo": "updated tag"}
updated_secret_properties = secret_client.update_secret_properties(
secret_name, content_type=content_type, tags=tags
)
print(updated_secret_properties.version)
print(updated_secret_properties.updated_on)
print(updated_secret_properties.content_type)
print(updated_secret_properties.tags)
# [END update_secret]
# [START delete_secret]
# delete a secret
deleted_secret_poller = secret_client.begin_delete_secret(secret_name)
deleted_secret = deleted_secret_poller.result()
print(deleted_secret.name)
# if the vault has soft-delete enabled, the secret's deleted_date,
# scheduled purge date and recovery id are set
print(deleted_secret.deleted_date)
print(deleted_secret.scheduled_purge_date)
print(deleted_secret.recovery_id)
# if you want to block until secret is deleted server-side, call wait() on the poller
deleted_secret_poller.wait()
# [END delete_secret]
@pytest.mark.parametrize("api_version", all_api_versions, ids=all_api_versions)
@SecretsClientPreparer()
@recorded_by_proxy
def METHOD_NAME(self, client, **kwargs):
secret_client = client
for i in range(7):
secret_name = self.get_resource_name(f"secret{i}")
secret_client.set_secret(secret_name, f"value{i}")
# [START list_secrets]
# list secrets
secrets = secret_client.list_properties_of_secrets()
for secret in secrets:
# the list doesn't include values or versions of the secrets
print(secret.id)
print(secret.name)
print(secret.enabled)
# [END list_secrets]
# pylint: disable=unused-variable
# [START list_properties_of_secret_versions]
secret_versions = secret_client.list_properties_of_secret_versions("secret-name")
for secret in secret_versions:
# the list doesn't include the values at each version
print(secret.id)
print(secret.enabled)
print(secret.updated_on)
# [END list_properties_of_secret_versions]
# [START list_deleted_secrets]
# gets an iterator of deleted secrets (requires soft-delete enabled for the vault)
deleted_secrets = secret_client.list_deleted_secrets()
for secret in deleted_secrets:
# the list doesn't include values or versions of the deleted secrets
print(secret.id)
print(secret.name)
print(secret.scheduled_purge_date)
print(secret.recovery_id)
print(secret.deleted_date)
# [END list_deleted_secrets]
@pytest.mark.parametrize("api_version", all_api_versions, ids=all_api_versions)
@SecretsClientPreparer()
@recorded_by_proxy
def test_example_secrets_backup_restore(self, client, **kwargs):
secret_client = client
secret_name = self.get_resource_name("secret-name")
secret_client.set_secret(secret_name, "secret-value")
# [START backup_secret]
# backup secret
# returns the raw bytes of the backed up secret
secret_backup = secret_client.backup_secret(secret_name)
print(secret_backup)
# [END backup_secret]
secret_client.begin_delete_secret(secret_name).wait()
secret_client.purge_deleted_secret(secret_name)
if self.is_live:
time.sleep(60)
# [START restore_secret_backup]
# restores a backed up secret
restored_secret = secret_client.restore_secret_backup(secret_backup)
print(restored_secret.id)
print(restored_secret.version)
# [END restore_secret_backup]
@pytest.mark.parametrize("api_version", all_api_versions, ids=all_api_versions)
@SecretsClientPreparer()
@recorded_by_proxy
def test_example_secrets_recover(self, client, **kwargs):
secret_client = client
secret_name = self.get_resource_name("secret-name")
secret_client.set_secret(secret_name, "secret-value")
secret_client.begin_delete_secret(secret_name).wait()
# [START get_deleted_secret]
# gets a deleted secret (requires soft-delete enabled for the vault)
deleted_secret = secret_client.get_deleted_secret(secret_name)
print(deleted_secret.name)
# [END get_deleted_secret]
# [START recover_deleted_secret]
# recover deleted secret to the latest version
recover_secret_poller = secret_client.begin_recover_deleted_secret(secret_name)
recovered_secret = recover_secret_poller.result()
print(recovered_secret.id)
print(recovered_secret.name)
# if you want to block until secret is recovered server-side, call wait() on the poller
recover_secret_poller.wait()
# [END recover_deleted_secret]
|
2,178 |
compose spec yaml
|
# pylint: disable=redefined-outer-name
# pylint: disable=unused-argument
# pylint: disable=unused-variable
# pylint: disable=too-many-function-args
import asyncio
from typing import Any
import pytest
import yaml
from faker import Faker
from pytest_mock.plugin import MockerFixture
from pytest_simcore.helpers.typing_env import EnvVarsDict
from simcore_service_dynamic_sidecar.core.docker_compose_utils import (
docker_compose_config,
docker_compose_create,
docker_compose_down,
docker_compose_pull,
docker_compose_restart,
docker_compose_rm,
docker_compose_start,
)
from simcore_service_dynamic_sidecar.core.settings import ApplicationSettings
from simcore_service_dynamic_sidecar.core.utils import CommandResult
SLEEP_TIME_S = 60
COMPOSE_SPEC_SAMPLE = {
"version": "3.8",
"services": {
"my-test-container": {
"environment": [
"DY_SIDECAR_PATH_INPUTS=/work/inputs",
"DY_SIDECAR_PATH_OUTPUTS=/work/outputs",
'DY_SIDECAR_STATE_PATHS=["/work/workspace"]',
],
"working_dir": "/work",
"image": "busybox:latest",
"command": f"sh -c \"echo 'setup {__name__}'; sleep {SLEEP_TIME_S}; echo 'teardown {__name__}'\"",
}
},
}
@pytest.fixture
def METHOD_NAME(faker: Faker) -> str:
return yaml.safe_dump(COMPOSE_SPEC_SAMPLE, indent=1)
@pytest.mark.parametrize("with_restart", [True, False])
async def test_docker_compose_workflow(
METHOD_NAME: str,
mock_environment: EnvVarsDict,
with_restart: bool,
ensure_run_in_sequence_context_is_empty: None,
mocker: MockerFixture,
):
settings = ApplicationSettings.create_from_envs()
def _print_result(r: CommandResult):
assert r.elapsed
assert r.elapsed > 0
print(f"{r.command:*^100}", "\nELAPSED:", r.elapsed)
compose_spec: dict[str, Any] = yaml.safe_load(METHOD_NAME)
print("compose_spec:\n", compose_spec)
# validates specs
r = await docker_compose_config(METHOD_NAME, timeout=10)
_print_result(r)
assert r.success, r.message
# removes all stopped containers from specs
r = await docker_compose_rm(METHOD_NAME, settings)
_print_result(r)
assert r.success, r.message
# pulls containers before starting them
fake_app = mocker.AsyncMock()
fake_app.state.settings = settings
await docker_compose_pull(fake_app, METHOD_NAME)
# creates containers
r = await docker_compose_create(METHOD_NAME, settings)
_print_result(r)
assert r.success, r.message
# tries to start containers which were not able to start
r = await docker_compose_start(METHOD_NAME, settings)
_print_result(r)
assert r.success, r.message
if with_restart:
# restarts
r = await docker_compose_restart(METHOD_NAME, settings)
_print_result(r)
assert r.success, r.message
# stops and removes
r = await docker_compose_down(METHOD_NAME, settings)
_print_result(r)
assert r.success, r.message
# full cleanup
r = await docker_compose_rm(METHOD_NAME, settings)
_print_result(r)
assert r.success, r.message
async def test_burst_calls_to_docker_compose_config(
METHOD_NAME: str,
mock_environment: EnvVarsDict,
ensure_run_in_sequence_context_is_empty: None,
):
CALLS_COUNT = 10 # tried manually with 1E3 but takes too long
results = await asyncio.gather(
*(
docker_compose_config(
METHOD_NAME,
timeout=100 + i, # large timeout and emulates change in parameters
)
for i in range(CALLS_COUNT)
),
return_exceptions=True,
)
exceptions = [r for r in results if isinstance(r, Exception)]
assert not exceptions, "docker_compose* does NOT raise exceptions"
assert all(
isinstance(r, CommandResult) for r in results
), "docker_compose* does NOT raise exceptions"
success = [r for r in results if r.success]
failed = [r for r in results if not r.success]
assert len(success) == CALLS_COUNT
assert not failed
async def test_docker_start_fails_if_containers_are_not_present(
METHOD_NAME: str,
mock_environment: EnvVarsDict,
ensure_run_in_sequence_context_is_empty: None,
):
settings = ApplicationSettings.create_from_envs()
def _print_result(r: CommandResult):
assert r.elapsed
assert r.elapsed > 0
print(f"{r.command:*^100}", "\nELAPSED:", r.elapsed)
compose_spec: dict[str, Any] = yaml.safe_load(METHOD_NAME)
print("compose_spec:\n", compose_spec)
# validates specs
r = await docker_compose_config(METHOD_NAME, timeout=10)
_print_result(r)
assert r.success, r.message
# fails when containers are missing
r = await docker_compose_start(METHOD_NAME, settings)
_print_result(r)
assert r.success is False, r.message
|
2,179 |
postscript rule7
|
# Leo colorizer control file for postscript mode.
# This file is in the public domain.
# Properties for postscript mode.
properties = {
"lineComment": "%",
}
# Attributes dict for postscript_main ruleset.
postscript_main_attributes_dict = {
"default": "null",
"digit_re": "",
"escape": "\\",
"highlight_digits": "true",
"ignore_case": "false",
"no_word_sep": "",
}
# Attributes dict for postscript_literal ruleset.
postscript_literal_attributes_dict = {
"default": "LITERAL1",
"digit_re": "",
"escape": "\\",
"highlight_digits": "true",
"ignore_case": "false",
"no_word_sep": "",
}
# Dictionary of attributes dictionaries for postscript mode.
attributesDictDict = {
"postscript_literal": postscript_literal_attributes_dict,
"postscript_main": postscript_main_attributes_dict,
}
# Keywords dict for postscript_main ruleset.
postscript_main_keywords_dict = {
"NULL": "literal2",
"abs": "operator",
"add": "operator",
"atan": "operator",
"ceiling": "operator",
"clear": "keyword1",
"cleartomark": "keyword1",
"copy": "keyword1",
"cos": "operator",
"count": "keyword1",
"countexecstack": "keyword1",
"counttomark": "keyword1",
"div": "operator",
"dup": "keyword1",
"exch": "keyword1",
"exec": "keyword1",
"execstack": "keyword1",
"exit": "keyword1",
"exp": "operator",
"false": "literal2",
"floor": "operator",
"for": "keyword1",
"idiv": "operator",
"if": "keyword1",
"ifelse": "keyword1",
"ln": "operator",
"log": "operator",
"loop": "keyword1",
"mark": "keyword1",
"mod": "operator",
"mul": "operator",
"ned": "operator",
"pop": "keyword1",
"quit": "keyword1",
"rand": "operator",
"repeat": "keyword1",
"roll": "keyword1",
"round": "operator",
"rrand": "operator",
"sin": "operator",
"sqrt": "operator",
"srand": "operator",
"start": "keyword1",
"stop": "keyword1",
"stopped": "keyword1",
"sub": "operator",
"true": "literal2",
"truncate": "operator",
}
# Keywords dict for postscript_literal ruleset.
postscript_literal_keywords_dict = {}
# Dictionary of keywords dictionaries for postscript mode.
keywordsDictDict = {
"postscript_literal": postscript_literal_keywords_dict,
"postscript_main": postscript_main_keywords_dict,
}
# Rules for postscript_main ruleset.
def postscript_rule0(colorer, s, i):
return colorer.match_eol_span(s, i, kind="comment2", seq="%!")
def postscript_rule1(colorer, s, i):
return colorer.match_eol_span(s, i, kind="comment2", seq="%?")
def postscript_rule2(colorer, s, i):
return colorer.match_eol_span(s, i, kind="comment2", seq="%%")
def postscript_rule3(colorer, s, i):
return colorer.match_eol_span(s, i, kind="comment1", seq="%")
def postscript_rule4(colorer, s, i):
return colorer.match_span(s, i, kind="literal1", begin="(", end=")",
delegate="postscript::literal")
def postscript_rule5(colorer, s, i):
return colorer.match_span(s, i, kind="literal1", begin="<", end=">")
def postscript_rule6(colorer, s, i):
return colorer.match_mark_following(s, i, kind="label", pattern="/")
def METHOD_NAME(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="operator", seq="}")
def postscript_rule8(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="operator", seq="{")
def postscript_rule9(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="operator", seq="]")
def postscript_rule10(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="operator", seq="[")
def postscript_rule11(colorer, s, i):
return colorer.match_keywords(s, i)
# Rules dict for postscript_main ruleset.
rulesDict1 = {
"%": [postscript_rule0, postscript_rule1, postscript_rule2, postscript_rule3,],
"(": [postscript_rule4,],
"/": [postscript_rule6,],
"0": [postscript_rule11,],
"1": [postscript_rule11,],
"2": [postscript_rule11,],
"3": [postscript_rule11,],
"4": [postscript_rule11,],
"5": [postscript_rule11,],
"6": [postscript_rule11,],
"7": [postscript_rule11,],
"8": [postscript_rule11,],
"9": [postscript_rule11,],
"<": [postscript_rule5,],
"@": [postscript_rule11,],
"A": [postscript_rule11,],
"B": [postscript_rule11,],
"C": [postscript_rule11,],
"D": [postscript_rule11,],
"E": [postscript_rule11,],
"F": [postscript_rule11,],
"G": [postscript_rule11,],
"H": [postscript_rule11,],
"I": [postscript_rule11,],
"J": [postscript_rule11,],
"K": [postscript_rule11,],
"L": [postscript_rule11,],
"M": [postscript_rule11,],
"N": [postscript_rule11,],
"O": [postscript_rule11,],
"P": [postscript_rule11,],
"Q": [postscript_rule11,],
"R": [postscript_rule11,],
"S": [postscript_rule11,],
"T": [postscript_rule11,],
"U": [postscript_rule11,],
"V": [postscript_rule11,],
"W": [postscript_rule11,],
"X": [postscript_rule11,],
"Y": [postscript_rule11,],
"Z": [postscript_rule11,],
"[": [postscript_rule10,],
"]": [postscript_rule9,],
"a": [postscript_rule11,],
"b": [postscript_rule11,],
"c": [postscript_rule11,],
"d": [postscript_rule11,],
"e": [postscript_rule11,],
"f": [postscript_rule11,],
"g": [postscript_rule11,],
"h": [postscript_rule11,],
"i": [postscript_rule11,],
"j": [postscript_rule11,],
"k": [postscript_rule11,],
"l": [postscript_rule11,],
"m": [postscript_rule11,],
"n": [postscript_rule11,],
"o": [postscript_rule11,],
"p": [postscript_rule11,],
"q": [postscript_rule11,],
"r": [postscript_rule11,],
"s": [postscript_rule11,],
"t": [postscript_rule11,],
"u": [postscript_rule11,],
"v": [postscript_rule11,],
"w": [postscript_rule11,],
"x": [postscript_rule11,],
"y": [postscript_rule11,],
"z": [postscript_rule11,],
"{": [postscript_rule8,],
"}": [METHOD_NAME,],
}
# Rules for postscript_literal ruleset.
def postscript_rule12(colorer, s, i):
return colorer.match_span(s, i, kind="literal1", begin="(", end=")",
delegate="postscript::literal")
# Rules dict for postscript_literal ruleset.
rulesDict2 = {
"(": [postscript_rule12,],
}
# x.rulesDictDict for postscript mode.
rulesDictDict = {
"postscript_literal": rulesDict2,
"postscript_main": rulesDict1,
}
# Import dict for postscript mode.
importDict = {}
|
2,180 |
message type at
|
# Author(s): Andrea Colangelo ([email protected])
# Copyright 2018 Openforce Srls Unipersonale (www.openforce.it)
# Copyright 2018 Sergio Corato (https://efatto.it)
# Copyright 2018-2019 Lorenzo Battistini <https://github.com/eLBati>
import logging
import re
from odoo import _, fields, models
from odoo.exceptions import UserError
_logger = logging.getLogger(__name__)
RESPONSE_MAIL_REGEX = (
"[A-Z]{2}[a-zA-Z0-9]{11,16}_[a-zA-Z0-9]{,5}_[A-Z]{2}_" "[a-zA-Z0-9]{,3}"
)
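# Illustrative file name (hypothetical values) matching the pattern above:
# "IT01234567890_00001_RC_001" - a country/VAT block, file progressive,
# message type (e.g. RC for a delivery receipt) and notification progressive.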
class FatturaPAAttachmentOut(models.Model):
_inherit = "fatturapa.attachment.out"
def _message_type_ns(
self, root, id_sdi, message_id, receipt_dt, fatturapa_attachment_out
):
error_list = root.find("ListaErrori")
error_str = ""
for error in error_list:
error_str += "\n[%s] %s %s" % (
error.find("Codice").text if error.find("Codice") is not None else "",
error.find("Descrizione").text
if error.find("Descrizione") is not None
else "",
error.find("Suggerimento").text
if error.find("Suggerimento") is not None
else "",
)
fatturapa_attachment_out.write(
{
"state": "sender_error",
"last_sdi_response": "SdI ID: {}; "
"Message ID: {}; Receipt date: {}; "
"Error: {}".format(id_sdi, message_id, receipt_dt, error_str),
}
)
def _message_type_mc(
self, root, id_sdi, message_id, receipt_dt, fatturapa_attachment_out
):
missed_delivery_note = root.find("Descrizione").text
fatturapa_attachment_out.write(
{
"state": "recipient_error",
"last_sdi_response": "SdI ID: {}; "
"Message ID: {}; Receipt date: {}; "
"Missed delivery note: {}".format(
id_sdi, message_id, receipt_dt, missed_delivery_note
),
}
)
def _message_type_rc(
self, root, id_sdi, message_id, receipt_dt, fatturapa_attachment_out
):
delivery_dt = root.find("DataOraConsegna").text
fatturapa_attachment_out.write(
{
"state": "validated",
"delivered_date": fields.Datetime.now(),
"last_sdi_response": "SdI ID: {}; "
"Message ID: {}; Receipt date: {}; "
"Delivery date: {}".format(id_sdi, message_id, receipt_dt, delivery_dt),
}
)
def _message_type_ne(self, root, id_sdi, message_id, fatturapa_attachment_out):
esito_committente = root.find("EsitoCommittente")
if esito_committente is not None:
# more than one esito?
esito = esito_committente.find("Esito")
state = ""
if esito is not None:
if esito.text == "EC01":
state = "accepted"
elif esito.text == "EC02":
state = "rejected"
fatturapa_attachment_out.write(
{
"state": state,
"last_sdi_response": "SdI ID: {}; "
"Message ID: {}; Response: {}; ".format(
id_sdi, message_id, esito.text
),
}
)
def _message_type_dt(
self, root, id_sdi, message_id, receipt_dt, fatturapa_attachment_out
):
description = root.find("Descrizione")
if description is not None:
fatturapa_attachment_out.write(
{
"state": "validated",
"last_sdi_response": "SdI ID: {}; "
"Message ID: {}; Receipt date: {}; "
"Description: {}".format(
id_sdi, message_id, receipt_dt, description.text
),
}
)
def METHOD_NAME(
self, root, id_sdi, message_id, receipt_dt, fatturapa_attachment_out
):
description = root.find("Descrizione")
if description is not None:
fatturapa_attachment_out.write(
{
"state": "accepted",
"last_sdi_response": (
"SdI ID: {}; Message ID: {}; Receipt date: {};"
" Description: {}"
).format(id_sdi, message_id, receipt_dt, description.text),
}
)
def parse_pec_response(self, message_dict):
message_dict["model"] = self._name
message_dict["res_id"] = 0
regex = re.compile(RESPONSE_MAIL_REGEX)
notifications = [x for x in message_dict["attachments"] if regex.match(x.fname)]
if not notifications:
raise UserError(
_(
'PEC message "%s" is coming from SDI but attachments do not '
"match SDI response format. Please check."
)
% (message_dict["subject"])
)
sdi_channel_model = self.env["sdi.channel"]
attachments = sdi_channel_model.receive_notification(
{
notification.fname: notification.content
for notification in notifications
},
)
# Link the message to the last attachment updated
message_dict["res_id"] = attachments[-1].id
return message_dict
|
2,181 |
fix data dtype
|
"""OpenGL Utilities.
"""
from contextlib import contextmanager
from functools import lru_cache
from typing import Tuple
import numpy as np
from vispy.app import Canvas
from vispy.gloo import gl
from vispy.gloo.context import get_current_canvas
from napari.utils.translations import trans
texture_dtypes = [
np.dtype(np.uint8),
np.dtype(np.uint16),
np.dtype(np.float32),
]
@contextmanager
def _opengl_context():
"""Assure we are running with a valid OpenGL context.
Only create a Canvas if one doesn't exist. Creating and closing a
Canvas causes vispy to process Qt events which can cause problems.
Ideally call opengl_context() on start after creating your first
Canvas. However it will work either way.
"""
canvas = Canvas(show=False) if get_current_canvas() is None else None
try:
yield
finally:
if canvas is not None:
canvas.close()
@lru_cache(maxsize=1)
def get_gl_extensions() -> str:
"""Get basic info about the Gl capabilities of this machine"""
with _opengl_context():
return gl.glGetParameter(gl.GL_EXTENSIONS)
@lru_cache
def get_max_texture_sizes() -> Tuple[int, int]:
"""Return the maximum texture sizes for 2D and 3D rendering.
If this function is called without an OpenGL context it will create a
temporary non-visible Canvas. Either way the lru_cache means subsequent
calls to this function will return the original values without
actually running again.
Returns
-------
Tuple[int, int]
The max texture sizes for (2d, 3d) rendering.
"""
with _opengl_context():
max_size_2d = gl.glGetParameter(gl.GL_MAX_TEXTURE_SIZE)
if max_size_2d == ():
max_size_2d = None
# vispy/gloo doesn't provide the GL_MAX_3D_TEXTURE_SIZE location,
# but it can be found in this list of constants
# http://pyopengl.sourceforge.net/documentation/pydoc/OpenGL.GL.html
with _opengl_context():
GL_MAX_3D_TEXTURE_SIZE = 32883
max_size_3d = gl.glGetParameter(GL_MAX_3D_TEXTURE_SIZE)
if max_size_3d == ():
max_size_3d = None
return max_size_2d, max_size_3d
def METHOD_NAME(data):
"""Makes sure the dtype of the data is accetpable to vispy.
Acceptable types are int8, uint8, int16, uint16, float32.
Parameters
----------
data : np.ndarray
Data that will need to be of right type.
Returns
-------
np.ndarray
Data that is of right type and will be passed to vispy.
"""
dtype = np.dtype(data.dtype)
if dtype in texture_dtypes:
return data
try:
dtype = {
"i": np.float32,
"f": np.float32,
"u": np.uint16,
"b": np.uint8,
}[dtype.kind]
except KeyError as e: # not an int or float
raise TypeError(
trans._(
'type {dtype} not allowed for texture; must be one of {textures}',
deferred=True,
dtype=dtype,
textures=set(texture_dtypes),
)
) from e
return data.astype(dtype)
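# Quick illustration of the dtype fixer above (arrays are illustrative only,
# not part of napari's public API): integer and float64 input is cast to
# float32, while dtypes already listed in texture_dtypes pass through, e.g.
#   METHOD_NAME(np.zeros((2, 2), np.int64)).dtype    # -> float32
#   METHOD_NAME(np.zeros((2, 2), np.float64)).dtype  # -> float32
#   METHOD_NAME(np.zeros((2, 2), np.uint16)).dtype   # -> uint16 (unchanged)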
# blend_func parameters are multiplying:
# - source color
# - destination color
# - source alpha
# - destination alpha
# they do not apply to min/max blending equation
BLENDING_MODES = {
'opaque': {
"depth_test": True,
"cull_face": False,
"blend": False,
},
'translucent': {
"depth_test": True,
"cull_face": False,
"blend": True,
"blend_func": ('src_alpha', 'one_minus_src_alpha', 'one', 'one'),
"blend_equation": 'func_add',
},
'translucent_no_depth': {
"depth_test": False,
"cull_face": False,
"blend": True,
"blend_func": ('src_alpha', 'one_minus_src_alpha', 'one', 'one'),
"blend_equation": 'func_add', # see vispy/vispy#2324
},
'additive': {
"depth_test": False,
"cull_face": False,
"blend": True,
"blend_func": ('src_alpha', 'dst_alpha', 'one', 'one'),
"blend_equation": 'func_add',
},
'minimum': {
"depth_test": False,
"cull_face": False,
"blend": True,
"blend_equation": 'min',
},
}
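# Usage sketch (hedged; the actual call sites live elsewhere in napari): each
# entry is meant to be splatted into a vispy visual's GL state, e.g.
#   node.set_gl_state(**BLENDING_MODES['translucent'])
# 'minimum' deliberately omits blend_func because the min/max blend equations
# ignore the blend factors, as noted in the comment above.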
|
2,182 |
test machine types
|
#
# Test virtio-scsi and virtio-blk queue settings for all machine types
#
# Copyright (c) 2019 Virtuozzo International GmbH
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import sys
import os
import re
import logging
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'python'))
from qemu.machine import QEMUMachine
from avocado_qemu import QemuSystemTest
from avocado import skip
#list of machine types and virtqueue properties to test
VIRTIO_SCSI_PROPS = {'seg_max_adjust': 'seg_max_adjust'}
VIRTIO_BLK_PROPS = {'seg_max_adjust': 'seg-max-adjust'}
DEV_TYPES = {'virtio-scsi-pci': VIRTIO_SCSI_PROPS,
'virtio-blk-pci': VIRTIO_BLK_PROPS}
VM_DEV_PARAMS = {'virtio-scsi-pci': ['-device', 'virtio-scsi-pci,id=scsi0'],
'virtio-blk-pci': ['-device',
'virtio-blk-pci,id=scsi0,drive=drive0',
'-drive',
'driver=null-co,id=drive0,if=none']}
class VirtioMaxSegSettingsCheck(QemuSystemTest):
@staticmethod
def make_pattern(props):
pattern_items = [r'{0} = \w+'.format(prop) for prop in props]
return '|'.join(pattern_items)
def query_virtqueue(self, vm, dev_type_name):
query_ok = False
error = None
props = None
output = vm.command('human-monitor-command',
command_line = 'info qtree')
props_list = DEV_TYPES[dev_type_name].values()
pattern = self.make_pattern(props_list)
res = re.findall(pattern, output)
if len(res) != len(props_list):
props_list = set(props_list)
res = set(res)
not_found = props_list.difference(res)
not_found = ', '.join(not_found)
error = '({0}): The following properties not found: {1}'\
.format(dev_type_name, not_found)
else:
query_ok = True
props = dict()
for prop in res:
p = prop.split(' = ')
props[p[0]] = p[1]
return query_ok, props, error
def check_mt(self, mt, dev_type_name):
mt['device'] = dev_type_name # Only for the debug() call.
logger = logging.getLogger('machine')
logger.debug(mt)
with QEMUMachine(self.qemu_bin) as vm:
vm.set_machine(mt["name"])
vm.add_args('-nodefaults')
for s in VM_DEV_PARAMS[dev_type_name]:
vm.add_args(s)
try:
vm.launch()
query_ok, props, error = self.query_virtqueue(vm, dev_type_name)
except:
query_ok = False
error = sys.exc_info()[0]
if not query_ok:
self.fail('machine type {0}: {1}'.format(mt['name'], error))
for prop_name, prop_val in props.items():
expected_val = mt[prop_name]
self.assertEqual(expected_val, prop_val)
@staticmethod
def seg_max_adjust_enabled(mt):
# machine types >= 5.0 should have seg_max_adjust = true
# others seg_max_adjust = false
mt = mt.split("-")
# machine types with a one-word name or a name like pc-x.x
if len(mt) <= 2:
return False
# machine types like pc-<chip_name>-x.x[.x]
ver = mt[2]
ver = ver.split(".")
# versions >= 5.0 go with seg_max_adjust enabled
major = int(ver[0])
if major >= 5:
return True
return False
@skip("break multi-arch CI")
def METHOD_NAME(self):
# collect all machine types except 'none', 'isapc', 'microvm'
with QEMUMachine(self.qemu_bin) as vm:
vm.launch()
machines = [m['name'] for m in vm.command('query-machines')]
vm.shutdown()
machines.remove('none')
machines.remove('isapc')
machines.remove('microvm')
for dev_type in DEV_TYPES:
# create the list of machine types and their parameters.
mtypes = list()
for m in machines:
if self.seg_max_adjust_enabled(m):
enabled = 'true'
else:
enabled = 'false'
mtypes.append({'name': m,
DEV_TYPES[dev_type]['seg_max_adjust']: enabled})
# test each machine type for a device type
for mt in mtypes:
self.check_mt(mt, dev_type)
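# Worked example of the version check above (machine type names are
# illustrative): VirtioMaxSegSettingsCheck.seg_max_adjust_enabled('pc-q35-5.0')
# returns True, 'pc-i440fx-4.2' returns False, and short names such as
# 'pc-4.2' or 'q35' fall through the length check and also return False.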
|
2,183 |
test info
|
"""
:codeauthor: Pablo Suárez Hdez. <[email protected]>
Test cases for salt.modules.udev
"""
import pytest
import salt.modules.udev as udev
from tests.support.mock import MagicMock, patch
@pytest.fixture
def configure_loader_modules():
return {udev: {}}
# 'info' function tests: 1
def METHOD_NAME():
"""
Test if it returns the info of udev-created node in a dict
"""
cmd_out = {
"retcode": 0,
"stdout": (
"P: /devices/virtual/vc/vcsa7\n"
"N: vcsa7\n"
"E: DEVNAME=/dev/vcsa7\n"
"E: DEVPATH=/devices/virtual/vc/vcsa7\n"
"E: MAJOR=7\n"
"E: MINOR=135\n"
"E: SUBSYSTEM=vc\n"
"\n"
),
"stderr": "",
}
ret = {
"E": {
"DEVNAME": "/dev/vcsa7",
"DEVPATH": "/devices/virtual/vc/vcsa7",
"MAJOR": 7,
"MINOR": 135,
"SUBSYSTEM": "vc",
},
"N": "vcsa7",
"P": "/devices/virtual/vc/vcsa7",
}
mock = MagicMock(return_value=cmd_out)
with patch.dict(udev.__salt__, {"cmd.run_all": mock}):
data = udev.info("/dev/vcsa7")
assert ret["P"] == data["P"]
assert ret.get("N") == data.get("N")
for key, value in data["E"].items():
assert ret["E"][key] == value
# 'exportdb' function tests: 1
def test_exportdb():
"""
Test if it returns the all the udev database into a dict
"""
udev_data = """
P: /devices/LNXSYSTM:00/LNXPWRBN:00
E: DEVPATH=/devices/LNXSYSTM:00/LNXPWRBN:00
E: DRIVER=button
E: MODALIAS=acpi:LNXPWRBN:
E: SUBSYSTEM=acpi
P: /devices/LNXSYSTM:00/LNXPWRBN:00/input/input2
E: DEVPATH=/devices/LNXSYSTM:00/LNXPWRBN:00/input/input2
E: EV=3
E: ID_FOR_SEAT=input-acpi-LNXPWRBN_00
E: ID_INPUT=1
E: ID_INPUT_KEY=1
E: ID_PATH=acpi-LNXPWRBN:00
E: ID_PATH_TAG=acpi-LNXPWRBN_00
E: KEY=10000000000000 0
E: MODALIAS=input:b0019v0000p0001e0000-e0,1,k74,ramlsfw
E: NAME="Power Button"
E: PHYS="LNXPWRBN/button/input0"
E: PRODUCT=19/0/1/0
E: PROP=0
E: SUBSYSTEM=input
E: TAGS=:seat:
E: USEC_INITIALIZED=2010022
P: /devices/LNXSYSTM:00/LNXPWRBN:00/input/input2/event2
N: input/event2
E: BACKSPACE=guess
E: DEVNAME=/dev/input/event2
E: DEVPATH=/devices/LNXSYSTM:00/LNXPWRBN:00/input/input2/event2
E: ID_INPUT=1
E: ID_INPUT_KEY=1
E: ID_PATH=acpi-LNXPWRBN:00
E: ID_PATH_TAG=acpi-LNXPWRBN_00
E: MAJOR=13
E: MINOR=66
E: SUBSYSTEM=input
E: TAGS=:power-switch:
E: USEC_INITIALIZED=2076101
E: XKBLAYOUT=us
E: XKBMODEL=pc105
"""
out = [
{
"P": "/devices/LNXSYSTM:00/LNXPWRBN:00",
"E": {
"MODALIAS": "acpi:LNXPWRBN:",
"SUBSYSTEM": "acpi",
"DRIVER": "button",
"DEVPATH": "/devices/LNXSYSTM:00/LNXPWRBN:00",
},
},
{
"P": "/devices/LNXSYSTM:00/LNXPWRBN:00/input/input2",
"E": {
"SUBSYSTEM": "input",
"PRODUCT": "19/0/1/0",
"PHYS": '"LNXPWRBN/button/input0"',
"NAME": '"Power Button"',
"ID_INPUT": 1,
"DEVPATH": "/devices/LNXSYSTM:00/LNXPWRBN:00/input/input2",
"MODALIAS": "input:b0019v0000p0001e0000-e0,1,k74,ramlsfw",
"ID_PATH_TAG": "acpi-LNXPWRBN_00",
"TAGS": ":seat:",
"PROP": 0,
"ID_FOR_SEAT": "input-acpi-LNXPWRBN_00",
"KEY": "10000000000000 0",
"USEC_INITIALIZED": 2010022,
"ID_PATH": "acpi-LNXPWRBN:00",
"EV": 3,
"ID_INPUT_KEY": 1,
},
},
{
"P": "/devices/LNXSYSTM:00/LNXPWRBN:00/input/input2/event2",
"E": {
"SUBSYSTEM": "input",
"XKBLAYOUT": "us",
"MAJOR": 13,
"ID_INPUT": 1,
"DEVPATH": "/devices/LNXSYSTM:00/LNXPWRBN:00/input/input2/event2",
"ID_PATH_TAG": "acpi-LNXPWRBN_00",
"DEVNAME": "/dev/input/event2",
"TAGS": ":power-switch:",
"BACKSPACE": "guess",
"MINOR": 66,
"USEC_INITIALIZED": 2076101,
"ID_PATH": "acpi-LNXPWRBN:00",
"XKBMODEL": "pc105",
"ID_INPUT_KEY": 1,
},
"N": "input/event2",
},
]
mock = MagicMock(return_value={"retcode": 0, "stdout": udev_data})
with patch.dict(udev.__salt__, {"cmd.run_all": mock}):
data = udev.exportdb()
assert data == [x for x in data if x]
for d_idx, d_section in enumerate(data):
assert out[d_idx]["P"] == d_section["P"]
assert out[d_idx].get("N") == d_section.get("N")
for key, value in d_section["E"].items():
assert out[d_idx]["E"][key] == value
def test_normalize_info():
"""
Test that udev._normalize_info does not return nested lists that contain only one item.
:return:
"""
data = {"key": ["value", "here"], "foo": ["bar"], "some": "data"}
assert udev._normalize_info(data) == {
"foo": "bar",
"some": "data",
"key": ["value", "here"],
}
|
2,184 |
safe popup noclose
|
import os
from PyQt5 import uic
from PyQt5.QtCore import QObject, pyqtSignal, Qt
from PyQt5.QtWidgets import QDialog, QMessageBox
from os.path import split as split_path
import platform
from enum import IntEnum, auto
class ProgressDialog(QDialog):
cancel = pyqtSignal()
trigger_popup = pyqtSignal(str, str, object, tuple, bool)
trigger_update = pyqtSignal(int)
def __init__(self, steps, parent=None):
super(ProgressDialog, self).__init__(parent)
form, _ = uic.loadUiType(split_path(__file__)[0] + "/progressDialog.ui")
self.ui = form()
self.ui.setupUi(self)
self.steps = steps
self.current = 0
self._add_pending()
self.ui.cancel.clicked.connect(self.cancel)
self.trigger_popup.connect(self.popup)
self.trigger_update.connect(self.update_step)
def __call__(self, progress):
self.safe_update_step(progress)
def set_busy(self, busy):
self.ui.progress.setMaximum(0 if busy else 100)
def safe_update_step(self, progress):
self.trigger_update.emit(progress)
def update_step(self, progress):
self.set_busy(False)
if self.is_finished:
return
if progress == 100:
self._finish_step()
else:
self.ui.progress.setValue(progress)
@property
def is_finished(self):
return len(self.steps) == self.current
def _add_pending(self):
self.ui.info.setText("Step {} of {}: {}".format(self.current + 1, len(self.steps), self.steps[self.current]))
def _finish_step(self):
if self.is_finished:
return
self.current += 1
if self.is_finished:
self.ui.progress.setValue(100)
else:
self.ui.progress.setValue(0)
self._add_pending()
def popup(self, level, title, description, args, close):
assert level in ("information", "warning", "critical")
if args is not None:
description = [description]
for arg in args:
if isinstance(arg, Exception):
description.append(str(arg))
description = "\n".join(description)
getattr(QMessageBox, str(level))(self, title, description)
if close:
self.close()
def safe_popup(self, level, title, description, *args):
self.trigger_popup.emit(level, title, description, args, True)
def METHOD_NAME(self, level, title, description, *args):
self.trigger_popup.emit(level, title, description, args, False)
class BarId(IntEnum):
bar0 = auto()
bar1 = auto()
class PercentProgressDialog(QDialog):
class _ProgressEmitter(QObject):
progress0 = pyqtSignal(int)
progress1 = pyqtSignal(int)
def __call__(self, val, bar: BarId = BarId.bar0):
if bar == BarId.bar0:
self.progress0.emit(val)
elif bar == BarId.bar1:
self.progress1.emit(val)
def __init__(self, parent=None, *, title=None, secondary_bar=False):
super().__init__(parent)
localDir = os.path.split(__file__)[0]
form, _ = uic.loadUiType(os.path.join(localDir, "percentProgressDialog.ui"))
self._ui = form()
self._ui.setupUi(self)
self._ui.cancel.clicked.connect(self.reject)
self._emitter = self._ProgressEmitter(parent=self)
self._emitter.progress0.connect(self._ui.progress0.setValue)
self._emitter.progress1.connect(self._ui.progress1.setValue)
if title:
self.setWindowTitle(title)
self._ui.progress0.setFormat(f"{title}: %p%")
# did not manage to show a titlebar on OSX, or progress text on the progress bar
# added additional label to UI to handle information display on OSX
if platform.system() == "Darwin":
self._ui.osxLabel0.setText(title)
self._ui.osxLabel0.setVisible(True)
self._emitter.progress0.connect(lambda val: self._ui.osxLabel0.setText(f"{title} {val}%"))
if secondary_bar:
self.getBar(BarId.bar1).setVisible(True)
if platform.system() == "Darwin":
self._ui.osxLabel1.setVisible(True)
def updateProgress(self, progress: int, bar: BarId = BarId.bar0):
# Using emitter to avoid updating UI from non-main thread
self._emitter(progress, bar)
def setBusy(self, bar: BarId = BarId.bar0):
self.getBar(bar).setMaximum(0)
def getBar(self, bar: BarId):
if bar == BarId.bar0:
return self._ui.progress0
elif bar == BarId.bar1:
return self._ui.progress1
def updateBarFormat(self, title: str, bar: BarId = BarId.bar0):
self.getBar(bar).setFormat(f"{title}: %p%")
if platform.system() == "Darwin":
if bar == BarId.bar0:
self._emitter.progress0.disconnect()
self._emitter.progress0.connect(lambda val: self._ui.osxLabel0.setText(f"{title} {val}%"))
self._emitter.progress0.connect(self._ui.progress0.setValue)
if bar == BarId.bar1:
self._emitter.progress1.disconnect()
self._emitter.progress1.connect(lambda val: self._ui.osxLabel1.setText(f"{title} {val}%"))
self._emitter.progress1.connect(self._ui.progress1.setValue)
if __name__ == "__main__":
from PyQt5.QtWidgets import QApplication
from time import sleep
app = QApplication([])
p = ProgressDialog(["abc", "def", "ghi"])
p.show()
for j in range(3):
for i in range(11):
p.safe_update_step(i * 10)
sleep(0.01)
p.safe_popup("information", "lol", "rofl")
app.exec_()
|
2,185 |
main
|
import pandas as pd
import sys
import time
import numpy as np
import scipy as sp
import scipy.optimize as opt
import scipy.signal as signal
import os, json
import pygama.dataset as ds
import pygama.analysis.histograms as pgh
import pygama.dsp.transforms as pgt
import pygama.utils as pgu
import pygama.analysis.peak_fitting as pga
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import matplotlib.colors as mcolors
from matplotlib.colors import LogNorm
plt.style.use('style.mplstyle')
def METHOD_NAME():
#ttrap_max_vs_energy()
#rise()
#rise_diff()
AoverE_vs_E()
#test()
def ttrap_max_vs_energy():
if(len(sys.argv) != 2):
print('Usage: 2d_plots.py [run number]')
sys.exit()
start = time.time()
with open("runDB.json") as f:
runDB = json.load(f)
tier_dir = os.path.expandvars(runDB["tier_dir"])
meta_dir = os.path.expandvars(runDB["meta_dir"])
df = pd.read_hdf('{}/t2_run{}.h5'.format(tier_dir,sys.argv[1]))
df['e_cal'] = pd.read_hdf('{}/Spectrum_{}_2.hdf5'.format(meta_dir,sys.argv[1]))['e_cal']
plt.hist2d(df['e_cal'], df['ttrap_max'], np.arange(0,100,1), norm=LogNorm())
plt.xlim(0,50)
plt.ylim(0,100)
plt.xlabel('Energy (keV)', ha='right', x=1.0)
plt.ylabel('ttrap_max', ha='right', y=1.0)
plt.title('Kr83m Data')
cbar = plt.colorbar()
cbar.ax.set_ylabel('Counts')
plt.tight_layout()
plt.show()
def rise():
if(len(sys.argv) != 2):
print('Usage: 2d_plots.py [run number]')
sys.exit()
with open("runDB.json") as f:
runDB = json.load(f)
tier_dir = os.path.expandvars(runDB["tier_dir"])
meta_dir = os.path.expandvars(runDB["meta_dir"])
df = pd.read_hdf('{}/t2_run{}.h5'.format(tier_dir,sys.argv[1]))
df['e_cal'] = pd.read_hdf('{}/Spectrum_{}_2.hdf5'.format(meta_dir,sys.argv[1]))['e_cal']
df['rise'] = df['tp50'] - df['t0']
plt.hist2d(df['e_cal'], df['rise'], np.arange(-1000,1500,0.5), norm=LogNorm())
plt.xlim(0,50)
plt.ylim(-1000,1500)
plt.xlabel('Energy (keV)', ha='right', x=1.0)
plt.ylabel('tp50 - t0', ha='right', y=1.0)
plt.title('Kr83m Data')
cbar = plt.colorbar()
cbar.ax.set_ylabel('Counts')
plt.tight_layout()
plt.show()
def rise_diff():
if(len(sys.argv) != 3):
print('Usage: 2d_plots.py [run number 1] [run number 2]')
sys.exit()
with open("runDB.json") as f:
runDB = json.load(f)
tier_dir = os.path.expandvars(runDB["tier_dir"])
meta_dir = os.path.expandvars(runDB["meta_dir"])
df = pd.read_hdf('{}/t2_run{}.h5'.format(tier_dir,sys.argv[1]))
df['e_cal'] = pd.read_hdf('{}/Spectrum_{}_2.hdf5'.format(meta_dir,sys.argv[1]))['e_cal']
df['rise'] = df['tp100'] - df['t0']
df_2 = pd.read_hdf('{}/t2_run{}.h5'.format(tier_dir,sys.argv[2]))
df_2['e_cal'] = pd.read_hdf('{}/Spectrum_{}_2.hdf5'.format(meta_dir,sys.argv[2]))['e_cal']
df_2['rise'] = df_2['tp100'] - df_2['t0']
xlo, xhi, xpb = 0, 50, 2
ylo, yhi, ypb = 0, 1400, 10
nxbins = int((xhi-xlo)/xpb)
nybins = int((yhi-ylo)/ypb)
hist1, xbins, ybins = np.histogram2d(df['e_cal'], df['rise'], [nxbins,nybins], [[xlo,xhi], [ylo,yhi]])
hist2, xbins, ybins = np.histogram2d(df_2['e_cal'], df_2['rise'], [nxbins,nybins], [[xlo,xhi], [ylo,yhi]])
#hist_diff = hist2 - hist1
hist2 = hist2.T
xbins = xbins[0:(len(xbins)-1)]
ybins = ybins[0:(len(ybins)-1)]
plt.hist2d(hist2[0],hist2[1], [xbins, ybins])
plt.xlim(xlo, xhi)
plt.ylim(ylo, yhi)
plt.xlabel('Energy (keV)', ha='right', x=1.0)
plt.ylabel('tp100 - t0', ha='right', y=1.0)
cbar = plt.colorbar()
cbar.ax.set_ylabel('Counts')
plt.tight_layout()
plt.show()
def AoverE_vs_E():
if(len(sys.argv) != 3):
print('Usage: 2d_plots.py [run number 1] [run number 2]')
sys.exit()
start = time.time()
with open("runDB.json") as f:
runDB = json.load(f)
tier_dir = os.path.expandvars(runDB["tier_dir"])
meta_dir = os.path.expandvars(runDB["meta_dir"])
# make 2D plot
def plot_2D_hist():
df = pd.read_hdf('{}/t2_run{}.h5'.format(tier_dir,sys.argv[1]))
df['e_cal'] = pd.read_hdf('{}/Spectrum_{}.hdf5'.format(meta_dir,sys.argv[1]))['e_cal']
plt.hist2d(df['e_cal'], (df['current_max']/df['e_cal']), bins=[100,200], range=[[0, 50], [0, .1]], normed=True, cmap='jet')
plt.xlim(5,50)
plt.xlabel('E (keV)', ha='right', x=1.0)
plt.ylabel('A/E', ha='right', y=1.0)
plt.title("Run {}".format(sys.argv[1]))
cbar = plt.colorbar()
cbar.ax.set_ylabel('Counts (normalized)')
plt.tight_layout()
plt.show()
# make 1D hist
def plot_1D_hist(a,b):
df = pd.read_hdf('{}/t2_run{}.h5'.format(tier_dir,sys.argv[1]))
df['e_cal'] = pd.read_hdf('{}/Spectrum_{}.hdf5'.format(meta_dir,sys.argv[1]))['e_cal']
df = df.loc[(df.e_cal>=float(a))&(df.e_cal<=float(b))]
df_2 = pd.read_hdf('{}/t2_run{}.h5'.format(tier_dir,sys.argv[2]))
df_2['e_cal'] = pd.read_hdf('{}/Spectrum_{}.hdf5'.format(meta_dir,sys.argv[2]))['e_cal']
df_2 = df_2.loc[(df_2.e_cal>=float(a))&(df_2.e_cal<=float(b))]
plt.hist(df['current_max']/df['e_cal'], np.arange(0,.2,.0010), histtype='step', density=True, label='run {}, {} < E < {} keV'.format(sys.argv[1],a,b))
plt.hist(df_2['current_max']/df_2['e_cal'], np.arange(0,.2,.0010), histtype='step', density=True, label='run {}, {} < E < {} keV'.format(sys.argv[2],a,b))
plt.xlabel('A/E', ha='right', x=1.0)
plt.ylabel('Counts (normalized)', ha='right', y=1.0)
plt.legend(frameon=True, loc='best', fontsize='small')
plt.show()
#plot_2D_hist()
plot_1D_hist(a=25,b=30)
def test():
if(len(sys.argv) != 3):
print('Usage: 2d_plots.py [run number 1] [run number 2]')
sys.exit()
with open("runDB.json") as f:
runDB = json.load(f)
tier_dir = os.path.expandvars(runDB["tier_dir"])
meta_dir = os.path.expandvars(runDB["meta_dir"])
df = pd.read_hdf('{}/t2_run{}.h5'.format(tier_dir,sys.argv[1]))
df['e_cal'] = pd.read_hdf('{}/Spectrum_{}_2.hdf5'.format(meta_dir,sys.argv[1]))['e_cal']
df['rise'] = df['tp100'] - df['tp50']
df_2 = pd.read_hdf('{}/t2_run{}.h5'.format(tier_dir,sys.argv[2]))
df_2['e_cal'] = pd.read_hdf('{}/Spectrum_{}_2.hdf5'.format(meta_dir,sys.argv[2]))['e_cal']
df_2['rise'] = df_2['tp100'] - df_2['tp50']
x = df_2['e_cal']
y = df_2['rise']
x2 = df['e_cal']
y2 = df['rise']
f = plt.figure(figsize=(5,5))
p1 = f.add_subplot(111, title='Kr83m - Background', xlabel='Energy (keV)', ylabel='tp100-tp50')
h1,xedg1,yedg1 = np.histogram2d(x, y, bins=[25,25], range=[[0,50],[0,1400]])
h2,xedg1,yedg1 = np.histogram2d(x2, y2, bins=[25,25], range=[[0,50],[0,1400]])
h1 = h1.T
h2 = h2.T
h3 = h1 - h2
#hMin, hMax = np.amin(h1), np.amax(h1)
# # im1 = p1.imshow(h1,cmap='jet',vmin=hMin,vmax=hMax, aspect='auto') #norm=LogNorm())
im1 = p1.imshow(h3,cmap='jet', origin='lower', aspect='auto', extent=[xedg1[0], xedg1[-1], yedg1[0], yedg1[-1]])
cb1 = f.colorbar(im1, ax=p1, fraction=0.037, pad=0.04)
#plt.hist2d(hist2[0],hist2[1], [xbins, ybins])
#plt.xlim(xlo, xhi)
#plt.ylim(ylo, yhi)
#plt.xlabel('Energy (keV)', ha='right', x=1.0)
#plt.ylabel('tp100 - t0', ha='right', y=1.0)
#cbar = plt.colorbar()
#cbar.ax.set_ylabel('Counts')
plt.tight_layout()
plt.show()
if __name__ == '__main__':
METHOD_NAME()
|
2,186 |
on tag deleted
|
# -----------------------------------------------------------------------------
# Getting Things GNOME! - a personal organizer for the GNOME desktop
# Copyright (c) 2008-2013 - Lionel Dricot & Bertrand Rousseau
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
""" Tag completion which is connected to LibLarch """
from gi.repository import Gtk
import unicodedata
FILTER_NAME = '@@TagCompletion'
def tag_filter(tag, parameters=None):
""" Show only regular tags which has some active tasks or the user has
changed an attribute (e.g. color, workview) => only important tags """
has_attributes = len(tag.get_all_attributes(butname=True)) > 0
return has_attributes or tag.get_active_tasks_count() > 0
def normalize_unicode(string):
""" Unicode characters with diacritic could have more than just one
representation. We force them to be in just one of them."""
return unicodedata.normalize('NFC', str(string))
def tag_match(completion, key, iterator, column):
""" Does key match an item in the list?
Don't match any item if only artefacts (!, @) are inserted
(don't show all tags) """
key = key.lower().lstrip()
if key in ['', '!', '@', '!@']:
return False
text = completion.get_model().get_value(iterator, column)
text = normalize_unicode(text.lower())
return text.startswith(key)
class TagCompletion(Gtk.EntryCompletion):
""" Tag completion which allows to enter 4 representation of a '@tag':
['@tag', '!@tag', 'tag', '!tag']
The user can choose wheter write tag with or without '@',
with or without '!' which is used for negation.
The list of tasks is updated by LibLarch callbacks """
def __init__(self, tree):
""" Initialize entry completion
Create a list store which is connected to a LibLarch and
kept updated. """
super().__init__()
self.tags = Gtk.ListStore(str)
tree = tree.get_basetree()
tree.add_filter(FILTER_NAME, tag_filter, {'flat': True})
tag_tree = tree.get_viewtree('tag_completion', False)
tag_tree.register_cllbck('node-added-inview', self._on_tag_added)
tag_tree.register_cllbck('node-deleted-inview', self.METHOD_NAME)
tag_tree.apply_filter(FILTER_NAME)
self.set_model(self.tags)
self.set_text_column(0)
self.set_match_func(tag_match, 0)
self.set_inline_completion(True)
self.set_inline_selection(True)
self.set_popup_single_match(False)
def _try_insert(self, name):
""" Insert an item into ListStore if it is not already there.
It keeps the list sorted. """
position = 0
for position, row in enumerate(self.tags, 1):
if row[0] == name:
# already there
return
elif row[0] > name:
position -= 1
break
self.tags.insert(position, (name, ))
def _on_tag_added(self, tag, path):
""" Add all variants of tag """
tag = normalize_unicode(tag)
self._try_insert(tag)
self._try_insert('!' + tag)
self._try_insert(tag[1:])
self._try_insert('!' + tag[1:])
def _try_delete(self, name):
""" Delete an item if it is in the list """
for row in self.tags:
if row[0] == name:
self.tags.remove(row.iter)
break
def METHOD_NAME(self, tag, path):
""" Delete all variants of tag """
tag = normalize_unicode(tag)
self._try_delete(tag)
self._try_delete('!' + tag)
self._try_delete(tag[1:])
self._try_delete('!' + tag[1:])
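# Behaviour sketch (tag name invented for illustration): adding '@home'
# through _on_tag_added() inserts the four completion variants '@home',
# '!@home', 'home' and '!home' into the sorted ListStore, so the tag can be
# typed with or without '@' and with or without the '!' negation;
# METHOD_NAME() removes the same four variants again when the tag disappears.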
|
2,187 |
remove weight norm
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modified from espnet(https://github.com/espnet/espnet)
import math
from typing import Optional
import paddle
from paddle import nn
from paddlespeech.t2s.models.vits.wavenet.residual_block import ResidualBlock
class WaveNet(nn.Layer):
"""WaveNet with global conditioning."""
def __init__(
self,
in_channels: int=1,
out_channels: int=1,
kernel_size: int=3,
layers: int=30,
stacks: int=3,
base_dilation: int=2,
residual_channels: int=64,
aux_channels: int=-1,
gate_channels: int=128,
skip_channels: int=64,
global_channels: int=-1,
dropout_rate: float=0.0,
bias: bool=True,
use_weight_norm: bool=True,
use_first_conv: bool=False,
use_last_conv: bool=False,
scale_residual: bool=False,
scale_skip_connect: bool=False, ):
"""Initialize WaveNet module.
Args:
in_channels (int):
Number of input channels.
out_channels (int):
Number of output channels.
kernel_size (int):
Kernel size of dilated convolution.
layers (int):
Number of residual block layers.
stacks (int):
Number of stacks i.e., dilation cycles.
base_dilation (int):
Base dilation factor.
residual_channels (int):
Number of channels in residual conv.
gate_channels (int):
Number of channels in gated conv.
skip_channels (int):
Number of channels in skip conv.
aux_channels (int):
Number of channels for local conditioning feature.
global_channels (int):
Number of channels for global conditioning feature.
dropout_rate (float):
Dropout rate. 0.0 means no dropout applied.
bias (bool):
Whether to use bias parameter in conv layer.
use_weight_norm (bool):
Whether to use weight norm. If set to true, it will be applied to all of the conv layers.
use_first_conv (bool):
Whether to use the first conv layers.
use_last_conv (bool):
Whether to use the last conv layers.
scale_residual (bool):
Whether to scale the residual outputs.
scale_skip_connect (bool):
Whether to scale the skip connection outputs.
"""
super().__init__()
self.layers = layers
self.stacks = stacks
self.kernel_size = kernel_size
self.base_dilation = base_dilation
self.use_first_conv = use_first_conv
self.use_last_conv = use_last_conv
self.scale_skip_connect = scale_skip_connect
# check the number of layers and stacks
assert layers % stacks == 0
layers_per_stack = layers // stacks
# define first convolution
if self.use_first_conv:
self.first_conv = nn.Conv1D(
in_channels, residual_channels, kernel_size=1, bias_attr=True)
# define residual blocks
self.conv_layers = nn.LayerList()
for layer in range(layers):
dilation = base_dilation**(layer % layers_per_stack)
conv = ResidualBlock(
kernel_size=kernel_size,
residual_channels=residual_channels,
gate_channels=gate_channels,
skip_channels=skip_channels,
aux_channels=aux_channels,
global_channels=global_channels,
dilation=dilation,
dropout_rate=dropout_rate,
bias=bias,
scale_residual=scale_residual, )
self.conv_layers.append(conv)
# define output layers
if self.use_last_conv:
self.last_conv = nn.Sequential(
nn.ReLU(),
nn.Conv1D(
skip_channels, skip_channels, kernel_size=1,
bias_attr=True),
nn.ReLU(),
nn.Conv1D(
skip_channels, out_channels, kernel_size=1, bias_attr=True),
)
# apply weight norm
if use_weight_norm:
self.apply_weight_norm()
def forward(
self,
x: paddle.Tensor,
x_mask: Optional[paddle.Tensor]=None,
c: Optional[paddle.Tensor]=None,
g: Optional[paddle.Tensor]=None, ) -> paddle.Tensor:
"""Calculate forward propagation.
Args:
x (Tensor):
Input noise signal (B, 1, T) if use_first_conv else (B, residual_channels, T).
x_mask (Optional[Tensor]):
Mask tensor (B, 1, T).
c (Optional[Tensor]):
Local conditioning features (B, aux_channels, T).
g (Optional[Tensor]):
Global conditioning features (B, global_channels, 1).
Returns:
Tensor:
Output tensor (B, out_channels, T) if use_last_conv else(B, residual_channels, T).
"""
# encode to hidden representation
if self.use_first_conv:
x = self.first_conv(x)
# residual block
skips = 0.0
for f in self.conv_layers:
x, h = f(x, x_mask=x_mask, c=c, g=g)
skips = skips + h
x = skips
if self.scale_skip_connect:
x = x * math.sqrt(1.0 / len(self.conv_layers))
# apply final layers
if self.use_last_conv:
x = self.last_conv(x)
return x
def apply_weight_norm(self):
def _apply_weight_norm(layer):
if isinstance(layer, (nn.Conv1D, nn.Conv2D)):
nn.utils.weight_norm(layer)
self.apply(_apply_weight_norm)
def remove_weight_norm(self):
def METHOD_NAME(layer):
try:
nn.utils.remove_weight_norm(layer)
except ValueError:
pass
self.apply(METHOD_NAME)
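# Construction sketch (channel sizes and the input shape are illustrative,
# not taken from any PaddleSpeech recipe):
#   wavenet = WaveNet(layers=6, stacks=3, residual_channels=16,
#                     gate_channels=32, skip_channels=16,
#                     use_first_conv=True, use_last_conv=True)
#   y = wavenet(paddle.randn([1, 1, 100]))  # -> (B, out_channels, T)
#   wavenet.remove_weight_norm()            # strip weight norm before export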
|
2,188 |
predict measurement
|
from functools import lru_cache
import numpy as np
from ..base import Property
from ..types.prediction import GaussianMeasurementPrediction
from ..types.update import Update
from ..models.measurement.linear import LinearGaussian
from ..updater.kalman import KalmanUpdater
class InformationKalmanUpdater(KalmanUpdater):
r"""A class which implements the update of information form of the Kalman filter. This is
conceptually very simple. The update proceeds as:
.. math::
Y_{k|k} = Y_{k|k-1} + H^{T}_k R^{-1}_k H_k
\mathbf{y}_{k|k} = \mathbf{y}_{k|k-1} + H^{T}_k R^{-1}_k \mathbf{z}_{k}
where :math:`\mathbf{y}_{k|k-1}` is the predicted information state and :math:`Y_{k|k-1}` the
predicted information matrix which form the :class:`~.InformationStatePrediction` object. The
measurement matrix :math:`H_k` and measurement covariance :math:`R_k` are those in the Kalman
filter (see tutorial 1). An :class:`~.InformationStateUpdate` object is returned.
Note
----
Analogously with the :class:`~.InformationKalmanPredictor`, the measurement model is queried
for the existence of an :meth:`inverse_covar()` property. If absent, the :meth:`covar()` is
inverted.
"""
measurement_model: LinearGaussian = Property(
default=None,
doc="A linear Gaussian measurement model. This need not be defined if "
"a measurement model is provided in the measurement. If no model "
"specified on construction, or in the measurement, then error "
"will be thrown.")
def _inverse_measurement_covar(self, measurement_model, **kwargs):
"""Return the inverse of the measurement covariance (or calculate it)
Parameters
----------
measurement_model
The measurement model to be queried
**kwargs : various, optional
These are passed to :meth:`~.LinearGaussian.covar()`
Returns
-------
: :class:`numpy.ndarray`
The inverse of the measurement covariance, :math:`R_k^{-1}`
"""
if hasattr(measurement_model, 'inverse_covar'):
inv_measurement_covar = measurement_model.inverse_covar(**kwargs)
else:
inv_measurement_covar = np.linalg.inv(measurement_model.covar(**kwargs))
return inv_measurement_covar
@lru_cache()
def METHOD_NAME(self, predicted_state, measurement_model=None, **kwargs):
r"""There's no direct analogue of a predicted measurement in the information form. This
method is therefore provided to return the predicted measurement as would the standard
Kalman updater. This is mainly for compatibility as it's not anticipated that it would
be used in the usual operation of the information filter.
Parameters
----------
predicted_state : :class:`~.State`
The predicted state in information form :math:`\mathbf{y}_{k|k-1}`
measurement_model : :class:`~.MeasurementModel`
The measurement model. If omitted, the model in the updater object
is used
**kwargs : various
These are passed to :meth:`~.MeasurementModel.matrix()`
Returns
-------
: :class:`~.GaussianMeasurementPrediction`
The measurement prediction, :math:`H \mathbf{x}_{k|k-1}`
"""
# If a measurement model is not specified then use the one that's
# native to the updater
measurement_model = self._check_measurement_model(measurement_model)
hh = self._measurement_matrix(predicted_state=predicted_state,
measurement_model=measurement_model,
**kwargs)
predicted_covariance = np.linalg.inv(predicted_state.precision)
predicted_state_mean = predicted_covariance @ predicted_state.state_vector
predicted_measurement = hh @ predicted_state_mean
innovation_covariance = hh @ predicted_covariance @ hh.T + measurement_model.covar()
return GaussianMeasurementPrediction(predicted_measurement, innovation_covariance,
predicted_state.timestamp,
cross_covar=predicted_covariance @ hh.T)
def update(self, hypothesis, **kwargs):
r"""The Information filter update (corrector) method. Given a hypothesised association
between a predicted information state and an actual measurement, calculate the posterior
information state.
Parameters
----------
hypothesis : :class:`~.SingleHypothesis`
the prediction-measurement association hypothesis. This hypothesis
carries a predicted information state.
**kwargs : various
These are passed to :meth:`predict_measurement`
Returns
-------
: :class:`~.InformationStateUpdate`
The posterior information state with information state :math:`\mathbf{y}_{k|k}` and
precision :math:`Y_{k|k}`
"""
measurement_model = hypothesis.measurement.measurement_model
measurement_model = self._check_measurement_model(measurement_model)
pred_info_mean = hypothesis.prediction.state_vector
hh = measurement_model.matrix()
invr = self._inverse_measurement_covar(measurement_model)
posterior_precision = hypothesis.prediction.precision + hh.T @ invr @ hh
posterior_information_mean = pred_info_mean + hh.T @ invr @ \
hypothesis.measurement.state_vector
if self.force_symmetric_covariance:
posterior_precision = (posterior_precision + posterior_precision.T)/2
return Update.from_state(hypothesis.prediction, posterior_information_mean,
posterior_precision,
timestamp=hypothesis.measurement.timestamp, hypothesis=hypothesis)
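# Numerical illustration of the update equations in the class docstring
# (plain numpy, outside the Stone Soup object model; the values are
# arbitrary):
#   H = np.array([[1., 0.]])           # measurement matrix H_k
#   R_inv = np.array([[4.]])           # inverse measurement covariance R_k^-1
#   Y_pred = np.eye(2)                 # predicted information matrix Y_{k|k-1}
#   y_pred = np.array([[0.5], [0.]])   # predicted information state y_{k|k-1}
#   z = np.array([[1.2]])              # measurement z_k
#   Y_post = Y_pred + H.T @ R_inv @ H
#   y_post = y_pred + H.T @ R_inv @ z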
|
2,189 |
movie start recording
|
# Copyright 1996-2023 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .wb import wb
from .node import Node
from .robot import Robot
import ctypes
import typing
class Supervisor(Robot):
def __init__(self):
super().__init__()
def getRoot(self) -> Node:
return Node()
def getSelf(self) -> Node:
return Node(tag=0)
def getFromDef(self, d: str) -> Node:
node = Node(DEF=d)
return node if node._ref else None
def getFromId(self, id) -> Node:
node = Node(id=id)
return node if node._ref else None
def getFromDevice(self, tag) -> Node:
node = Node(tag=tag)
return node if node._ref else None
def getSelected(self) -> Node:
node = Node(selected=True)
return node if node._ref else None
def setLabel(self, id, label, x, y, size, color, transparency=0, font='Arial'):
wb.wb_supervisor_set_label(id, str.encode(label), ctypes.c_double(x), ctypes.c_double(y), ctypes.c_double(size),
color, ctypes.c_double(transparency), str.encode(font))
def simulationQuit(self, status: int):
wb.wb_supervisor_simulation_quit(status)
def simulationSetMode(self, mode: int):
self.simulation_mode = mode
def simulationGetMode(self) -> int:
return self.simulation_mode
def simulationReset(self):
wb.wb_supervisor_simulation_reset()
def simulationResetPhysics(self):
wb.wb_supervisor_simulation_reset_physics()
def worldLoad(self, filename: str):
wb.wb_supervisor_world_load(str.encode(filename))
def worldSave(self, filename: str = None) -> int:
if not filename:
return wb.wb_supervisor_world_save(None)
return wb.wb_supervisor_world_save(str.encode(filename))
def worldReload(self):
wb.wb_supervisor_world_reload()
def exportImage(self, filename: str, quality: int):
wb.wb_supervisor_export_image(str.encode(filename), quality)
def METHOD_NAME(self, filename: str, width: int, height: int, codec: int, quality: int, acceleration: int,
caption: bool):
wb.wb_supervisor_movie_start_recording(str.encode(filename), width, height, codec, quality, acceleration,
1 if caption else 0)
def movieStopRecording(self):
wb.wb_supervisor_movie_stop_recording()
def movieIsReady(self):
return wb.wb_supervisor_movie_is_ready() != 0
def movieFailed(self):
return wb.wb_supervisor_movie_failed() != 0
def animationStartRecording(self, filename: str):
return wb.wb_supervisor_animation_start_recording(str.encode(filename))
def animationStopRecording(self):
return wb.wb_supervisor_animation_stop_recording()
def virtualRealityHeadsetIsUsed(self):
return wb.wb_supervisor_virtual_reality_headset_is_used() != 0
def virtualRealityHeadsetGetPosition(self) -> typing.List[float]:
return wb.wb_supervisor_virtual_reality_headset_get_position()
def virtualRealityHeadsetGetOrientation(self) -> typing.List[float]:
return wb.wb_supervisor_virtual_reality_headset_get_orientation()
@property
def simulation_mode(self) -> int:
return wb.wb_supervisor_simulation_get_mode()
@simulation_mode.setter
def simulation_mode(self, mode: int):
return wb.wb_supervisor_simulation_set_mode(mode)
Supervisor.SIMULATION_MODE_PAUSE = 0
Supervisor.SIMULATION_MODE_REAL_TIME = 1
Supervisor.SIMULATION_MODE_FAST = 2
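# Usage sketch for a supervisor controller (file name and parameter values are
# illustrative; codec/quality choices depend on the Webots build):
#   supervisor = Supervisor()
#   supervisor.METHOD_NAME('run.mp4', 1280, 720, codec=0, quality=90,
#                          acceleration=1, caption=False)
#   for _ in range(100):
#       supervisor.step(32)            # step() is inherited from Robot
#   supervisor.movieStopRecording()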
|
2,190 |
bug708901
|
# Minimal tests for dis module
from __future__ import print_function
import sys
import unittest
import six
import xdis.std as dis
from xdis import IS_PYPY, PYTHON3, PYTHON_VERSION_TRIPLE
if PYTHON3:
from io import StringIO
else:
from test.test_support import run_unittest
from StringIO import StringIO
# Turn off black formatting so we
# can use custom line numbering
# that we make use of in testing
# fmt: off
def METHOD_NAME():
for _ in range(1,
10):
pass
def bug1333982(x=[]):
assert 0, ([s for s in x] +
1)
pass
# fmt: on
if PYTHON_VERSION_TRIPLE[0:2] == (2, 7):
def _f(a):
print(a)
return 1
dis_f = """\
%3d: 0 LOAD_GLOBAL 0 (print)
3 LOAD_FAST 0 (a)
6 CALL_FUNCTION 1 (1 positional, 0 named)
9 POP_TOP
%3d: 10 LOAD_CONST 1 (1)
13 RETURN_VALUE
""" % (
_f.func_code.co_firstlineno + 1,
_f.func_code.co_firstlineno + 2,
)
dis_bug708901 = """\
%3d: 0 SETUP_LOOP 23 (to 26)
3 LOAD_GLOBAL 0 (range)
6 LOAD_CONST 1 (1)
%3d: 9 LOAD_CONST 2 (10)
12 CALL_FUNCTION 2 (2 positional, 0 named)
15 GET_ITER
>> 16 FOR_ITER 6 (to 25)
19 STORE_FAST 0 (_)
%3d: 22 JUMP_ABSOLUTE 16 (to 16)
>> 25 POP_BLOCK
>> 26 LOAD_CONST 0 (None)
29 RETURN_VALUE
""" % (
METHOD_NAME.func_code.co_firstlineno + 1,
METHOD_NAME.func_code.co_firstlineno + 2,
METHOD_NAME.func_code.co_firstlineno + 3,
)
dis_bug708901pypy = """\
%3d: 0 SETUP_LOOP 23 (to 26)
3 LOAD_GLOBAL 0 (range)
6 LOAD_CONST 1 (1)
%3d: 9 LOAD_CONST 2 (10)
12 CALL_FUNCTION 2 (2 positional, 0 named)
15 GET_ITER
>> 16 FOR_ITER 6 (to 25)
19 STORE_FAST 0 (_)
%3d: 22 JUMP_ABSOLUTE 16 (to 16)
>> 25 POP_BLOCK
>> 26 LOAD_CONST 0 (None)
29 RETURN_VALUE
""" % (
METHOD_NAME.func_code.co_firstlineno + 1,
METHOD_NAME.func_code.co_firstlineno + 2,
METHOD_NAME.func_code.co_firstlineno + 3,
)
dis_bug1333982 = """\
%3d: 0 LOAD_CONST 1 (0)
3 POP_JUMP_IF_TRUE 41 (to 41)
6 LOAD_GLOBAL 0 (AssertionError)
9 BUILD_LIST 0
12 LOAD_FAST 0 (x)
15 GET_ITER
>> 16 FOR_ITER 12 (to 31)
19 STORE_FAST 1 (s)
22 LOAD_FAST 1 (s)
25 LIST_APPEND 2
28 JUMP_ABSOLUTE 16 (to 16)
%3d: >> 31 LOAD_CONST 2 (1)
34 BINARY_ADD
35 CALL_FUNCTION 1 (1 positional, 0 named)
38 RAISE_VARARGS 1
%3d: >> 41 LOAD_CONST 0 (None)
44 RETURN_VALUE
""" % (
bug1333982.func_code.co_firstlineno + 1,
bug1333982.func_code.co_firstlineno + 2,
bug1333982.func_code.co_firstlineno + 3,
)
_BIG_LINENO_FORMAT = """\
%3d: 0 LOAD_GLOBAL 0 (spam)
3 POP_TOP
4 LOAD_CONST 0 (None)
7 RETURN_VALUE
"""
class DisTests(unittest.TestCase):
def do_disassembly_test(self, func, expected):
s = StringIO()
save_stdout = sys.stdout
sys.stdout = s
dis.dis(func)
sys.stdout = save_stdout
got = s.getvalue()
# Trim trailing blanks (if any).
lines = got.split("\n")
# lines = [line.rstrip() for line in lines]
expected = expected.split("\n")
import difflib
if expected != lines:
self.fail(
"events did not match expectation:\n"
+ "\n".join(difflib.ndiff(expected, lines))
)
def test_opmap(self):
self.assertEqual(dis.opmap["STOP_CODE"], 0)
self.assertIn(dis.opmap["LOAD_CONST"], dis.hasconst)
self.assertIn(dis.opmap["STORE_NAME"], dis.hasname)
def test_opname(self):
opname = dis.opname
opmap = dis.opmap
self.assertEqual(opname[opmap["LOAD_FAST"]], "LOAD_FAST")
def test_boundaries(self):
opmap = dis.opmap
self.assertEqual(opmap["EXTENDED_ARG"], dis.EXTENDED_ARG)
self.assertEqual(opmap["STORE_NAME"], dis.HAVE_ARGUMENT)
def test_dis(self):
self.do_disassembly_test(_f, dis_f)
def test_bug_708901(self):
if IS_PYPY:
self.do_disassembly_test(METHOD_NAME, dis_bug708901pypy)
else:
self.do_disassembly_test(METHOD_NAME, dis_bug708901)
def test_bug_1333982(self):
# This one is checking bytecodes generated for an `assert` statement,
# so fails if the tests are run with -O. Skip this test then.
if __debug__:
self.do_disassembly_test(bug1333982, dis_bug1333982)
else:
self.skipTest("need asserts, run without -O")
def test_big_linenos(self):
def func(count):
namespace = {}
func = "def foo():\n " + "".join(["\n "] * count + ["spam\n"])
exec_fn = six.__dict__["exec_"]
exec_fn(func, namespace)
return namespace["foo"]
# Test all small ranges
for i in range(1, 300):
expected = _BIG_LINENO_FORMAT % (i + 2)
self.do_disassembly_test(func(i), expected)
# Test some larger ranges too
for i in range(300, 5000, 10):
expected = _BIG_LINENO_FORMAT % (i + 2)
self.do_disassembly_test(func(i), expected)
if PYTHON_VERSION_TRIPLE >= (3, 0):
# Write a test for showing Python 2.x Long types in Python 3
pass
def test_main():
run_unittest(DisTests)
if __name__ == "__main__":
test_main()
|
2,191 |
display details
|
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details
# http://www.gnu.org/licenses/gpl-3.0.txt
from ... import objects
from ...completion import candidates
from .. import CmdError
from ..base import torrent as base
from . import _mixin as mixin
from ._table import TERMSIZE, print_table
from ...logging import make_logger # isort:skip
log = make_logger(__name__)
class AddTorrentsCmd(base.AddTorrentsCmdbase,
mixin.make_request):
provides = {'cli'}
@classmethod
def completion_candidates_posargs(cls, args):
"""Complete positional arguments"""
# Use current working directory as base
return candidates.fs_path(args.curarg.before_cursor,
base='.',
glob=r'*.torrent')
class TorrentDetailsCmd(base.TorrentDetailsCmdbase,
mixin.make_request, mixin.select_torrents):
provides = {'cli'}
async def METHOD_NAME(self, torrent_id):
from ...views.details import SECTIONS
needed_keys = set(('name',))
for _section in SECTIONS:
for _item in _section['items']:
needed_keys.update(_item.needed_keys)
response = await self.make_request(
objects.srvapi.torrent.torrents((torrent_id,), keys=needed_keys),
quiet=True)
if not response.torrents:
raise CmdError()
else:
torrent = response.torrents[0]
if TERMSIZE.columns is None:
self._machine_readable(torrent)
else:
self._human_readable(torrent)
def _human_readable(self, torrent):
from ...views.details import SECTIONS
label_width = max(len(item.label)
for section in SECTIONS
for item in section['items'])
for i,section in enumerate(SECTIONS):
if i != 0:
print() # Newline between sections
print('\033[1m' + section['title'].upper() + '\033[0m')
for item in section['items']:
print(' %s: %s' % (item.label.rjust(label_width), item.human_readable(torrent)))
def _machine_readable(self, torrent):
from ...views.details import SECTIONS
for section in SECTIONS:
for item in section['items']:
print('%s\t%s' % (item.label.lower(), item.machine_readable(torrent)))
class ListTorrentsCmd(base.ListTorrentsCmdbase,
mixin.make_request, mixin.select_torrents,
mixin.only_supported_columns):
provides = {'cli'}
async def make_torrent_list(self, tfilter, sort, columns):
from ...views.torrent import COLUMNS as TORRENT_COLUMNS
# Remove columns that aren't supported by CLI interface (e.g. 'marked')
columns = self.only_supported_columns(columns, TORRENT_COLUMNS)
# Get needed keys
if tfilter is None:
keys = set(sort.needed_keys)
else:
keys = set(sort.needed_keys + tfilter.needed_keys)
# Get wanted torrents and sort them
for colname in columns:
keys.update(TORRENT_COLUMNS[colname].needed_keys)
response = await self.make_request(
objects.srvapi.torrent.torrents(tfilter, keys=keys),
quiet=True)
torrents = sort.apply(response.torrents)
# Show table of found torrents
if torrents:
print_table(torrents, columns, TORRENT_COLUMNS)
else:
raise CmdError()
class TorrentMagnetURICmd(base.TorrentMagnetURICmdbase,
mixin.select_torrents):
provides = {'cli'}
def display_uris(self, uris):
for uri in uris:
print(uri)
class MoveTorrentsCmd(base.MoveTorrentsCmdbase,
mixin.make_request, mixin.select_torrents):
provides = {'cli'}
@classmethod
async def completion_candidates_posargs(cls, args):
"""Complete positional arguments"""
if args.curarg_index == 1:
log.debug('Getting torrent filter candidates from %r', candidates.torrent_filter)
return await candidates.torrent_filter(args.curarg)
elif args.curarg_index == 2:
return candidates.fs_path(args.curarg.before_cursor,
base=objects.cfg['srv.path.complete'],
directories_only=True)
class RemoveTorrentsCmd(base.RemoveTorrentsCmdbase,
mixin.make_request, mixin.select_torrents, mixin.ask_yes_no):
provides = {'cli'}
async def show_list_of_hits(self, tfilter):
import sys
if sys.stdout.isatty():
cmd = 'ls --sort name %s' % tfilter
await objects.cmdmgr.run_async(cmd)
def remove_list_of_hits(self):
pass
class RenameCmd(base.RenameCmdbase,
mixin.make_request, mixin.select_torrents, mixin.select_files):
provides = {'cli'}
class StartTorrentsCmd(base.StartTorrentsCmdbase,
mixin.make_request, mixin.select_torrents):
provides = {'cli'}
class StopTorrentsCmd(base.StopTorrentsCmdbase,
mixin.make_request, mixin.select_torrents):
provides = {'cli'}
class VerifyTorrentsCmd(base.VerifyTorrentsCmdbase,
mixin.make_request, mixin.select_torrents):
provides = {'cli'}
|
2,192 |
make template
|
from xbmcgui import Dialog
from tmdbhelper.lib.addon.plugin import get_localized
from tmdbhelper.lib.addon.consts import PLAYERS_BASEDIR_SAVE, PLAYERS_BASEDIR_TEMPLATES
from tmdbhelper.lib.files.futils import get_files_in_folder, read_file, write_to_file
from tmdbhelper.lib.api.kodi.rpc import get_directory
from tmdbhelper.lib.items.listitem import ListItem
from tmdbhelper.lib.addon.dialog import BusyDialog
class CreatePlayer():
def __init__(self):
self.plugin_name = '' # Name of player file $STR_PLUGINNAME
self.plugin_id = '' # plugin.video.xyz #STR_PLUGINID
self.search_url_movie = '' # plugin://plugin.video.xyz?info=search $STR_PLUGINMOVIESEARCHURL
self.search_url_tv = '' # plugin://plugin.video.xyz?info=search $STR_PLUGINTVSEARCHURL
self.search_url_movie_query = '{title_url}'
self.search_url_tv_query = '{showname_url}'
self.template = {} # The template to use for building our plugin
self.template_filename = ''
self.filename = '' # The filename to save the player as
def reconfigure_urls(self):
def _reconfigure_url(url_name, url):
new_url = Dialog().input(f'{url_name} URL\n{get_localized(32435)}', defaultt=url)
if 'QUERYKEY' not in new_url:
Dialog().ok(get_localized(32433), f'{new_url}\n\n{get_localized(32433)}. {get_localized(32434)}.')
return _reconfigure_url(url_name, url)
new_url = new_url.replace('QUERYKEY', f'STR_PLUGIN{url_name}QUERYKEY')
return new_url
for url_name, key in [('MOVIE', 'search_url_movie'), ('TV', 'search_url_tv')]:
self.__dict__[key] = _reconfigure_url(url_name, self.__dict__[key])
def get_template(self):
template_files = get_files_in_folder(PLAYERS_BASEDIR_TEMPLATES, r'.*\.json')
x = Dialog().select(get_localized(32432), [i for i in template_files])
if x == -1:
return
self.template_filename = template_files[x]
return read_file(PLAYERS_BASEDIR_TEMPLATES + self.template_filename)
def METHOD_NAME(self):
template = self.template
if not template:
return
template = template.replace('STR_PLUGINNAME', self.plugin_name)
template = template.replace('STR_PLUGINID', self.plugin_id)
template = template.replace('STR_PLUGINMOVIESEARCHURL', self.search_url_movie)
template = template.replace('STR_PLUGINTVSEARCHURL', self.search_url_tv)
template = template.replace('STR_PLUGINMOVIEQUERYKEY', self.search_url_movie_query)
template = template.replace('STR_PLUGINTVQUERYKEY', self.search_url_tv_query)
return template
def _select_from_dir(self, url, header='', use_current='', parent_url=''):
with BusyDialog():
plugins_dir = []
if parent_url and url != f'plugin://{self.plugin_id}':
plugins_dir.append({'label': get_localized(32426), 'file': parent_url})
if use_current:
plugins_dir.append({'label': use_current, 'file': url})
plugins_dir += get_directory(url)
plugins_gui = [
ListItem(
label=i.get('label'), label2=i.get('file', ''),
art={'thumb': i.get('thumbnail', '')}).get_listitem()
for i in plugins_dir]
x = Dialog().select(header, plugins_gui, useDetails=True)
if x == -1:
return
return plugins_dir[x]
def get_plugin_id(self):
try:
plugin = self._select_from_dir('addons://sources/video', 'Select plugin')
return plugin['file'].replace('plugin://', '')
except (KeyError, AttributeError, TypeError):
return
def save_player(self):
filename = f'autogen.{self.plugin_id}.json'
write_to_file(self.template, PLAYERS_BASEDIR_SAVE, filename, join_addon_data=False)
return filename
def get_search_urls(self):
def _get_search_url(url, header='', parent_urls=None):
parent_urls = parent_urls or {}
try:
new_item = self._select_from_dir(
url, header, use_current=get_localized(32427),
parent_url=parent_urls.get(url))
new_url = new_item['file']
if new_item['label'] == get_localized(32427):
return new_url
except (KeyError, AttributeError, TypeError):
return
if new_url not in parent_urls:
parent_urls[new_url] = url
return _get_search_url(new_url, header, parent_urls=parent_urls)
self.search_url_movie = _get_search_url(f'plugin://{self.plugin_id}', get_localized(32428))
if self.search_url_movie and Dialog().yesno(get_localized(32429), get_localized(32431)):
self.search_url_tv = self.search_url_movie
return
self.search_url_tv = _get_search_url(f'plugin://{self.plugin_id}', get_localized(32429))
def create_player(self):
self.plugin_id = self.get_plugin_id()
if not self.plugin_id:
return
self.template = self.get_template()
if not self.template:
return
if 'plugins_' not in self.template_filename:
self.get_search_urls()
if not self.search_url_movie and not self.search_url_tv:
return
if 'urlquery_' in self.template_filename:
self.reconfigure_urls()
self.plugin_name = Dialog().input(get_localized(32430))
if not self.plugin_name:
return
self.template = self.METHOD_NAME()
if not self.template:
return
self.filename = self.save_player()
return self.filename
|
2,193 |
to dict
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.28
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1ContainerStateWaiting(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'message': 'str',
'reason': 'str'
}
attribute_map = {
'message': 'message',
'reason': 'reason'
}
def __init__(self, message=None, reason=None, local_vars_configuration=None): # noqa: E501
"""V1ContainerStateWaiting - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._message = None
self._reason = None
self.discriminator = None
if message is not None:
self.message = message
if reason is not None:
self.reason = reason
@property
def message(self):
"""Gets the message of this V1ContainerStateWaiting. # noqa: E501
Message regarding why the container is not yet running. # noqa: E501
:return: The message of this V1ContainerStateWaiting. # noqa: E501
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""Sets the message of this V1ContainerStateWaiting.
Message regarding why the container is not yet running. # noqa: E501
:param message: The message of this V1ContainerStateWaiting. # noqa: E501
:type: str
"""
self._message = message
@property
def reason(self):
"""Gets the reason of this V1ContainerStateWaiting. # noqa: E501
(brief) reason the container is not yet running. # noqa: E501
:return: The reason of this V1ContainerStateWaiting. # noqa: E501
:rtype: str
"""
return self._reason
@reason.setter
def reason(self, reason):
"""Sets the reason of this V1ContainerStateWaiting.
(brief) reason the container is not yet running. # noqa: E501
:param reason: The reason of this V1ContainerStateWaiting. # noqa: E501
:type: str
"""
self._reason = reason
def METHOD_NAME(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.METHOD_NAME() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.METHOD_NAME()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].METHOD_NAME())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.METHOD_NAME())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ContainerStateWaiting):
return False
return self.METHOD_NAME() == other.METHOD_NAME()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ContainerStateWaiting):
return True
return self.METHOD_NAME() != other.METHOD_NAME()
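# Added usage sketch (not part of the generated client): a quick round trip
# through the model, assuming the masked METHOD_NAME above is `to_dict` (as
# this entry's label suggests). The field values are purely illustrative.
if __name__ == "__main__":
    waiting = V1ContainerStateWaiting(
        message='Back-off pulling image "example:latest"',  # hypothetical value
        reason='ImagePullBackOff',
    )
    # to_dict() walks `openapi_types` and returns plain Python containers.
    print(waiting.to_dict())  # {'message': '...', 'reason': 'ImagePullBackOff'}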
|
2,194 |
reconnect a client
|
"""
Tests TCP communication between a server that accepts multiple clients.
"""
from testplan.common.utils.context import context
from testplan.testing.multitest import MultiTest, testsuite, testcase
from testplan.testing.multitest.driver.tcp import TCPServer, TCPClient
@testsuite
class TCPTestsuite:
"""TCP tests for a server and 2 clients."""
def __init__(self):
self._conn_idx = {}
def setup(self, env):
"""Will be executed before the testcase."""
# Client 1 connects, server accepts and stores the connection index,
env.client1.connect()
self._conn_idx["client1"] = env.server.accept_connection()
# Client 2 connects, server accepts and stores the connection index,
env.client2.connect()
self._conn_idx["client2"] = env.server.accept_connection()
@testcase
def send_and_receive_msg(self, env, result):
"""
The TCP communication is the following:
1. Client 1 sends a message.
        2. Client 2 sends a message.
3. Server receives client 1 message.
4. Server responds to client 1.
5. Server receives client 2 message.
6. Server responds to client 2.
"""
idx1 = self._conn_idx["client1"]
idx2 = self._conn_idx["client2"]
msg1 = env.client1.cfg.name
result.log("Client1 is sending: {}".format(msg1))
bytes_sent1 = env.client1.send_text(msg1)
msg2 = env.client2.cfg.name
result.log("Client2 is sending: {}".format(msg2))
bytes_sent2 = env.client2.send_text(msg2)
received = env.server.receive_text(size=bytes_sent1, conn_idx=idx1)
result.equal(received, msg1, "Server received")
response = "Hello {}".format(received)
result.log("Server is responding: {}".format(response))
resp_size = env.server.send_text(response, conn_idx=idx1)
result.equal(
env.client1.receive_text(size=resp_size),
response,
"Client1 received",
)
received = env.server.receive_text(size=bytes_sent2, conn_idx=idx2)
result.equal(received, msg2, "Server received")
response = "Hello {}".format(received)
result.log("Server is responding: {}".format(response))
resp_size = env.server.send_text(response, conn_idx=idx2)
result.equal(
env.client2.receive_text(size=resp_size),
response,
"Client2 received",
)
@testcase
def METHOD_NAME(self, env, result):
"""
Tests the ability to reconnect a client within the testcase.
After reconnection, the server accepts the new connection
        and assigns a new connection index for this client.
"""
prev_idx = self._conn_idx["client1"]
env.client1.reconnect()
self._conn_idx["client1"] = env.server.accept_connection()
new_idx = self._conn_idx["client1"]
result.gt(new_idx, prev_idx, "Client has new connection index")
msg = "Connection old index: {}, new index: {}".format(
prev_idx, new_idx
)
bytes_sent = env.client1.send_text(msg)
        # Default conn_idx to receive from is the most recent connection.
received = env.server.receive_text(size=bytes_sent)
result.log(received)
def get_multitest(name):
"""
Creates and returns a new MultiTest instance to be added to the plan.
The environment is a server and 2 clients connecting using the context
    functionality that retrieves host/port of the server after it is started.
"""
test = MultiTest(
name=name,
suites=[TCPTestsuite()],
environment=[
TCPServer(name="server"),
TCPClient(
name="client1",
host=context("server", "{{host}}"),
port=context("server", "{{port}}"),
connect_at_start=False,
),
TCPClient(
name="client2",
host=context("server", "{{host}}"),
port=context("server", "{{port}}"),
connect_at_start=False,
),
],
)
return test
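# Added sketch (not in the original example): how get_multitest() above would
# typically be wired into a Testplan entry point. The plan name and MultiTest
# name are placeholder assumptions.
import sys

from testplan import test_plan


@test_plan(name="TCPConnections")
def main(plan):
    # Schedule the TCP MultiTest built by the factory above.
    plan.add(get_multitest("TCPMultiTest"))


if __name__ == "__main__":
    sys.exit(not main())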
|
2,195 |
tear down
|
#
# Copyright (C) 2010-2022 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import espressomd
import espressomd.lb
import unittest as ut
import unittest_decorators as utx
import numpy as np
# Define the LB parameters
TIME_STEP = 0.008
AGRID = .4
GRID_SIZE = 6
KVISC = 4
DENS = 2.3
F = 5.5 / GRID_SIZE**3
GAMMA = 1
LB_PARAMS = {'agrid': AGRID,
'density': DENS,
'kinematic_viscosity': KVISC,
'tau': TIME_STEP,
'ext_force_density': np.array([-.7 * F, .9 * F, .8 * F])}
class TestLBMomentumConservation:
"""
Tests momentum conservation for an LB coupled to a particle, where opposing
forces are applied to LB and particle. The test should uncover issues
with boundary and ghost layer handling.
"""
system = espressomd.System(box_l=[GRID_SIZE * AGRID] * 3)
system.time_step = TIME_STEP
system.cell_system.skin = 0.01
n_nodes = system.cell_system.get_state()["n_nodes"]
def setUp(self):
self.set_cellsystem()
self.lbf = self.lb_class(**LB_PARAMS, **self.lb_params)
def METHOD_NAME(self):
self.system.lb = None
self.system.thermostat.turn_off()
self.system.part.clear()
def test(self):
self.system.lb = self.lbf
self.system.thermostat.set_lb(LB_fluid=self.lbf, gamma=GAMMA, seed=1)
np.testing.assert_allclose(
self.lbf.ext_force_density,
LB_PARAMS["ext_force_density"])
# Initial momentum before integration = 0
mom_tol = 1E-4 if self.lbf.single_precision else 1E-12
np.testing.assert_allclose(
self.system.analysis.linear_momentum(), [0., 0., 0.], atol=mom_tol)
ext_fluid_force = self.system.volume() * LB_PARAMS["ext_force_density"]
p = self.system.part.add(
pos=self.system.box_l / 2, ext_force=-ext_fluid_force, v=[.2, .4, .6])
initial_momentum = np.array(self.system.analysis.linear_momentum())
np.testing.assert_allclose(initial_momentum, np.copy(p.v) * p.mass,
atol=mom_tol)
while True:
self.system.integrator.run(500)
measured_momentum = self.system.analysis.linear_momentum()
coupling_force = -(p.f - p.ext_force)
compensation = -TIME_STEP / 2 * coupling_force
np.testing.assert_allclose(measured_momentum + compensation,
initial_momentum, atol=self.atol)
if np.linalg.norm(p.f) < 0.01 \
and np.all(np.abs(p.pos) > 10.1 * self.system.box_l):
break
        # Make sure the particle has crossed the periodic boundaries
self.assertGreater(max(np.abs(p.v)) * self.system.time,
self.system.box_l[0])
@ut.skipIf(TestLBMomentumConservation.n_nodes == 1,
"LB with regular decomposition already tested with 2 MPI ranks")
@utx.skipIfMissingFeatures(["WALBERLA", "EXTERNAL_FORCES"])
class TestLBMomentumConservationRegularWalberla(
TestLBMomentumConservation, ut.TestCase):
lb_class = espressomd.lb.LBFluidWalberla
lb_params = {"single_precision": False}
atol = 1.2e-4
def set_cellsystem(self):
self.system.cell_system.set_regular_decomposition()
@ut.skipIf(TestLBMomentumConservation.n_nodes == 1,
"LB with regular decomposition already tested with 2 MPI ranks")
@utx.skipIfMissingFeatures(["WALBERLA", "EXTERNAL_FORCES"])
class TestLBMomentumConservationRegularWalberlaSinglePrecision(
TestLBMomentumConservation, ut.TestCase):
lb_class = espressomd.lb.LBFluidWalberla
lb_params = {"single_precision": True}
atol = 6.5e-4
def set_cellsystem(self):
self.system.cell_system.set_regular_decomposition()
@utx.skipIfMissingFeatures(["WALBERLA", "EXTERNAL_FORCES"])
class TestLBCPUMomentumConservationHybridNSquareWalberla(
TestLBMomentumConservation, ut.TestCase):
lb_class = espressomd.lb.LBFluidWalberla
lb_params = {"single_precision": False}
atol = 1.2e-4
def set_cellsystem(self):
self.system.cell_system.set_hybrid_decomposition(
n_square_types={0}, cutoff_regular=1)
@utx.skipIfMissingFeatures(["WALBERLA", "EXTERNAL_FORCES"])
class TestLBCPUMomentumConservationHybridNSquareWalberlaSinglePrecision(
TestLBMomentumConservation, ut.TestCase):
lb_class = espressomd.lb.LBFluidWalberla
lb_params = {"single_precision": True}
atol = 6.5e-4
def set_cellsystem(self):
self.system.cell_system.set_hybrid_decomposition(
n_square_types={0}, cutoff_regular=1)
@utx.skipIfMissingFeatures(["WALBERLA", "EXTERNAL_FORCES"])
class TestLBCPUMomentumConservationHybridRegularWalberla(
TestLBMomentumConservation, ut.TestCase):
lb_class = espressomd.lb.LBFluidWalberla
lb_params = {"single_precision": False}
atol = 1.2e-4
def set_cellsystem(self):
self.system.cell_system.set_hybrid_decomposition(
n_square_types={1}, cutoff_regular=1)
@utx.skipIfMissingFeatures(["WALBERLA", "EXTERNAL_FORCES"])
class TestLBCPUMomentumConservationHybridRegularWalberlaSinglePrecision(
TestLBMomentumConservation, ut.TestCase):
lb_class = espressomd.lb.LBFluidWalberla
lb_params = {"single_precision": True}
atol = 6.5e-4
def set_cellsystem(self):
self.system.cell_system.set_hybrid_decomposition(
n_square_types={1}, cutoff_regular=1)
@utx.skipIfMissingFeatures(["WALBERLA", "EXTERNAL_FORCES"])
class TestLBMomentumConservationNSquareWalberla(
TestLBMomentumConservation, ut.TestCase):
lb_class = espressomd.lb.LBFluidWalberla
lb_params = {"single_precision": False}
atol = 1.2e-4
def set_cellsystem(self):
self.system.cell_system.set_n_square()
@utx.skipIfMissingFeatures(["WALBERLA", "EXTERNAL_FORCES"])
class TestLBMomentumConservationNSquareWalberlaSinglePrecision(
TestLBMomentumConservation, ut.TestCase):
lb_class = espressomd.lb.LBFluidWalberla
lb_params = {"single_precision": True}
atol = 6.5e-4
def set_cellsystem(self):
self.system.cell_system.set_n_square()
if __name__ == "__main__":
ut.main()
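# Added note (not part of the ESPResSo test): the file above parametrises one
# shared test body through a mixin plus thin ut.TestCase subclasses that only
# pin class attributes. A detached miniature of that pattern, runnable without
# ESPResSo, might look like this; names and values are illustrative.
class ToleranceMixinSketch:
    atol = None  # pinned by each concrete subclass

    def test_tolerance_defined(self):
        self.assertIsNotNone(self.atol)


class DoublePrecisionSketch(ToleranceMixinSketch, ut.TestCase):
    atol = 1.2e-4


class SinglePrecisionSketch(ToleranceMixinSketch, ut.TestCase):
    atol = 6.5e-4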
|
2,196 |
label func
|
from __future__ import division
import json
import os
import copy
import collections
import argparse
import csv
import neuroglancer
import neuroglancer.cli
import numpy as np
class State(object):
def __init__(self, path):
self.path = path
self.body_labels = collections.OrderedDict()
def load(self):
if os.path.exists(self.path):
with open(self.path, 'r') as f:
self.body_labels = collections.OrderedDict(json.load(f))
def save(self):
tmp_path = self.path + '.tmp'
with open(tmp_path, 'w') as f:
            f.write(json.dumps(list(self.body_labels.items())))
os.rename(tmp_path, self.path)
Body = collections.namedtuple('Body', ['segment_id', 'num_voxels', 'bbox_start', 'bbox_size'])
class Tool(object):
def __init__(self, state_path, bodies, labels, segmentation_url, image_url, num_to_prefetch):
self.state = State(state_path)
self.num_to_prefetch = num_to_prefetch
self.viewer = neuroglancer.Viewer()
self.bodies = bodies
self.state.load()
self.total_voxels = sum(x.num_voxels for x in bodies)
self.cumulative_voxels = np.cumsum([x.num_voxels for x in bodies])
with self.viewer.txn() as s:
s.layers['image'] = neuroglancer.ImageLayer(source=image_url)
s.layers['segmentation'] = neuroglancer.SegmentationLayer(source=segmentation_url)
s.show_slices = False
s.concurrent_downloads = 256
s.gpu_memory_limit = 2 * 1024 * 1024 * 1024
s.layout = '3d'
key_bindings = [
['bracketleft', 'prev-index'],
['bracketright', 'next-index'],
['home', 'first-index'],
['end', 'last-index'],
['control+keys', 'save'],
]
label_keys = ['keyd', 'keyf', 'keyg', 'keyh']
for label, label_key in zip(labels, label_keys):
key_bindings.append([label_key, 'label-%s' % label])
def METHOD_NAME(s, label=label):
self.set_label(s, label)
self.viewer.actions.add('label-%s' % label, METHOD_NAME)
self.viewer.actions.add('prev-index', self._prev_index)
self.viewer.actions.add('next-index', self._next_index)
self.viewer.actions.add('first-index', self._first_index)
self.viewer.actions.add('last-index', self._last_index)
self.viewer.actions.add('save', self.save)
with self.viewer.config_state.txn() as s:
for key, command in key_bindings:
s.input_event_bindings.viewer[key] = command
s.status_messages['help'] = ('KEYS: ' + ' | '.join('%s=%s' % (key, command)
for key, command in key_bindings))
self.index = -1
self.set_index(self._find_one_after_last_labeled_index())
def _find_one_after_last_labeled_index(self):
body_index = 0
while self.bodies[body_index].segment_id in self.state.body_labels:
body_index += 1
return body_index
def set_index(self, index):
if index == self.index:
return
body = self.bodies[index]
self.index = index
def modify_state_for_body(s, body):
s.layers['segmentation'].segments = frozenset([body.segment_id])
s.voxel_coordinates = body.bbox_start + body.bbox_size // 2
with self.viewer.txn() as s:
modify_state_for_body(s, body)
prefetch_states = []
for i in range(self.num_to_prefetch):
prefetch_index = self.index + i + 1
if prefetch_index >= len(self.bodies):
break
prefetch_state = copy.deepcopy(self.viewer.state)
prefetch_state.layout = '3d'
modify_state_for_body(prefetch_state, self.bodies[prefetch_index])
prefetch_states.append(prefetch_state)
with self.viewer.config_state.txn() as s:
s.prefetch = [
neuroglancer.PrefetchState(state=prefetch_state, priority=-i)
for i, prefetch_state in enumerate(prefetch_states)
]
label = self.state.body_labels.get(body.segment_id, '')
with self.viewer.config_state.txn() as s:
s.status_messages['status'] = (
'[Segment %d/%d : %d/%d voxels labeled = %.3f fraction] label=%s' %
(index, len(self.bodies), self.cumulative_voxels[index], self.total_voxels,
self.cumulative_voxels[index] / self.total_voxels, label))
def save(self, s):
self.state.save()
def set_label(self, s, label):
self.state.body_labels[self.bodies[self.index].segment_id] = label
self.set_index(self.index + 1)
def _first_index(self, s):
self.set_index(0)
def _last_index(self, s):
self.set_index(max(0, self._find_one_after_last_labeled_index() - 1))
def _next_index(self, s):
self.set_index(self.index + 1)
def _prev_index(self, s):
self.set_index(max(0, self.index - 1))
if __name__ == '__main__':
ap = argparse.ArgumentParser()
neuroglancer.cli.add_server_arguments(ap)
ap.add_argument('--image-url', required=True, help='Neuroglancer data source URL for image')
ap.add_argument('--segmentation-url',
required=True,
help='Neuroglancer data source URL for segmentation')
ap.add_argument('--state', required=True, help='Path to proofreading state file')
ap.add_argument('--bodies', required=True, help='Path to list of bodies to proofread')
ap.add_argument('--labels', nargs='+', help='Labels to use')
ap.add_argument('--prefetch', type=int, default=10, help='Number of bodies to prefetch')
args = ap.parse_args()
neuroglancer.cli.handle_server_arguments(args)
bodies = []
with open(args.bodies, 'r') as f:
csv_reader = csv.DictReader(f)
for row in csv_reader:
bodies.append(
Body(
segment_id=int(row['id']),
num_voxels=int(row['num_voxels']),
bbox_start=np.array([
int(row['bbox.start.x']),
int(row['bbox.start.y']),
int(row['bbox.start.z'])
],
dtype=np.int64),
bbox_size=np.array(
[int(row['bbox.size.x']),
int(row['bbox.size.y']),
int(row['bbox.size.z'])],
dtype=np.int64),
))
tool = Tool(
state_path=args.state,
image_url=args.image_url,
segmentation_url=args.segmentation_url,
labels=args.labels,
bodies=bodies,
num_to_prefetch=args.prefetch,
)
print(tool.viewer)
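# Added sketch (not part of the original tool): the CSV layout that --bodies
# expects, inferred from the DictReader fields read above. The numbers are
# made-up placeholders.
def write_example_bodies_csv(path='bodies.csv'):
    fieldnames = [
        'id', 'num_voxels',
        'bbox.start.x', 'bbox.start.y', 'bbox.start.z',
        'bbox.size.x', 'bbox.size.y', 'bbox.size.z',
    ]
    with open(path, 'w') as f:
        writer = csv.DictWriter(f, fieldnames=fieldnames)
        writer.writeheader()
        writer.writerow({
            'id': 12345, 'num_voxels': 67890,
            'bbox.start.x': 100, 'bbox.start.y': 200, 'bbox.start.z': 300,
            'bbox.size.x': 64, 'bbox.size.y': 64, 'bbox.size.z': 64,
        })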
|
2,197 |
test
|
#!/usr/bin/env python3
# SPDX-License-Identifier: LGPL-2.1-only
#
# Advanced cgset functionality test - '-b' '-g' <controller> (cgroup v2)
#
# Copyright (c) 2023 Oracle and/or its affiliates.
# Author: Kamalesh Babulal <[email protected]>
from cgroup import Cgroup, CgroupVersion
from systemd import Systemd
from run import RunError
import consts
import ftests
import sys
import os
CONTROLLER = 'cpu'
SYSTEMD_CGNAME = '064_cg_in_scope'
OTHER_CGNAME = '064_cg_not_in_scope'
SLICE = 'libcgtests.slice'
SCOPE = 'test064.scope'
CONFIG_FILE_NAME = os.path.join(os.getcwd(), '064cgconfig.conf')
def prereqs(config):
result = consts.TEST_PASSED
cause = None
if CgroupVersion.get_version('cpu') != CgroupVersion.CGROUP_V2:
result = consts.TEST_SKIPPED
cause = 'This test requires the cgroup v2 cpu controller'
return result, cause
if config.args.container:
result = consts.TEST_SKIPPED
cause = 'This test cannot be run within a container'
return result, cause
def setup(config):
result = consts.TEST_PASSED
cause = None
pid = Systemd.write_config_with_pid(config, CONFIG_FILE_NAME, SLICE, SCOPE)
Cgroup.configparser(config, load_file=CONFIG_FILE_NAME)
# create and check if the cgroup was created under the systemd default path
if not Cgroup.create_and_validate(config, None, SYSTEMD_CGNAME):
result = consts.TEST_FAILED
cause = (
'Failed to create systemd delegated cgroup {} under '
'/sys/fs/cgroup/{}/{}/'.format(SYSTEMD_CGNAME, SLICE, SCOPE)
)
return result, cause
# With cgroup v2, we can't enable controller for the child cgroup, while
# a task is attached to test064.scope. Attach the task from test064.scope
# to child cgroup SYSTEMD_CGNAME and then enable cpu controller in the parent,
# so that the cgroup.get() works
Cgroup.set(config, cgname=SYSTEMD_CGNAME, setting='cgroup.procs', value=pid)
Cgroup.set(
config, cgname=(os.path.join(SLICE, SCOPE)), setting='cgroup.subtree_control',
value='+cpu', ignore_systemd=True
)
# create and check if the cgroup was created under the controller root
if not Cgroup.create_and_validate(config, CONTROLLER, OTHER_CGNAME, ignore_systemd=True):
result = consts.TEST_FAILED
cause = (
'Failed to create cgroup {} under '
'/sys/fs/cgroup/{}/'.format(OTHER_CGNAME, CONTROLLER)
)
return result, cause
def METHOD_NAME(config):
result = consts.TEST_PASSED
cause = None
Cgroup.set_and_validate(config, SYSTEMD_CGNAME, 'cpu.weight', '200')
Cgroup.set_and_validate(config, OTHER_CGNAME, 'cpu.weight', '300', ignore_systemd=True)
try:
Cgroup.set(config, SYSTEMD_CGNAME, 'cpu.weight', '400', ignore_systemd=True)
except RunError as re:
if 'requested group parameter does not exist' not in re.stderr:
raise re
else:
result = consts.TEST_FAILED
cause = 'Setting cpu.weight on {} erroneously succeeded'.format(SYSTEMD_CGNAME)
try:
Cgroup.set(config, OTHER_CGNAME, 'cpu.weight', '500')
except RunError as re:
if 'requested group parameter does not exist' not in re.stderr:
raise re
else:
result = consts.TEST_FAILED
tmp_cause = 'Setting cpu.weight on {} erroneously succeeded'.format(OTHER_CGNAME)
cause = '\n'.join(filter(None, [cause, tmp_cause]))
return result, cause
def teardown(config):
Systemd.remove_scope_slice_conf(config, SLICE, SCOPE, CONTROLLER, CONFIG_FILE_NAME)
    # In case the error occurs before the creation of OTHER_CGNAME,
    # let's ignore the exception
try:
Cgroup.delete(config, CONTROLLER, OTHER_CGNAME, ignore_systemd=True)
except RunError as re:
        if 'No such file or directory' not in re.stderr:
            raise re
def main(config):
[result, cause] = prereqs(config)
if result != consts.TEST_PASSED:
return [result, cause]
[result, cause] = setup(config)
if result != consts.TEST_PASSED:
return [result, cause]
try:
[result, cause] = METHOD_NAME(config)
finally:
teardown(config)
return [result, cause]
if __name__ == '__main__':
config = ftests.parse_args()
# this test was invoked directly. run only it
config.args.num = int(os.path.basename(__file__).split('-')[0])
sys.exit(ftests.main(config))
# vim: set et ts=4 sw=4:
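# Added sketch (not part of the test): roughly the raw cgset commands this
# test exercises through the Cgroup helpers. The '-b' flag is taken from the
# test's header comment; treat the exact flags as assumptions, and note this
# needs the libcgroup tools and root privileges.
def cgset_sketch():
    import subprocess
    # Name resolved inside the delegated systemd scope (libcgtests.slice/test064.scope).
    subprocess.run(['cgset', '-r', 'cpu.weight=200', SYSTEMD_CGNAME], check=True)
    # With '-b' the name is resolved from the cgroup root instead of the scope.
    subprocess.run(['cgset', '-b', '-r', 'cpu.weight=300', OTHER_CGNAME], check=True)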
|
2,198 |
annotate parameter
|
#!/usr/bin/python2.7
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python library generator.
This module generates Python code from a Google API discovery documents.
"""
__author__ = '[email protected](Alex Kesling), [email protected] (Tony Aiuto)'
from apiclient import discovery as discovery_client
from googleapis.codegen import api
from googleapis.codegen import api_library_generator
from googleapis.codegen import language_model
from googleapis.codegen import utilities
# NOTE(user): This was originally copied from another generator...
# so if there is any weirdness from a Python perspective, put it in that
# context when thinking about it.
class PythonLanguageModel(language_model.LanguageModel):
"""A LanguageModel for Python."""
language = 'python'
_SCHEMA_TYPE_TO_PYTHON_TYPE = {
'any': 'object',
'array': 'list',
'boolean': 'bool',
'integer': 'long',
'number': 'float',
'object': 'object',
'string': 'str',
}
_PYTHON_KEYWORDS = discovery_client.RESERVED_WORDS
# We can not create classes which match a Python keyword or built in object
# type.
RESERVED_CLASS_NAMES = _PYTHON_KEYWORDS
array_of_policy = language_model.NamingPolicy(format_string='list')
map_of_policy = language_model.NamingPolicy(format_string='dict')
def __init__(self):
super(PythonLanguageModel, self).__init__(class_name_delimiter='.')
self._SUPPORTED_TYPES['array'] = self._List
self._SUPPORTED_TYPES['boolean'] = self._Boolean
self._SUPPORTED_TYPES['object'] = self._Dictionary
def _Boolean(self, data_value):
"""Convert provided boolean to language specific literal."""
return unicode(bool(data_value.value))
def _Dictionary(self, data_value):
"""Convert provided object to language specific literal."""
wrapper = '{%s}'
pairs = []
for key, val in data_value.value.iteritems():
val = self.RenderDataValue(val)
pairs.append('"%s": %s' % (key, val))
return wrapper % ', '.join(pairs)
def _List(self, data_value):
"""Convert provided array to language specific literal."""
wrapper = '[%s]'
items = [self.RenderDataValue(element) for element in data_value.value]
return wrapper % ', '.join(items)
def GetCodeTypeFromDictionary(self, def_dict):
"""Gets an element's data type from its JSON definition.
Overrides the default.
Args:
def_dict: (dict) The definition dictionary for this type
Returns:
A name suitable for use as a Python data type
"""
json_type = def_dict.get('type', 'string')
native_type = (self._SCHEMA_TYPE_TO_PYTHON_TYPE.get(json_type) or
self._SCHEMA_TYPE_TO_PYTHON_TYPE.get('string'))
return native_type
# pylint: disable=unused-argument
def ToMemberName(self, s, the_api):
"""Convert a wire format name into a suitable Python variable name."""
return s.replace('-', '_')
PYTHON_LANGUAGE_MODEL = PythonLanguageModel()
class PythonGenerator(api_library_generator.ApiLibraryGenerator):
"""The Python code generator."""
def __init__(self, discovery_doc, options=None):
super(PythonGenerator, self).__init__(
PythonApi,
discovery_doc,
language='python',
language_model=PYTHON_LANGUAGE_MODEL,
options=options)
# pylint: disable=unused-argument
def AnnotateMethod(self, the_api, method, resource):
"""Correct naming for APIClient methods in Python.
Overrides default implementation.
Args:
the_api: (Api) The Api.
method: (Method) The Method to annotate.
resource: (Resource) The Resource which owns this Method.
"""
method.SetTemplateValue('codeName',
discovery_client.fix_method_name(method.codeName))
def METHOD_NAME(self, method, parameter):
"""Correct naming for APIClient parameters in Python.
Overrides default implementation.
Args:
method: (Method) The Method this parameter belongs to.
parameter: (Parameter) The Parameter to annotate.
"""
parameter.SetTemplateValue('codeName',
discovery_client.key2param(parameter.codeName))
# pylint: disable=unused-argument
def AnnotateResource(self, the_api, resource):
"""Correct naming for APIClient resources in Python.
Overrides default implementation.
Args:
the_api: (Api) The Api which owns this resource.
resource: (Resource) The Resource to annotate.
"""
resource.SetTemplateValue('codeName',
(discovery_client
.fix_method_name(resource.codeName)))
class PythonApi(api.Api):
"""An Api with Python annotations."""
def __init__(self, discovery_doc, **unused_kwargs):
super(PythonApi, self).__init__(discovery_doc)
# pylint: disable=unused-argument
def ToClassName(self, s, element, element_type=None):
"""Convert a discovery name to a suitable Python class name.
Overrides the default.
Args:
s: (str) A rosy name of data element.
element: (object) The object we need a class name for.
element_type: (str) The kind of element (resource|method) to name.
Returns:
A name suitable for use as a class in the generator's target language.
"""
if s.lower() in PythonLanguageModel.RESERVED_CLASS_NAMES:
# Prepend the service name
return '%s%s' % (utilities.CamelCase(self.values['name']),
utilities.CamelCase(s))
return utilities.CamelCase(s)
|
2,199 |
test get eol chars no eol
|
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
"""Tests for sourcecode.py"""
import os
import sys
import pytest
from spyder.utils import sourcecode
def test_normalize_eols():
text = "a\nb\r\nc\rd"
assert sourcecode.normalize_eols(text) == "a\nb\nc\nd"
def test_get_primary_at():
code = 'import functools\nfunctools.partial'
assert sourcecode.get_primary_at(code, len(code)) == 'functools.partial'
def test_get_identifiers():
code = 'import functools\nfunctools.partial'
assert set(sourcecode.get_identifiers(code)) == set(['import', 'functools',
'functools.partial'])
def test_split_source():
code = 'import functools\nfunctools.partial'
assert sourcecode.split_source(code) == ['import functools', 'functools.partial']
code = code.replace('\n', '\r\n')
assert sourcecode.split_source(code) == ['import functools', 'functools.partial']
def test_path_components():
if not os.name == 'nt':
path_components0 = ['', '', 'documents', 'test', 'test.py']
else:
path_components0 = ['c:', '', 'documents', 'test', 'test.py']
path0 = os.path.join(*path_components0)
assert sourcecode.path_components(path0) == path_components0
def test_differentiate_prefix():
if not os.name == 'nt':
path_components0 = ['','','documents','test','test.py']
path_components1 = ['','','documents','projects','test','test.py']
else:
path_components0 = ['c:','','documents','test','test.py']
path_components1 = ['c:','','documents','projects','test','test.py']
diff_path0 = os.path.join(*['test'])
diff_path1 = os.path.join(*['projects','test'])
assert sourcecode.differentiate_prefix(
path_components0, path_components1) == diff_path0
assert sourcecode.differentiate_prefix(
path_components1, path_components0) == diff_path1
def test_get_same_name_files():
files_path_list = []
if not os.name == 'nt':
fname0 = os.path.join(*['','','documents','test','test.py'])
files_path_list.append(fname0)
fname1 = os.path.join(*['','','documents','projects','test','test.py'])
files_path_list.append(fname1)
same_name_files = [['','','documents','test','test.py'],
['','','documents','projects','test','test.py']]
else:
fname0 = os.path.join(*['c:','','documents','test','test.py'])
files_path_list.append(fname0)
fname1 = os.path.join(*['c:','','documents','projects','test','test.py'])
files_path_list.append(fname1)
same_name_files = [['c:','','documents','test','test.py'],
['c:','','documents','projects','test','test.py']]
assert sourcecode.get_same_name_files(files_path_list
,'test.py') == same_name_files
def test_shortest_path():
if not os.name == 'nt':
files_path_list =[['','','documents','test','test.py'],
['','','documents','projects','test','test.py']]
shortest_path = os.path.join(*['','','documents','test','test.py'])
else:
files_path_list =[['c:','','documents','test','test.py'],
['c:','','documents','projects','test','test.py']]
shortest_path = os.path.join(*['c:','','documents','test','test.py'])
assert sourcecode.shortest_path(files_path_list) == shortest_path
def test_disambiguate_fname():
files_path_list = []
if not os.name == 'nt':
fname0 = os.path.join(*['','','documents','test','test.py'])
files_path_list.append(fname0)
fname1 = os.path.join(*['','','documents','projects','test','test.py'])
files_path_list.append(fname1)
else:
fname0 = os.path.join(*['c:','','documents','test','test.py'])
files_path_list.append(fname0)
fname1 = os.path.join(*['c:','','documents','projects','test','test.py'])
files_path_list.append(fname1)
title0 = 'test.py - ' + os.path.join(*['test'])
title1 = 'test.py - ' + os.path.join(*['projects','test'])
assert sourcecode.disambiguate_fname(files_path_list, fname0) == title0
assert sourcecode.disambiguate_fname(files_path_list, fname1) == title1
def test_get_eol_chars():
eol_chars = sourcecode.get_eol_chars('foo\r\n')
assert eol_chars == '\r\n'
def METHOD_NAME():
eol_chars = sourcecode.get_eol_chars('foo')
if os.name == 'nt':
assert eol_chars == "\r\n"
elif sys.platform.startswith('linux'):
assert eol_chars == "\n"
elif sys.platform == 'darwin':
assert eol_chars == "\r"
if __name__ == '__main__':
pytest.main()
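# Added sketch (not Spyder's implementation): a minimal re-implementation of
# the behaviour the last two tests pin down, as a reading aid. It returns the
# text's EOL sequence, falling back to the platform convention the tests
# expect when the text contains no line endings.
def get_eol_chars_sketch(text):
    for eol in ('\r\n', '\n', '\r'):
        if eol in text:
            return eol
    if os.name == 'nt':
        return '\r\n'
    if sys.platform == 'darwin':
        return '\r'
    return '\n'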
|