id (int64, 0-300k) | label (string, 1-74 chars, ⌀ = may be empty) | text (string, 4k-8k chars) |
---|---|---|
3,900 | target humidity |
"""Support for humidifier and dehumidifier."""
import logging
from homeassistant.const import * # noqa: F401
from homeassistant.components.humidifier.const import *
from homeassistant.components.humidifier import (
DOMAIN as ENTITY_DOMAIN,
HumidifierEntity,
HumidifierEntityFeature, # v2022.5
HumidifierDeviceClass,
)
from . import (
DOMAIN,
CONF_MODEL,
XIAOMI_CONFIG_SCHEMA as PLATFORM_SCHEMA, # noqa: F401
MiotToggleEntity,
async_setup_config_entry,
bind_services_to_entries,
)
from .core.miot_spec import (
MiotSpec,
MiotService,
)
from .fan import MiotModesSubEntity
_LOGGER = logging.getLogger(__name__)
DATA_KEY = f'{ENTITY_DOMAIN}.{DOMAIN}'
MODE_OFF = 'Off'
SERVICE_TO_METHOD = {}
async def async_setup_entry(hass, config_entry, async_add_entities):
await async_setup_config_entry(hass, config_entry, async_setup_platform, async_add_entities, ENTITY_DOMAIN)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
hass.data.setdefault(DATA_KEY, {})
hass.data[DOMAIN]['add_entities'][ENTITY_DOMAIN] = async_add_entities
config['hass'] = hass
model = str(config.get(CONF_MODEL) or '')
spec = hass.data[DOMAIN]['miot_specs'].get(model)
entities = []
if isinstance(spec, MiotSpec):
for srv in spec.get_services(ENTITY_DOMAIN, 'dehumidifier'):
if not srv.get_property('on'):
continue
entities.append(MiotHumidifierEntity(config, srv))
for entity in entities:
hass.data[DOMAIN]['entities'][entity.unique_id] = entity
async_add_entities(entities, update_before_add=True)
bind_services_to_entries(hass, SERVICE_TO_METHOD)
class MiotHumidifierEntity(MiotToggleEntity, HumidifierEntity):
def __init__(self, config: dict, miot_service: MiotService):
super().__init__(miot_service, config=config, logger=_LOGGER)
self._prop_power = miot_service.get_property('on')
self._prop_mode = miot_service.get_property('mode')
self._prop_fan_level = miot_service.get_property('fan_level')
self._prop_water_level = miot_service.get_property('water_level')
self._prop_temperature = miot_service.get_property('temperature')
self._prop_target_humi = miot_service.get_property('target_humidity')
self._prop_humidity = miot_service.get_property('relative_humidity', 'humidity')
self._environment = miot_service.spec.get_service('environment')
if self._environment:
self._prop_temperature = self._environment.get_property('temperature') or self._prop_temperature
self._prop_target_humi = self._environment.get_property('target_humidity') or self._prop_target_humi
self._prop_humidity = self._environment.get_property('relative_humidity', 'humidity') or self._prop_humidity
self._humidifier_mode = None
self._mode_props = [self._prop_mode, self._prop_fan_level]
self._mode_props = list(filter(lambda x: x, self._mode_props))
if self._mode_props:
self._humidifier_mode = self._mode_props.pop(0)
self._supported_features = HumidifierEntityFeature.MODES
async def async_added_to_hass(self):
await super().async_added_to_hass()
self._vars['target_humidity_ratio'] = self.custom_config_number('target_humidity_ratio')
async def async_update(self):
await super().async_update()
if not self._available:
return
if self._prop_water_level and self._prop_water_level.writeable:
self._update_sub_entities(
[self._prop_water_level.name],
domain='number_select',
)
add_fans = self._add_entities.get('fan')
for p in self._mode_props:
pnm = p.full_name
if self._humidifier_mode and pnm == self._humidifier_mode.full_name:
continue
if pnm in self._subs:
self._subs[pnm].update_from_parent()
elif add_fans:
self._subs[pnm] = MiotModesSubEntity(self, p)
add_fans([self._subs[pnm]], update_before_add=True)
@property
def device_class(self):
if cls := self.get_device_class(HumidifierDeviceClass):
return cls
typ = f'{self._model} {self._miot_service.spec.type}'
if HumidifierDeviceClass.DEHUMIDIFIER.value in typ or '.derh.' in typ:
return HumidifierDeviceClass.DEHUMIDIFIER
return HumidifierDeviceClass.HUMIDIFIER
@property
def METHOD_NAME(self):
if not self._prop_target_humi:
return None
num = int(self._prop_target_humi.from_dict(self._state_attrs) or 0)
if fac := self._vars.get('target_humidity_ratio'):
num = round(num * fac)
return num
def set_humidity(self, humidity: int):
if not self._prop_target_humi:
return False
num = humidity
if self._prop_target_humi.value_range:
stp = self._prop_target_humi.range_step()
num = round(humidity / stp) * stp
if fac := self._vars.get('target_humidity_ratio'):
num = round(num / fac)
elif self._prop_target_humi.value_list:
num = None
vls = self._prop_target_humi.list_value(None)
vls.sort()
for n in vls:
if humidity >= n or num is None:
num = n
if num is None:
return False
return self.set_property(self._prop_target_humi, num)
@property
def min_humidity(self):
if not self._prop_target_humi:
return DEFAULT_MIN_HUMIDITY
if self._prop_target_humi.value_list:
vls = self._prop_target_humi.list_value(None)
vls.sort()
return vls[0]
num = self._prop_target_humi.range_min()
if fac := self._vars.get('target_humidity_ratio'):
num = round(num * fac)
return num
@property
def max_humidity(self):
if not self._prop_target_humi:
return DEFAULT_MAX_HUMIDITY
if self._prop_target_humi.value_list:
vls = self._prop_target_humi.list_value(None)
vls.sort()
return vls[-1]
num = self._prop_target_humi.range_max()
if fac := self._vars.get('target_humidity_ratio'):
num = round(num * fac)
return num
@property
def mode(self):
if not self.is_on:
return MODE_OFF
if not self._humidifier_mode:
return None
val = self._humidifier_mode.from_dict(self._state_attrs)
if val is None:
return None
return self._humidifier_mode.list_description(val)
@property
def available_modes(self):
mds = [MODE_OFF]
if self._humidifier_mode:
mds.extend(self._humidifier_mode.list_descriptions() or [])
return mds
def set_mode(self, mode: str):
if mode == MODE_OFF:
return self.turn_off()
if not self._humidifier_mode:
return False
val = self._humidifier_mode.list_value(mode)
if val is None:
return False
return self.set_property(self._humidifier_mode, val)
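# A minimal standalone sketch (not tied to Home Assistant) of the rounding that
# set_humidity() performs above: the requested value is snapped to the miot
# property's range step and then scaled by the optional `target_humidity_ratio`
# custom config. `_demo_round_target_humidity` is a hypothetical helper used
# only for illustration.
def _demo_round_target_humidity(humidity, step=10, ratio=None):
    num = round(humidity / step) * step  # snap to the property's value-range step
    if ratio:
        num = round(num / ratio)  # undo the display scaling before writing to the device
    return num
# _demo_round_target_humidity(47, step=10)             # -> 50
# _demo_round_target_humidity(47, step=10, ratio=0.5)  # -> 100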
| 3,901 | trace log |
# -*- coding: utf-8 -*-
# vim:expandtab:autoindent:tabstop=4:shiftwidth=4:filetype=python:textwidth=0:
# License: GPL2 or later see COPYING
# Written by Michael Brown
# Copyright (C) 2007 Michael E Brown <[email protected]>
import functools
import inspect
import logging
import os
import sys
# defaults to module verbose log
# does a late binding on log. Forwards all attributes to logger.
# works around problem where reconfiguring the logging module means loggers
# configured before reconfig don't output.
class getLog(object):
# pylint: disable=unused-argument,too-few-public-methods
def __init__(self, name=None, prefix="", *args, **kargs):
if name is None:
frame = inspect.getouterframes(inspect.currentframe())[1][0]
name = frame.f_globals["__name__"]
self.name = prefix + name
def __getattr__(self, name):
logger = logging.getLogger(self.name)
return getattr(logger, name)
# emulates logic in logging module to ensure we only log
# messages that logger is enabled to produce.
def doLog(logger, level, *args, **kargs):
if logger.manager.disable >= level:
return
if logger.isEnabledFor(level):
try:
logger.handle(logger.makeRecord(logger.name, level, *args, **kargs))
except TypeError:
del(kargs["func"])
logger.handle(logger.makeRecord(logger.name, level, *args, **kargs))
def safe_repr(arg):
""" Generally repr() can fail when called before __init__(), we will workaround this case """
try:
return repr(arg)
except AttributeError:
return str(type(arg))
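# A tiny sketch of the failure safe_repr() guards against: a __repr__ that reads
# an attribute only __init__() would have set. `_Broken` is a made-up class for
# illustration only.
class _Broken:
    def __init__(self):
        self.name = "ok"
    def __repr__(self):
        return "<_Broken %s>" % self.name
# obj = _Broken.__new__(_Broken)  # instance created without running __init__
# repr(obj)       # raises AttributeError: 'name'
# safe_repr(obj)  # returns "<class '...._Broken'>" instead of raising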
def METHOD_NAME(logger=None):
def noop(func):
return func
def decorator(func):
@functools.wraps(func)
def trace(*args, **kw):
# default to logger that was passed by module, but
# can override by passing logger=foo as function parameter.
# make sure this doesn't conflict with one of the parameters
# you are expecting
filename = os.path.normcase(inspect.getsourcefile(func))
func_name = func.__name__
if hasattr(func, 'func_code'):
lineno = func.func_code.co_firstlineno
else:
lineno = func.__code__.co_firstlineno
l2 = kw.get('logger', logger)
if l2 is None:
l2 = logging.getLogger("trace.%s" % func.__module__)
if isinstance(l2, str):
l2 = logging.getLogger(l2)
message = "ENTER %s("
message = message + ', '.join([safe_repr(arg) for arg in args])
if args and kw:
message += ', '
message += ', '.join("%s=%s" % (k, safe_repr(v)) for k, v in kw.items())
message = message + ")"
frame = inspect.getouterframes(inspect.currentframe())[1][0]
doLog(l2, logging.INFO, os.path.normcase(frame.f_code.co_filename),
frame.f_lineno, message, args=[func_name], exc_info=None,
func=frame.f_code.co_name)
try:
result = "Bad exception raised: Exception was not a derived "\
"class of 'Exception'"
try:
result = func(*args, **kw)
except (KeyboardInterrupt, Exception) as e:
result = "EXCEPTION RAISED"
doLog(l2, logging.INFO, filename, lineno,
"EXCEPTION: %s\n", args=[e],
exc_info=sys.exc_info(), func=func_name)
raise
finally:
doLog(l2, logging.INFO, filename, lineno,
"LEAVE %s --> %s\n", args=[func_name, result],
exc_info=None, func=func_name)
return result
return trace
#end of trace()
if os.environ.get("MOCK_TRACE_LOG", "true") == "false":
return noop
if logging.getLogger("trace").propagate:
return decorator
else:
return noop
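# Per the checks above, tracing becomes a noop either when the MOCK_TRACE_LOG
# environment variable is set to "false" or when the "trace" logger has
# propagation disabled, e.g.:
#   MOCK_TRACE_LOG=false python some_script.py
#   logging.getLogger("trace").propagate = False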
# unit tests...
if __name__ == "__main__":
logging.basicConfig(
level=logging.WARNING,
format='%(name)s %(levelname)s %(filename)s, %(funcName)s, Line: %(lineno)d: %(message)s',)
log = getLog("foobar.bubble")
root = getLog(name="")
log.setLevel(logging.WARNING)
root.setLevel(logging.DEBUG)
log.debug(" --> debug")
log.error(" --> error")
log.warning(" --> warning")
@METHOD_NAME(log)
# pylint: disable=unused-argument
def testFunc(arg1, arg2="default", *args, **kargs):
return 42
testFunc("hello", "world", logger=root)
testFunc("happy", "joy", name="skippy")
testFunc("hi")
@METHOD_NAME(root)
def testFunc22():
return testFunc("archie", "bunker")
testFunc22()
@METHOD_NAME(root)
def testGen():
yield 1
yield 2
for j in testGen():
log.debug("got: %s", j)
@METHOD_NAME()
def anotherFunc(*args):
# pylint: disable=no-value-for-parameter
return testFunc(*args)
anotherFunc("pretty")
getLog()
| 3,902 | test gradient nulls |
"""
Test of r3.gradient
@author Anna Petrasova
"""
from grass.gunittest.case import TestCase
from grass.gunittest.main import test
r3univar_test_grad_x = """
n=600
null_cells=0
cells=600
min=0.00902566899999995
max=0.0993248405000001
range=0.0902991715000001
mean=0.0641879624599999
mean_of_abs=0.0641879624599999
stddev=0.0243482677445681
variance=0.000592838142161176
coeff_var=37.9327631091908
sum=38.512777476
"""
r3univar_test_grad_y = """
n=600
null_cells=0
cells=600
min=-0.0990409449999998
max=-0.00774536350000012
range=0.0912955814999997
mean=-0.0563959154616667
mean_of_abs=0.0563959154616667
stddev=0.0244377519801364
variance=0.000597203721842658
coeff_var=-43.3324856597942
sum=-33.837549277"""
r3univar_test_grad_z = """
n=600
null_cells=0
cells=600
min=0.00643308800000026
max=0.0967259644999999
range=0.0902928764999997
mean=0.0336457494116667
mean_of_abs=0.0336457494116667
stddev=0.0186882020765464
variance=0.000349248896853835
coeff_var=55.5440208743464
sum=20.187449647
"""
r3univar_test_nulls_grad_x = """
n=107
null_cells=18
cells=125
min=0
max=10
range=10
mean=3.70093457943925
mean_of_abs=3.70093457943925
stddev=3.6357902977452
variance=13.2189710891781
coeff_var=98.2397883481656
sum=396
"""
r3univar_test_nulls_grad_y = """
n=107
null_cells=18
cells=125
min=-10
max=0
range=10
mean=-3.70093457943925
mean_of_abs=3.70093457943925
stddev=3.6357902977452
variance=13.2189710891781
coeff_var=-98.2397883481656
sum=-396
"""
r3univar_test_nulls_grad_z = """
n=107
null_cells=18
cells=125
min=0
max=10
range=10
mean=3.70093457943925
mean_of_abs=3.70093457943925
stddev=3.6357902977452
variance=13.2189710891781
coeff_var=98.2397883481656
sum=396
"""
class GradientTest(TestCase):
@classmethod
def setUpClass(cls):
"""Use temporary region settings"""
cls.use_temp_region()
cls.runModule("g.region", res3=10, n=100, s=0, w=0, e=120, b=0, t=50)
cls.runModule("r3.in.ascii", input="data/test_map_1", output="test_map_1_ref")
cls.runModule("g.region", res3=1, n=5, s=0, w=0, e=5, b=0, t=5)
cls.runModule("r3.in.ascii", input="data/test_map_2", output="test_map_2_ref")
@classmethod
def tearDownClass(cls):
"""!Remove the temporary region"""
cls.del_temp_region()
cls.runModule(
"g.remove",
flags="f",
type="raster_3d",
name=",".join(
[
"test_map_1_ref",
"test_map_2_ref",
"test_grad_x",
"test_grad_y",
"test_grad_z",
"test_null_grad_x",
"test_null_grad_y",
"test_null_grad_z",
]
),
)
def test_gradient_runs(self):
self.runModule("g.region", res3=10, n=100, s=0, w=0, e=120, b=0, t=50)
self.assertModuleFail(
"r3.gradient",
input="test_map_1_ref",
output=["test_grad_x", "test_grad_y"],
overwrite=True,
)
self.assertModule(
"r3.gradient",
input="test_map_1_ref",
output=["test_grad_x", "test_grad_y", "test_grad_z"],
overwrite=True,
)
def test_gradient(self):
self.runModule("g.region", res3=10, n=100, s=0, w=0, e=120, b=0, t=50)
self.runModule(
"r3.gradient",
input="test_map_1_ref",
output=["test_grad_x", "test_grad_y", "test_grad_z"],
overwrite=True,
)
self.assertRaster3dFitsUnivar(
raster="test_grad_x", reference=r3univar_test_grad_x, precision=1e-8
)
self.assertRaster3dFitsUnivar(
raster="test_grad_y", reference=r3univar_test_grad_y, precision=1e-8
)
self.assertRaster3dFitsUnivar(
raster="test_grad_z", reference=r3univar_test_grad_z, precision=1e-8
)
def test_gradient_block(self):
self.runModule("g.region", res3=10, n=100, s=0, w=0, e=120, b=0, t=50)
self.assertModule(
"r3.gradient",
input="test_map_1_ref",
blocksize=[200, 2, 50],
output=["test_grad_x", "test_grad_y", "test_grad_z"],
overwrite=True,
)
self.assertRaster3dFitsUnivar(
raster="test_grad_x", reference=r3univar_test_grad_x, precision=1e-8
)
self.assertRaster3dFitsUnivar(
raster="test_grad_y", reference=r3univar_test_grad_y, precision=1e-8
)
self.assertRaster3dFitsUnivar(
raster="test_grad_z", reference=r3univar_test_grad_z, precision=1e-8
)
def METHOD_NAME(self):
self.runModule("g.region", res3=1, n=5, s=0, w=0, e=5, b=0, t=5)
self.assertModule(
"r3.gradient",
input="test_map_2_ref",
blocksize=[200, 2, 50],
output=["test_null_grad_x", "test_null_grad_y", "test_null_grad_z"],
)
self.assertRaster3dFitsUnivar(
raster="test_null_grad_x",
reference=r3univar_test_nulls_grad_x,
precision=1e-8,
)
self.assertRaster3dFitsUnivar(
raster="test_null_grad_y",
reference=r3univar_test_nulls_grad_y,
precision=1e-8,
)
self.assertRaster3dFitsUnivar(
raster="test_null_grad_z",
reference=r3univar_test_nulls_grad_z,
precision=1e-8,
)
if __name__ == "__main__":
test()
| 3,903 | test remote blob create |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2020-2023 Alibaba Group Holding Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import numpy as np
import pytest
import vineyard
from vineyard import RemoteBlobBuilder
from vineyard import RPCClient
from vineyard.core import default_builder_context
from vineyard.core import default_resolver_context
from vineyard.data import register_builtin_types
register_builtin_types(default_builder_context, default_resolver_context)
payload = b'abcdefgh1234567890uvwxyz'
large_payload = b'abcdefgh1234567890uvwxyz' * 10_000_000
def METHOD_NAME(vineyard_client, vineyard_endpoint):
vineyard_rpc_client = vineyard.connect(*vineyard_endpoint.split(':'))
buffer_writer = RemoteBlobBuilder(len(payload))
buffer_writer.copy(0, payload)
blob_id = vineyard_rpc_client.create_remote_blob(buffer_writer)
# get as local blob
local_blob = vineyard_client.get_blob(blob_id)
# check local blob
assert local_blob.id == blob_id
assert local_blob.size == len(payload)
assert memoryview(local_blob) == memoryview(payload)
def test_remote_blob_get(vineyard_client, vineyard_endpoint):
vineyard_rpc_client: RPCClient = vineyard.connect(*vineyard_endpoint.split(':'))
buffer_writer = vineyard_client.create_blob(len(payload))
buffer_writer.copy(0, payload)
blob = buffer_writer.seal(vineyard_client)
# get as remote blob
remote_blob = vineyard_rpc_client.get_remote_blob(blob.id)
# check remote blob
assert remote_blob.id == blob.id
assert remote_blob.size == blob.size
assert remote_blob.size == len(payload)
assert memoryview(remote_blob) == memoryview(blob)
assert memoryview(remote_blob) == memoryview(payload)
def test_remote_blob_create_and_get(vineyard_endpoint):
vineyard_rpc_client = vineyard.connect(*vineyard_endpoint.split(':'))
buffer_writer = RemoteBlobBuilder(len(payload))
buffer_writer.copy(0, payload)
blob_id = vineyard_rpc_client.create_remote_blob(buffer_writer)
# get as remote blob
remote_blob = vineyard_rpc_client.get_remote_blob(blob_id)
# check remote blob
assert remote_blob.id == blob_id
assert remote_blob.size == len(payload)
assert memoryview(remote_blob) == memoryview(payload)
def test_remote_blob_create_and_get_large_object(vineyard_endpoint):
vineyard_rpc_client = vineyard.connect(*vineyard_endpoint.split(':'))
# allocate & copy
buffer_writer = RemoteBlobBuilder(len(large_payload))
buffer_writer.copy(0, large_payload)
blob_id = vineyard_rpc_client.create_remote_blob(buffer_writer)
# get as remote blob
remote_blob = vineyard_rpc_client.get_remote_blob(blob_id)
# check remote blob
assert remote_blob.id == blob_id
assert remote_blob.size == len(large_payload)
assert memoryview(remote_blob) == memoryview(large_payload)
# wrap
buffer_writer = RemoteBlobBuilder.wrap(large_payload)
blob_id = vineyard_rpc_client.create_remote_blob(buffer_writer)
# get as remote blob
remote_blob = vineyard_rpc_client.get_remote_blob(blob_id)
# check remote blob
assert remote_blob.id == blob_id
assert remote_blob.size == len(large_payload)
assert memoryview(remote_blob) == memoryview(large_payload)
def test_remote_blob_error(vineyard_endpoint):
vineyard_rpc_client = vineyard.connect(*vineyard_endpoint.split(':'))
with pytest.raises(
ValueError, match="Vineyard RPC client cannot be used to create local blobs"
):
vineyard_rpc_client.put(np.ones((2, 3, 4)))
def test_multiple_remote_blobs(vineyard_endpoint):
vineyard_rpc_client = vineyard.connect(*vineyard_endpoint.split(':'))
for i in range(4):
print(f"Writing {i} blob")
payload = os.urandom(1024 * 1024 * 100)
blob_builder = vineyard.RemoteBlobBuilder(len(payload))
blob_builder.copy(0, payload)
have_written = False
while not have_written:
try:
blob_id = vineyard_rpc_client.create_remote_blob(blob_builder)
have_written = True
print(f"Successfully written {i} blob")
remote_blob = vineyard_rpc_client.get_remote_blob(blob_id)
assert remote_blob.size == len(payload)
print(f"Successfully read {i} blob")
except vineyard.NotEnoughMemoryException:
print(f"Not enough memory, retrying {i} blob")
continue
| 3,904 | test ignore duplicates |
from urllib.parse import parse_qs
from django.conf import settings
from django.db.utils import IntegrityError
from django.template import Context
from django.test import TestCase, RequestFactory
from django.urls import reverse
from rest_framework import status
from wagtail.core.models import PageViewRestriction, Site
from unittest.mock import patch
from home.models import SVGToPNGMap
from home.templatetags.generic_components import google_analytics
from iogt_users.factories import UserFactory, GroupFactory
from home.factories import ArticleFactory, HomePageFactory
from wagtail_factories import SiteFactory
class PageViewGroupPermissionTests(TestCase):
def setUp(self):
self.user = UserFactory()
Site.objects.all().delete()
self.site = SiteFactory(site_name='IoGT', port=8000, is_default_site=True)
self.home_page = HomePageFactory(parent=self.site.root_page)
self.group_restricted_article = ArticleFactory(parent=self.home_page)
view_restriction = PageViewRestriction.objects.create(
page=self.group_restricted_article, restriction_type=PageViewRestriction.GROUPS)
self.allowed_group = GroupFactory(name='Allowed group')
view_restriction.groups.add(self.allowed_group)
def test_group_limited_article_without_login_redirects_to_login_page(self):
response = self.client.get(self.group_restricted_article.url)
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
self.assertEqual(
f'{reverse("account_login")}?next={self.group_restricted_article.url}', response.url)
def test_group_limited_article_without_group_returns_403(self):
self.client.login(username=self.user.username, password='test@123')
response = self.client.get(self.group_restricted_article.url)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_group_limited_article_with_group_user_returns_200(self):
self.user.groups.add(self.allowed_group)
self.client.login(username=self.user.username, password='test@123')
response = self.client.get(self.group_restricted_article.url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
class SVGToPNGMapTests(TestCase):
def setUp(self) -> None:
self.svg_path = 'static/icons/search.svg'
def test_create_png_if_not_found(self):
png = SVGToPNGMap.get_png_image(self.svg_path)
expected_path_regex = ''.join([
settings.MEDIA_ROOT,
r"/svg-to-png-maps/svg-to-png.*\.png"
])
self.assertRegex(png.path, expected_path_regex)
self.assertGreater(png.size, 0)
png_2 = SVGToPNGMap.get_png_image(self.svg_path)
self.assertEquals(png, png_2)
def METHOD_NAME(self):
png = SVGToPNGMap.get_png_image(self.svg_path)
duplicate = {
'svg_path': self.svg_path,
'fill_color': None,
'stroke_color': None,
'png_image_file': png
}
SVGToPNGMap.objects.create(**duplicate)
SVGToPNGMap.objects.create(**duplicate)
count = SVGToPNGMap.objects.filter(
svg_path=self.svg_path,
fill_color=None,
stroke_color=None
).count()
self.assertEquals(count, 2)
png_2 = SVGToPNGMap.get_png_image(self.svg_path, None, None)
self.assertEquals(png, png_2)
@patch.object(SVGToPNGMap, 'create')
def test_get_png_must_not_fail(self, create):
create.side_effect = Exception('boom')
png = SVGToPNGMap.get_png_image(self.svg_path)
self.assertIsNone(png)
def test_uniqueness_unspecified_stroke_and_fill(self):
SVGToPNGMap.create(self.svg_path)
with self.assertRaises(IntegrityError):
SVGToPNGMap.create(self.svg_path)
def test_uniqueness_no_stroke_and_fill(self):
SVGToPNGMap.create(self.svg_path, None, None)
with self.assertRaises(IntegrityError):
SVGToPNGMap.create(self.svg_path, None, None)
def test_uniqueness_fill_no_stroke(self):
SVGToPNGMap.create(
self.svg_path,
fill_color='#a1b2c3',
stroke_color=None
)
with self.assertRaises(IntegrityError):
SVGToPNGMap.create(
self.svg_path,
fill_color='#a1b2c3',
stroke_color=None
)
def test_uniqueness_stroke_no_fill(self):
SVGToPNGMap.create(self.svg_path, fill_color=None, stroke_color='#fff')
with self.assertRaises(IntegrityError):
SVGToPNGMap.create(
self.svg_path,
fill_color=None,
stroke_color='#fff'
)
def test_uniqueness_stroke_and_fill(self):
SVGToPNGMap.create(
self.svg_path,
fill_color='#555',
stroke_color='#666'
)
with self.assertRaises(IntegrityError):
SVGToPNGMap.create(
self.svg_path,
fill_color='#555',
stroke_color='#666'
)
class GoogleAnalyticsTagsTestCase(TestCase):
def setUp(self):
self.request_factory = RequestFactory()
def test_query_param_without_value(self):
request = self.request_factory.get('/en/?test')
context = Context({'request': request})
rendered_template = google_analytics(context, tracking_code='my-code')
parsed_qs = parse_qs(rendered_template)
self.assertEqual(parsed_qs['tracking_code'][0], "my-code")
self.assertEqual(parsed_qs['p'][0], "/en/?test=")
def test_query_param_with_value(self):
request = self.request_factory.get('/en/?test=abc')
context = Context({'request': request})
rendered_template = google_analytics(context, tracking_code='my-code')
parsed_qs = parse_qs(rendered_template)
self.assertEqual(parsed_qs['tracking_code'][0], "my-code")
self.assertEqual(parsed_qs['p'][0], "/en/?test=abc")
def test_query_param_with_multiple_values_with_same_key(self):
request = self.request_factory.get('/en/?test=abc&test=xyz')
context = Context({'request': request})
rendered_template = google_analytics(context, tracking_code='my-code')
parsed_qs = parse_qs(rendered_template)
self.assertEqual(parsed_qs['tracking_code'][0], "my-code")
self.assertEqual(parsed_qs['p'][0], "/en/?test=abc&test=xyz")
def test_query_param_with_utm(self):
request = self.request_factory.get(
'/en/?utm_content=content&utm_term=term&utm_source=source&utm_medium=medium&utm_campaign=campaign')
context = Context({'request': request})
rendered_template = google_analytics(context, tracking_code='my-code')
parsed_qs = parse_qs(rendered_template)
self.assertEqual(parsed_qs['tracking_code'][0], "my-code")
self.assertEqual(parsed_qs['p'][0], "/en/")
self.assertEqual(parsed_qs['utm_content'][0], "content")
self.assertEqual(parsed_qs['utm_term'][0], "term")
self.assertEqual(parsed_qs['utm_source'][0], "source")
self.assertEqual(parsed_qs['utm_medium'][0], "medium")
self.assertEqual(parsed_qs['utm_campaign'][0], "campaign")
| 3,905 | node children |
#!/usr/bin/python
# vim: set fileencoding=utf-8
from clang.cindex import *
import asciitree # must be version 0.2
import sys
TOP_NODE_TYPES = [
CursorKind.ENUM_DECL,
CursorKind.STRUCT_DECL ]
CHILD_NODE_TYPES = [
CursorKind.FIELD_DECL,
CursorKind.UNION_DECL,
#CursorKind.STRUCT_DECL,
CursorKind.ENUM_CONSTANT_DECL ]
def filter_node(node):
if node is None:
return False
# # no node or no location
# if node is None or node.location is None or node.location.file is None:
# return False
# # not the main source file
# if node.location.file.name != sys.argv[1]:
# return False
if node.kind in CHILD_NODE_TYPES:
return True
return False
def get_struct_decl(node):
for c in node.get_children():
if c is not None and c.kind == CursorKind.STRUCT_DECL:
return c
return None
def METHOD_NAME(node):
if node is None:
return []
if isinstance(node,list):
return node
if node.kind == CursorKind.FIELD_DECL:
if node.type.kind == TypeKind.ELABORATED:
return METHOD_NAME(node.type.get_declaration())
elif node.type.kind == TypeKind.RECORD:
return list(c for c in node.type.get_fields())
elif node.type.kind == TypeKind.CONSTANTARRAY:
et = node.type.element_type
if et.kind == TypeKind.RECORD:
return list(c for c in et.get_fields())
elif node.type.kind == TypeKind.ENUM:
return METHOD_NAME(node.type.get_declaration())
else:
print("{} {}".format(str(node.type.kind),node.spelling))
return list(c for c in node.get_children() if filter_node(c))
def get_array_size(f):
et = f.type.element_type
ec = f.type.element_count
if et.spelling == 'char':
return 'string({})'.format(ec)
else:
return '{} x {} bytes'.format(ec,et.get_size())
def get_field_size(f):
if f.is_bitfield():
return '{} bits'.format(f.get_bitfield_width())
elif f.type.kind == TypeKind.CONSTANTARRAY:
return get_array_size(f)
return '{} bytes'.format(f.type.get_size())
def print_field_decl(kind, text, f):
ft = ''
is_anon = f.is_anonymous()
if f.type.kind == TypeKind.RECORD:
decl = f.type.get_declaration()
if decl.kind == CursorKind.UNION_DECL:
if is_anon:
ft = 'anonymous union ({})'.format(decl.mangled_name)
else:
ft = 'union {}'.format(f.type.spelling)
elif decl.kind == CursorKind.STRUCT_DECL:
if is_anon:
ft = '{} anonymous struct'.format(f.kind)
else:
ft = 'struct {}'.format(f.type.spelling)
elif f.type.kind == TypeKind.TYPEDEF:
decl = f.type.get_canonical()
ft = decl.spelling
elif get_struct_decl(f) is not None:
ft = 'anon struct'
elif f.type.kind == TypeKind.CONSTANTARRAY:
et = f.type.element_type
ft = et.spelling
else:
ft = f.type.spelling
return '{} {} ({})'.format(ft, text, get_field_size(f))
def print_enum_decl(kind, text, e):
return '{} {} ({})'.format(kind, text, e.enum_value)
def print_union_decl(kind, text, f):
return '{} {} ({})'.format(kind, text, get_field_size(f))
def print_node(node):
if isinstance(node,list):
return 'ROOT'
text = node.spelling or node.displayname
kind = str(node.kind)[str(node.kind).index('.')+1:]
if CursorKind.FIELD_DECL == node.kind:
return print_field_decl(kind, text, node)
if CursorKind.ENUM_CONSTANT_DECL == node.kind:
return print_enum_decl(kind, text, node)
if CursorKind.UNION_DECL == node.kind:
return print_union_decl(kind, text, node)
return '{} {}'.format(kind, text)
def get_top_node(name, node):
for c in node.get_children():
if c.spelling == name:
# struct found
return c
print("get_top_node: '{}' not found!".format(name))
return None
def get_top_nodes(node):
l = list()
for c in node.get_children():
# no node or no location
if c is None or c.location is None or c.location.file is None:
continue
# not the main source file
if c.location.file.name != sys.argv[1]:
continue
if c.kind in TOP_NODE_TYPES:
# struct found
l.append(c)
return l
def print_top_node(name):
s = get_top_node(name, translation_unit.cursor)
if s is None:
return
print(asciitree.draw_tree(s, METHOD_NAME, print_node))
def print_all():
s = get_top_nodes(translation_unit.cursor)
print(asciitree.draw_tree(s, METHOD_NAME, print_node))
### Main ###
if len(sys.argv) < 3:
print("Usage: dump_ast_yaml.py [header file name] [top node] [additional compile args]")
sys.exit()
Config.set_library_file('/usr/local/Cellar/llvm/6.0.0/lib/libclang.dylib')
index = Index.create()
translation_unit = index.parse(sys.argv[1], ['-x', 'c++', '-std=c++11'] + sys.argv[3:])
print_top_node(sys.argv[2])
#print_top_node('CustomFunctionData')
#print_all()
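# Illustrative invocation (struct name and include path are made up; the
# libclang path above is specific to one Homebrew LLVM install and may need
# adjusting):
#   python dump_ast_yaml.py my_messages.h TelemetryPacket -Iinclude
# prints an asciitree of TelemetryPacket's fields with their computed sizes.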
| 3,906 | register plugin |
import os
from loguru import logger
from flexget import plugin
from flexget.event import event
from flexget.utils.pathscrub import pathscrub
from flexget.utils.template import RenderError
logger = logger.bind(name='symlink')
class Symlink:
schema = {
'oneOf': [
{
'title': 'specify options',
'type': 'object',
'properties': {
'to': {'type': 'string', 'format': 'path'},
'rename': {'type': 'string'},
'existing': {'type': 'string', 'enum': ['ignore', 'fail']},
'link_type': {'type': 'string', 'enum': ['soft', 'hard']},
},
'required': ['to'],
'additionalProperties': False,
},
{'title': 'specify path', 'type': 'string', 'format': 'path'},
]
}
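# Two illustrative task configs accepted by this schema (field names per the
# schema above; paths and the rename template are placeholders):
#
#   symlink: /mnt/library            # shorthand form: just the target path
#
#   symlink:
#     to: /mnt/library
#     rename: '{{ title }}'
#     existing: ignore
#     link_type: hard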
def prepare_config(self, config):
if not isinstance(config, dict):
config = {'to': config}
config.setdefault('existing', 'fail')
config.setdefault('link_type', 'soft')
return config
@plugin.priority(0)
def on_task_output(self, task, config):
if not config:
return
config = self.prepare_config(config)
existing = config['existing']
for entry in task.accepted:
if 'location' not in entry:
entry.fail('Does not have location field for symlinking')
continue
linkfrom = entry['location']
linkfrom_path, linkfrom_name = os.path.split(linkfrom)
# get the proper path and name in order of: entry, config, above split
linkto_path = entry.get('link_to', config.get('to', linkfrom_path))
if config.get('rename'):
linkto_name = config['rename']
elif entry.get('filename') and entry['filename'] != linkfrom_name:
# entry specifies different filename than what was split from the path
# since some inputs fill in filename it must be different in order to be used
linkto_name = entry['filename']
else:
linkto_name = linkfrom_name
try:
linkto_path = entry.render(linkto_path)
except RenderError as err:
raise plugin.PluginError(
f'Path value replacement `{linkto_path}` failed: {err.args[0]}'
)
try:
linkto_name = entry.render(linkto_name)
except RenderError as err:
raise plugin.PluginError(
f'Filename value replacement `{linkto_name}` failed: {err.args[0]}'
)
# Clean invalid characters with pathscrub plugin
linkto_path = pathscrub(os.path.expanduser(linkto_path))
linkto_name = pathscrub(linkto_name, filename=True)
# Join path and filename
linkto = os.path.join(linkto_path, linkto_name)
if linkto == entry['location']:
raise plugin.PluginWarning('source and destination are the same.')
# Hardlinks for dirs will not be failed here
if os.path.exists(linkto) and (
config['link_type'] == 'soft' or os.path.isfile(linkfrom)
):
msg = 'Symlink destination %s already exists' % linkto
if existing == 'ignore':
logger.verbose(msg)
else:
entry.fail(msg)
continue
logger.verbose('{}link `{}` to `{}`', config['link_type'], linkfrom, linkto)
try:
if config['link_type'] == 'soft':
os.symlink(linkfrom, linkto)
else:
if os.path.isdir(linkfrom):
self.hard_link_dir(linkfrom, linkto, existing)
else:
dirname = os.path.dirname(linkto)
if not os.path.exists(dirname):
os.makedirs(dirname)
os.link(linkfrom, linkto)
except OSError as e:
entry.fail('Failed to create {}link, {}'.format(config['link_type'], e))
def hard_link_dir(self, path, destination, existing):
if not os.path.exists(destination):
try:
os.makedirs(destination)
except OSError as e:
# Raised when it already exists, but are there other cases?
logger.debug('Failed to create destination dir {}: {}', destination, e)
# 'recursively' traverse and hard link
working_dir = os.getcwd()
os.chdir(path) # change working dir to make dir joins easier
for root, dirs, files in os.walk('.'):
dst_dir = os.path.abspath(os.path.join(destination, root))
for d in dirs:
try:
os.mkdir(d)
except OSError as e:
# Raised when it already exists, but are there other cases?
logger.debug('Failed to create subdir {}: {}', d, e)
for f in files:
src_file = os.path.join(root, f)
dst_file = os.path.join(dst_dir, f)
logger.debug('Hardlinking {} to {}', src_file, dst_file)
try:
os.link(src_file, dst_file)
except OSError as e:
logger.debug('Failed to create hardlink for file {}: {}', f, e)
if existing == 'fail':
raise # reraise to fail the entry in the calling function
os.chdir(working_dir)
@event('plugin.register')
def METHOD_NAME():
if os.name == 'nt':
logger.trace('Symlinks not supported on Windows. Skipping Symlink plugin register.')
return
plugin.register(Symlink, 'symlink', api_ver=2)
| 3,907 | test coverage |
#!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2023
# Leandro Toledo de Souza <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
import pickle
from collections import defaultdict
import pytest
from telegram.error import (
BadRequest,
ChatMigrated,
Conflict,
Forbidden,
InvalidToken,
NetworkError,
PassportDecryptionError,
RetryAfter,
TelegramError,
TimedOut,
)
from telegram.ext import InvalidCallbackData
from tests.auxil.slots import mro_slots
class TestErrors:
def test_telegram_error(self):
with pytest.raises(TelegramError, match="^test message$"):
raise TelegramError("test message")
with pytest.raises(TelegramError, match="^Test message$"):
raise TelegramError("Error: test message")
with pytest.raises(TelegramError, match="^Test message$"):
raise TelegramError("[Error]: test message")
with pytest.raises(TelegramError, match="^Test message$"):
raise TelegramError("Bad Request: test message")
def test_unauthorized(self):
with pytest.raises(Forbidden, match="test message"):
raise Forbidden("test message")
with pytest.raises(Forbidden, match="^Test message$"):
raise Forbidden("Error: test message")
with pytest.raises(Forbidden, match="^Test message$"):
raise Forbidden("[Error]: test message")
with pytest.raises(Forbidden, match="^Test message$"):
raise Forbidden("Bad Request: test message")
def test_invalid_token(self):
with pytest.raises(InvalidToken, match="Invalid token"):
raise InvalidToken
def test_network_error(self):
with pytest.raises(NetworkError, match="test message"):
raise NetworkError("test message")
with pytest.raises(NetworkError, match="^Test message$"):
raise NetworkError("Error: test message")
with pytest.raises(NetworkError, match="^Test message$"):
raise NetworkError("[Error]: test message")
with pytest.raises(NetworkError, match="^Test message$"):
raise NetworkError("Bad Request: test message")
def test_bad_request(self):
with pytest.raises(BadRequest, match="test message"):
raise BadRequest("test message")
with pytest.raises(BadRequest, match="^Test message$"):
raise BadRequest("Error: test message")
with pytest.raises(BadRequest, match="^Test message$"):
raise BadRequest("[Error]: test message")
with pytest.raises(BadRequest, match="^Test message$"):
raise BadRequest("Bad Request: test message")
def test_timed_out(self):
with pytest.raises(TimedOut, match="^Timed out$"):
raise TimedOut
def test_chat_migrated(self):
with pytest.raises(ChatMigrated, match="New chat id: 1234") as e:
raise ChatMigrated(1234)
assert e.value.new_chat_id == 1234
def test_retry_after(self):
with pytest.raises(RetryAfter, match="Flood control exceeded. Retry in 12 seconds"):
raise RetryAfter(12)
def test_conflict(self):
with pytest.raises(Conflict, match="Something something."):
raise Conflict("Something something.")
@pytest.mark.parametrize(
("exception", "attributes"),
[
(TelegramError("test message"), ["message"]),
(Forbidden("test message"), ["message"]),
(InvalidToken(), ["message"]),
(NetworkError("test message"), ["message"]),
(BadRequest("test message"), ["message"]),
(TimedOut(), ["message"]),
(ChatMigrated(1234), ["message", "new_chat_id"]),
(RetryAfter(12), ["message", "retry_after"]),
(Conflict("test message"), ["message"]),
(PassportDecryptionError("test message"), ["message"]),
(InvalidCallbackData("test data"), ["callback_data"]),
],
)
def test_errors_pickling(self, exception, attributes):
pickled = pickle.dumps(exception)
unpickled = pickle.loads(pickled)
assert type(unpickled) is type(exception)
assert str(unpickled) == str(exception)
for attribute in attributes:
assert getattr(unpickled, attribute) == getattr(exception, attribute)
@pytest.mark.parametrize(
"inst",
[
(TelegramError("test message")),
(Forbidden("test message")),
(InvalidToken()),
(NetworkError("test message")),
(BadRequest("test message")),
(TimedOut()),
(ChatMigrated(1234)),
(RetryAfter(12)),
(Conflict("test message")),
(PassportDecryptionError("test message")),
(InvalidCallbackData("test data")),
],
)
def test_slot_behaviour(self, inst):
for attr in inst.__slots__:
assert getattr(inst, attr, "err") != "err", f"got extra slot '{attr}'"
assert len(mro_slots(inst)) == len(set(mro_slots(inst))), "duplicate slot"
def METHOD_NAME(self):
"""
This test is only here to make sure that new errors will override __reduce__ and set
__slots__ properly.
Add the new error class to the below covered_subclasses dict, if it's covered in the above
test_errors_pickling and test_slot_behaviour tests.
"""
def make_assertion(cls):
assert set(cls.__subclasses__()) == covered_subclasses[cls]
for subcls in cls.__subclasses__():
make_assertion(subcls)
covered_subclasses = defaultdict(set)
covered_subclasses.update(
{
TelegramError: {
Forbidden,
InvalidToken,
NetworkError,
ChatMigrated,
RetryAfter,
Conflict,
PassportDecryptionError,
InvalidCallbackData,
},
NetworkError: {BadRequest, TimedOut},
}
)
make_assertion(TelegramError)
def test_string_representations(self):
"""We just randomly test a few of the subclasses - should suffice"""
e = TelegramError("This is a message")
assert repr(e) == "TelegramError('This is a message')"
assert str(e) == "This is a message"
e = RetryAfter(42)
assert repr(e) == "RetryAfter('Flood control exceeded. Retry in 42 seconds')"
assert str(e) == "Flood control exceeded. Retry in 42 seconds"
e = BadRequest("This is a message")
assert repr(e) == "BadRequest('This is a message')"
assert str(e) == "This is a message"
| 3,908 | test matrices phlti |
# This file is part of the pyMOR project (https://www.pymor.org).
# Copyright pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (https://opensource.org/licenses/BSD-2-Clause)
import os
import tempfile
import numpy as np
import pytest
import scipy.io as spio
import scipy.sparse as sps
from pymor.models.iosys import LTIModel, PHLTIModel, SecondOrderModel
pytestmark = pytest.mark.builtin
def _build_matrices_lti(with_D, with_E):
A = sps.csc_matrix([[1, 2], [3, 4]])
B = np.array([[1], [2]])
C = np.array([[1, 2]])
D = np.array([[1]]) if with_D else None
E = np.array([[5, 6], [7, 8]]) if with_E else None
return A, B, C, D, E
def _test_matrices_lti(A, B, C, D, E,
A2, B2, C2, D2, E2,
with_D, with_E):
assert np.allclose(A.toarray(), A2.toarray())
assert np.allclose(B, B2)
assert np.allclose(C, C2)
if with_D:
assert np.allclose(D, D2)
else:
assert D2 is None
if with_E:
assert np.allclose(E, E2)
else:
assert E2 is None
def _build_matrices_phlti(with_P, with_S, with_N, with_E, with_Q):
J = sps.csc_matrix([[0, -1], [1, 0]])
R = sps.csc_matrix([[1, 0], [0, 1]])
G = np.array([[1], [0]])
P = np.array([[2], [0]]) if with_P else None
S = np.array([[1]]) if with_S else None
N = np.array([[0]]) if with_N else None
E = np.array([[1, 0], [0, 2]]) if with_E else None
Q = np.array([[2, 0], [0, 1]]) if with_Q else None
return J, R, G, P, S, N, E, Q
def _test_matrices_phlti(J, R, G, P, S, N, E, Q,
J2, R2, G2, P2, S2, N2, E2, Q2,
with_P, with_S, with_N, with_E, with_Q):
assert np.allclose(J.toarray(), J2.toarray())
assert np.allclose(R.toarray(), R2.toarray())
assert np.allclose(G, G2)
if with_P:
assert np.allclose(P, P2)
else:
assert P2 is None
if with_S:
assert np.allclose(S, S2)
else:
assert S2 is None
if with_N:
assert np.allclose(N, N2)
else:
assert N2 is None
if with_E:
assert np.allclose(E, E2)
else:
assert E2 is None
if with_Q:
assert np.allclose(Q, Q2)
else:
assert Q2 is None
def _build_matrices_so(with_Cv, with_D):
M = sps.csc_matrix([[1, 2], [3, 4]])
E = np.array([[5, 6], [7, 8]])
K = np.array([[9, 10], [11, 12]])
B = np.array([[1], [2]])
Cp = np.array([[1, 2]])
Cv = np.array([[3, 4]]) if with_Cv else None
D = np.array([[1]]) if with_D else None
return M, E, K, B, Cp, Cv, D
def _test_matrices_so(M, E, K, B, Cp, Cv, D,
M2, E2, K2, B2, Cp2, Cv2, D2,
with_Cv, with_D):
assert np.allclose(M.toarray(), M2.toarray())
assert np.allclose(E, E2)
assert np.allclose(K, K2)
assert np.allclose(B, B2)
assert np.allclose(Cp, Cp2)
if with_Cv:
assert np.allclose(Cv, Cv2)
else:
assert Cv2 is None
if with_D:
assert np.allclose(D, D2)
else:
assert D2 is None
@pytest.mark.parametrize('with_D', [False, True])
@pytest.mark.parametrize('with_E', [False, True])
def test_matrices_lti(with_D, with_E):
matrices = _build_matrices_lti(with_D, with_E)
lti = LTIModel.from_matrices(*matrices)
matrices2 = lti.to_matrices()
_test_matrices_lti(*matrices, *matrices2, with_D, with_E)
@pytest.mark.parametrize('with_D', [False, True])
@pytest.mark.parametrize('with_E', [False, True])
def test_files_lti(with_D, with_E):
matrices = _build_matrices_lti(with_D, with_E)
lti = LTIModel.from_matrices(*matrices)
with tempfile.TemporaryDirectory() as tmpdirname:
files = (
os.path.join(tmpdirname, 'A.mtx'),
os.path.join(tmpdirname, 'B.mtx'),
os.path.join(tmpdirname, 'C.mtx'),
os.path.join(tmpdirname, 'D.mtx') if with_D else None,
os.path.join(tmpdirname, 'E.mtx') if with_E else None,
)
lti.to_files(*files)
lti2 = LTIModel.from_files(*files)
matrices2 = lti2.to_matrices()
_test_matrices_lti(*matrices, *matrices2, with_D, with_E)
@pytest.mark.parametrize('with_D', [False, True])
@pytest.mark.parametrize('with_E', [False, True])
def test_mat_file_lti(with_D, with_E):
matrices = _build_matrices_lti(with_D, with_E)
assert all(np.issubdtype(mat.dtype, np.integer) for mat in matrices if mat is not None)
lti = LTIModel.from_matrices(*matrices)
with tempfile.TemporaryDirectory() as tmpdirname:
file_name = os.path.join(tmpdirname, 'lti')
lti.to_mat_file(file_name)
lti2 = LTIModel.from_mat_file(file_name)
matrices2 = lti2.to_matrices()
_test_matrices_lti(*matrices, *matrices2, with_D, with_E)
assert all(np.issubdtype(mat.dtype, np.floating) for mat in matrices2 if mat is not None)
def test_mat_file_lti_C():
A, B, _, _, _ = _build_matrices_lti(False, False)
with tempfile.TemporaryDirectory() as tmpdirname:
file_name = os.path.join(tmpdirname, 'lti')
spio.savemat(file_name, {'A': A, 'B': B})
lti2 = LTIModel.from_mat_file(file_name)
matrices2 = lti2.to_matrices()
_test_matrices_lti(A, B, B.T, None, None, *matrices2, False, False)
@pytest.mark.parametrize('with_D', [False, True])
@pytest.mark.parametrize('with_E', [False, True])
def test_abcde_files(with_D, with_E):
matrices = _build_matrices_lti(with_D, with_E)
lti = LTIModel.from_matrices(*matrices)
with tempfile.TemporaryDirectory() as tmpdirname:
files_basename = os.path.join(tmpdirname, 'lti')
lti.to_abcde_files(files_basename)
lti2 = LTIModel.from_abcde_files(files_basename)
matrices2 = lti2.to_matrices()
_test_matrices_lti(*matrices, *matrices2, with_D, with_E)
@pytest.mark.parametrize('with_P', [False, True])
@pytest.mark.parametrize('with_S', [False, True])
@pytest.mark.parametrize('with_N', [False, True])
@pytest.mark.parametrize('with_E', [False, True])
@pytest.mark.parametrize('with_Q', [False, True])
def METHOD_NAME(with_P, with_S, with_N, with_E, with_Q):
matrices = _build_matrices_phlti(with_P, with_S, with_N, with_E, with_Q)
phlti = PHLTIModel.from_matrices(*matrices)
matrices2 = phlti.to_matrices()
_test_matrices_phlti(*matrices, *matrices2, with_P, with_S, with_N, with_E, with_Q)
@pytest.mark.parametrize('with_Cv', [False, True])
@pytest.mark.parametrize('with_D', [False, True])
def test_matrices_so(with_Cv, with_D):
matrices = _build_matrices_so(with_Cv, with_D)
som = SecondOrderModel.from_matrices(*matrices)
matrices2 = som.to_matrices()
_test_matrices_so(*matrices, *matrices2, with_Cv, with_D)
@pytest.mark.parametrize('with_Cv', [False, True])
@pytest.mark.parametrize('with_D', [False, True])
def test_files_so(with_Cv, with_D):
matrices = _build_matrices_so(with_Cv, with_D)
som = SecondOrderModel.from_matrices(*matrices)
with tempfile.TemporaryDirectory() as tmpdirname:
files = (
os.path.join(tmpdirname, 'M.mtx'),
os.path.join(tmpdirname, 'E.mtx'),
os.path.join(tmpdirname, 'K.mtx'),
os.path.join(tmpdirname, 'B.mtx'),
os.path.join(tmpdirname, 'Cp.mtx'),
os.path.join(tmpdirname, 'Cv.mtx') if with_Cv else None,
os.path.join(tmpdirname, 'D.mtx') if with_D else None,
)
som.to_files(*files)
som2 = SecondOrderModel.from_files(*files)
matrices2 = som2.to_matrices()
_test_matrices_so(*matrices, *matrices2, with_Cv, with_D)
| 3,909 | test prediction proba unify |
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
import os
import sys
import unittest
# noinspection PyProtectedMember
from numpy.testing import assert_equal
from numpy.testing import assert_raises
from sklearn.base import clone
# temporary solution for relative imports in case pyod is not installed
# if pyod is installed, no need to use the following line
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from pyod.models.so_gaal import SO_GAAL
from pyod.utils.data import generate_data
class TestSO_GAAL(unittest.TestCase):
"""
Notes: GAN may yield unstable results, so the test is designed for running
models only, without any performance check.
"""
def setUp(self):
self.n_train = 1000
self.n_test = 200
self.n_features = 2
self.contamination = 0.1
# GAN may yield unstable results; turning performance check off
# self.roc_floor = 0.8
self.X_train, self.X_test, self.y_train, self.y_test = generate_data(
n_train=self.n_train, n_test=self.n_test,
n_features=self.n_features, contamination=self.contamination,
random_state=42)
self.clf = SO_GAAL(contamination=self.contamination)
self.clf.fit(self.X_train)
def test_parameters(self):
assert (hasattr(self.clf, 'decision_scores_') and
self.clf.decision_scores_ is not None)
assert (hasattr(self.clf, 'labels_') and
self.clf.labels_ is not None)
assert (hasattr(self.clf, 'threshold_') and
self.clf.threshold_ is not None)
assert (hasattr(self.clf, '_mu') and
self.clf._mu is not None)
assert (hasattr(self.clf, '_sigma') and
self.clf._sigma is not None)
assert (hasattr(self.clf, 'discriminator') and
self.clf.discriminator is not None)
def test_train_scores(self):
assert_equal(len(self.clf.decision_scores_), self.X_train.shape[0])
def test_prediction_scores(self):
pred_scores = self.clf.decision_function(self.X_test)
# check score shapes
assert_equal(pred_scores.shape[0], self.X_test.shape[0])
# check performance
# assert (roc_auc_score(self.y_test, pred_scores) >= self.roc_floor)
def test_prediction_labels(self):
pred_labels = self.clf.predict(self.X_test)
assert_equal(pred_labels.shape, self.y_test.shape)
def test_prediction_proba(self):
pred_proba = self.clf.predict_proba(self.X_test)
assert (pred_proba.min() >= 0)
assert (pred_proba.max() <= 1)
def test_prediction_proba_linear(self):
pred_proba = self.clf.predict_proba(self.X_test, method='linear')
assert (pred_proba.min() >= 0)
assert (pred_proba.max() <= 1)
def METHOD_NAME(self):
pred_proba = self.clf.predict_proba(self.X_test, method='unify')
assert (pred_proba.min() >= 0)
assert (pred_proba.max() <= 1)
def test_prediction_proba_parameter(self):
with assert_raises(ValueError):
self.clf.predict_proba(self.X_test, method='something')
def test_prediction_labels_confidence(self):
pred_labels, confidence = self.clf.predict(self.X_test,
return_confidence=True)
assert_equal(pred_labels.shape, self.y_test.shape)
assert_equal(confidence.shape, self.y_test.shape)
assert (confidence.min() >= 0)
assert (confidence.max() <= 1)
def test_prediction_proba_linear_confidence(self):
pred_proba, confidence = self.clf.predict_proba(self.X_test,
method='linear',
return_confidence=True)
assert (pred_proba.min() >= 0)
assert (pred_proba.max() <= 1)
assert_equal(confidence.shape, self.y_test.shape)
assert (confidence.min() >= 0)
assert (confidence.max() <= 1)
def test_fit_predict(self):
pred_labels = self.clf.fit_predict(self.X_train)
assert_equal(pred_labels.shape, self.y_train.shape)
def test_fit_predict_score(self):
self.clf.fit_predict_score(self.X_test, self.y_test)
self.clf.fit_predict_score(self.X_test, self.y_test,
scoring='roc_auc_score')
self.clf.fit_predict_score(self.X_test, self.y_test,
scoring='prc_n_score')
with assert_raises(NotImplementedError):
self.clf.fit_predict_score(self.X_test, self.y_test,
scoring='something')
def test_model_clone(self):
clone_clf = clone(self.clf)
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()
| 3,910 | to html |
# -*- coding:utf-8 -*-
"""
This file is part of OpenSesame.
OpenSesame is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenSesame is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OpenSesame. If not, see <http://www.gnu.org/licenses/>.
"""
from libopensesame import metadata
from libopensesame.py3compat import *
from libqtopensesame.misc.base_subcomponent import BaseSubcomponent
import re
import os
from libopensesame.oslogging import oslogger
try:
import markdown
from markdown.extensions import attr_list, extra, toc
except ImportError:
oslogger.error(u'Unable to import markdown, proceeding without markdown')
markdown = None
try:
from pygments import highlight
from pygments.lexers import Python3TracebackLexer as TracebackLexer
from pygments.lexers import Python3Lexer as PythonLexer
from pygments.formatters import HtmlFormatter
except ImportError:
highlight = None
from libqtopensesame.misc.translate import translation_context
_ = translation_context(u'markdown', category=u'core')
class MarkdownParser(BaseSubcomponent):
r"""A Markdown parser with syntax highlighting."""
def __init__(self, main_window):
r"""Constructor.
Parameters
----------
main_window
The main-window object.
"""
self.setup(main_window)
self.css = u'<style type="text/css">'
with safe_open(self.main_window.theme.resource(u'markdown.css')) as fd:
self.css += fd.read() % {u'background_image':
os.path.abspath(self.main_window.theme.resource(
u'background.png'))}
if highlight is not None:
self.traceback_lexer = TracebackLexer()
self.python_lexer = PythonLexer()
self.html_formatter = HtmlFormatter()
self.css += self.html_formatter.get_style_defs(u'.highlight')
self.re_script = re.compile(
r'^~~~\s*.(?P<syntax>\w+)(?P<script>.*?)^~~~', re.S | re.M)
self.css += u'</style><link href="https://fonts.googleapis.com/css?family=Roboto+Slab&display=swap" rel="stylesheet">'
if markdown is not None:
self.ext = [attr_list.AttrListExtension(), extra.ExtraExtension(),
toc.TocExtension(title=u'Overview'),
u'markdown.extensions.tables']
self.footer = u'''
<p>
<a class="dismiss-button" href="opensesame://action.close_current_tab">%s</a>
</p>
<div class="footer">
%s
Copyright <a href="http://www.cogsci.nl/smathot">Sebastiaan Mathôt</a> 2010-2023
</div>
''' % (_(u'Dismiss this message'), metadata.identity)
def highlight(self, md):
r"""Replaces ~~~ blocks with syntax-highlighted HTML code.
Parameters
----------
md : str
A Markdown string.
Returns
-------
str
A Markdown string.
"""
if highlight is None:
return md
while True:
m = re.search(self.re_script, md)
if m is None:
break
orig = m.group()
syntax = m.group(u'syntax')
script = m.group(u'script')
if syntax == u'traceback':
lexer = self.traceback_lexer
elif syntax == u'python':
lexer = self.python_lexer
else:
md = md.replace(orig, u'<code>%s</code>\n' % script)
continue
new = highlight(script, lexer, self.html_formatter)
md = md.replace(orig, new)
return md
def METHOD_NAME(self, md):
r"""Converts Markdown to HTML.
Parameters
----------
md : str
A Markdown string.
Returns
-------
str
A Markdown string.
"""
md = self.highlight(md)
if markdown is None:
return u'<pre>%s</pre>' % md
html = markdown.markdown(md, extensions=self.ext, errors=u'ignore') \
+ self.css + self.footer
if html.startswith(u'<p>title:'):
title, body = tuple(html.split(u'\n', 1))
html = u'<h1>%s</h1>\n\n%s' % (title[9:-4], body)
return html
# Alias for backwards compatibility
markdown_parser = MarkdownParser
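# Minimal usage sketch (requires a running OpenSesame main_window; the exact
# HTML depends on whether markdown/pygments are installed):
#   parser = MarkdownParser(main_window)
#   html = parser.METHOD_NAME(u'# Title\n\n~~~ .python\nprint(1)\n~~~\n')
#   # -> an <h1> heading plus a pygments-highlighted code block, with the
#   #    bundled CSS and footer appended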
| 3,911 | from vertices |
# This file is part of the pyMOR project (https://www.pymor.org).
# Copyright pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (https://opensource.org/licenses/BSD-2-Clause)
import numpy as np
from pymor.discretizers.builtin.grids.interfaces import Grid
from pymor.discretizers.builtin.grids.referenceelements import triangle
class UnstructuredTriangleGrid(Grid):
"""A generic unstructured, triangular grid.
Use :meth:`~UnstructuredTriangleGrid.from_vertices` to instantiate
the grid from vertex coordinates and connectivity data.
"""
dim = 2
reference_element = triangle
def __init__(self, sizes, subentity_data, embedding_data):
self.__auto_init(locals())
vertices = self.centers(2)
self.domain = np.array([[np.min(vertices[:, 0]), np.min(vertices[:, 1])],
[np.max(vertices[:, 0]), np.max(vertices[:, 1])]])
@classmethod
def METHOD_NAME(cls, vertices, faces):
"""Instantiate grid from vertex coordinates and connectivity data.
Parameters
----------
vertices
A (num_vertices, 2)-shaped |array| containing the coordinates
of all vertices in the grid. The row numbers in the array will
be the global indices of the given vertices (codim 2 entities).
faces
A (num_faces, 3)-shaped |array| containing the global indices
of the vertices which define a given triangle in the grid.
The row numbers in the array will be the global indices of the
given triangles (codim 0 entities).
"""
assert faces.shape[1] == 3
assert np.min(faces) == 0
assert np.max(faces) == len(vertices) - 1
vertices = vertices.astype(np.float64, copy=False)
faces = faces.astype(np.int32, copy=False)
edges, num_edges = compute_edges(faces)
COORDS = vertices[faces]
SHIFTS = COORDS[:, 0, :]
TRANS = COORDS[:, 1:, :] - SHIFTS[:, np.newaxis, :]
TRANS = TRANS.swapaxes(1, 2)
sizes = (len(faces), num_edges, len(vertices))
subentity_data = (np.arange(len(faces), dtype=np.int32).reshape(-1, 1), edges, faces)
embedding_data = (TRANS, SHIFTS)
return cls(sizes, subentity_data, embedding_data)
def size(self, codim=0):
assert 0 <= codim <= 2, 'Invalid codimension'
return self.sizes[codim]
def subentities(self, codim=0, subentity_codim=None):
assert 0 <= codim <= 2, 'Invalid codimension'
if subentity_codim is None:
subentity_codim = codim + 1
assert codim <= subentity_codim <= self.dim, 'Invalid subentity codimension'
if codim == 0:
return self.subentity_data[subentity_codim]
else:
return super().subentities(codim, subentity_codim)
def embeddings(self, codim=0):
if codim == 0:
return self.embedding_data
else:
return super().embeddings(codim)
def visualize(self, U, codim=2, **kwargs):
"""Visualize scalar data associated to the grid as a patch plot.
Parameters
----------
U
|NumPy array| of the data to visualize. If `U.dim == 2 and len(U) > 1`, the
data is visualized as a time series of plots. Alternatively, a tuple of
|NumPy arrays| can be provided, in which case a subplot is created for
each entry of the tuple. The lengths of all arrays have to agree.
codim
The codimension of the entities the data in `U` is attached to (either 0 or 2).
kwargs
See :func:`~pymor.discretizers.builtin.gui.visualizers.PatchVisualizer.visualize`
"""
from pymor.discretizers.builtin.gui.visualizers import PatchVisualizer
from pymor.vectorarrays.interface import VectorArray
from pymor.vectorarrays.numpy import NumpyVectorSpace
if isinstance(U, (np.ndarray, VectorArray)):
U = (U,)
assert all(isinstance(u, (np.ndarray, VectorArray)) for u in U)
U = tuple(NumpyVectorSpace.make_array(u) if isinstance(u, np.ndarray) else
u if isinstance(u.space, NumpyVectorSpace) else
NumpyVectorSpace.make_array(u.to_numpy())
for u in U)
PatchVisualizer(self, codim=codim).visualize(U, **kwargs)
def __str__(self):
return 'UnstructuredTriangleGrid with {} triangles, {} edges, {} vertices'.format(*self.sizes)
def compute_edges(subentities):
X = np.empty_like(subentities, dtype=[('l', np.int32), ('h', np.int32)])
X['l'][:, 0] = np.min(subentities[:, 1:3], axis=1)
X['l'][:, 1] = np.min(subentities[:, 0:3:2], axis=1)
X['l'][:, 2] = np.min(subentities[:, 0:2], axis=1)
X['h'][:, 0] = np.max(subentities[:, 1:3], axis=1)
X['h'][:, 1] = np.max(subentities[:, 0:3:2], axis=1)
X['h'][:, 2] = np.max(subentities[:, 0:2], axis=1)
U, I = np.unique(X, return_inverse=True)
return I.reshape(subentities.shape).astype(np.int32), len(U)
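# Usage sketch (not part of the original module): building a two-triangle grid over
# the unit square via the classmethod documented above as `from_vertices` (shown
# masked as METHOD_NAME in this listing). The coordinates below are illustrative.
if __name__ == '__main__':
    demo_vertices = np.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.]])
    demo_faces = np.array([[0, 1, 2], [0, 2, 3]])
    demo_grid = UnstructuredTriangleGrid.from_vertices(demo_vertices, demo_faces)
    # Expected: 2 triangles, 5 edges (four boundary edges plus the diagonal), 4 vertices.
    print(demo_grid)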
|
3,912 |
load image
|
# SPDX-FileCopyrightText: 2009-2023 Blender Authors
#
# SPDX-License-Identifier: GPL-2.0-or-later
__all__ = (
"load_image",
)
# limited replacement for BPyImage.comprehensiveImageLoad
def METHOD_NAME(
imagepath,
dirname="",
place_holder=False,
recursive=False,
ncase_cmp=True,
convert_callback=None,
verbose=False,
relpath=None,
check_existing=False,
force_reload=False,
):
"""
Return an image from the file path with options to search multiple paths
and return a placeholder if it's not found.
:arg imagepath: The image filename.
If a path precedes it, this will be searched as well.
:type imagepath: string
:arg dirname: The directory where the image may be located; any file name
at the end of the path is ignored.
:type dirname: string
:arg place_holder: if True, a new placeholder image will be created.
This is useful so later you can relink the image to its original data.
:type place_holder: bool
:arg recursive: If True, directories will be recursively searched.
Be careful with this if you have files in your root directory because
it may take a long time.
:type recursive: bool
:arg ncase_cmp: on non-Windows systems, find the correct case for the file.
:type ncase_cmp: bool
:arg convert_callback: a function that takes an existing path and returns
a new one. Use this when loading image formats Blender may not support;
the convert_callback can take the path of a GIF (for example),
convert it to a PNG and return the PNG's path.
For formats Blender can read, simply return the path that is given.
:type convert_callback: function
:arg relpath: If not None, make the file relative to this path.
:type relpath: None or string
:arg check_existing: If true,
returns already loaded image datablock if possible
(based on file path).
:type check_existing: bool
:arg force_reload: If true,
force reloading of image (only useful when `check_existing`
is also enabled).
:type force_reload: bool
:return: an image or None
:rtype: :class:`bpy.types.Image`
"""
import os
import bpy
# -------------------------------------------------------------------------
# Utility Functions
def _image_load_placeholder(path):
name = path
if type(path) is str:
name = name.encode("utf-8", "replace")
name = name.decode("utf-8", "replace")
name = os.path.basename(name)
image = bpy.data.images.new(name, 128, 128)
# allow the path to be resolved later
image.filepath = path
image.source = 'FILE'
return image
def _image_load(path):
import bpy
if convert_callback:
path = convert_callback(path)
# Ensure we're not relying on the 'CWD' to resolve the path.
if not os.path.isabs(path):
path = os.path.abspath(path)
try:
image = bpy.data.images.load(path, check_existing=check_existing)
except RuntimeError:
image = None
if verbose:
if image:
print(" image loaded '%s'" % path)
else:
print(" image load failed '%s'" % path)
# The image path exists but could not be read for some reason,
# so be sure to return a placeholder.
if place_holder and image is None:
image = _image_load_placeholder(path)
if image:
if force_reload:
image.reload()
if relpath is not None:
# make relative
from bpy.path import relpath as relpath_fn
# can't always find the relative path
# (between drive letters on windows)
try:
filepath_rel = relpath_fn(path, start=relpath)
except ValueError:
filepath_rel = None
if filepath_rel is not None:
image.filepath_raw = filepath_rel
return image
def _recursive_search(paths, filename_check):
for path in paths:
for dirpath, _dirnames, filenames in os.walk(path):
# skip '.svn'
if dirpath[0] in {".", b'.'}:
continue
for filename in filenames:
if filename_check(filename):
yield os.path.join(dirpath, filename)
# -------------------------------------------------------------------------
imagepath = bpy.path.native_pathsep(imagepath)
if verbose:
print("load_image('%s', '%s', ...)" % (imagepath, dirname))
if os.path.exists(imagepath):
return _image_load(imagepath)
variants = [imagepath]
if dirname:
variants += [
os.path.join(dirname, imagepath),
os.path.join(dirname, bpy.path.basename(imagepath)),
]
for filepath_test in variants:
if ncase_cmp:
ncase_variants = (
filepath_test,
bpy.path.resolve_ncase(filepath_test),
)
else:
ncase_variants = (filepath_test, )
for nfilepath in ncase_variants:
if os.path.exists(nfilepath):
return _image_load(nfilepath)
if recursive:
search_paths = []
for dirpath_test in (os.path.dirname(imagepath), dirname):
if os.path.exists(dirpath_test):
search_paths.append(dirpath_test)
search_paths[:] = bpy.path.reduce_dirs(search_paths)
imagepath_base = bpy.path.basename(imagepath)
if ncase_cmp:
imagepath_base = imagepath_base.lower()
def image_filter(fn):
return (imagepath_base == fn.lower())
else:
def image_filter(fn):
return (imagepath_base == fn)
nfilepath = next(_recursive_search(search_paths, image_filter), None)
if nfilepath is not None:
return _image_load(nfilepath)
# None of the paths exist so return placeholder
if place_holder:
return _image_load_placeholder(imagepath)
# TODO comprehensiveImageLoad also searched in bpy.config.textureDir
return None
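# Usage sketch (not part of the original module): only meaningful inside Blender,
# because `bpy` is unavailable elsewhere. The file name and search directory are
# placeholders; `load_image` is the documented public name, shown masked as
# METHOD_NAME in this listing.
def _demo_load_texture():
    return load_image(
        "wood.png",             # placeholder file name
        dirname="//textures",   # placeholder, Blender-relative search directory
        place_holder=True,      # fall back to a 128x128 placeholder datablock
        recursive=True,
        check_existing=True,
    )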
|
3,913 |
add html score
|
import re # noqa: EXE002
def eval_strings(str_list):
scored_list = [_score(string) for string in str_list]
scored_list.sort(key=lambda element: element[1], reverse=True)
return [score_tupel[0] for score_tupel in scored_list]
def _score(string):
score = 0
score = _add_length_score(string, score)
score = _add_rare_special_character_score(string, score)
score = _add_special_character_ratio_score(string, score)
score = _add_case_ratio_score(string, score)
score = _add_quad_characters_score(string, score)
score = _add_dictionary_score(string, score)
score = _add_path_score(string, score)
score = _add_possible_year_score(string, score)
score = _add_possible_version_number_score(string, score)
score = _add_format_string_score(string, score)
score = _add_mail_adress_score(string, score)
score = _add_underscore_or_period_at_beginning_score(string, score)
score = _add_parameter_score(string, score)
score = METHOD_NAME(string, score)
return string, score
def _add_length_score(string, score):
return score + len(string) / 2
def _add_rare_special_character_score(string, score):
rare_characters = ['^', '°', '§', '´', '`', '{', '}']
return score - 15 * len([character for character in rare_characters if character in string])
def _add_special_character_ratio_score(string, score):
regex_non_word = r'\W'
regex_word = r'[a-zA-Z]'
matches_non_word = re.finditer(regex_non_word, string)
matches_word = re.finditer(regex_word, string)
match_num_non_word = len(list(matches_non_word))
match_num_word = len(list(matches_word))
score += _ratio_word_non_word_helper(match_num_word, match_num_non_word)
return score
def _ratio_word_non_word_helper(num_word, num_non_word):
ratio = num_word if num_non_word == 0 else num_word / num_non_word
return 15 if ratio >= 2 else -15 # noqa: PLR2004
def _add_case_ratio_score(string, score):
regex_lower = r'[a-z]'
regex_capital = r'[A-Z]'
matches_lower_case = re.finditer(regex_lower, string)
matches_capital = re.finditer(regex_capital, string)
match_num_lower = len(list(matches_lower_case))
match_num_capital = len(list(matches_capital))
score += _case_ratio_helper(match_num_lower, match_num_capital)
return score
def _case_ratio_helper(num_lower, num_capital):
# all caps
if num_lower == 0 and num_capital >= 6: # noqa: PLR2004
return num_capital / 2
case_ratio = num_lower if num_capital == 0 else num_lower / num_capital
return 10 if case_ratio > 1 else -10
def _add_quad_characters_score(string, score):
matches = re.finditer(r'(\S)\1\1\1', string)
return score - 25 * len(list(matches))
def _add_dictionary_score(string, score):
dictionary = ['version', 'v.', 'http', 'ftp', 'usage', 'Usage', 'ssh', 'SSH', 'password', 'Version']
return score + 30 * len([word for word in dictionary if word in string])
def _add_possible_year_score(string, score):
regex = r'([1][9]\d\d)|([2][0]\d\d)'
matches = re.search(regex, string)
return score + 20 if matches else score
def _add_path_score(string, score):
regex = r'(\/[\w-]+)+(.[a-zA-Z]+)'
matches = re.search(regex, string)
return score + 100 if matches else score
def _add_possible_version_number_score(string, score):
regex = r'\d+\.(\d+\.?)+'
matches = re.search(regex, string)
return score + 35 if matches else score
def _add_format_string_score(string, score):
regex = r'%s|%lu|%u|%lf|%f|%i|%d'
matches = re.finditer(regex, string)
return score - 15 * len(list(matches))
def _add_mail_adress_score(string, score):
regex = r'(([^<>()[\]\\.,;:\s@\"]+(\.[^<>()[\]\\.,;:\s@\"]+)*)|(\".+\"))@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\])|(([a-zA-Z\-0-9]+\.)+[a-zA-Z]{2,}))' # noqa: E501
match = re.search(regex, string)
return score + 150 if match else score
def _add_underscore_or_period_at_beginning_score(string, score):
match = re.search(r'(^_+)|^\.', string)
return score - 25 if match else score
def _add_parameter_score(string, score):
match = re.search(r'^\s*-{1,2}', string)
return score + 35 if match and len(string) > 6 else score # noqa: PLR2004
def METHOD_NAME(string, score):
regex = r'</?[^\\\(\)$\[\]\§\.\,\?<>;|!]+>'
match = re.search(regex, string)
return score + 15 if match else score
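# Usage sketch (not part of the original module): ranking a few candidate strings.
# The inputs are made up; exact scores depend on the heuristics above, but the
# path-like and "Usage"-style strings should outrank the short noise entries.
if __name__ == '__main__':
    candidates = [
        'x$#',
        'Usage: tool --input FILE',
        '/usr/bin/python3.8',
        'AAAAAAAA',
    ]
    print(eval_strings(candidates))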
|
3,914 |
method
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"monitor log-analytics solution show",
)
class Show(AAZCommand):
"""Get the user solution.
:example: Show a log-analytics solution
az monitor log-analytics solution show --resource-group MyResourceGroup --name SolutionName
"""
_aaz_info = {
"version": "2015-11-01-preview",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.operationsmanagement/solutions/{}", "2015-11-01-preview"],
]
}
def _handler(self, command_args):
super()._handler(command_args)
self._execute_operations()
return self._output()
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
_args_schema.name = AAZStrArg(
options=["-n", "--name"],
help="Name of the log-analytics solution. It should be in the format of solutionType(workspaceName). SolutionType part is case sensitive.",
required=True,
id_part="name",
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
self.SolutionsGet(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
def _output(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
return result
class SolutionsGet(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [200]:
return self.on_200(session)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationsManagement/solutions/{solutionName}",
**self.url_parameters
)
@property
def METHOD_NAME(self):
return "GET"
@property
def error_format(self):
return "ODataV4Format"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"solutionName", self.ctx.args.name,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2015-11-01-preview",
required=True,
),
}
return parameters
@property
def header_parameters(self):
parameters = {
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
def on_200(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
)
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZObjectType()
_schema_on_200 = cls._schema_on_200
_schema_on_200.id = AAZStrType(
flags={"read_only": True},
)
_schema_on_200.location = AAZStrType()
_schema_on_200.name = AAZStrType(
flags={"read_only": True},
)
_schema_on_200.plan = AAZObjectType()
_schema_on_200.properties = AAZObjectType()
_schema_on_200.tags = AAZDictType()
_schema_on_200.type = AAZStrType(
flags={"read_only": True},
)
plan = cls._schema_on_200.plan
plan.name = AAZStrType()
plan.product = AAZStrType()
plan.promotion_code = AAZStrType(
serialized_name="promotionCode",
)
plan.publisher = AAZStrType()
properties = cls._schema_on_200.properties
properties.contained_resources = AAZListType(
serialized_name="containedResources",
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.referenced_resources = AAZListType(
serialized_name="referencedResources",
)
properties.workspace_resource_id = AAZStrType(
serialized_name="workspaceResourceId",
flags={"required": True},
)
contained_resources = cls._schema_on_200.properties.contained_resources
contained_resources.Element = AAZStrType()
referenced_resources = cls._schema_on_200.properties.referenced_resources
referenced_resources.Element = AAZStrType()
tags = cls._schema_on_200.tags
tags.Element = AAZStrType()
return cls._schema_on_200
class _ShowHelper:
"""Helper class for Show"""
__all__ = ["Show"]
|
3,915 |
test add dir
|
# Copyright 2015 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing for archive."""
import os
import os.path
import tarfile
import unittest
from tools.build_defs.pkg import archive
class TarFileWriterTest(unittest.TestCase):
"""Testing for TarFileWriter class."""
def assertTarFileContent(self, tar, content):
"""Assert that tarfile contains exactly the entry described by `content`.
Args:
tar: the path to the TAR file to test.
content: an array describing the expected content of the TAR file.
Each entry in that list should be a dictionary whose keys name fields
to test on the corresponding TarInfo. To test only for the presence of
a file "x", the entry can simply be `{"name": "x"}`; missing fields
are ignored. To match the content of a file entry, use the key "data".
"""
with tarfile.open(tar, "r:") as f:
i = 0
for current in f:
error_msg = "Extraneous file at end of archive %s: %s" % (
tar,
current.name
)
self.assertTrue(i < len(content), error_msg)
for k, v in content[i].items():
if k == "data":
value = f.extractfile(current).read()
else:
value = getattr(current, k)
error_msg = " ".join([
"Value `%s` for key `%s` of file" % (value, k),
"%s in archive %s does" % (current.name, tar),
"not match expected value `%s`" % v
])
self.assertEqual(value, v, error_msg)
i += 1
if i < len(content):
self.fail("Missing file %s in archive %s" % (content[i], tar))
def setUp(self):
self.tempfile = os.path.join(os.environ["TEST_TMPDIR"], "test.tar")
def tearDown(self):
if os.path.exists(self.tempfile):
os.remove(self.tempfile)
def testEmptyTarFile(self):
with archive.TarFileWriter(self.tempfile):
pass
self.assertTarFileContent(self.tempfile, [])
def testDottedFiles(self):
with archive.TarFileWriter(self.tempfile) as f:
f.add_file("a")
f.add_file("/b")
f.add_file("./c")
f.add_file("./.d")
f.add_file("..e")
f.add_file(".f")
content = [
{"name": "."}, {"name": "./a"}, {"name": "/b"}, {"name": "./c"},
{"name": "./.d"}, {"name": "./..e"}, {"name": "./.f"}
]
self.assertTarFileContent(self.tempfile, content)
def METHOD_NAME(self):
# For some strange reason, ending slash is stripped by the test
content = [
{"name": ".", "mode": 0o755},
{"name": "./a", "mode": 0o755},
{"name": "./a/b", "data": b"ab", "mode": 0o644},
{"name": "./a/c", "mode": 0o755},
{"name": "./a/c/d", "data": b"acd", "mode": 0o644},
]
tempdir = os.path.join(os.environ["TEST_TMPDIR"], "test_dir")
# Iterate over the `content` array to create the directory
# structure it describes.
for c in content:
if "data" in c:
p = os.path.join(tempdir, c["name"][2:])
os.makedirs(os.path.dirname(p))
with open(p, "wb") as f:
f.write(c["data"])
with archive.TarFileWriter(self.tempfile) as f:
f.add_dir("./", tempdir, mode=0o644)
self.assertTarFileContent(self.tempfile, content)
def testAddingDirectoriesForFile(self):
with archive.TarFileWriter(self.tempfile) as f:
f.add_file("d/f")
content = [
{"name": ".",
"mode": 0o755},
{"name": "./d",
"mode": 0o755},
{"name": "./d/f"},
]
self.assertTarFileContent(self.tempfile, content)
def testAddingDirectoriesForFileSeparately(self):
d_dir = os.path.join(os.environ["TEST_TMPDIR"], "d_dir")
os.makedirs(d_dir)
with open(os.path.join(d_dir, "dir_file"), "w"):
pass
a_dir = os.path.join(os.environ["TEST_TMPDIR"], "a_dir")
os.makedirs(a_dir)
with open(os.path.join(a_dir, "dir_file"), "w"):
pass
with archive.TarFileWriter(self.tempfile) as f:
f.add_dir("d", d_dir)
f.add_file("d/f")
f.add_dir("a", a_dir)
f.add_file("a/b/f")
content = [
{"name": ".",
"mode": 0o755},
{"name": "./d",
"mode": 0o755},
{"name": "./d/dir_file"},
{"name": "./d/f"},
{"name": "./a",
"mode": 0o755},
{"name": "./a/dir_file"},
{"name": "./a/b",
"mode": 0o755},
{"name": "./a/b/f"},
]
self.assertTarFileContent(self.tempfile, content)
def testAddingDirectoriesForFileManually(self):
with archive.TarFileWriter(self.tempfile) as f:
f.add_file("d", tarfile.DIRTYPE)
f.add_file("d/f")
f.add_file("a", tarfile.DIRTYPE)
f.add_file("a/b", tarfile.DIRTYPE)
f.add_file("a/b", tarfile.DIRTYPE)
f.add_file("a/b/", tarfile.DIRTYPE)
f.add_file("a/b/c/f")
f.add_file("x/y/f")
f.add_file("x", tarfile.DIRTYPE)
content = [
{"name": ".",
"mode": 0o755},
{"name": "./d",
"mode": 0o755},
{"name": "./d/f"},
{"name": "./a",
"mode": 0o755},
{"name": "./a/b",
"mode": 0o755},
{"name": "./a/b/c",
"mode": 0o755},
{"name": "./a/b/c/f"},
{"name": "./x",
"mode": 0o755},
{"name": "./x/y",
"mode": 0o755},
{"name": "./x/y/f"},
]
self.assertTarFileContent(self.tempfile, content)
def testChangingRootDirectory(self):
with archive.TarFileWriter(self.tempfile, root_directory="root") as f:
f.add_file("d", tarfile.DIRTYPE)
f.add_file("d/f")
f.add_file("a", tarfile.DIRTYPE)
f.add_file("a/b", tarfile.DIRTYPE)
f.add_file("a/b", tarfile.DIRTYPE)
f.add_file("a/b/", tarfile.DIRTYPE)
f.add_file("a/b/c/f")
f.add_file("x/y/f")
f.add_file("x", tarfile.DIRTYPE)
content = [
{"name": "root",
"mode": 0o755},
{"name": "root/d",
"mode": 0o755},
{"name": "root/d/f"},
{"name": "root/a",
"mode": 0o755},
{"name": "root/a/b",
"mode": 0o755},
{"name": "root/a/b/c",
"mode": 0o755},
{"name": "root/a/b/c/f"},
{"name": "root/x",
"mode": 0o755},
{"name": "root/x/y",
"mode": 0o755},
{"name": "root/x/y/f"},
]
self.assertTarFileContent(self.tempfile, content)
if __name__ == "__main__":
unittest.main()
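# Illustration (not part of the original test): the `content` convention used by
# assertTarFileContent, reproduced with the standard library only so it runs
# without the Bazel `archive` helper. Entry names and payloads are placeholders.
def _content_convention_demo():
    import io
    buf = io.BytesIO()
    with tarfile.open(fileobj=buf, mode="w") as tf:
        info = tarfile.TarInfo("./a/b")
        payload = b"ab"
        info.size = len(payload)
        tf.addfile(info, io.BytesIO(payload))
    expected = [{"name": "./a/b", "data": b"ab"}]
    buf.seek(0)
    with tarfile.open(fileobj=buf, mode="r") as tf:
        for member, spec in zip(tf, expected):
            for key, value in spec.items():
                got = tf.extractfile(member).read() if key == "data" else getattr(member, key)
                assert got == value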
|
3,916 |
ekaf provider
|
from unittest.mock import MagicMock
import pytest
from feeluown import models
from feeluown.library import (
AbstractProvider, ProviderV2, ModelType, ProviderFlags as PF,
AlbumModel, ArtistModel, BriefVideoModel, BriefSongModel,
Library, SongModel, BriefAlbumModel, BriefArtistModel
)
from feeluown.media import Quality, Media, MediaType
from feeluown.utils.reader import create_reader
FakeSource = 'fake' # v1 provider
EkafSource = 'ekaf' # v2 provider
class FakeProvider(AbstractProvider):
@property
def identifier(self):
return 'fake'
@property
def name(self):
return 'FAKE'
def search(self, keyword, **kwargs):
return FakeSearchModel(q=keyword, songs=[_song1, _song2, _song3])
class EkafProvider(AbstractProvider, ProviderV2):
class meta:
identifier = 'ekaf'
name = 'EKAF'
flags = {
ModelType.album: (PF.model_v2 | PF.get),
ModelType.artist: (PF.model_v2 | PF.get | PF.songs_rd),
}
def __init__(self):
super().__init__()
@property
def identifier(self):
return EkafSource
@property
def name(self):
return 'EKAF'
def song_get(self, identifier):
if identifier == _ekaf_song0.identifier:
return _ekaf_song0
def song_list_quality(self, song):
return []
def song_get_media(self, song, quality):
return []
def song_get_mv(self, song):
if song.identifier == _ekaf_brief_song0.identifier:
return _ekaf_brief_mv0
def album_get(self, identifier):
if identifier == _ekaf_album0.identifier:
return _ekaf_album0
def artist_get(self, identifier):
if identifier == _ekaf_artist0.identifier:
return _ekaf_artist0
def artist_create_songs_rd(self, _):
return create_reader([])
def video_list_quality(self, video):
if video.identifier == _ekaf_brief_mv0.identifier:
return [Quality.Video.hd]
def video_get_media(self, video, quality):
if video.identifier == _ekaf_brief_mv0.identifier \
and quality is Quality.Video.hd:
return Media('http://ekaf.org/mv0.mp4',
type_=MediaType.video,)
return None
_fake_provider = FakeProvider()
class FakeSongModel(models.SongModel):
class Meta:
provider = _fake_provider
class FakeArtistModel(models.ArtistModel):
class Meta:
provider = _fake_provider
class FakeAlbumModel(models.AlbumModel):
class Meta:
provider = _fake_provider
class FakeSearchModel(models.SearchModel):
class Meta:
provider = _fake_provider
_song1 = FakeSongModel(identifier=1, url='1.mp3')
_song2 = FakeSongModel(identifier=2, url='2.mp3')
_song3 = FakeSongModel(identifier=3)
_ekaf_brief_song0 = BriefSongModel(source=EkafSource,
identifier='0')
_ekaf_brief_album0 = BriefAlbumModel(source=EkafSource,
identifier='0')
_ekaf_brief_artist0 = BriefArtistModel(source=EkafSource,
identifier='0')
_ekaf_album0 = AlbumModel(source=EkafSource,
identifier='0', name='0', cover='',
description='', songs=[], artists=[])
_ekaf_artist0 = ArtistModel(source=EkafSource,
identifier='0', name='0', pic_url='',
description='', hot_songs=[], aliases=[])
_ekaf_brief_mv0 = BriefVideoModel(source=EkafSource,
identifier='0', title='')
_ekaf_song0 = SongModel(source=EkafSource,
identifier='0', title='0', album=_ekaf_brief_album0,
artists=[_ekaf_brief_artist0], duration=0)
@pytest.fixture
def artist():
return FakeArtistModel(identifier=0, name='mary')
@pytest.fixture
def album():
return FakeAlbumModel(identifier=0, name='blue and green')
@pytest.fixture
def ekaf_brief_song0():
return _ekaf_brief_song0
@pytest.fixture
def ekaf_song0():
return _ekaf_song0
@pytest.fixture
def ekaf_album0():
return _ekaf_album0
@pytest.fixture
def ekaf_artist0():
return _ekaf_artist0
@pytest.fixture
def song(artist, album):
return FakeSongModel(
identifier=0,
title='hello world',
artists=[artist],
album=album,
duration=600000,
url='http://xxx.com/xxx.mp3')
@pytest.fixture
def song_standby(song):
return FakeSongModel(
identifier=100,
title=song.title,
artists=song.artists,
album=song.album,
duration=song.duration,
url='standby.mp3'
)
@pytest.fixture
def song1(): return _song1
@pytest.fixture
def song2(): return _song2
@pytest.fixture
def song3(): return _song3
@pytest.fixture
def provider():
"""provider is a v1 provider"""
return _fake_provider
@pytest.fixture
def METHOD_NAME():
"""ekaf_provider is a v2 provider"""
return EkafProvider()
@pytest.fixture
def library(provider, METHOD_NAME):
library = Library()
library.register(provider)
library.register(METHOD_NAME)
return library
@pytest.fixture
def app_mock():
return MagicMock()
|
3,917 |
script
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------
# pylint: disable=protected-access
from __future__ import annotations
import ast
import inspect
import sys
import types
from typing import Any, Callable, Optional, Sequence
import onnx.helper
import onnxscript
from onnxscript import converter, irbuilder, values
from onnxscript._internal import ast_utils
def script_check(
f: ast.FunctionDef,
opset: values.Opset,
global_names: dict[str, Any],
source: str,
default_opset: Optional[values.Opset] = None,
) -> irbuilder.IRFunction:
"""Check that a function falls into the ONNXScript subset of Python."""
# See if conversion succeeds.
# TODO: cleanup Converter interface/API, separating checker from
# converter
convert = converter.Converter(
opset=opset,
global_names=global_names,
source=source,
default_opset=default_opset,
)
return convert.translate_function_def(f)
def METHOD_NAME(
opset: Optional[values.Opset] = None,
default_opset: Optional[values.Opset] = None,
**kwargs: Any,
) -> Callable[[types.FunctionType], onnxscript.OnnxFunction]:
"""Main decorator. Declares a function as an onnx function.
Args:
opset: Opset the function belongs to (see :ref:`l-api-opsets`).
default_opset: Opset to use for operators not in the function's opset.
kwargs: Additional keyword arguments.
Returns:
an instance of :class:`onnxscript.values.OnnxFunction`
Example:
::
@script()
def log2(x):
one = op.Constant(value=make_tensor('one', TensorProto.FLOAT, [1], [1]))
return op.Div(op.Log(x), op.CastLike(op.Log(one), x))
Or:
::
from onnxscript.onnx_opset import opset16
@script(opset16)
def log2(x):
one = op.Constant(value=make_tensor('one', TensorProto.FLOAT, [1], [1]))
return op.Div(op.Log(x), op.CastLike(op.Log(one), x))
"""
opset = opset or values.Opset("this", 1)
if not isinstance(opset, values.Opset):
raise TypeError(
"Script parameter must be an opset. Did you use @script instead of @script()?"
)
def transform(f: types.FunctionType) -> onnxscript.OnnxFunction:
if not inspect.isfunction(f):
raise TypeError("The ONNXScript decorator should be applied to functions only.")
src, f_ast = ast_utils.get_src_and_ast(f)
# The script should be compiled using the globals/locals at the definition site.
# This allows the script to reference names defined outside the script,
# which is used for a few different purposes.
# The following is an approximate solution that works for normal use.
module = inspect.getmodule(f)
closure = inspect.getclosurevars(f)
env = module.__dict__.copy()
env.update(closure.nonlocals)
result = script_check(f_ast, opset, env, src, default_opset=default_opset)
# TODO: add transformations.
return onnxscript.OnnxFunction(opset, f, result, src, kwargs)
return transform
def graph() -> Callable[[types.FunctionType], values.OnnxClosure]:
"""A parametric decorator used to annotate nested-functions that are used
as graph-attributes.
Returns:
A decorator that returns its input function, but attaches a graph_proto
attribute representing the input function. The translation is not
done at this time, but previously when the outer-level function
was translated to an OnnxFunction. The decorator just looks up
and retrieves the GraphProto representation previously generated.
Example:
::
@script()
def cumulative_sum(X: INT64['N']):
# Translation of cumulative_sum by @script will also translate Sum
# into a GraphProto, which will be stored in the OnnxFunction generated
# for cumulative_sum. At run-time (in eager-mode), the @graph decorator
# retrieves the pre-computed GraphProto and attaches it to the Sum function.
@graph()
def Sum(sum_in, next):
sum_out = sum_in + next
scan_out = op.Identity(sum_out)
return sum_out, scan_out
zero = op.Constant(value_int=0)
# The call to higher-order operator Scan below uses the above function
# Sum as a graph-attribute.
all_sum, result = op.Scan (zero, X, body=Sum, num_scan_inputs=1)
return result
"""
# This is a bit fragile. We want to get the ONNXFunction object representing
# the outer-scope ONNXScript function from the execution stack. The caller of
# @graph is the original script function (cumulative_sum in the above example),
# and the caller of that function is the wrapper function/method in the
# corresponding OnnxFunction object.
# Currently, there is no support for eager-mode execution of nested functions,
# so we don't need to handle doubly nested functions (e.g., a function defined
# inside Sum in the above example).
function_frame = sys._getframe(1) # pylint: disable=protected-access
wrapper_frame = sys._getframe(3) # pylint: disable=protected-access
onnx_function = wrapper_frame.f_locals["self"]
nested_functions = onnx_function.function_ir.nested_functions
def transform(f: types.FunctionType) -> values.OnnxClosure:
return values.OnnxClosure(nested_functions[f.__name__], function_frame, f)
return transform
def is_converted_fun(f: Any) -> bool:
"""Return True if f is a function converted by onnxscript decorator."""
return isinstance(f, onnxscript.OnnxFunction)
def export_onnx_lib(functions: Sequence[values.OnnxFunction], filename: str) -> None:
# Since we don't yet have LibProto defined, we use a ModelProto as a temporary
# container for the list of functions exported as a library, with an empty graph
# and dummy opset_imports.
model = onnx.helper.make_model(
onnx.GraphProto(),
functions=[f.to_function_proto() for f in functions],
producer_name="p2o",
opset_imports=[onnx.helper.make_opsetid("", 15)],
)
onnx.save(model, filename)
|
3,918 |
properties
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetServiceResult',
'AwaitableGetServiceResult',
'get_service',
'get_service_output',
]
@pulumi.output_type
class GetServiceResult:
"""
Properties for the database account.
"""
def __init__(__self__, id=None, name=None, METHOD_NAME=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if METHOD_NAME and not isinstance(METHOD_NAME, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", METHOD_NAME)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
The unique resource identifier of the database account.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the database account.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def METHOD_NAME(self) -> Any:
"""
Services response resource.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of Azure resource.
"""
return pulumi.get(self, "type")
class AwaitableGetServiceResult(GetServiceResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetServiceResult(
id=self.id,
name=self.name,
METHOD_NAME=self.METHOD_NAME,
type=self.type)
def get_service(account_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
service_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetServiceResult:
"""
Gets the status of service.
Azure REST API version: 2023-04-15.
:param str account_name: Cosmos DB database account name.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str service_name: Cosmos DB service name.
"""
__args__ = dict()
__args__['accountName'] = account_name
__args__['resourceGroupName'] = resource_group_name
__args__['serviceName'] = service_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:documentdb:getService', __args__, opts=opts, typ=GetServiceResult).value
return AwaitableGetServiceResult(
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
METHOD_NAME=pulumi.get(__ret__, 'properties'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_service)
def get_service_output(account_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
service_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetServiceResult]:
"""
Gets the status of service.
Azure REST API version: 2023-04-15.
:param str account_name: Cosmos DB database account name.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str service_name: Cosmos DB service name.
"""
...
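# Usage sketch (not part of the generated file): how this lookup might be used
# from a Pulumi program. The account, resource group and service names are
# placeholders; this only runs inside an active Pulumi deployment.
def _demo_lookup():
    svc = get_service_output(
        account_name="my-cosmos-account",
        resource_group_name="my-resource-group",
        service_name="SqlDedicatedGateway",
    )
    pulumi.export("cosmosServiceId", svc.apply(lambda s: s.id))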
|
3,919 |
is usage record created
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Frappe and contributors
# For license information, please see license.txt
from typing import List
from press.press.doctype.plan.plan import Plan
import frappe
from frappe.model.document import Document
from press.utils import log_error
from press.overrides import get_permission_query_conditions_for_doctype
class Subscription(Document):
def validate(self):
self.validate_duplicate()
def on_update(self):
doc = self.get_subscribed_document()
plan_field = doc.meta.get_field("plan")
if not (plan_field and plan_field.options == "Plan"):
return
if self.enabled and doc.plan != self.plan:
doc.plan = self.plan
doc.save()
if not self.enabled and doc.plan:
doc.plan = ""
doc.save()
def enable(self):
try:
self.enabled = True
self.save()
except Exception:
frappe.log_error(title="Enable Subscription Error")
def disable(self):
try:
self.enabled = False
self.save()
except Exception:
frappe.log_error(title="Disable Subscription Error")
@frappe.whitelist()
def create_usage_record(self):
cannot_charge = not self.can_charge_for_subscription()
if cannot_charge:
return
if self.METHOD_NAME():
return
team = frappe.get_cached_doc("Team", self.team)
if team.parent_team:
team = frappe.get_cached_doc("Team", team.parent_team)
if not team.get_upcoming_invoice():
team.create_upcoming_invoice()
plan = frappe.get_cached_doc("Plan", self.plan)
amount = plan.get_price_for_interval(self.interval, team.currency)
usage_record = frappe.get_doc(
doctype="Usage Record",
team=team.name,
document_type=self.document_type,
document_name=self.document_name,
plan=plan.name,
amount=amount,
subscription=self.name,
interval=self.interval,
site=frappe.get_value(
"Marketplace App Subscription", self.marketplace_app_subscription, "site"
)
if self.document_type == "Marketplace App"
else None,
)
usage_record.insert()
usage_record.submit()
return usage_record
def can_charge_for_subscription(self):
doc = self.get_subscribed_document()
if not doc:
return False
if hasattr(doc, "can_charge_for_subscription"):
return doc.can_charge_for_subscription(self)
return True
def METHOD_NAME(self):
filters = {
"team": self.team,
"document_type": self.document_type,
"document_name": self.document_name,
"subscription": self.name,
"interval": self.interval,
"plan": self.plan,
}
if self.interval == "Daily":
filters.update({"date": frappe.utils.today()})
if self.interval == "Monthly":
date = frappe.utils.getdate()
first_day = frappe.utils.get_first_day(date)
last_day = frappe.utils.get_last_day(date)
filters.update({"date": ("between", (first_day, last_day))})
result = frappe.db.get_all("Usage Record", filters=filters, limit=1)
return bool(result)
def validate_duplicate(self):
if not self.is_new():
return
filters = {
"team": self.team,
"document_type": self.document_type,
"document_name": self.document_name,
}
if self.document_type == "Marketplace App":
filters.update({"marketplace_app_subscription": self.marketplace_app_subscription})
results = frappe.db.get_all(
"Subscription",
filters,
pluck="name",
limit=1,
)
if results:
link = frappe.utils.get_link_to_form("Subscription", results[0])
frappe.throw(f"A Subscription already exists: {link}", frappe.DuplicateEntryError)
def get_subscribed_document(self):
if not hasattr(self, "_subscribed_document"):
self._subscribed_document = frappe.get_doc(self.document_type, self.document_name)
return self._subscribed_document
@classmethod
def get_sites_without_offsite_backups(cls) -> List[str]:
plans = Plan.get_ones_without_offsite_backups()
return frappe.get_all(
"Subscription",
filters={"document_type": "Site", "plan": ("in", plans)},
pluck="document_name",
)
def create_usage_records():
"""
Creates daily usage records for paid Subscriptions
"""
free_sites = sites_with_free_hosting()
subscriptions = frappe.db.get_all(
"Subscription",
filters={
"enabled": True,
"plan": ("in", paid_plans()),
"name": ("not in", created_usage_records(free_sites)),
"document_name": ("not in", free_sites),
},
pluck="name",
limit=2000,
)
for name in subscriptions:
subscription = frappe.get_cached_doc("Subscription", name)
try:
subscription.create_usage_record()
frappe.db.commit()
except Exception:
frappe.db.rollback()
log_error(title="Create Usage Record Error", name=name)
def paid_plans():
return frappe.db.get_all(
"Plan",
{
"document_type": (
"in",
("Site", "Server", "Database Server", "Self Hosted Server", "Marketplace App"),
),
"is_trial_plan": 0,
"price_inr": (">", 0),
},
pluck="name",
ignore_ifnull=True,
)
def sites_with_free_hosting():
"""Includes sites that have standard hosting plan from Marketplace Plan"""
marketplace_paid_plans = frappe.get_all(
"Marketplace App Plan",
{"is_free": 0, "standard_hosting_plan": ("is", "set")},
pluck="name",
)
sites_with_standard_hosting = frappe.get_all(
"Marketplace App Subscription",
{"marketplace_app_plan": ("in", marketplace_paid_plans), "status": "Active"},
pluck="site",
)
free_sites = frappe.get_all(
"Site", filters={"free": True, "status": "Active"}, pluck="name"
)
return sites_with_standard_hosting + free_sites
def created_usage_records(free_sites, date=None):
"""Returns created usage records for a particular date"""
date = date or frappe.utils.today()
return frappe.get_all(
"Usage Record",
filters={
"document_type": ("in", ("Site", "Server", "Database Server", "Self Hosted Server")),
"date": date,
"document_name": ("not in", free_sites),
},
pluck="subscription",
ignore_ifnull=True,
)
get_permission_query_conditions = get_permission_query_conditions_for_doctype(
"Subscription"
)
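# Usage sketch (not part of the original module): creating a subscription and its
# daily usage record from a bench console. The team, site and plan names are
# placeholders, and this only works inside a configured Frappe/Press site.
def _demo_create_subscription_and_usage():
    sub = frappe.get_doc({
        "doctype": "Subscription",
        "team": "team@example.com",
        "document_type": "Site",
        "document_name": "demo-site.example.com",
        "plan": "Demo Plan",
        "interval": "Daily",
        "enabled": 1,
    }).insert()
    return sub.create_usage_record()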
|
3,920 |
new to old
|
#=======================================================================
#
# Python Lexical Analyser
#
# Converting NFA to DFA
#
#=======================================================================
from __future__ import absolute_import
from . import Machines
from .Machines import LOWEST_PRIORITY
from .Transitions import TransitionMap
def nfa_to_dfa(old_machine, debug=None):
"""
Given a nondeterministic Machine, return a new equivalent
Machine which is deterministic.
"""
# We build a new machine whose states correspond to sets of states
# in the old machine. Initially we add a new state corresponding to
# the epsilon-closure of each initial old state. Then we give transitions
# to each new state which are the union of all transitions out of any
# of the corresponding old states. The new state reached on a given
# character is the one corresponding to the set of states reachable
# on that character from any of the old states. As new combinations of
# old states are created, new states are added as needed until closure
# is reached.
new_machine = Machines.FastMachine()
state_map = StateMap(new_machine)
# Seed the process using the initial states of the old machine.
# Make the corresponding new states into initial states of the new
# machine with the same names.
for (key, old_state) in old_machine.initial_states.items():
new_state = state_map.old_to_new(epsilon_closure(old_state))
new_machine.make_initial_state(key, new_state)
# Tricky bit here: we add things to the end of this list while we're
# iterating over it. The iteration stops when closure is achieved.
for new_state in new_machine.states:
transitions = TransitionMap()
for old_state in state_map.METHOD_NAME(new_state):
for event, old_target_states in old_state.transitions.items():
if event and old_target_states:
transitions.add_set(event, set_epsilon_closure(old_target_states))
for event, old_states in transitions.items():
new_machine.add_transitions(new_state, event, state_map.old_to_new(old_states))
if debug:
debug.write("\n===== State Mapping =====\n")
state_map.dump(debug)
return new_machine
def set_epsilon_closure(state_set):
"""
Given a set of states, return the union of the epsilon
closures of its member states.
"""
result = {}
for state1 in state_set:
for state2 in epsilon_closure(state1):
result[state2] = 1
return result
def epsilon_closure(state):
"""
Return the set of states reachable from the given state
by epsilon moves.
"""
# Cache the result
result = state.epsilon_closure
if result is None:
result = {}
state.epsilon_closure = result
add_to_epsilon_closure(result, state)
return result
def add_to_epsilon_closure(state_set, state):
"""
Recursively add to |state_set| states reachable from the given state
by epsilon moves.
"""
if not state_set.get(state, 0):
state_set[state] = 1
state_set_2 = state.transitions.get_epsilon()
if state_set_2:
for state2 in state_set_2:
add_to_epsilon_closure(state_set, state2)
class StateMap(object):
"""
Helper class used by nfa_to_dfa() to map back and forth between
sets of states from the old machine and states of the new machine.
"""
new_machine = None # Machine
old_to_new_dict = None # {(old_state,...) : new_state}
new_to_old_dict = None # {id(new_state) : old_state_set}
def __init__(self, new_machine):
self.new_machine = new_machine
self.old_to_new_dict = {}
self.new_to_old_dict = {}
def old_to_new(self, old_state_set):
"""
Return the state of the new machine corresponding to the
set of old machine states represented by |old_state_set|. A new
state will be created if necessary. If any of the old states
are accepting states, the new state will be an accepting state
with the highest priority action from the old states.
"""
key = self.make_key(old_state_set)
new_state = self.old_to_new_dict.get(key, None)
if not new_state:
action = self.highest_priority_action(old_state_set)
new_state = self.new_machine.new_state(action)
self.old_to_new_dict[key] = new_state
self.new_to_old_dict[id(new_state)] = old_state_set
#for old_state in old_state_set.keys():
#new_state.merge_actions(old_state)
return new_state
def highest_priority_action(self, state_set):
best_action = None
best_priority = LOWEST_PRIORITY
for state in state_set:
priority = state.action_priority
if priority > best_priority:
best_action = state.action
best_priority = priority
return best_action
# def old_to_new_set(self, old_state_set):
# """
# Return the new state corresponding to a set of old states as
# a singleton set.
# """
# return {self.old_to_new(old_state_set):1}
def METHOD_NAME(self, new_state):
"""Given a new state, return a set of corresponding old states."""
return self.new_to_old_dict[id(new_state)]
def make_key(self, state_set):
"""
Convert a set of states into a uniquified
sorted tuple suitable for use as a dictionary key.
"""
lst = list(state_set)
lst.sort()
return tuple(lst)
def dump(self, file):
from .Transitions import state_set_str
for new_state in self.new_machine.states:
old_state_set = self.new_to_old_dict[id(new_state)]
file.write(" State %s <-- %s\n" % (
new_state['number'], state_set_str(old_state_set)))
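# Self-contained illustration (not part of the original module) of the same
# subset construction on plain dictionaries, independent of the Machines /
# TransitionMap classes above. The tiny NFA in the demo function is an
# assumption made up for this sketch.
def _toy_epsilon_closure(eps, states):
    # eps: {state: set(states reachable by a single epsilon move)}
    closure, stack = set(states), list(states)
    while stack:
        for t in eps.get(stack.pop(), ()):
            if t not in closure:
                closure.add(t)
                stack.append(t)
    return frozenset(closure)

def _toy_nfa_to_dfa(nfa, eps, start):
    # nfa: {state: {symbol: set(target states)}}
    start_set = _toy_epsilon_closure(eps, {start})
    dfa, todo = {}, [start_set]
    while todo:
        current = todo.pop()
        if current in dfa:
            continue
        dfa[current] = {}
        for sym in {s for st in current for s in nfa.get(st, {})}:
            targets = set()
            for st in current:
                targets |= nfa.get(st, {}).get(sym, set())
            nxt = _toy_epsilon_closure(eps, targets)
            dfa[current][sym] = nxt
            todo.append(nxt)
    return dfa, start_set

def _toy_subset_construction_demo():
    # NFA recognising "ab" with an epsilon move between the two halves.
    nfa = {0: {'a': {1}}, 2: {'b': {3}}}
    eps = {1: {2}}
    return _toy_nfa_to_dfa(nfa, eps, 0)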
|
3,921 |
set on press
|
import math
from travertino.size import at_least
from ..libs import activity
from ..libs.android.graphics import (
DashPathEffect,
Matrix,
Paint,
Paint__Style,
Path,
Path__Direction,
)
from .base import Widget
class DrawHandler(activity.IDrawHandler):
def __init__(self, interface):
self.interface = interface
super().__init__()
def handleDraw(self, canvas):
canvas.save()
self.interface._draw(self.interface._impl, path=Path(), canvas=canvas)
class Canvas(Widget):
def create(self):
# Our native widget is a DrawHandlerView, which delegates drawing to DrawHandler,
# so we can pass the `android.graphics.Canvas` around as `canvas`.
self.native = activity.DrawHandlerView(
self._native_activity.getApplicationContext()
)
self.native.setDrawHandler(DrawHandler(self.interface))
def set_hidden(self, hidden):
self.interface.factory.not_implemented("Canvas.set_hidden()")
def redraw(self):
pass
def METHOD_NAME(self, handler):
self.interface.factory.not_implemented("Canvas.set_on_press()")
def set_on_release(self, handler):
self.interface.factory.not_implemented("Canvas.set_on_release()")
def set_on_drag(self, handler):
self.interface.factory.not_implemented("Canvas.set_on_drag()")
def set_on_alt_press(self, handler):
self.interface.factory.not_implemented("Canvas.set_on_alt_press()")
def set_on_alt_release(self, handler):
self.interface.factory.not_implemented("Canvas.set_on_alt_release()")
def set_on_alt_drag(self, handler):
self.interface.factory.not_implemented("Canvas.set_on_alt_drag()")
# Basic paths
def new_path(self, *args, **kwargs):
self.interface.factory.not_implemented("Canvas.new_path()")
def closed_path(self, x, y, path, *args, **kwargs):
path.close()
def move_to(self, x, y, path, *args, **kwargs):
path.moveTo(self.container.scale * x, self.container.scale * y)
def line_to(self, x, y, path, *args, **kwargs):
path.lineTo(self.container.scale * x, self.container.scale * y)
# Basic shapes
def bezier_curve_to(self, cp1x, cp1y, cp2x, cp2y, x, y, path, *args, **kwargs):
path.cubicTo(
cp1x * self.container.scale,
cp1y * self.container.scale,
cp2x * self.container.scale,
cp2y * self.container.scale,
x * self.container.scale,
y * self.container.scale,
)
def quadratic_curve_to(self, cpx, cpy, x, y, path, *args, **kwargs):
path.quadTo(
cpx * self.container.scale,
cpy * self.container.scale,
x * self.container.scale,
y * self.container.scale,
)
def arc(
self, x, y, radius, startangle, endangle, anticlockwise, path, *args, **kwargs
):
sweep_angle = endangle - startangle
if anticlockwise:
sweep_angle -= math.radians(360)
path.arcTo(
self.container.scale * (x - radius),
self.container.scale * (y - radius),
self.container.scale * (x + radius),
self.container.scale * (y + radius),
math.degrees(startangle),
math.degrees(sweep_angle),
False,
)
def ellipse(
self,
x,
y,
radiusx,
radiusy,
rotation,
startangle,
endangle,
anticlockwise,
path,
*args,
**kwargs
):
sweep_angle = endangle - startangle
if anticlockwise:
sweep_angle -= math.radians(360)
ellipse_path = Path()
ellipse_path.addArc(
self.container.scale * (x - radiusx),
self.container.scale * (y - radiusy),
self.container.scale * (x + radiusx),
self.container.scale * (y + radiusy),
math.degrees(startangle),
math.degrees(sweep_angle),
)
rotation_matrix = Matrix()
rotation_matrix.postRotate(
math.degrees(rotation),
self.container.scale * x,
self.container.scale * y,
)
ellipse_path.transform(rotation_matrix)
path.addPath(ellipse_path)
def rect(self, x, y, width, height, path, *args, **kwargs):
path.addRect(
self.container.scale * x,
self.container.scale * y,
self.container.scale * (x + width),
self.container.scale * (y + height),
Path__Direction.CW,
)
# Drawing Paths
def fill(self, color, fill_rule, preserve, path, canvas, *args, **kwargs):
draw_paint = Paint()
draw_paint.setAntiAlias(True)
draw_paint.setStyle(Paint__Style.FILL)
if color is None:
a, r, g, b = 255, 0, 0, 0
else:
a, r, g, b = round(color.a * 255), int(color.r), int(color.g), int(color.b)
draw_paint.setARGB(a, r, g, b)
canvas.drawPath(path, draw_paint)
path.reset()
def stroke(self, color, line_width, line_dash, path, canvas, *args, **kwargs):
draw_paint = Paint()
draw_paint.setAntiAlias(True)
draw_paint.setStrokeWidth(self.container.scale * line_width)
draw_paint.setStyle(Paint__Style.STROKE)
if color is None:
a, r, g, b = 255, 0, 0, 0
else:
a, r, g, b = round(color.a * 255), int(color.r), int(color.g), int(color.b)
if line_dash is not None:
draw_paint.setPathEffect(
DashPathEffect(
[(self.container.scale * float(d)) for d in line_dash], 0.0
)
)
draw_paint.setARGB(a, r, g, b)
canvas.drawPath(path, draw_paint)
path.reset()
# Transformations
def rotate(self, radians, canvas, *args, **kwargs):
canvas.rotate(math.degrees(radians))
def scale(self, sx, sy, canvas, *args, **kwargs):
canvas.scale(float(sx), float(sy))
def translate(self, tx, ty, canvas, *args, **kwargs):
canvas.translate(self.container.scale * tx, self.container.scale * ty)
def reset_transform(self, canvas, *args, **kwargs):
canvas.restore()
canvas.save()
# Text
def measure_text(self, text, font, tight=False):
self.interface.factory.not_implemented("Canvas.measure_text")
def write_text(self, text, x, y, font, *args, **kwargs):
self.interface.factory.not_implemented("Canvas.write_text")
def get_image_data(self):
self.interface.factory.not_implemented("Canvas.get_image_data()")
# Rehint
def set_on_resize(self, handler):
self.interface.factory.not_implemented("Canvas.on_resize")
def rehint(self):
self.interface.intrinsic.width = at_least(0)
self.interface.intrinsic.height = at_least(0)
|
3,922 |
get port
|
from optparse import OptionParser
import Initializer
from InputHandlerThread import InputHandlerThread
Initializer.init_path()
from gnuradio import blocks
from gnuradio import gr
import osmosdr
class top_block(gr.top_block):
def __init__(self, sample_rate, frequency, freq_correction, rf_gain, if_gain, bb_gain, bandwidth, port):
gr.top_block.__init__(self, "Top Block")
self.sample_rate = sample_rate
self.rf_gain = rf_gain
self.port = port
self.if_gain = if_gain
self.frequency = frequency
self.freq_correction = freq_correction
self.bb_gain = bb_gain
self.bandwidth = bandwidth
self.osmosdr_source_0 = osmosdr.source(
args="numchan=" + str(1) + " " + 'uhd'
)
self.osmosdr_source_0.set_time_unknown_pps(osmosdr.time_spec_t())
self.osmosdr_source_0.set_sample_rate(sample_rate)
self.osmosdr_source_0.set_center_freq(frequency, 0)
self.osmosdr_source_0.set_freq_corr(freq_correction, 0)
self.osmosdr_source_0.set_gain(rf_gain, 0)
self.osmosdr_source_0.set_if_gain(if_gain, 0)
self.osmosdr_source_0.set_bb_gain(bb_gain, 0)
self.osmosdr_source_0.set_antenna('', 0)
self.osmosdr_source_0.set_bandwidth(bandwidth, 0)
self.blocks_tcp_server_sink_0 = blocks.tcp_server_sink(gr.sizeof_gr_complex*1, '127.0.0.1', port, False)
self.connect((self.osmosdr_source_0, 0), (self.blocks_tcp_server_sink_0, 0))
def get_sample_rate(self):
return self.sample_rate
def set_sample_rate(self, sample_rate):
self.sample_rate = sample_rate
self.osmosdr_source_0.set_sample_rate(self.sample_rate)
def get_rf_gain(self):
return self.rf_gain
def set_rf_gain(self, rf_gain):
self.rf_gain = rf_gain
self.osmosdr_source_0.set_gain(self.rf_gain, 0)
def METHOD_NAME(self):
return self.port
def set_port(self, port):
self.port = port
def get_if_gain(self):
return self.if_gain
def set_if_gain(self, if_gain):
self.if_gain = if_gain
self.osmosdr_source_0.set_if_gain(self.if_gain, 0)
def get_frequency(self):
return self.frequency
def set_frequency(self, frequency):
self.frequency = frequency
self.osmosdr_source_0.set_center_freq(self.frequency, 0)
def get_freq_correction(self):
return self.freq_correction
def set_freq_correction(self, freq_correction):
self.freq_correction = freq_correction
self.osmosdr_source_0.set_freq_corr(self.freq_correction, 0)
def get_direct_sampling_mode(self):
return self.direct_sampling_mode
def set_direct_sampling_mode(self, direct_sampling_mode):
self.direct_sampling_mode = direct_sampling_mode
def get_channel_index(self):
return self.channel_index
def set_channel_index(self, channel_index):
self.channel_index = channel_index
def get_bb_gain(self):
return self.bb_gain
def set_bb_gain(self, bb_gain):
self.bb_gain = bb_gain
self.osmosdr_source_0.set_bb_gain(self.bb_gain, 0)
def get_bandwidth(self):
return self.bandwidth
def set_bandwidth(self, bandwidth):
self.bandwidth = bandwidth
self.osmosdr_source_0.set_bandwidth(self.bandwidth, 0)
def get_antenna_index(self):
return self.antenna_index
def set_antenna_index(self, antenna_index):
self.antenna_index = antenna_index
if __name__ == '__main__':
parser = OptionParser(usage='%prog: [options]')
parser.add_option('-s', '--sample-rate', dest='sample_rate', default=100000)
parser.add_option('-f', '--frequency', dest='frequency', default=433000)
parser.add_option('-g', '--gain', dest='rf_gain', default=30)
parser.add_option('-i', '--if-gain', dest='if_gain', default=30)
parser.add_option('-b', '--bb-gain', dest='bb_gain', default=30)
parser.add_option('-w', '--bandwidth', dest='bandwidth', default=250000)
parser.add_option('-c', '--freq-correction', dest='freq_correction', default=0)
parser.add_option('-d', '--direct-sampling', dest='direct_sampling', default=0)
parser.add_option('-n', '--channel-index', dest='channel_index', default=0)
parser.add_option('-a', '--antenna-index', dest='antenna_index', default=0)
parser.add_option('-p', '--port', dest='port', default=1234)
(options, args) = parser.parse_args()
tb = top_block(int(options.sample_rate), int(options.frequency), int(options.freq_correction), int(options.rf_gain), int(options.if_gain), int(options.bb_gain), int(options.bandwidth), int(options.port))
iht = InputHandlerThread(tb)
iht.start()
tb.start()
tb.wait()
|
3,923 |
test node frame
|
import backend as F
import dgl
import numpy as np
from utils import parametrize_idtype
def create_graph(idtype, num_node):
g = dgl.graph([])
g = g.astype(idtype).to(F.ctx())
g.add_nodes(num_node)
return g
@parametrize_idtype
def test_node_removal(idtype):
g = create_graph(idtype, 10)
g.add_edges(0, 0)
assert g.num_nodes() == 10
g.ndata["id"] = F.arange(0, 10)
# remove nodes
g.remove_nodes(range(4, 7))
assert g.num_nodes() == 7
assert F.array_equal(g.ndata["id"], F.tensor([0, 1, 2, 3, 7, 8, 9]))
assert dgl.NID not in g.ndata
assert dgl.EID not in g.edata
# add nodes
g.add_nodes(3)
assert g.num_nodes() == 10
assert F.array_equal(
g.ndata["id"], F.tensor([0, 1, 2, 3, 7, 8, 9, 0, 0, 0])
)
# remove nodes
g.remove_nodes(range(1, 4), store_ids=True)
assert g.num_nodes() == 7
assert F.array_equal(g.ndata["id"], F.tensor([0, 7, 8, 9, 0, 0, 0]))
assert dgl.NID in g.ndata
assert dgl.EID in g.edata
@parametrize_idtype
def test_multigraph_node_removal(idtype):
g = create_graph(idtype, 5)
for i in range(5):
g.add_edges(i, i)
g.add_edges(i, i)
assert g.num_nodes() == 5
assert g.num_edges() == 10
# remove nodes
g.remove_nodes([2, 3])
assert g.num_nodes() == 3
assert g.num_edges() == 6
# add nodes
g.add_nodes(1)
g.add_edges(1, 1)
g.add_edges(1, 1)
assert g.num_nodes() == 4
assert g.num_edges() == 8
# remove nodes
g.remove_nodes([0])
assert g.num_nodes() == 3
assert g.num_edges() == 6
@parametrize_idtype
def test_multigraph_edge_removal(idtype):
g = create_graph(idtype, 5)
for i in range(5):
g.add_edges(i, i)
g.add_edges(i, i)
assert g.num_nodes() == 5
assert g.num_edges() == 10
# remove edges
g.remove_edges([2, 3])
assert g.num_nodes() == 5
assert g.num_edges() == 8
# add edges
g.add_edges(1, 1)
g.add_edges(1, 1)
assert g.num_nodes() == 5
assert g.num_edges() == 10
# remove edges
g.remove_edges([0, 1])
assert g.num_nodes() == 5
assert g.num_edges() == 8
@parametrize_idtype
def test_edge_removal(idtype):
g = create_graph(idtype, 5)
for i in range(5):
for j in range(5):
g.add_edges(i, j)
g.edata["id"] = F.arange(0, 25)
# remove edges
g.remove_edges(range(13, 20))
assert g.num_nodes() == 5
assert g.num_edges() == 18
assert F.array_equal(
g.edata["id"], F.tensor(list(range(13)) + list(range(20, 25)))
)
assert dgl.NID not in g.ndata
assert dgl.EID not in g.edata
# add edges
g.add_edges(3, 3)
assert g.num_nodes() == 5
assert g.num_edges() == 19
assert F.array_equal(
g.edata["id"], F.tensor(list(range(13)) + list(range(20, 25)) + [0])
)
# remove edges
g.remove_edges(range(2, 10), store_ids=True)
assert g.num_nodes() == 5
assert g.num_edges() == 11
assert F.array_equal(
g.edata["id"], F.tensor([0, 1, 10, 11, 12, 20, 21, 22, 23, 24, 0])
)
assert dgl.EID in g.edata
@parametrize_idtype
def test_node_and_edge_removal(idtype):
g = create_graph(idtype, 10)
for i in range(10):
for j in range(10):
g.add_edges(i, j)
g.edata["id"] = F.arange(0, 100)
assert g.num_nodes() == 10
assert g.num_edges() == 100
# remove nodes
g.remove_nodes([2, 4])
assert g.num_nodes() == 8
assert g.num_edges() == 64
# remove edges
g.remove_edges(range(10, 20))
assert g.num_nodes() == 8
assert g.num_edges() == 54
# add nodes
g.add_nodes(2)
assert g.num_nodes() == 10
assert g.num_edges() == 54
# add edges
for i in range(8, 10):
for j in range(8, 10):
g.add_edges(i, j)
assert g.num_nodes() == 10
assert g.num_edges() == 58
# remove edges
g.remove_edges(range(10, 20))
assert g.num_nodes() == 10
assert g.num_edges() == 48
@parametrize_idtype
def METHOD_NAME(idtype):
g = create_graph(idtype, 10)
data = np.random.rand(10, 3)
new_data = data.take([0, 1, 2, 7, 8, 9], axis=0)
g.ndata["h"] = F.tensor(data)
# remove nodes
g.remove_nodes(range(3, 7))
assert F.allclose(g.ndata["h"], F.tensor(new_data))
@parametrize_idtype
def test_edge_frame(idtype):
g = create_graph(idtype, 10)
g.add_edges(list(range(10)), list(range(1, 10)) + [0])
data = np.random.rand(10, 3)
new_data = data.take([0, 1, 2, 7, 8, 9], axis=0)
g.edata["h"] = F.tensor(data)
# remove edges
g.remove_edges(range(3, 7))
assert F.allclose(g.edata["h"], F.tensor(new_data))
@parametrize_idtype
def test_issue1287(idtype):
# reproduce https://github.com/dmlc/dgl/issues/1287.
# setting features after removing nodes
g = create_graph(idtype, 5)
g.add_edges([0, 2, 3, 1, 1], [1, 0, 3, 1, 0])
g.remove_nodes([0, 1])
g.ndata["h"] = F.randn((g.num_nodes(), 3))
g.edata["h"] = F.randn((g.num_edges(), 2))
# remove edges
g = create_graph(idtype, 5)
g.add_edges([0, 2, 3, 1, 1], [1, 0, 3, 1, 0])
g.remove_edges([0, 1])
g = g.to(F.ctx())
g.ndata["h"] = F.randn((g.num_nodes(), 3))
g.edata["h"] = F.randn((g.num_edges(), 2))
if __name__ == "__main__":
test_node_removal()
test_edge_removal()
test_multigraph_node_removal()
test_multigraph_edge_removal()
test_node_and_edge_removal()
METHOD_NAME()
test_edge_frame()
test_frame_size()
|
3,924 |
test x edges lat
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from astropy import units as u
from astropy.coordinates import Angle, SkyCoord
from astropy.table import Table
from gammapy.estimators import ImageProfile, ImageProfileEstimator
from gammapy.maps import WcsGeom, WcsNDMap
from gammapy.utils.testing import assert_quantity_allclose, mpl_plot_check
@pytest.fixture(scope="session")
def checkerboard_image():
nxpix, nypix = 10, 6
# set up data as a checkerboard of 0.5 and 1.5, so that the mean and sum
# are not completely trivial to compute
data = 1.5 * np.ones((nypix, nxpix))
data[slice(0, nypix + 1, 2), slice(0, nxpix + 1, 2)] = 0.5
data[slice(1, nypix + 1, 2), slice(1, nxpix + 1, 2)] = 0.5
geom = WcsGeom.create(npix=(nxpix, nypix), frame="galactic", binsz=0.02)
return WcsNDMap(geom=geom, data=data, unit="cm-2 s-1")
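# Worked numbers for the checkerboard: each row of 10 pixels alternates 0.5 and 1.5,
# so a "lat" profile gives sum 5*0.5 + 5*1.5 = 10 and mean 1 per row; each column of
# 6 pixels gives sum 3*0.5 + 3*1.5 = 6 and mean 1, matching the expected values below.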
@pytest.fixture(scope="session")
def cosine_profile():
table = Table()
table["x_ref"] = np.linspace(-90, 90, 11) * u.deg
table["profile"] = np.cos(table["x_ref"].to("rad")) * u.Unit("cm-2 s-1")
table["profile_err"] = 0.1 * table["profile"]
return ImageProfile(table)
class TestImageProfileEstimator:
@staticmethod
def test_lat_profile_sum(checkerboard_image):
p = ImageProfileEstimator(axis="lat", method="sum")
profile = p.run(checkerboard_image)
desired = 10 * np.ones(6) * u.Unit("cm-2 s-1")
assert_quantity_allclose(profile.profile, desired)
@staticmethod
def test_lon_profile_sum(checkerboard_image):
p = ImageProfileEstimator(axis="lon", method="sum")
profile = p.run(checkerboard_image)
desired = 6 * np.ones(10) * u.Unit("cm-2 s-1")
assert_quantity_allclose(profile.profile, desired)
@staticmethod
def test_radial_profile_sum(checkerboard_image):
center = SkyCoord(0, 0, unit="deg", frame="galactic")
p = ImageProfileEstimator(axis="radial", method="sum", center=center)
profile = p.run(checkerboard_image)
desired = [4.0, 8.0, 20.0, 12.0, 12.0] * u.Unit("cm-2 s-1")
assert_quantity_allclose(profile.profile, desired)
with pytest.raises(ValueError):
ImageProfileEstimator(axis="radial")
@staticmethod
def test_lat_profile_mean(checkerboard_image):
p = ImageProfileEstimator(axis="lat", method="mean")
profile = p.run(checkerboard_image)
desired = np.ones(6) * u.Unit("cm-2 s-1")
assert_quantity_allclose(profile.profile, desired)
@staticmethod
def test_lon_profile_mean(checkerboard_image):
p = ImageProfileEstimator(axis="lon", method="mean")
profile = p.run(checkerboard_image)
desired = np.ones(10) * u.Unit("cm-2 s-1")
assert_quantity_allclose(profile.profile, desired)
@staticmethod
def METHOD_NAME(checkerboard_image):
x_edges = Angle(np.linspace(-0.06, 0.06, 4), "deg")
p = ImageProfileEstimator(x_edges=x_edges, axis="lat", method="sum")
profile = p.run(checkerboard_image)
desired = 20 * np.ones(3) * u.Unit("cm-2 s-1")
assert_quantity_allclose(profile.profile, desired)
@staticmethod
def test_x_edges_lon(checkerboard_image):
x_edges = Angle(np.linspace(-0.1, 0.1, 6), "deg")
p = ImageProfileEstimator(x_edges=x_edges, axis="lon", method="sum")
profile = p.run(checkerboard_image)
desired = 12 * np.ones(5) * u.Unit("cm-2 s-1")
assert_quantity_allclose(profile.profile, desired)
class TestImageProfile:
@staticmethod
def test_normalize(cosine_profile):
normalized = cosine_profile.normalize(mode="integral")
profile = normalized.profile
assert_quantity_allclose(profile.sum(), 1 * u.Unit("cm-2 s-1"))
normalized = cosine_profile.normalize(mode="peak")
profile = normalized.profile
assert_quantity_allclose(profile.max(), 1 * u.Unit("cm-2 s-1"))
@staticmethod
def test_profile_x_edges(cosine_profile):
assert_quantity_allclose(cosine_profile.x_ref.sum(), 0 * u.deg)
@staticmethod
@pytest.mark.parametrize("kernel", ["gauss", "box"])
def test_smooth(cosine_profile, kernel):
# smoothing should preserve the mean
desired_mean = cosine_profile.profile.mean()
smoothed = cosine_profile.smooth(kernel, radius=3)
assert_quantity_allclose(smoothed.profile.mean(), desired_mean)
# smoothing should decrease errors
assert smoothed.profile_err.mean() < cosine_profile.profile_err.mean()
@staticmethod
def test_peek(cosine_profile):
with mpl_plot_check():
cosine_profile.peek()
|
3,925 |
set thread priority
|
from _typeshed import Incomplete
import _win32typing
from win32.lib.pywintypes import error as error
def STARTUPINFO() -> _win32typing.PySTARTUPINFO: ...
def beginthreadex(sa: _win32typing.PySECURITY_ATTRIBUTES, stackSize, entryPoint, args, flags) -> tuple[int, Incomplete]: ...
def CreateRemoteThread(
hprocess: int, sa: _win32typing.PySECURITY_ATTRIBUTES, stackSize, entryPoint, Parameter, flags
) -> tuple[int, Incomplete]: ...
def CreateProcess(
__appName: str | None,
__commandLine: str,
__processAttributes: _win32typing.PySECURITY_ATTRIBUTES | None,
__threadAttributes: _win32typing.PySECURITY_ATTRIBUTES | None,
__bInheritHandles: int | bool,
__dwCreationFlags: int,
__newEnvironment: dict[str, str] | None,
__currentDirectory: str | None,
__startupinfo: _win32typing.PySTARTUPINFO,
) -> tuple[int, int, Incomplete, Incomplete]: ...
def CreateProcessAsUser(
hToken: int,
appName: str,
commandLine: str,
processAttributes: _win32typing.PySECURITY_ATTRIBUTES,
threadAttributes: _win32typing.PySECURITY_ATTRIBUTES,
bInheritHandles,
dwCreationFlags,
newEnvironment,
currentDirectory: str,
startupinfo: _win32typing.PySTARTUPINFO,
) -> tuple[int, int, Incomplete, Incomplete]: ...
def GetCurrentProcess() -> int: ...
def GetProcessVersion(processId): ...
def GetCurrentProcessId(): ...
def GetStartupInfo() -> _win32typing.PySTARTUPINFO: ...
def GetPriorityClass(handle: int): ...
def GetExitCodeThread(handle: int): ...
def GetExitCodeProcess(__handle: int) -> int: ...
def GetWindowThreadProcessId(__hwnd: int | None) -> tuple[int, int]: ...
def METHOD_NAME(handle: int, nPriority) -> None: ...
def GetThreadPriority(handle: int): ...
def GetProcessPriorityBoost(Process: int): ...
def SetProcessPriorityBoost(Process: int, DisablePriorityBoost) -> None: ...
def GetThreadPriorityBoost(Thread: int): ...
def SetThreadPriorityBoost(Thread: int, DisablePriorityBoost) -> None: ...
def GetThreadIOPendingFlag(Thread: int): ...
def GetThreadTimes(Thread: int): ...
def GetProcessId(Process: int): ...
def SetPriorityClass(__handle: int, __dwPriorityClass: int) -> None: ...
def AttachThreadInput(idAttach, idAttachTo, Attach) -> None: ...
def SetThreadIdealProcessor(handle: int, dwIdealProcessor): ...
def GetProcessAffinityMask(hProcess: int) -> tuple[Incomplete, Incomplete]: ...
def SetProcessAffinityMask(hProcess: int, mask) -> None: ...
def SetThreadAffinityMask(hThread: int, ThreadAffinityMask): ...
def SuspendThread(handle: int): ...
def ResumeThread(handle: int): ...
def TerminateProcess(__handle: int, __exitCode: int) -> None: ...
def ExitProcess(exitCode) -> None: ...
def EnumProcesses() -> tuple[Incomplete, Incomplete]: ...
def EnumProcessModules(hProcess: int) -> tuple[Incomplete, Incomplete]: ...
def EnumProcessModulesEx(hProcess: int, FilterFlag) -> tuple[Incomplete, Incomplete]: ...
def GetModuleFileNameEx(hProcess: int, hModule: int): ...
def GetProcessMemoryInfo(hProcess: int): ...
def GetProcessTimes(hProcess: int): ...
def GetProcessIoCounters(hProcess: int): ...
def GetProcessWindowStation() -> None: ...
def GetProcessWorkingSetSize(hProcess: int) -> tuple[Incomplete, Incomplete]: ...
def SetProcessWorkingSetSize(hProcess: int, MinimumWorkingSetSize, MaximumWorkingSetSize) -> None: ...
def GetProcessShutdownParameters() -> tuple[Incomplete, Incomplete]: ...
def SetProcessShutdownParameters(Level, Flags) -> None: ...
def GetGuiResources(Process: int, Flags): ...
def IsWow64Process(__Process: int | None = ...) -> bool: ...
def ReadProcessMemory(*args, **kwargs): ... # incomplete
def VirtualAllocEx(*args, **kwargs): ... # incomplete
def VirtualFreeEx(*args, **kwargs): ... # incomplete
def WriteProcessMemory(*args, **kwargs): ... # incomplete
ABOVE_NORMAL_PRIORITY_CLASS: int
BELOW_NORMAL_PRIORITY_CLASS: int
CREATE_BREAKAWAY_FROM_JOB: int
CREATE_DEFAULT_ERROR_MODE: int
CREATE_NEW_CONSOLE: int
CREATE_NEW_PROCESS_GROUP: int
CREATE_NO_WINDOW: int
CREATE_PRESERVE_CODE_AUTHZ_LEVEL: int
CREATE_SEPARATE_WOW_VDM: int
CREATE_SHARED_WOW_VDM: int
CREATE_SUSPENDED: int
CREATE_UNICODE_ENVIRONMENT: int
DEBUG_ONLY_THIS_PROCESS: int
DEBUG_PROCESS: int
DETACHED_PROCESS: int
HIGH_PRIORITY_CLASS: int
IDLE_PRIORITY_CLASS: int
MAXIMUM_PROCESSORS: int
NORMAL_PRIORITY_CLASS: int
REALTIME_PRIORITY_CLASS: int
STARTF_FORCEOFFFEEDBACK: int
STARTF_FORCEONFEEDBACK: int
STARTF_RUNFULLSCREEN: int
STARTF_USECOUNTCHARS: int
STARTF_USEFILLATTRIBUTE: int
STARTF_USEPOSITION: int
STARTF_USESHOWWINDOW: int
STARTF_USESIZE: int
STARTF_USESTDHANDLES: int
THREAD_MODE_BACKGROUND_BEGIN: int
THREAD_MODE_BACKGROUND_END: int
THREAD_PRIORITY_ABOVE_NORMAL: int
THREAD_PRIORITY_BELOW_NORMAL: int
THREAD_PRIORITY_HIGHEST: int
THREAD_PRIORITY_IDLE: int
THREAD_PRIORITY_LOWEST: int
THREAD_PRIORITY_NORMAL: int
THREAD_PRIORITY_TIME_CRITICAL: int
LIST_MODULES_32BIT: int
LIST_MODULES_64BIT: int
LIST_MODULES_ALL: int
LIST_MODULES_DEFAULT: int
UNICODE: int
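# Illustrative runtime use of the thread-priority APIs stubbed above (a sketch only;
# assumes pywin32 is installed and runs on Windows):
#   import win32api, win32process
#   thread = win32api.GetCurrentThread()
#   win32process.METHOD_NAME(thread, win32process.THREAD_PRIORITY_ABOVE_NORMAL)
#   win32process.GetThreadPriority(thread)  # -> THREAD_PRIORITY_ABOVE_NORMAL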
|
3,926 |
get relative uri
|
# -*- coding: utf-8 -*-
"""
sphinx.builders.texinfo
~~~~~~~~~~~~~~~~~~~~~~~
Texinfo builder.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from os import path
from six import iteritems
from docutils import nodes
from docutils.io import FileOutput
from docutils.utils import new_document
from docutils.frontend import OptionParser
from sphinx import addnodes
from sphinx.locale import _
from sphinx.builders import Builder
from sphinx.environment import NoUri
from sphinx.util.nodes import inline_all_toctrees
from sphinx.util.osutil import SEP, copyfile
from sphinx.util.console import bold, darkgreen
from sphinx.writers.texinfo import TexinfoWriter
TEXINFO_MAKEFILE = '''\
# Makefile for Sphinx Texinfo output
infodir ?= /usr/share/info
MAKEINFO = makeinfo --no-split
MAKEINFO_html = makeinfo --no-split --html
MAKEINFO_plaintext = makeinfo --no-split --plaintext
TEXI2PDF = texi2pdf --batch --expand
INSTALL_INFO = install-info
ALLDOCS = $(basename $(wildcard *.texi))
all: info
info: $(addsuffix .info,$(ALLDOCS))
plaintext: $(addsuffix .txt,$(ALLDOCS))
html: $(addsuffix .html,$(ALLDOCS))
pdf: $(addsuffix .pdf,$(ALLDOCS))
install-info: info
\tfor f in *.info; do \\
\t cp -t $(infodir) "$$f" && \\
\t $(INSTALL_INFO) --info-dir=$(infodir) "$$f" ; \\
\tdone
uninstall-info: info
\tfor f in *.info; do \\
\t rm -f "$(infodir)/$$f" ; \\
\t $(INSTALL_INFO) --delete --info-dir=$(infodir) "$$f" ; \\
\tdone
%.info: %.texi
\t$(MAKEINFO) -o '$@' '$<'
%.txt: %.texi
\t$(MAKEINFO_plaintext) -o '$@' '$<'
%.html: %.texi
\t$(MAKEINFO_html) -o '$@' '$<'
%.pdf: %.texi
\t-$(TEXI2PDF) '$<'
\t-$(TEXI2PDF) '$<'
\t-$(TEXI2PDF) '$<'
clean:
\trm -f *.info *.pdf *.txt *.html
\trm -f *.log *.ind *.aux *.toc *.syn *.idx *.out *.ilg *.pla *.ky *.pg
\trm -f *.vr *.tp *.fn *.fns *.def *.defs *.cp *.cps *.ge *.ges *.mo
.PHONY: all info plaintext html pdf install-info uninstall-info clean
'''
class TexinfoBuilder(Builder):
"""
Builds Texinfo output to create Info documentation.
"""
name = 'texinfo'
format = 'texinfo'
supported_image_types = ['image/png', 'image/jpeg',
'image/gif']
def init(self):
self.docnames = []
self.document_data = []
def get_outdated_docs(self):
return 'all documents' # for now
def get_target_uri(self, docname, typ=None):
if docname not in self.docnames:
raise NoUri
else:
return '%' + docname
def METHOD_NAME(self, from_, to, typ=None):
# ignore source path
return self.get_target_uri(to, typ)
def init_document_data(self):
preliminary_document_data = [list(x) for x in self.config.texinfo_documents]
if not preliminary_document_data:
self.warn('no "texinfo_documents" config value found; no documents '
'will be written')
return
# assign subdirs to titles
self.titles = []
for entry in preliminary_document_data:
docname = entry[0]
if docname not in self.env.all_docs:
self.warn('"texinfo_documents" config value references unknown '
'document %s' % docname)
continue
self.document_data.append(entry)
if docname.endswith(SEP+'index'):
docname = docname[:-5]
self.titles.append((docname, entry[2]))
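# Illustrative conf.py value consumed above (hypothetical project names); each entry is
# (startdocname, targetname, title, author, dir_entry, description, category, toctree_only):
#   texinfo_documents = [
#       ('index', 'myproject', u'My Project Documentation', u'Jane Doe',
#        'myproject', 'One-line description of the project.', 'Miscellaneous', False),
#   ]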
def write(self, *ignored):
self.init_document_data()
for entry in self.document_data:
docname, targetname, title, author = entry[:4]
targetname += '.texi'
direntry = description = category = ''
if len(entry) > 6:
direntry, description, category = entry[4:7]
toctree_only = False
if len(entry) > 7:
toctree_only = entry[7]
destination = FileOutput(
destination_path=path.join(self.outdir, targetname),
encoding='utf-8')
self.info("processing " + targetname + "... ", nonl=1)
doctree = self.assemble_doctree(
docname, toctree_only,
appendices=(self.config.texinfo_appendices or []))
self.info("writing... ", nonl=1)
self.post_process_images(doctree)
docwriter = TexinfoWriter(self)
settings = OptionParser(
defaults=self.env.settings,
components=(docwriter,),
read_config_files=True).get_default_values()
settings.author = author
settings.title = title
settings.texinfo_filename = targetname[:-5] + '.info'
settings.texinfo_elements = self.config.texinfo_elements
settings.texinfo_dir_entry = direntry or ''
settings.texinfo_dir_category = category or ''
settings.texinfo_dir_description = description or ''
settings.docname = docname
doctree.settings = settings
docwriter.write(doctree, destination)
self.info("done")
def assemble_doctree(self, indexfile, toctree_only, appendices):
self.docnames = set([indexfile] + appendices)
self.info(darkgreen(indexfile) + " ", nonl=1)
tree = self.env.get_doctree(indexfile)
tree['docname'] = indexfile
if toctree_only:
# extract toctree nodes from the tree and put them in a
# fresh document
new_tree = new_document('<texinfo output>')
new_sect = nodes.section()
new_sect += nodes.title(u'<Set title in conf.py>',
u'<Set title in conf.py>')
new_tree += new_sect
for node in tree.traverse(addnodes.toctree):
new_sect += node
tree = new_tree
largetree = inline_all_toctrees(self, self.docnames, indexfile, tree,
darkgreen, [indexfile])
largetree['docname'] = indexfile
for docname in appendices:
appendix = self.env.get_doctree(docname)
appendix['docname'] = docname
largetree.append(appendix)
self.info()
self.info("resolving references...")
self.env.resolve_references(largetree, indexfile, self)
# TODO: add support for external :ref:s
for pendingnode in largetree.traverse(addnodes.pending_xref):
docname = pendingnode['refdocname']
sectname = pendingnode['refsectname']
newnodes = [nodes.emphasis(sectname, sectname)]
for subdir, title in self.titles:
if docname.startswith(subdir):
newnodes.append(nodes.Text(_(' (in '), _(' (in ')))
newnodes.append(nodes.emphasis(title, title))
newnodes.append(nodes.Text(')', ')'))
break
else:
pass
pendingnode.replace_self(newnodes)
return largetree
def finish(self):
# copy image files
if self.images:
self.info(bold('copying images...'), nonl=1)
for src, dest in iteritems(self.images):
self.info(' '+src, nonl=1)
copyfile(path.join(self.srcdir, src),
path.join(self.outdir, dest))
self.info()
self.info(bold('copying Texinfo support files... '), nonl=True)
# copy Makefile
fn = path.join(self.outdir, 'Makefile')
self.info(fn, nonl=1)
try:
mkfile = open(fn, 'w')
try:
mkfile.write(TEXINFO_MAKEFILE)
finally:
mkfile.close()
except (IOError, OSError) as err:
self.warn("error writing file %s: %s" % (fn, err))
self.info(' done')
|
3,927 |
test module has saved field
|
# Copyright The Caikit Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This test suite ensures that any model metadata (things inside a config.yml file) is persisted when
a module is loaded and then re-saved
"""
# Standard
from typing import Any, Dict, Set
import os
import tempfile
# Third Party
import pytest
# Local
from caikit.core import toolkit
# pylint: disable=import-error
from sample_lib.data_model import SampleTask
# pylint: disable=import-error
from sample_lib.modules.sample_task import SampleModule
# Unit Test Infrastructure
from tests.base import TestCaseBase
import caikit.core
# scratch:
# - might want to support like a `saved` date?
# - want a unique id
# - do we want to easily support editing metadata in the case that somebody:
# - loads a model
# - changes the python model object
# - re-saves the model?
# - What about the `version` field? It's unused now but if it does change should we allow it to
# re-save with a new version?
def _load_model_metadata(model_path: str) -> Dict[str, Any]:
return toolkit.load_yaml(os.path.join(model_path, "config.yml"))
def _check_dicts_equal(
this: Dict[str, Any], that: Dict[str, Any], fields_to_not_check: Set[str]
):
these_fields = set(this.keys()) - fields_to_not_check
those_fields = set(that.keys()) - fields_to_not_check
assert these_fields == those_fields
for field in these_fields:
assert this.get(field) == that.get(field)
@pytest.fixture
def fixtures_dir():
return TestCaseBase.fixtures_dir
@pytest.fixture
# pylint: disable=redefined-outer-name
def sample_model_path(fixtures_dir):
return os.path.join(fixtures_dir, "sample_module")
# pylint: disable=redefined-outer-name
def test_module_metadata_is_persisted(sample_model_path):
"""Make sure that if you load and then re-save any model, the metadata in the config.yml is
persisted."""
# Get the module metadata:
initial_metadata = _load_model_metadata(sample_model_path)
model = caikit.core.load(sample_model_path)
with tempfile.TemporaryDirectory() as tempdir:
model.save(tempdir)
resaved_metadata = _load_model_metadata(tempdir)
fields_to_not_check = {"saved", "sample_lib_version", "train"}
_check_dicts_equal(initial_metadata, resaved_metadata, fields_to_not_check)
# Fun little extra check, the `train` field _should_ be modified by the code
assert "optim_method" not in resaved_metadata["train"]
# pylint: disable=redefined-outer-name
def test_loaded_modules_have_metadata(sample_model_path):
"""Make sure a model has metadata after being loaded"""
expected_metadata = _load_model_metadata(sample_model_path)
model_loaded_with_core = caikit.core.load(sample_model_path)
model_directly_loaded = SampleModule.load(sample_model_path)
# TODO: figure out "module_id" and "model_path" as well...
_check_dicts_equal(
expected_metadata,
model_loaded_with_core.metadata,
{"module_id", "model_path"},
)
_check_dicts_equal(
expected_metadata,
model_directly_loaded.metadata,
{"module_id", "model_path"},
)
def METHOD_NAME():
"""Make sure that if you load a model and then save it multiple times,
the "saved" field should track each timestamp when you save, and should be different
"""
with tempfile.TemporaryDirectory() as tempdir:
model1 = SampleModule()
path1 = os.path.join(tempdir, "test1")
model1.save(path1)
resaved_metadata1 = _load_model_metadata(path1)
model2 = caikit.core.load(path1)
path2 = os.path.join(tempdir, "test1")
model2.save(path2)
resaved_metadata2 = _load_model_metadata(path2)
assert "saved" in resaved_metadata1
assert "saved" in resaved_metadata2
assert resaved_metadata1["saved"] != resaved_metadata2["saved"]
def test_module_has_tracking_id_field():
with tempfile.TemporaryDirectory() as tempdir:
model1 = SampleModule()
path1 = os.path.join(tempdir, "test1")
model1.save(path1)
resaved_metadata1 = _load_model_metadata(path1)
model2 = caikit.core.load(path1)
path2 = os.path.join(tempdir, "test1")
model2.save(path2)
resaved_metadata2 = _load_model_metadata(path2)
assert "tracking_id" in resaved_metadata1
assert "tracking_id" in resaved_metadata2
assert resaved_metadata1["tracking_id"] == resaved_metadata2["tracking_id"]
# pylint: disable=redefined-outer-name
def test_load_can_be_called_directly_with_non_standard_kwargs(sample_model_path):
initial_metadata = _load_model_metadata(sample_model_path)
# note that
# - no positional arguments given
# - path is `model_path` not `module_path`
model = SampleModule.load(foo="bar", test_kw="arg", model_path=sample_model_path)
assert len(model.metadata) > 0
_check_dicts_equal(initial_metadata, model.metadata, {"module_id", "model_path"})
# Write a class that doesn't have a `xxx_path` arg for load
@caikit.core.module(
"00110203-0809-beef-baad-0a0b0c0d0e0f", "FunkyModule", "0.0.1", SampleTask
)
class _FunkyModel(SampleModule):
@classmethod
def load(cls, some_really_odd_param_name):
return super().load(some_really_odd_param_name)
# check this doesn't raise
# (it won't extract metadata though...)
_FunkyModel.load(some_really_odd_param_name=sample_model_path)
# pylint: disable=redefined-outer-name
def test_parent_class_loads_work(sample_model_path):
"""This test ensures that our metadata injector works on modules that inherit from other
classes"""
model = SampleModule.load(sample_model_path)
assert isinstance(model, SampleModule)
|
3,928 |
get function
|
import re
import numpy as np
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import IntegerNullable, NaturalLanguage
from featuretools.primitives.base import TransformPrimitive
class CountString(TransformPrimitive):
"""Determines how many times a given string shows up in a text field.
Args:
string (str): The string to determine the count of. Defaults to
the word "the".
ignore_case (bool): Determines if case of the string should be
considered or not. Defaults to true.
ignore_non_alphanumeric (bool): Determines if non-alphanumeric
characters should be used in the search. Defaults to False.
is_regex (bool): Defines if the string argument is a regex or not.
Defaults to False.
match_whole_words_only (bool): Determines if whole words should be
matched or not. For example searching for word `the` against
`then, the, there` should only return `the` if this argument
was True. Defaults to False.
Examples:
>>> count_string = CountString(string="the")
>>> count_string(["The problem was difficult.",
... "He was there.",
... "The girl went to the store."]).tolist()
[1.0, 1.0, 2.0]
>>> # Match case of string
>>> count_string_ignore_case = CountString(string="the", ignore_case=False)
>>> count_string_ignore_case(["The problem was difficult.",
... "He was there.",
... "The girl went to the store."]).tolist()
[0.0, 1.0, 1.0]
>>> # Ignore non-alphanumeric characters in the search
>>> count_string_ignore_non_alphanumeric = CountString(string="the",
... ignore_non_alphanumeric=True)
>>> count_string_ignore_non_alphanumeric(["Th*/e problem was difficult.",
... "He was there.",
... "The girl went to the store."]).tolist()
[1.0, 1.0, 2.0]
>>> # Specify the string as a regex
>>> count_string_is_regex = CountString(string="t.e", is_regex=True)
>>> count_string_is_regex(["The problem was difficult.",
... "He was there.",
... "The girl went to the store."]).tolist()
[1.0, 1.0, 2.0]
>>> # Match whole words only
>>> count_string_match_whole_words_only = CountString(string="the",
... match_whole_words_only=True)
>>> count_string_match_whole_words_only(["The problem was difficult.",
... "He was there.",
... "The girl went to the store."]).tolist()
[1.0, 0.0, 2.0]
"""
name = "count_string"
input_types = [ColumnSchema(logical_type=NaturalLanguage)]
return_type = ColumnSchema(logical_type=IntegerNullable, semantic_tags={"numeric"})
def __init__(
self,
string="the",
ignore_case=True,
ignore_non_alphanumeric=False,
is_regex=False,
match_whole_words_only=False,
):
self.string = string
self.ignore_case = ignore_case
self.ignore_non_alphanumeric = ignore_non_alphanumeric
self.match_whole_words_only = match_whole_words_only
self.is_regex = is_regex
# we don't want to strip non-alphanumeric characters from the pattern,
# i.e. h.ll. should match "hello", so we can't strip the dots to make hll
if not is_regex:
self.pattern = re.escape(self.process_text(string))
else:
self.pattern = string
if ignore_case:
self.pattern = self.pattern.lower()
# \b\b.*\b\b is the same as \b.*\b so we don't have to check if
# the pattern is given to us as regex and if it already has leading
# and trailing \b's
if match_whole_words_only:
self.pattern = "\\b" + self.pattern + "\\b"
def process_text(self, text):
if self.ignore_non_alphanumeric:
text = re.sub("[^0-9a-zA-Z ]+", "", text)
if self.ignore_case:
text = text.lower()
return text
def METHOD_NAME(self):
def count_string(words):
if not isinstance(words, str):
return np.nan
words = self.process_text(words)
return len(re.findall(self.pattern, words))
return np.vectorize(count_string, otypes=[float])
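# Minimal usage sketch for the primitive above (mirrors the docstring examples; illustrative only):
if __name__ == "__main__":
    func = CountString(string="the").METHOD_NAME()
    print(func(["The problem was difficult.", "He was there."]))  # -> [1. 1.]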
|
3,929 |
test resource invocation default config
|
from contextlib import contextmanager
import pytest
from dagster import Field, Noneable, Selector, build_init_resource_context, resource
from dagster._core.errors import (
DagsterInvalidConfigError,
DagsterInvalidDefinitionError,
DagsterInvalidInvocationError,
)
def test_resource_invocation_no_arg():
@resource
def basic_resource():
return 5
assert basic_resource() == 5
def test_resource_invocation_none_arg():
@resource
def basic_resource(_):
return 5
assert basic_resource(None) == 5
with pytest.raises(
DagsterInvalidInvocationError,
match=(
"Resource initialization function has context argument, but no "
"context was provided when invoking."
),
):
basic_resource()
@resource
def basic_resource_arb_context(arb_context):
return 5
assert basic_resource_arb_context(None) == 5
assert basic_resource_arb_context(arb_context=None) == 5
with pytest.raises(
DagsterInvalidInvocationError,
match="Resource initialization expected argument 'arb_context'.",
):
assert basic_resource_arb_context(wrong_context=None) == 5
def test_resource_invocation_with_resources():
@resource(required_resource_keys={"foo"})
def resource_reqs_resources(init_context):
return init_context.resources.foo
with pytest.raises(
DagsterInvalidInvocationError,
match="Resource has required resources, but no context was provided.",
):
resource_reqs_resources(None)
context = build_init_resource_context()
with pytest.raises(
DagsterInvalidDefinitionError,
match="resource with key 'foo' required was not provided.",
):
resource_reqs_resources(context)
context = build_init_resource_context(resources={"foo": "bar"})
assert resource_reqs_resources(context) == "bar"
def test_resource_invocation_with_cm_resource():
teardown_log = []
@resource
@contextmanager
def cm_resource(_):
try:
yield "foo"
finally:
teardown_log.append("collected")
with cm_resource(None) as resource_val:
assert resource_val == "foo"
assert not teardown_log
assert teardown_log == ["collected"]
def test_resource_invocation_with_config():
@resource(config_schema={"foo": str})
def resource_reqs_config(context):
assert context.resource_config["foo"] == "bar"
return 5
# Ensure that error is raised when we attempt to invoke with a None context
with pytest.raises(
DagsterInvalidInvocationError,
match="Resource has required config schema, but no context was provided.",
):
resource_reqs_config(None)
# Ensure that error is raised when context does not have the required config.
context = build_init_resource_context()
with pytest.raises(
DagsterInvalidConfigError,
match="Error in config for resource",
):
resource_reqs_config(context)
with pytest.raises(
DagsterInvalidConfigError,
match="Error when applying config mapping for resource",
):
resource_reqs_config.configured({"foobar": "bar"})(None)
# Ensure that if you configure the resource, you can provide a None context.
result = resource_reqs_config.configured({"foo": "bar"})(None)
assert result == 5
result = resource_reqs_config(build_init_resource_context(config={"foo": "bar"}))
assert result == 5
def test_failing_resource():
@resource
def fails(_):
raise Exception("Oh no!")
with pytest.raises(Exception, match="Oh no!"):
fails(None)
def test_resource_invocation_dict_config():
@resource(config_schema=dict)
def resource_requires_dict(context):
assert context.resource_config == {"foo": "bar"}
return context.resource_config
assert resource_requires_dict(build_init_resource_context(config={"foo": "bar"})) == {
"foo": "bar"
}
@resource(config_schema=Noneable(dict))
def resource_noneable_dict(context):
return context.resource_config
assert resource_noneable_dict(build_init_resource_context()) is None
assert resource_noneable_dict(None) is None
def METHOD_NAME():
@resource(config_schema={"foo": Field(str, is_required=False, default_value="bar")})
def resource_requires_config(context):
assert context.resource_config["foo"] == "bar"
return context.resource_config["foo"]
assert resource_requires_config(None) == "bar"
@resource(config_schema=Field(str, is_required=False, default_value="bar"))
def resource_requires_config_val(context):
assert context.resource_config == "bar"
return context.resource_config
assert resource_requires_config_val(None) == "bar"
@resource(
config_schema={
"foo": Field(str, is_required=False, default_value="bar"),
"baz": str,
}
)
def resource_requires_config_partial(context):
assert context.resource_config["foo"] == "bar"
assert context.resource_config["baz"] == "bar"
return context.resource_config["foo"] + context.resource_config["baz"]
assert (
resource_requires_config_partial(build_init_resource_context(config={"baz": "bar"}))
== "barbar"
)
def test_resource_invocation_kitchen_sink_config():
@resource(
config_schema={
"str_field": str,
"int_field": int,
"list_int": [int],
"list_list_int": [[int]],
"dict_field": {"a_string": str},
"list_dict_field": [{"an_int": int}],
"selector_of_things": Selector(
{"select_list_dict_field": [{"an_int": int}], "select_int": int}
),
"optional_list_of_optional_string": Noneable([Noneable(str)]),
}
)
def kitchen_sink(context):
return context.resource_config
resource_config = {
"str_field": "kjf",
"int_field": 2,
"list_int": [3],
"list_list_int": [[1], [2, 3]],
"dict_field": {"a_string": "kdjfkd"},
"list_dict_field": [{"an_int": 2}, {"an_int": 4}],
"selector_of_things": {"select_int": 3},
"optional_list_of_optional_string": ["foo", None],
}
assert kitchen_sink(build_init_resource_context(config=resource_config)) == resource_config
def test_resource_dep_no_context():
@resource(required_resource_keys={"foo"})
def the_resource():
pass
the_resource()
with pytest.raises(
DagsterInvalidInvocationError,
match=(
"Attempted to invoke resource with argument, but underlying "
"function has no context argument. Either specify a context argument on "
"the resource function, or remove the passed-in argument."
),
):
the_resource(None)
|
3,930 |
on agent finish
|
import asyncio
from langchain.callbacks.base import AsyncCallbackHandler, BaseCallbackHandler
from langflow.api.v1.schemas import ChatResponse
from typing import Any, Dict, List, Union
from fastapi import WebSocket
from langchain.schema import AgentAction, LLMResult, AgentFinish
from loguru import logger
# https://github.com/hwchase17/chat-langchain/blob/master/callback.py
class AsyncStreamingLLMCallbackHandler(AsyncCallbackHandler):
"""Callback handler for streaming LLM responses."""
def __init__(self, websocket: WebSocket):
self.websocket = websocket
async def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
resp = ChatResponse(message=token, type="stream", intermediate_steps="")
await self.websocket.send_json(resp.dict())
async def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> Any:
"""Run when LLM starts running."""
async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> Any:
"""Run when LLM ends running."""
async def on_llm_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> Any:
"""Run when LLM errors."""
async def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> Any:
"""Run when chain starts running."""
async def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> Any:
"""Run when chain ends running."""
async def on_chain_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> Any:
"""Run when chain errors."""
async def on_tool_start(
self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
) -> Any:
"""Run when tool starts running."""
resp = ChatResponse(
message="",
type="stream",
intermediate_steps=f"Tool input: {input_str}",
)
await self.websocket.send_json(resp.dict())
async def on_tool_end(self, output: str, **kwargs: Any) -> Any:
"""Run when tool ends running."""
observation_prefix = kwargs.get("observation_prefix", "Tool output: ")
split_output = output.split()
first_word = split_output[0]
rest_of_output = split_output[1:]
# Create a formatted message.
intermediate_steps = f"{observation_prefix}{first_word}"
# Create a ChatResponse instance.
resp = ChatResponse(
message="",
type="stream",
intermediate_steps=intermediate_steps,
)
rest_of_resps = [
ChatResponse(
message="",
type="stream",
intermediate_steps=f"{word}",
)
for word in rest_of_output
]
resps = [resp] + rest_of_resps
# Try to send the response, handle potential errors.
try:
# This is to emulate the stream of tokens
for resp in resps:
await self.websocket.send_json(resp.dict())
except Exception as exc:
logger.error(f"Error sending response: {exc}")
async def on_tool_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> Any:
"""Run when tool errors."""
async def on_text(self, text: str, **kwargs: Any) -> Any:
"""Run on arbitrary text."""
# This runs when first sending the prompt
# to the LLM, adding it will send the final prompt
# to the frontend
async def on_agent_action(self, action: AgentAction, **kwargs: Any):
log = f"Thought: {action.log}"
# if there are line breaks, split them and send them
# as separate messages
if "\n" in log:
logs = log.split("\n")
for log in logs:
resp = ChatResponse(message="", type="stream", intermediate_steps=log)
await self.websocket.send_json(resp.dict())
else:
resp = ChatResponse(message="", type="stream", intermediate_steps=log)
await self.websocket.send_json(resp.dict())
async def METHOD_NAME(self, finish: AgentFinish, **kwargs: Any) -> Any:
"""Run on agent end."""
resp = ChatResponse(
message="",
type="stream",
intermediate_steps=finish.log,
)
await self.websocket.send_json(resp.dict())
class StreamingLLMCallbackHandler(BaseCallbackHandler):
"""Callback handler for streaming LLM responses."""
def __init__(self, websocket):
self.websocket = websocket
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
resp = ChatResponse(message=token, type="stream", intermediate_steps="")
loop = asyncio.get_event_loop()
coroutine = self.websocket.send_json(resp.dict())
asyncio.run_coroutine_threadsafe(coroutine, loop)
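# Illustrative wiring sketch (not part of this module; langflow attaches the handler elsewhere):
#   handler = StreamingLLMCallbackHandler(websocket)
#   llm = ChatOpenAI(streaming=True, callbacks=[handler])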
|
3,931 |
get rendering for
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import copy
from twisted.internet import defer
from twisted.python import log
from buildbot import config
from buildbot.process import buildstep
from buildbot.process import results
from buildbot.warnings import warn_deprecated
class ShellArg(results.ResultComputingConfigMixin):
publicAttributes = (
results.ResultComputingConfigMixin.resultConfig +
["command", "logname"])
def __init__(self, command=None, logname=None, logfile=None, **kwargs):
name = self.__class__.__name__
if command is None:
config.error(f"the 'command' parameter of {name} must not be None")
self.command = command
self.logname = logname
if logfile is not None:
warn_deprecated('2.10.0', f"{name}: logfile is deprecated, use logname")
if self.logname is not None:
config.error(("{}: the 'logfile' parameter must not be specified when 'logname' " +
"is set").format(name))
self.logname = logfile
for k, v in kwargs.items():
if k not in self.resultConfig:
config.error(f"the parameter '{k}' is not handled by ShellArg")
setattr(self, k, v)
# we don't validate anything yet as we can have renderables.
def validateAttributes(self):
# only make the check if we have a list
if not isinstance(self.command, (str, list)):
config.error(f"{self.command} is an invalid command, it must be a string or a list")
if isinstance(self.command, list):
if not all(isinstance(x, str) for x in self.command):
config.error(f"{self.command} must only have strings in it")
runConfParams = [(p_attr, getattr(self, p_attr))
for p_attr in self.resultConfig]
not_bool = [(p_attr, p_val) for (p_attr, p_val) in runConfParams if not isinstance(p_val,
bool)]
if not_bool:
config.error(f"{repr(not_bool)} must be booleans")
@defer.inlineCallbacks
def METHOD_NAME(self, build):
rv = copy.copy(self)
for p_attr in self.publicAttributes:
res = yield build.render(getattr(self, p_attr))
setattr(rv, p_attr, res)
return rv
class ShellSequence(buildstep.ShellMixin, buildstep.BuildStep):
last_command = None
renderables = ['commands']
def __init__(self, commands=None, **kwargs):
self.commands = commands
kwargs = self.setupShellMixin(kwargs, prohibitArgs=['command'])
super().__init__(**kwargs)
def shouldRunTheCommand(self, cmd):
return bool(cmd)
def getFinalState(self):
return self.describe(True)
@defer.inlineCallbacks
def runShellSequence(self, commands):
terminate = False
if commands is None:
log.msg("After rendering, ShellSequence `commands` is None")
return results.EXCEPTION
overall_result = results.SUCCESS
for arg in commands:
if not isinstance(arg, ShellArg):
log.msg("After rendering, ShellSequence `commands` list "
"contains something that is not a ShellArg")
return results.EXCEPTION
try:
arg.validateAttributes()
except config.ConfigErrors as e:
log.msg(f"After rendering, ShellSequence `commands` is invalid: {e}")
return results.EXCEPTION
# handle the command from the arg
command = arg.command
if not self.shouldRunTheCommand(command):
continue
# keep the command around so we can describe it
self.last_command = command
cmd = yield self.makeRemoteShellCommand(command=command,
stdioLogName=arg.logname)
yield self.runCommand(cmd)
overall_result, terminate = results.computeResultAndTermination(
arg, cmd.results(), overall_result)
if terminate:
break
return overall_result
def run(self):
return self.runShellSequence(self.commands)
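# Illustrative master.cfg usage of the two classes above (hypothetical commands and log names):
#   from buildbot.plugins import steps, util
#   factory.addStep(steps.ShellSequence(commands=[
#       util.ShellArg(command=["make", "clean"], logname="clean"),
#       util.ShellArg(command=["make", "all"], logname="build", haltOnFailure=True),
#   ]))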
|
3,932 |
test precision
|
"""
Test ants_image.py
nptest.assert_allclose
self.assertEqual
self.assertTrue
def test_initialize_and_get_value(self):
for img, metric in self.metrics:
img2 = img.clone() * 1.1
metric.set_fixed_image(img)
metric.set_moving_image(img2)
metric.set_sampling()
metric.initialize()
value = metric.get_value()
def test__call__(self):
for img, metric in self.metrics:
img2 = img.clone() * 1.1
imgmask = img > img.mean()
imgmask2 = img2 > img2.mean()
val = metric(img,img2)
val = metric(img,img2, fixed_mask=imgmask, moving_mask=imgmask2)
val = metric(img,img2, sampling_percentage=0.8)
"""
import os
import unittest
from common import run_tests
from tempfile import mktemp
import numpy as np
import numpy.testing as nptest
import ants
class TestClass_ANTsImageToImageMetric(unittest.TestCase):
"""
Test ants.ANTsImage class
"""
def setUp(self):
img2d = ants.image_read(ants.get_ants_data('r16'))
img3d = ants.image_read(ants.get_ants_data('mni'))
metric2d = ants.new_ants_metric(precision='float', dimension=2)
metric3d = ants.new_ants_metric(precision='float', dimension=3)
self.imgs = [img2d, img3d]
self.metrics = [metric2d, metric3d]
def tearDown(self):
pass
def test__repr__(self):
for metric in self.metrics:
r = metric.__repr__()
def METHOD_NAME(self):
for metric in self.metrics:
precision = metric.precision
def dimension(self):
for metric in self.metrics:
dimension = metric.dimension
def metrictype(self):
for metric in self.metrics:
mtype = metric.metrictype
def is_vector(self):
for metric in self.metrics:
isvec = metric.is_vector
def pointer(self):
for metric in self.metrics:
ptr = metric.pointer
def test_set_fixed_image(self):
# def set_fixed_image(self, image):
for img, metric in zip(self.imgs,self.metrics):
metric.set_fixed_image(img)
def test_set_fixed_mask(self):
# def set_fixed_image(self, image):
for img, metric in zip(self.imgs,self.metrics):
mask = img > img.mean()
metric.set_fixed_mask(img)
def test_set_moving_image(self):
# def set_fixed_image(self, image):
for img, metric in zip(self.imgs,self.metrics):
metric.set_moving_image(img)
def test_set_moving_mask(self):
# def set_fixed_image(self, image):
for img, metric in zip(self.imgs,self.metrics):
mask = img > img.mean()
metric.set_moving_mask(img)
def test_set_sampling(self):
for img, metric in zip(self.imgs,self.metrics):
with self.assertRaises(Exception):
# set sampling without moving+fixed images
metric.set_sampling()
img2 = img.clone()
metric.set_fixed_image(img)
metric.set_moving_image(img2)
metric.set_sampling()
def test_initialize(self):
for img, metric in zip(self.imgs,self.metrics):
with self.assertRaises(Exception):
# initialize without moving+fixed images
metric.initialize()
img2 = img.clone()
metric.set_fixed_image(img)
metric.set_moving_image(img2)
metric.set_sampling()
metric.initialize()
def test_get_value(self):
for img, metric in zip(self.imgs,self.metrics):
with self.assertRaises(Exception):
# initialize without moving+fixed images
metric.get_value()
img2 = img.clone()
metric.set_fixed_image(img)
metric.set_moving_image(img2)
metric.set_sampling()
metric.get_value()
def test__call__(self):
for img, metric in zip(self.imgs,self.metrics):
img2 = img.clone() * 1.1
imgmask = img > img.mean()
imgmask2 = img2 > img2.mean()
val = metric(img,img2)
# val = metric(img,img2, fixed_mask=imgmask, moving_mask=imgmask2)
val = metric(img,img2, sampling_percentage=0.8)
if __name__ == '__main__':
run_tests()
|
3,933 |
get prep value
|
#
# Copyright © Michal Čihař <[email protected]>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
"""
Helper functions and classes from Weblate.
The code here is a copy of code from Weblate, taken from
weblate/utils/validators.py and weblate/utils/fields.py.
"""
import json
import os.path
import re
from email.mime.image import MIMEImage
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.mail import EmailMultiAlternatives
from django.core.serializers.json import DjangoJSONEncoder
from django.core.validators import validate_email as validate_email_django
from django.db import models
from django.template.loader import render_to_string
from django.utils.translation import get_language, get_language_bidi
from django.utils.translation import gettext as _
from html2text import HTML2Text
# Reject some suspicious e-mail addresses, based on checks enforced by Exim MTA
EMAIL_BLACKLIST = re.compile(r"^([./|]|.*([@%!`#&?]|/\.\./))")
def validate_email(value):
try:
validate_email_django(value)
except ValidationError:
raise ValidationError(_("Enter a valid e-mail address."))
user_part = value.rsplit("@", 1)[0]
if EMAIL_BLACKLIST.match(user_part):
raise ValidationError(_("Enter a valid e-mail address."))
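# Quick illustration of the blacklist check above (illustrative user parts; the full
# validate_email() call additionally runs Django's standard e-mail validator first):
#   EMAIL_BLACKLIST.match("user")    # no match -> accepted
#   EMAIL_BLACKLIST.match("./user")  # match    -> rejected as suspicious
#   EMAIL_BLACKLIST.match("a%b")     # match    -> rejected as suspicious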
class JSONField(models.TextField):
"""JSON serializaed TextField."""
def __init__(self, **kwargs):
if "default" not in kwargs:
kwargs["default"] = {}
super().__init__(**kwargs)
def to_python(self, value):
"""Convert a string from the database to a Python value."""
if not value:
return None
try:
return json.loads(value)
except ValueError:
return value
def METHOD_NAME(self, value):
"""Convert the value to a string that can be stored in the database."""
if not value:
return None
if isinstance(value, (dict, list)):
return json.dumps(value, cls=DjangoJSONEncoder)
return super().METHOD_NAME(value)
def from_db_value(self, value, *args, **kwargs):
return self.to_python(value)
def get_db_prep_save(self, value, *args, **kwargs):
if value is None:
value = {}
return json.dumps(value, cls=DjangoJSONEncoder)
def value_from_object(self, obj):
value = super().value_from_object(obj)
return json.dumps(value, cls=DjangoJSONEncoder)
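# Minimal round-trip sketch for this field (illustrative; in practice it is used through the
# Django ORM rather than called directly):
#   field = JSONField()
#   stored = field.METHOD_NAME({"plan": "basic", "seats": 5})  # JSON string for the database
#   field.to_python(stored)                                    # -> {"plan": "basic", "seats": 5}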
def send_notification(notification, recipients, **kwargs):
if not recipients:
return
# HTML to text conversion
html2text = HTML2Text(bodywidth=78)
html2text.unicode_snob = True
html2text.ignore_images = True
html2text.pad_tables = True
# Logos
images = []
for name in ("email-logo.png", "email-logo-footer.png"):
filename = os.path.join(settings.STATIC_ROOT, name)
with open(filename, "rb") as handle:
image = MIMEImage(handle.read())
image.add_header("Content-ID", f"<{name}@cid.weblate.org>")
image.add_header("Content-Disposition", "inline", filename=name)
images.append(image)
# Context and subject
context = {
"LANGUAGE_CODE": get_language(),
"LANGUAGE_BIDI": get_language_bidi(),
}
context.update(kwargs)
subject = render_to_string(f"mail/{notification}_subject.txt", context).strip()
context["subject"] = subject
# Render body
body = render_to_string(f"mail/{notification}.html", context).strip()
# Prepare e-mail
email = EmailMultiAlternatives(
subject,
html2text.handle(body),
"[email protected]",
recipients,
)
email.mixed_subtype = "related"
for image in images:
email.attach(image)
email.attach_alternative(body, "text/html")
# Include invoice PDF if exists
if "invoice" in kwargs:
with open(kwargs["invoice"].pdf_path, "rb") as handle:
email.attach(
os.path.basename(kwargs["invoice"].pdf_path),
handle.read(),
"application/pdf",
)
email.send()
|
3,934 |
create resource
|
"""
Copyright 2019 Inmanta
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Contact: [email protected]
"""
import logging
from typing import Any, Dict
import pytest
from inmanta.agent.handler import CRUDHandlerGeneric as CRUDHandler
from inmanta.agent.handler import HandlerContext, ResourcePurged
from inmanta.const import ResourceState
from inmanta.resources import Id, PurgeableResource, resource
from utils import log_contains, no_error_in_logs
@pytest.mark.parametrize(
"purged_desired,purged_actual,excn,create,delete",
[
(True, False, True, False, False),
(True, True, False, False, False),
(True, False, False, False, True),
(True, True, True, False, False),
(False, False, True, True, False),
(False, True, False, True, False),
(False, True, True, True, False),
(False, False, False, False, False),
],
)
@pytest.mark.parametrize("updated", [True, False])
def test_CRUD_handler_purged_response(purged_desired, purged_actual, excn, create, delete, updated, caplog):
"""
purged_actual and excn are conceptually equivalent, this test case serves to prove that they are in fact, equivalent
"""
caplog.set_level(logging.DEBUG)
@resource("aa::Aa", "aa", "aa")
class TestResource(PurgeableResource):
fields = ("value",)
class DummyCrud(CRUDHandler[TestResource]):
def __init__(self):
self.updated = False
self.created = False
self.deleted = False
def read_resource(self, ctx: HandlerContext, resource: TestResource) -> None:
resource.purged = purged_actual
if updated:
resource.value = "b"
if excn:
raise ResourcePurged()
def update_resource(self, ctx: HandlerContext, changes: dict, resource: TestResource) -> None:
self.updated = True
def METHOD_NAME(self, ctx: HandlerContext, resource: TestResource) -> None:
self.created = True
def delete_resource(self, ctx: HandlerContext, resource: TestResource) -> None:
self.deleted = True
res = TestResource(Id("aa::Aa", "aa", "aa", "aa", 1))
res.purged = purged_desired
res.value = "a"
ctx = HandlerContext(res, False)
handler = DummyCrud()
handler.execute(ctx, res, False)
assert handler.updated == ((not (create or delete)) and updated and not purged_desired)
assert handler.created == create
assert handler.deleted == delete
no_error_in_logs(caplog)
log_contains(caplog, "inmanta.agent.handler", logging.DEBUG, "resource aa::Aa[aa,aa=aa],v=1: Calling read_resource")
@pytest.mark.parametrize(
"running_test",
[True, False],
)
def test_3470_CRUD_handler_with_unserializable_changes(running_test: bool, monkeypatch, caplog):
"""
This test case checks that unserializable items in the 'changes' set no longer make
the CRUD handler hang in production and that an exception is raised when the
RUNNING_TESTS variable is set to True.
"""
import inmanta
monkeypatch.setattr(inmanta, "RUNNING_TESTS", running_test)
@resource(name="aa::Aa", id_attribute="aa", agent="aa")
class TestResource(PurgeableResource):
fields = ("value",)
class DummyCrud(CRUDHandler[TestResource]):
def __init__(self):
self.updated = False
def read_resource(self, ctx: HandlerContext, resource: TestResource) -> None:
resource.value = "b"
def update_resource(self, ctx: HandlerContext, changes: Dict[str, Dict[str, Any]], resource: TestResource) -> None:
self.updated = True
res = TestResource(Id(entity_type="aa::Aa", agent_name="aa", attribute="aa", attribute_value="aa", version=1))
# Sets are not JSON serializable
res.value = {"a"}
res.purged = False
ctx = HandlerContext(res, dry_run=False)
handler = DummyCrud()
if running_test:
handler.execute(ctx, res, dry_run=False)
assert ctx.status is ResourceState.failed
error_message = "Failed to serialize attribute {'a'}"
assert error_message in caplog.text
else:
handler.execute(ctx, res, dry_run=False)
assert ctx.status is ResourceState.deployed
assert handler.updated
@pytest.mark.parametrize(
"running_test",
[True, False],
)
def test_3470_CRUD_handler_with_unserializable_items_log_message(running_test: bool, monkeypatch, caplog):
"""
This test case checks that unserializable log messages no longer make
the CRUD handler hang in production and that an exception is raised when the
RUNNING_TESTS variable is set to True.
"""
import inmanta
monkeypatch.setattr(inmanta, "RUNNING_TESTS", running_test)
@resource(name="aa::Aa", id_attribute="aa", agent="aa")
class TestResource(PurgeableResource):
fields = ("value",)
class DummyCrud(CRUDHandler[TestResource]):
def __init__(self):
self.updated = False
def read_resource(self, ctx: HandlerContext, resource: TestResource) -> None:
resource.value = "b"
unserializable_set = {"b"}
ctx.debug(msg="Unserializable kwargs: ", kwargs={"unserializable": unserializable_set})
def update_resource(self, ctx: HandlerContext, changes: Dict[str, Dict[str, Any]], resource: TestResource) -> None:
self.updated = True
res = TestResource(Id(entity_type="aa::Aa", agent_name="aa", attribute="aa", attribute_value="aa", version=1))
res.value = "a"
res.purged = False
ctx = HandlerContext(res, dry_run=False)
handler = DummyCrud()
if running_test:
handler.execute(ctx, res, dry_run=False)
error_message = "Failed to serialize argument for log message"
assert ctx.status is ResourceState.failed
assert error_message in caplog.text
else:
handler.execute(ctx, res, dry_run=False)
assert ctx.status is ResourceState.deployed
assert handler.updated
|
3,935 |
search packages info
|
from __future__ import absolute_import
import logging
import os
from email.parser import FeedParser # type: ignore
from pip._vendor import pkg_resources
from pip._vendor.packaging.utils import canonicalize_name
from pip._internal.basecommand import Command
from pip._internal.status_codes import ERROR, SUCCESS
logger = logging.getLogger(__name__)
class ShowCommand(Command):
"""
Show information about one or more installed packages.
The output is in RFC-compliant mail header format.
"""
name = 'show'
usage = """
%prog [options] <package> ..."""
summary = 'Show information about installed packages.'
ignore_require_venv = True
def __init__(self, *args, **kw):
super(ShowCommand, self).__init__(*args, **kw)
self.cmd_opts.add_option(
'-f', '--files',
dest='files',
action='store_true',
default=False,
help='Show the full list of installed files for each package.')
self.parser.insert_option_group(0, self.cmd_opts)
def run(self, options, args):
if not args:
logger.warning('ERROR: Please provide a package name or names.')
return ERROR
query = args
results = METHOD_NAME(query)
if not print_results(
results, list_files=options.files, verbose=options.verbose):
return ERROR
return SUCCESS
def METHOD_NAME(query):
"""
Gather details from installed distributions. Print distribution name,
version, location, and installed files. Listing installed files requires a
pip-generated 'installed-files.txt' in the distribution's '.egg-info'
directory.
"""
installed = {}
for p in pkg_resources.working_set:
installed[canonicalize_name(p.project_name)] = p
query_names = [canonicalize_name(name) for name in query]
for dist in [installed[pkg] for pkg in query_names if pkg in installed]:
package = {
'name': dist.project_name,
'version': dist.version,
'location': dist.location,
'requires': [dep.project_name for dep in dist.requires()],
}
file_list = None
metadata = None
if isinstance(dist, pkg_resources.DistInfoDistribution):
# RECORD should be part of the .dist-info metadata
if dist.has_metadata('RECORD'):
lines = dist.get_metadata_lines('RECORD')
paths = [l.split(',')[0] for l in lines]
paths = [os.path.join(dist.location, p) for p in paths]
file_list = [os.path.relpath(p, dist.location) for p in paths]
if dist.has_metadata('METADATA'):
metadata = dist.get_metadata('METADATA')
else:
# Otherwise use pip's log for .egg-info's
if dist.has_metadata('installed-files.txt'):
paths = dist.get_metadata_lines('installed-files.txt')
paths = [os.path.join(dist.egg_info, p) for p in paths]
file_list = [os.path.relpath(p, dist.location) for p in paths]
if dist.has_metadata('PKG-INFO'):
metadata = dist.get_metadata('PKG-INFO')
if dist.has_metadata('entry_points.txt'):
entry_points = dist.get_metadata_lines('entry_points.txt')
package['entry_points'] = entry_points
if dist.has_metadata('INSTALLER'):
for line in dist.get_metadata_lines('INSTALLER'):
if line.strip():
package['installer'] = line.strip()
break
# @todo: Should pkg_resources.Distribution have a
# `get_pkg_info` method?
feed_parser = FeedParser()
feed_parser.feed(metadata)
pkg_info_dict = feed_parser.close()
for key in ('metadata-version', 'summary',
'home-page', 'author', 'author-email', 'license'):
package[key] = pkg_info_dict.get(key)
# It looks like FeedParser cannot deal with repeated headers
classifiers = []
for line in metadata.splitlines():
if line.startswith('Classifier: '):
classifiers.append(line[len('Classifier: '):])
package['classifiers'] = classifiers
if file_list:
package['files'] = sorted(file_list)
yield package
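# Hedged usage sketch (package names below are illustrative, not part of pip):
# the generator above yields one dict per queried package, so callers can
# stream results without collecting them all first, e.g.
#     for info in METHOD_NAME(['pip', 'wheel']):
#         print(info['name'], info['version'], info['location'])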
def print_results(distributions, list_files=False, verbose=False):
"""
Print the information from the installed distributions found.
"""
results_printed = False
for i, dist in enumerate(distributions):
results_printed = True
if i > 0:
logger.info("---")
name = dist.get('name', '')
required_by = [
pkg.project_name for pkg in pkg_resources.working_set
if name in [required.name for required in pkg.requires()]
]
logger.info("Name: %s", name)
logger.info("Version: %s", dist.get('version', ''))
logger.info("Summary: %s", dist.get('summary', ''))
logger.info("Home-page: %s", dist.get('home-page', ''))
logger.info("Author: %s", dist.get('author', ''))
logger.info("Author-email: %s", dist.get('author-email', ''))
logger.info("License: %s", dist.get('license', ''))
logger.info("Location: %s", dist.get('location', ''))
logger.info("Requires: %s", ', '.join(dist.get('requires', [])))
logger.info("Required-by: %s", ', '.join(required_by))
if verbose:
logger.info("Metadata-Version: %s",
dist.get('metadata-version', ''))
logger.info("Installer: %s", dist.get('installer', ''))
logger.info("Classifiers:")
for classifier in dist.get('classifiers', []):
logger.info(" %s", classifier)
logger.info("Entry-points:")
for entry in dist.get('entry_points', []):
logger.info(" %s", entry.strip())
if list_files:
logger.info("Files:")
for line in dist.get('files', []):
logger.info(" %s", line.strip())
if "files" not in dist:
logger.info("Cannot locate installed-files.txt")
return results_printed
|
3,936 |
dup
|
import socket
import warnings
class TransportSocket:
"""A socket-like wrapper for exposing real transport sockets.
These objects can be safely returned by APIs like
`transport.get_extra_info('socket')`. All potentially disruptive
operations (like "socket.close()") are banned.
"""
__slots__ = ('_sock',)
def __init__(self, sock: socket.socket):
self._sock = sock
def _na(self, what):
warnings.warn(
f"Using {what} on sockets returned from get_extra_info('socket') "
f"will be prohibited in asyncio 3.9. Please report your use case "
f"to bugs.python.org.",
DeprecationWarning, source=self)
@property
def family(self):
return self._sock.family
@property
def type(self):
return self._sock.type
@property
def proto(self):
return self._sock.proto
def __repr__(self):
s = (
f"<asyncio.TransportSocket fd={self.fileno()}, "
f"family={self.family!s}, type={self.type!s}, "
f"proto={self.proto}"
)
if self.fileno() != -1:
try:
laddr = self.getsockname()
if laddr:
s = f"{s}, laddr={laddr}"
except socket.error:
pass
try:
raddr = self.getpeername()
if raddr:
s = f"{s}, raddr={raddr}"
except socket.error:
pass
return f"{s}>"
def __getstate__(self):
raise TypeError("Cannot serialize asyncio.TransportSocket object")
def fileno(self):
return self._sock.fileno()
def METHOD_NAME(self):
return self._sock.METHOD_NAME()
def get_inheritable(self):
return self._sock.get_inheritable()
def shutdown(self, how):
# asyncio doesn't currently provide a high-level transport API
# to shutdown the connection.
self._sock.shutdown(how)
def getsockopt(self, *args, **kwargs):
return self._sock.getsockopt(*args, **kwargs)
def setsockopt(self, *args, **kwargs):
self._sock.setsockopt(*args, **kwargs)
def getpeername(self):
return self._sock.getpeername()
def getsockname(self):
return self._sock.getsockname()
def getsockbyname(self):
return self._sock.getsockbyname()
def accept(self):
self._na('accept() method')
return self._sock.accept()
def connect(self, *args, **kwargs):
self._na('connect() method')
return self._sock.connect(*args, **kwargs)
def connect_ex(self, *args, **kwargs):
self._na('connect_ex() method')
return self._sock.connect_ex(*args, **kwargs)
def bind(self, *args, **kwargs):
self._na('bind() method')
return self._sock.bind(*args, **kwargs)
def ioctl(self, *args, **kwargs):
self._na('ioctl() method')
return self._sock.ioctl(*args, **kwargs)
def listen(self, *args, **kwargs):
self._na('listen() method')
return self._sock.listen(*args, **kwargs)
def makefile(self):
self._na('makefile() method')
return self._sock.makefile()
def sendfile(self, *args, **kwargs):
self._na('sendfile() method')
return self._sock.sendfile(*args, **kwargs)
def close(self):
self._na('close() method')
return self._sock.close()
def detach(self):
self._na('detach() method')
return self._sock.detach()
def sendmsg_afalg(self, *args, **kwargs):
self._na('sendmsg_afalg() method')
return self._sock.sendmsg_afalg(*args, **kwargs)
def sendmsg(self, *args, **kwargs):
self._na('sendmsg() method')
return self._sock.sendmsg(*args, **kwargs)
def sendto(self, *args, **kwargs):
self._na('sendto() method')
return self._sock.sendto(*args, **kwargs)
def send(self, *args, **kwargs):
self._na('send() method')
return self._sock.send(*args, **kwargs)
def sendall(self, *args, **kwargs):
self._na('sendall() method')
return self._sock.sendall(*args, **kwargs)
def set_inheritable(self, *args, **kwargs):
self._na('set_inheritable() method')
return self._sock.set_inheritable(*args, **kwargs)
def share(self, process_id):
self._na('share() method')
return self._sock.share(process_id)
def recv_into(self, *args, **kwargs):
self._na('recv_into() method')
return self._sock.recv_into(*args, **kwargs)
def recvfrom_into(self, *args, **kwargs):
self._na('recvfrom_into() method')
return self._sock.recvfrom_into(*args, **kwargs)
def recvmsg_into(self, *args, **kwargs):
self._na('recvmsg_into() method')
return self._sock.recvmsg_into(*args, **kwargs)
def recvmsg(self, *args, **kwargs):
self._na('recvmsg() method')
return self._sock.recvmsg(*args, **kwargs)
def recvfrom(self, *args, **kwargs):
self._na('recvfrom() method')
return self._sock.recvfrom(*args, **kwargs)
def recv(self, *args, **kwargs):
self._na('recv() method')
return self._sock.recv(*args, **kwargs)
def settimeout(self, value):
if value == 0:
return
raise ValueError(
'settimeout(): only 0 timeout is allowed on transport sockets')
def gettimeout(self):
return 0
def setblocking(self, flag):
if not flag:
return
raise ValueError(
'setblocking(): transport sockets cannot be blocking')
def __enter__(self):
self._na('context manager protocol')
return self._sock.__enter__()
def __exit__(self, *err):
self._na('context manager protocol')
return self._sock.__exit__(*err)
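# Hedged usage sketch (assumes an asyncio `transport` object is in scope):
# transports hand out this wrapper via get_extra_info('socket'); read-only
# queries behave like a normal socket, while disruptive calls only emit a
# DeprecationWarning through _na() for now.
#     sock = transport.get_extra_info('socket')
#     sock.family, sock.fileno(), sock.getsockname()   # allowed
#     sock.close()                                     # warns, then closes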
|
3,937 |
role deleted
|
# coding=utf-8
#####################################################
# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
#####################################################
# noqa: E128,E201
from ..client import BaseClient
from ..client import createApiClient
from ..client import config
from ..client import createTemporaryCredentials
from ..client import createSession
_defaultConfig = config
class AuthEvents(BaseClient):
"""
The auth service is responsible for storing credentials, managing
assignment of scopes, and validation of request signatures from other
services.
These exchanges provide notifications when credentials or roles are
updated. This is mostly so that multiple instances of the auth service
can purge their caches and synchronize state. But you are of course
welcome to use these for other purposes, monitoring changes for example.
"""
classOptions = {
"exchangePrefix": "exchange/taskcluster-auth/v1/",
}
serviceName = 'auth'
apiVersion = 'v1'
def clientCreated(self, *args, **kwargs):
"""
Client Created Messages
Message that a new client has been created.
This exchange takes the following keys:
* reserved: Space reserved for future routing-key entries, you should always match this entry with `#`. As automatically done by our tooling, if not specified.
"""
ref = {
'exchange': 'client-created',
'name': 'clientCreated',
'routingKey': [
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'v1/client-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
def clientUpdated(self, *args, **kwargs):
"""
Client Updated Messages
Message that a client has been updated.
This exchange takes the following keys:
* reserved: Space reserved for future routing-key entries, you should always match this entry with `#`. As automatically done by our tooling, if not specified.
"""
ref = {
'exchange': 'client-updated',
'name': 'clientUpdated',
'routingKey': [
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'v1/client-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
def clientDeleted(self, *args, **kwargs):
"""
Client Deleted Messages
Message that a client has been deleted.
This exchange takes the following keys:
* reserved: Space reserved for future routing-key entries, you should always match this entry with `#`. As automatically done by our tooling, if not specified.
"""
ref = {
'exchange': 'client-deleted',
'name': 'clientDeleted',
'routingKey': [
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'v1/client-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
def roleCreated(self, *args, **kwargs):
"""
Role Created Messages
Message that a new role has been created.
This exchange takes the following keys:
* reserved: Space reserved for future routing-key entries, you should always match this entry with `#`. As automatically done by our tooling, if not specified.
"""
ref = {
'exchange': 'role-created',
'name': 'roleCreated',
'routingKey': [
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'v1/role-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
def roleUpdated(self, *args, **kwargs):
"""
Role Updated Messages
Message that a role has been updated.
This exchange takes the following keys:
* reserved: Space reserved for future routing-key entries, you should always match this entry with `#`. As automatically done by our tooling, if not specified.
"""
ref = {
'exchange': 'role-updated',
'name': 'roleUpdated',
'routingKey': [
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'v1/role-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
def METHOD_NAME(self, *args, **kwargs):
"""
Role Deleted Messages
Message that a role has been deleted.
This exchange takes the following keys:
* reserved: Space reserved for future routing-key entries, you should always match this entry with `#`. As automatically done by our tooling, if not specified.
"""
ref = {
'exchange': 'role-deleted',
'name': 'roleDeleted',
'routingKey': [
{
'multipleWords': True,
'name': 'reserved',
},
],
'schema': 'v1/role-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
funcinfo = {
}
__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'AuthEvents']
|
3,938 |
test image editor static
|
# (C) Copyright 2004-2023 Enthought, Inc., Austin, TX
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only under
# the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
""" Tests pertaining to the ImageEditor
"""
import unittest
import pkg_resources
from pyface.api import Image, ImageResource
from traits.api import File, HasTraits
from traitsui.api import ImageEditor, Item, View
from traitsui.tests._tools import (
BaseTestMixin,
create_ui,
is_qt,
is_mac_os,
requires_toolkit,
ToolkitName,
)
filename1 = pkg_resources.resource_filename(
"traitsui", "examples/demo/Extras/images/python-logo.png"
)
filename2 = pkg_resources.resource_filename(
"traitsui", "examples/demo/Extras/images/info.png"
)
class ImageDisplay(HasTraits):
image = Image()
@requires_toolkit([ToolkitName.wx, ToolkitName.qt])
class TestImageEditor(BaseTestMixin, unittest.TestCase):
def METHOD_NAME(self):
obj1 = ImageDisplay()
view = View(
Item(
'image',
editor=ImageEditor(
image=ImageResource(filename1),
),
)
)
# This should not fail.
with create_ui(obj1, dict(view=view)) as ui:
pass
def test_image_editor_resource(self):
obj1 = ImageDisplay(
image=ImageResource(filename1)
)
view = View(
Item(
'image',
editor=ImageEditor()
)
)
# This should not fail.
with create_ui(obj1, dict(view=view)) as ui:
obj1.image = ImageResource(filename2)
def test_image_editor_array(self):
try:
import numpy as np
from pyface.api import ArrayImage
except ImportError:
self.skipTest("NumPy is not available")
gradient1 = np.empty(shape=(256, 256, 3), dtype='uint8')
gradient1[:, :, 0] = np.arange(256).reshape(256, 1)
gradient1[:, :, 1] = np.arange(256).reshape(1, 256)
gradient1[:, :, 2] = np.arange(255, -1, -1).reshape(1, 256)
gradient2 = np.empty(shape=(256, 256, 3), dtype='uint8')
gradient2[:, :, 0] = np.arange(255, -1, -1).reshape(256, 1)
gradient2[:, :, 1] = np.arange(256).reshape(1, 256)
gradient2[:, :, 2] = np.arange(255, -1, -1).reshape(1, 256)
obj1 = ImageDisplay(
image=ArrayImage(data=gradient1)
)
view = View(
Item(
'image',
editor=ImageEditor()
)
)
# This should not fail.
with create_ui(obj1, dict(view=view)) as ui:
obj1.image = ArrayImage(data=gradient2)
@unittest.skipIf(is_mac_os, "Segfault on MacOS, see issue #1979")
def test_image_editor_pillow(self):
try:
import PIL.Image
from pyface.api import PILImage
except ImportError:
self.skipTest("Pillow is not available")
if is_qt:
try:
# is ImageQt available as well
from PIL.ImageQt import ImageQt
except ImportError:
self.skipTest("ImageQt is not available")
pil_image_1 = PIL.Image.open(filename1)
pil_image_2 = PIL.Image.open(filename2)
obj1 = ImageDisplay(
image=PILImage(image=pil_image_1)
)
view = View(
Item(
'image',
editor=ImageEditor()
)
)
# This should not fail.
with create_ui(obj1, dict(view=view)) as ui:
obj1.image = PILImage(image=pil_image_2)
def test_image_editor_none(self):
obj1 = ImageDisplay(image=None)
view = View(
Item(
'image',
editor=ImageEditor()
)
)
# This should not fail.
with create_ui(obj1, dict(view=view)) as ui:
pass
|
3,939 |
pid
|
from __future__ import annotations
import shlex
import string
import pwndbg.auxv
import pwndbg.commands
import pwndbg.gdblib.file
import pwndbg.gdblib.net
import pwndbg.gdblib.proc
import pwndbg.lib.cache
from pwndbg.color import message
from pwndbg.commands import CommandCategory
"""
PEDA prints it out like this:
exe = /bin/bash
fd[0] -> /dev/pts/96
fd[1] -> /dev/pts/96
fd[2] -> /dev/pts/96
pid = 31102
ppid = 31096
uid = [287138, 287138, 287138, 287138]
gid = [5000, 5000, 5000, 5000]
"""
capabilities = {
0: "CAP_CHOWN",
1: "CAP_DAC_OVERRIDE",
2: "CAP_DAC_READ_SEARCH",
3: "CAP_FOWNER",
4: "CAP_FSETID",
5: "CAP_KILL",
6: "CAP_SETGID",
7: "CAP_SETUID",
8: "CAP_SETPCAP",
9: "CAP_LINUX_IMMUTABLE",
10: "CAP_NET_BIND_SERVICE",
11: "CAP_NET_BROADCAST",
12: "CAP_NET_ADMIN",
13: "CAP_NET_RAW",
14: "CAP_IPC_LOCK",
15: "CAP_IPC_OWNER",
16: "CAP_SYS_MODULE",
17: "CAP_SYS_RAWIO",
18: "CAP_SYS_CHROOT",
19: "CAP_SYS_PTRACE",
20: "CAP_SYS_PACCT",
21: "CAP_SYS_ADMIN",
22: "CAP_SYS_BOOT",
23: "CAP_SYS_NICE",
24: "CAP_SYS_RESOURCE",
25: "CAP_SYS_TIME",
26: "CAP_SYS_TTY_CONFIG",
27: "CAP_MKNOD",
28: "CAP_LEASE",
29: "CAP_AUDIT_WRITE",
30: "CAP_AUDIT_CONTROL",
31: "CAP_SETFCAP",
32: "CAP_MAC_OVERRIDE",
33: "CAP_MAC_ADMIN",
34: "CAP_SYSLOG",
35: "CAP_WAKE_ALARM",
36: "CAP_BLOCK_SUSPEND",
}
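# Hedged worked example: Process.status below decodes capability bitmasks with
# the same bit test, e.g. a CapEff value of 0x21 (bits 0 and 5 set) maps to
#     [capabilities[i] for i in range(max(capabilities) + 1) if (0x21 >> i) & 1]
# which evaluates to ['CAP_CHOWN', 'CAP_KILL'].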
class Process:
def __init__(self, METHOD_NAME=None, tid=None) -> None:
if METHOD_NAME is None:
METHOD_NAME = pwndbg.gdblib.proc.METHOD_NAME
if tid is None:
tid = pwndbg.gdblib.proc.tid
if not tid:
tid = METHOD_NAME
self.METHOD_NAME = METHOD_NAME
self.tid = tid
@property
@pwndbg.lib.cache.cache_until("stop")
def selinux(self):
path = "/proc/%i/task/%i/attr/current" % (self.METHOD_NAME, self.tid)
raw = pwndbg.gdblib.file.get(path)
return raw.decode().rstrip("\x00").strip()
@property
@pwndbg.lib.cache.cache_until("stop")
def cmdline(self):
raw = pwndbg.gdblib.file.get(f"/proc/{self.METHOD_NAME}/cmdline")
return " ".join(map(shlex.quote, raw.decode().split("\x00")))
@property
@pwndbg.lib.cache.cache_until("stop")
def cwd(self):
link = pwndbg.gdblib.file.readlink(f"/proc/{self.METHOD_NAME}/cwd")
return f"'{link}'"
@property
@pwndbg.lib.cache.cache_until("stop")
def status(self):
raw = pwndbg.gdblib.file.get("/proc/%i/task/%i/status" % (self.METHOD_NAME, self.tid))
status = {}
for line in raw.splitlines():
if not line:
continue
k_v = line.split(maxsplit=1)
if len(k_v) == 1:
k_v.append(b"")
k, v = k_v
k = k.decode("latin-1")
v = v.decode("latin-1")
k = k.lower().rstrip(":")
# bit fields
if set(v) < set(string.hexdigits) and len(v) == 16:
try:
v = int(v, 16)
except AttributeError:
pass
# vm stats
elif v.endswith(" kB"):
v = int(v.split()[0]) * (1 << 10)
elif v.endswith(" mB"):
v = int(v.split()[0]) * (1 << 20)
# misc integers like pid and ppid
elif str(v).isdigit():
v = int(v)
# uid and gid and groups
elif all(s.isdigit() for s in v.split()):
v = list(map(int, v.split()))
# capability sets
if k in ["capeff", "capinh", "capprm", "capbnd"]:
orig: int = v
v = []
for i in range(max(capabilities) + 1):
if (orig >> i) & 1 == 1:
v.append(capabilities[i])
status[k] = v
setattr(self, k, v)
return status
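# Note: the setattr() call above also exposes every parsed key as an attribute
# (ppid, uid, gid, groups, fdsize, ...), which is what open_files and
# procinfo() below rely on.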
@property
@pwndbg.lib.cache.cache_until("stop")
def open_files(self):
fds = {}
for i in range(self.fdsize):
link = pwndbg.gdblib.file.readlink("/proc/%i/fd/%i" % (pwndbg.gdblib.proc.METHOD_NAME, i))
if link:
fds[i] = link
return fds
@property
@pwndbg.lib.cache.cache_until("stop")
def connections(self):
# Connections look something like this:
# socket:[102422]
fds = self.open_files
socket = "socket:["
result = []
functions = [pwndbg.gdblib.net.tcp, pwndbg.gdblib.net.unix, pwndbg.gdblib.net.netlink]
for fd, path in fds.items():
if socket not in path:
continue
inode = path[len(socket) : -1]
inode = int(inode)
for func in functions:
for x in func():
if x.inode == inode:
x.fd = fd
result.append(x)
return tuple(result)
@pwndbg.commands.ArgparsedCommand(
"Gets the pid.", aliases=["getpid"], category=CommandCategory.PROCESS
)
@pwndbg.commands.OnlyWhenRunning
def METHOD_NAME() -> None:
print(pwndbg.gdblib.proc.METHOD_NAME)
@pwndbg.commands.ArgparsedCommand(
"Display information about the running process.", category=CommandCategory.PROCESS
)
@pwndbg.commands.OnlyWhenRunning
def procinfo() -> None:
"""
Display information about the running process.
"""
if pwndbg.gdblib.qemu.is_qemu():
print(
message.error(
"QEMU target detected: showing result for the qemu process"
" - so it will be a bit inaccurate (excessive for the parts"
" used directly by the qemu process)"
)
)
exe = pwndbg.auxv.get()["AT_EXECFN"]
print("%-10s %r" % ("exe", exe))
proc = Process()
# qemu-usermode fail!
if not proc.status:
return
print("%-10s %s" % ("cmdline", proc.cmdline))
print("%-10s %s" % ("cwd", proc.cwd))
files = dict(proc.open_files)
for c in proc.connections:
files[c.fd] = str(c)
print("%-10s %s" % ("pid", proc.METHOD_NAME))
print("%-10s %s" % ("tid", proc.tid))
if proc.selinux != "unconfined":
print("%-10s %s" % ("selinux", proc.selinux))
print("%-10s %s" % ("ppid", proc.ppid))
if not pwndbg.gdblib.android.is_android():
print("%-10s %s" % ("uid", proc.uid))
print("%-10s %s" % ("gid", proc.gid))
print("%-10s %s" % ("groups", proc.groups))
else:
print("%-10s %s" % ("uid", list(map(pwndbg.lib.android.aid_name, proc.uid))))
print("%-10s %s" % ("gid", list(map(pwndbg.lib.android.aid_name, proc.gid))))
print("%-10s %s" % ("groups", list(map(pwndbg.lib.android.aid_name, proc.groups))))
for fd, path in files.items():
if not set(path) < set(string.printable):
path = repr(path)
print("%-10s %s" % ("fd[%i]" % fd, path))
return
|
3,940 |
create valid node values
|
from __future__ import annotations
import contextlib
import functools
import os
import pathlib
import subprocess
from dataclasses import dataclass
from typing import IO, TYPE_CHECKING, Any, Dict, Iterator, List, Optional, Union
from chia.data_layer.data_layer_util import NodeType, Side, Status
from chia.data_layer.data_store import DataStore
from chia.types.blockchain_format.program import Program
from chia.types.blockchain_format.sized_bytes import bytes32
# from subprocess.pyi
_FILE = Union[None, int, IO[Any]]
if TYPE_CHECKING:
# these require Python 3.9 at runtime
os_PathLike_str = os.PathLike[str]
subprocess_CompletedProcess_str = subprocess.CompletedProcess[str]
else:
os_PathLike_str = os.PathLike
subprocess_CompletedProcess_str = subprocess.CompletedProcess
async def general_insert(
data_store: DataStore,
tree_id: bytes32,
key: bytes,
value: bytes,
reference_node_hash: bytes32,
side: Optional[Side],
) -> bytes32:
insert_result = await data_store.insert(
key=key,
value=value,
tree_id=tree_id,
reference_node_hash=reference_node_hash,
side=side,
status=Status.COMMITTED,
)
return insert_result.node_hash
@dataclass(frozen=True)
class Example:
expected: Program
terminal_nodes: List[bytes32]
async def add_0123_example(data_store: DataStore, tree_id: bytes32) -> Example:
expected = Program.to(
(
(
(b"\x00", b"\x10\x00"),
(b"\x01", b"\x11\x01"),
),
(
(b"\x02", b"\x12\x02"),
(b"\x03", b"\x13\x03"),
),
),
)
insert = functools.partial(general_insert, data_store=data_store, tree_id=tree_id)
c_hash = await insert(key=b"\x02", value=b"\x12\x02", reference_node_hash=None, side=None)
b_hash = await insert(key=b"\x01", value=b"\x11\x01", reference_node_hash=c_hash, side=Side.LEFT)
d_hash = await insert(key=b"\x03", value=b"\x13\x03", reference_node_hash=c_hash, side=Side.RIGHT)
a_hash = await insert(key=b"\x00", value=b"\x10\x00", reference_node_hash=b_hash, side=Side.LEFT)
return Example(expected=expected, terminal_nodes=[a_hash, b_hash, c_hash, d_hash])
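# Hedged illustration of the insertion order above: 02 is the first (root)
# terminal, 01 goes LEFT of 02, 03 goes RIGHT of 02, and 00 goes LEFT of 01,
# which yields the balanced shape encoded in `expected`:
#
#            root
#           /    \
#      (00, 01) (02, 03)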
async def add_01234567_example(data_store: DataStore, tree_id: bytes32) -> Example:
expected = Program.to(
(
(
(
(b"\x00", b"\x10\x00"),
(b"\x01", b"\x11\x01"),
),
(
(b"\x02", b"\x12\x02"),
(b"\x03", b"\x13\x03"),
),
),
(
(
(b"\x04", b"\x14\x04"),
(b"\x05", b"\x15\x05"),
),
(
(b"\x06", b"\x16\x06"),
(b"\x07", b"\x17\x07"),
),
),
),
)
insert = functools.partial(general_insert, data_store=data_store, tree_id=tree_id)
g_hash = await insert(key=b"\x06", value=b"\x16\x06", reference_node_hash=None, side=None)
c_hash = await insert(key=b"\x02", value=b"\x12\x02", reference_node_hash=g_hash, side=Side.LEFT)
b_hash = await insert(key=b"\x01", value=b"\x11\x01", reference_node_hash=c_hash, side=Side.LEFT)
d_hash = await insert(key=b"\x03", value=b"\x13\x03", reference_node_hash=c_hash, side=Side.RIGHT)
a_hash = await insert(key=b"\x00", value=b"\x10\x00", reference_node_hash=b_hash, side=Side.LEFT)
f_hash = await insert(key=b"\x05", value=b"\x15\x05", reference_node_hash=g_hash, side=Side.LEFT)
h_hash = await insert(key=b"\x07", value=b"\x17\x07", reference_node_hash=g_hash, side=Side.RIGHT)
e_hash = await insert(key=b"\x04", value=b"\x14\x04", reference_node_hash=f_hash, side=Side.LEFT)
return Example(expected=expected, terminal_nodes=[a_hash, b_hash, c_hash, d_hash, e_hash, f_hash, g_hash, h_hash])
@dataclass
class ChiaRoot:
path: pathlib.Path
scripts_path: pathlib.Path
def run(
self,
args: List[Union[str, os_PathLike_str]],
*other_args: Any,
check: bool = True,
encoding: str = "utf-8",
stdout: Optional[_FILE] = subprocess.PIPE,
stderr: Optional[_FILE] = subprocess.PIPE,
**kwargs: Any,
) -> subprocess_CompletedProcess_str:
# TODO: --root-path doesn't seem to work here...
kwargs.setdefault("env", {})
kwargs["env"]["CHIA_ROOT"] = os.fspath(self.path)
kwargs["env"]["CHIA_KEYS_ROOT"] = os.fspath(self.path)
# This is for windows
if "SYSTEMROOT" in os.environ:
kwargs["env"]["SYSTEMROOT"] = os.environ["SYSTEMROOT"]
modified_args: List[Union[str, os_PathLike_str]] = [
self.scripts_path.joinpath("chia"),
"--root-path",
self.path,
*args,
]
processed_args: List[str] = [os.fspath(element) for element in modified_args]
final_args = [processed_args, *other_args]
kwargs["check"] = check
kwargs["encoding"] = encoding
kwargs["stdout"] = stdout
kwargs["stderr"] = stderr
return subprocess.run(*final_args, **kwargs)
def read_log(self) -> str:
return self.path.joinpath("log", "debug.log").read_text(encoding="utf-8")
def print_log(self) -> None:
log_text: Optional[str]
try:
log_text = self.read_log()
except FileNotFoundError:
log_text = None
if log_text is None:
print(f"---- no log at: {self.path}")
else:
print(f"---- start of: {self.path}")
print(log_text)
print(f"---- end of: {self.path}")
@contextlib.contextmanager
def print_log_after(self) -> Iterator[None]:
try:
yield
finally:
self.print_log()
def METHOD_NAME(
node_type: NodeType,
left_hash: Optional[bytes32] = None,
right_hash: Optional[bytes32] = None,
) -> Dict[str, Any]:
if node_type == NodeType.INTERNAL:
return {
"hash": Program.to((left_hash, right_hash)).get_tree_hash_precalc(left_hash, right_hash),
"node_type": node_type,
"left": left_hash,
"right": right_hash,
"key": None,
"value": None,
}
elif node_type == NodeType.TERMINAL:
key = b""
value = b""
return {
"hash": Program.to((key, value)).get_tree_hash(),
"node_type": node_type,
"left": None,
"right": None,
"key": key,
"value": value,
}
raise Exception(f"Unhandled node type: {node_type!r}")
|
3,941 |
generate
|
from conan import ConanFile
from conan.errors import ConanInvalidConfiguration
from conan.tools.apple import is_apple_os
from conan.tools.files import apply_conandata_patches, export_conandata_patches, get, copy, rmdir
from conan.tools.scm import Version
from conan.tools.cmake import CMake, CMakeToolchain, cmake_layout
import os
required_conan_version = ">=1.53.0"
class SquirrelConan(ConanFile):
name = "squirrel"
description = "Squirrel is a high level imperative, object-oriented programming " \
"language, designed to be a light-weight scripting language that " \
"fits in the size, memory bandwidth, and real-time requirements " \
"of applications like video games."
license = "MIT"
url = "https://github.com/conan-io/conan-center-index"
homepage = "http://www.squirrel-lang.org/"
topics = ("programming-language", "object-oriented", "scripting")
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
}
def export_sources(self):
export_conandata_patches(self)
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
if self.options.shared:
self.options.rm_safe("fPIC")
def layout(self):
cmake_layout(self, src_folder="src")
def validate(self):
if Version(self.version) <= "3.1":
if is_apple_os(self):
raise ConanInvalidConfiguration(f"{self.ref} and earlier does not support Macos")
if self.settings.compiler == "clang":
compiler_version = Version(self.settings.compiler.version)
if compiler_version < "9" or compiler_version >= "11":
raise ConanInvalidConfiguration(
f"{self.ref} and earlier does not support Clang {compiler_version}"
)
if self.settings.compiler == "gcc":
compiler_version = Version(self.settings.compiler.version)
if compiler_version >= "12":
raise ConanInvalidConfiguration(
f"{self.ref} and earlier does not support gcc {compiler_version}"
)
def source(self):
get(self, **self.conan_data["sources"][self.version], strip_root=True)
def METHOD_NAME(self):
tc = CMakeToolchain(self)
tc.variables["DISABLE_DYNAMIC"] = not self.options.shared
tc.variables["DISABLE_STATIC"] = self.options.shared
tc.METHOD_NAME()
def build(self):
apply_conandata_patches(self)
cmake = CMake(self)
cmake.configure()
cmake.build()
def package(self):
copy(self, pattern="LICENSE", dst=os.path.join(self.package_folder, "licenses"), src=self.source_folder)
copy(self, pattern="COPYRIGHT", dst=os.path.join(self.package_folder, "licenses"), src=self.source_folder)
cmake = CMake(self)
cmake.install()
rmdir(self, os.path.join(self.package_folder, "lib", "cmake"))
def package_info(self):
self.cpp_info.set_property("cmake_file_name", "squirrel")
# The CMakeDeps generator uses the global target when a downstream recipe depends on squirrel globally,
# but squirrel::squirrel is claimed by the libsquirrel component (when shared), which doesn't depend on the other components.
# This unofficial target is therefore created as a workaround.
self.cpp_info.set_property("cmake_target_name", "squirrel::squirel-all-do-not-use")
suffix = "" if self.options.shared else "_static"
# squirrel
self.cpp_info.components["libsquirrel"].set_property("cmake_target_name", f"squirrel::squirrel{suffix}")
self.cpp_info.components["libsquirrel"].libs = [f"squirrel{suffix}"]
if self.settings.os in ["Linux", "FreeBSD"]:
self.cpp_info.components["libsquirrel"].system_libs.append("m")
# sqstdlib
self.cpp_info.components["sqstdlib"].set_property("cmake_target_name", f"squirrel::sqstdlib{suffix}")
self.cpp_info.components["sqstdlib"].libs = [f"sqstdlib{suffix}"]
self.cpp_info.components["sqstdlib"].requires = ["libsquirrel"]
binpath = os.path.join(self.package_folder, "bin")
self.output.info(f"Appending PATH env var : {binpath}")
self.env_info.PATH.append(binpath)
# TODO: to remove in conan v2 once cmake_find_package_* generators removed
self.cpp_info.components["libsquirrel"].names["cmake_find_package"] = f"squirrel{suffix}"
self.cpp_info.components["libsquirrel"].names["cmake_find_package_multi"] = f"squirrel{suffix}"
self.cpp_info.components["sqstdlib"].names["cmake_find_package"] = f"sqstdlib{suffix}"
self.cpp_info.components["sqstdlib"].names["cmake_find_package_multi"] = f"sqstdlib{suffix}"
|
3,942 |
increment read counters
|
import errno
from leapp.libraries.actor import spamassassinconfigread_spamc
from leapp.libraries.common.testutils import make_IOError
class MockFileOperations(object):
def __init__(self, to_raise=None):
self.files = {}
self.files_read = {}
self.read_called = 0
self.to_raise = to_raise
def METHOD_NAME(self, path):
self.read_called += 1
self.files_read.setdefault(path, 0)
self.files_read[path] += 1
def read(self, path):
self.METHOD_NAME(path)
if self.to_raise is not None:
raise self.to_raise
try:
return self.files[path]
except KeyError:
raise make_IOError(errno.ENOENT)
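# The mock above counts every read in files_read via METHOD_NAME, which is how
# the get_spamc_ssl_argument tests below assert that the config file is read
# exactly once ({path: 1}) even when it is missing or unreadable.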
def test_parse_spamc_ssl_argument():
value = spamassassinconfigread_spamc._parse_spamc_ssl_argument('--ssl sslv3')
assert value == 'sslv3'
# equal sign format
value = spamassassinconfigread_spamc._parse_spamc_ssl_argument('--ssl=tlsv1')
assert value == 'tlsv1'
def test_parse_spamc_ssl_argument_without_valid_argument():
# unknown argument
value = spamassassinconfigread_spamc._parse_spamc_ssl_argument('--ssl foo')
assert value is None
# --ssl followed by another option
value = spamassassinconfigread_spamc._parse_spamc_ssl_argument('--ssl -B')
assert value is None
# space surrounding the equal sign. This amounts to an unrecognized
# argument (empty string)
value = spamassassinconfigread_spamc._parse_spamc_ssl_argument('--ssl= tlsv1')
assert value is None
# space surrounding the equal sign. This amounts to an unrecognized
# argument ("=tlsv1")
value = spamassassinconfigread_spamc._parse_spamc_ssl_argument('--ssl =tlsv1')
assert value is None
def test_parse_spamc_ssl_argument_multiline():
value = spamassassinconfigread_spamc._parse_spamc_ssl_argument('-B --ssl \n sslv3 -c\n-H')
assert value == 'sslv3'
def test_parse_spamc_ssl_argument_tls_supersedes_ssl():
# Due to the way the spamc cmdline parser works, if '--ssl tlsv1' is
# specified, all other --ssl options are effectively ignored and tlsv1 is
# used.
value = spamassassinconfigread_spamc._parse_spamc_ssl_argument('--ssl sslv3 --ssl tlsv1')
assert value == 'tlsv1'
# reverse order
value = spamassassinconfigread_spamc._parse_spamc_ssl_argument('--ssl tlsv1 --ssl sslv3')
assert value == 'tlsv1'
def test_parse_spamc_ssl_argument_comment():
value = spamassassinconfigread_spamc._parse_spamc_ssl_argument('# --ssl tlsv1')
assert value is None
# comment mixed with actual option
value = spamassassinconfigread_spamc._parse_spamc_ssl_argument('# --ssl tlsv1\n--ssl sslv3')
assert value == 'sslv3'
# comment mixed with actual option
value = spamassassinconfigread_spamc._parse_spamc_ssl_argument('--ssl tlsv1\n# --ssl sslv3')
assert value == 'tlsv1'
def test_parse_spamc_ssl_argument_crazy_corner_cases():
# The option and value can have a comment in between
value = spamassassinconfigread_spamc._parse_spamc_ssl_argument('--ssl\n# foo\ntlsv1')
assert value == 'tlsv1'
value = spamassassinconfigread_spamc._parse_spamc_ssl_argument('--ssl\n# foo\n# bar\nsslv3')
assert value == 'sslv3'
value = spamassassinconfigread_spamc._parse_spamc_ssl_argument('--ssl\n# foo\n# tlsv1\nsslv3')
assert value == 'sslv3'
def test_get_spamc_ssl_argument():
path = '/etc/mail/spamassassin/spamc.conf'
fileops = MockFileOperations()
fileops.files[path] = '--ssl sslv3'
value = spamassassinconfigread_spamc.get_spamc_ssl_argument(fileops.read)
assert fileops.files_read == {path: 1}
assert value == 'sslv3'
def test_get_spamc_ssl_argument_empty():
path = '/etc/mail/spamassassin/spamc.conf'
fileops = MockFileOperations()
fileops.files[path] = ''
value = spamassassinconfigread_spamc.get_spamc_ssl_argument(fileops.read)
assert fileops.files_read == {path: 1}
assert value is None
def test_get_spamc_ssl_argument_nonexistent():
path = '/etc/mail/spamassassin/spamc.conf'
fileops = MockFileOperations()
value = spamassassinconfigread_spamc.get_spamc_ssl_argument(fileops.read)
assert fileops.files_read == {path: 1}
assert value is None
def test_get_spamc_ssl_argument_inaccessible():
path = '/etc/mail/spamassassin/spamc.conf'
fileops = MockFileOperations(to_raise=make_IOError(errno.EACCES))
value = spamassassinconfigread_spamc.get_spamc_ssl_argument(fileops.read)
assert fileops.files_read == {path: 1}
assert value is None
|
3,943 |
reload
|
import web
import datetime
from infogami.infobase import client
from openlibrary.core import helpers as h
import contextlib
__all__ = ["InvalidationProcessor"]
class InvalidationProcessor:
"""Application processor to invalidate/update locally cached documents.
The openlibrary application caches some documents like templates, macros,
javascripts etc. locally for variety of reasons. This class implements a
way to make sure those documents are kept up-to-date with the db within
some allowed constraints.
This implements a kind of lazy consistency, which guarantees the following:
* If a client makes an update, he will continue to see that update on
subsequent requests.
* If a client sees an update made by somebody else, he will continue to
see that update on subsequent requests.
* A client sees an older version of a document for no longer than the specified
timeout (in seconds) after the document is updated.
It means that the following conditions will never happen:
* A client edits a page and reloading the same page shows an older
version.
* A client loads a page and reloading the same page shows an older version.
* A client continues to see an older version of a document for a very long time.
It is implemented as follows:
* If there is an update, set a cookie with time of the update as value.
* If the cookie timestamp is more than the last_poll_time, trigger reload.
* If the cookie timestamp is less than the last_update_time, set the
cookie with last_update_time.
* If the current time is more than timeout seconds since last_poll_time,
trigger reload.
When the reload is triggered:
* A request to the database is made to find the list of documents modified after the last_poll_time.
* Trigger on_new_version event for each modified document. The application
code that is handling the caching must listen to that event and
invalidate/update its cached copy.
How to use::
from infogami.utils import delegate
from infogami.infobase import client
p = InvalidationProcessor(["/templates/", "/macros/"])
# install the application processor
delegate.app.add_processor(p)
# add the hook to get notifications when a document is modified
client.hooks.append(p.hook)
Glossary:
* cookie_timestamp: value of the invalidation cookie.
* last_poll_time: timestamp of the latest reload
* last_update_time: timestamp of the most recent update known to this
process.
"""
def __init__(self, prefixes, timeout=60, cookie_name="lastupdate"):
self.prefixes = prefixes
self.timeout = datetime.timedelta(0, timeout)
self.cookie_name = cookie_name
self.last_poll_time = datetime.datetime.utcnow()
self.last_update_time = self.last_poll_time
# set expire_time to several times the timeout so the cookie outlives it
self.expire_time = 3 * timeout
self.hook = _InvalidationHook(
prefixes=prefixes, cookie_name=cookie_name, expire_time=self.expire_time
)
def __call__(self, handler):
def t(date):
return date.isoformat().split("T")[-1]
cookie_time = self.get_cookie_time()
if self.is_timeout() or cookie_time and cookie_time > self.last_poll_time:
self.METHOD_NAME()
# last update in recent timeout seconds?
has_recent_update = (self.last_poll_time - self.last_update_time) < self.timeout
if has_recent_update and (
cookie_time is None or cookie_time < self.last_update_time
):
web.setcookie(
self.cookie_name,
self.last_update_time.isoformat(),
expires=self.expire_time,
)
return handler()
def is_timeout(self):
t = datetime.datetime.utcnow()
dt = t - self.last_poll_time
return dt > self.timeout
def get_cookie_time(self):
cookies = web.cookies()
if self.cookie_name in cookies:
return self.parse_datetime(cookies[self.cookie_name])
def parse_datetime(self, datestr):
try:
return h.parse_datetime(datestr)
except ValueError:
return None
def METHOD_NAME(self):
"""Triggers on_new_version event for all the documents modified since last_poll_time."""
t = datetime.datetime.utcnow()
reloaded = False
keys = []
for prefix in self.prefixes:
q = {
"key~": prefix + "*",
"last_modified>": self.last_poll_time.isoformat(),
"limit": 1000,
}
keys += web.ctx.site.things(q)
if keys:
web.ctx._invalidation_inprogress = True
docs = web.ctx.site.get_many(keys)
for doc in docs:
with contextlib.suppress(Exception):
client._run_hooks("on_new_version", doc)
self.last_update_time = max(doc.last_modified for doc in docs)
reloaded = True
del web.ctx._invalidation_inprogress
self.last_poll_time = t
return reloaded
class _InvalidationHook:
"""Infogami client hook to get notification on edits.
This sets a cookie when any of the documents under the given prefixes is modified.
"""
def __init__(self, prefixes, cookie_name, expire_time):
self.prefixes = prefixes
self.cookie_name = cookie_name
self.expire_time = expire_time
def __call__(self):
return self
def on_new_version(self, doc):
if web.ctx.get("_invalidation_inprogress"):
# This event is triggered from invalidation. ignore it.
return
if any(doc.key.startswith(prefix) for prefix in self.prefixes):
# The supplied doc doesn't have the updated last_modified time.
# Fetch the document afresh to get the correct last_modified time.
doc = web.ctx.site.get(doc.key)
t = doc.last_modified
web.setcookie(self.cookie_name, t.isoformat(), expires=self.expire_time)
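# Hedged walk-through of the cookie protocol above (timestamps invented,
# timeout=60):
#   t=0   a client edits /templates/x -> the hook stamps its cookie with t0
#   t=5   that client requests a page: cookie(t0) > last_poll_time, so the
#         processor reloads and last_update_time becomes the edit time t0
#   t=8   a cookie-less client requests a page: nothing to reload, but it is
#         handed a cookie stamped with t0 because the update is still recent
#   t=70  the next request exceeds the 60s timeout and triggers a periodic
#         reload even without any cookie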
|
3,944 |
test wchar ptr
|
import unittest
from ctypes import *
import _ctypes_test
class SlicesTestCase(unittest.TestCase):
def test_getslice_cint(self):
a = (c_int * 100)(*xrange(1100, 1200))
b = range(1100, 1200)
self.assertEqual(a[0:2], b[0:2])
self.assertEqual(a[0:2:], b[0:2:])
self.assertEqual(len(a), len(b))
self.assertEqual(a[5:7], b[5:7])
self.assertEqual(a[5:7:], b[5:7:])
self.assertEqual(a[-1], b[-1])
self.assertEqual(a[:], b[:])
self.assertEqual(a[::], b[::])
self.assertEqual(a[10::-1], b[10::-1])
self.assertEqual(a[30:20:-1], b[30:20:-1])
self.assertEqual(a[:12:6], b[:12:6])
self.assertEqual(a[2:6:4], b[2:6:4])
a[0:5] = range(5, 10)
self.assertEqual(a[0:5], range(5, 10))
self.assertEqual(a[0:5:], range(5, 10))
self.assertEqual(a[4::-1], range(9, 4, -1))
def test_setslice_cint(self):
a = (c_int * 100)(*xrange(1100, 1200))
b = range(1100, 1200)
a[32:47] = range(32, 47)
self.assertEqual(a[32:47], range(32, 47))
a[32:47] = range(132, 147)
self.assertEqual(a[32:47:], range(132, 147))
a[46:31:-1] = range(232, 247)
self.assertEqual(a[32:47:1], range(246, 231, -1))
a[32:47] = range(1132, 1147)
self.assertEqual(a[:], b)
a[32:47:7] = range(3)
b[32:47:7] = range(3)
self.assertEqual(a[:], b)
a[33::-3] = range(12)
b[33::-3] = range(12)
self.assertEqual(a[:], b)
from operator import setslice, setitem
# TypeError: int expected instead of str instance
self.assertRaises(TypeError, setslice, a, 0, 5, "abcde")
self.assertRaises(TypeError, setitem, a, slice(0, 5), "abcde")
# TypeError: int expected instead of str instance
self.assertRaises(TypeError, setslice, a, 0, 5, ["a", "b", "c", "d", "e"])
self.assertRaises(TypeError, setitem, a, slice(0, 5),
["a", "b", "c", "d", "e"])
# TypeError: int expected instead of float instance
self.assertRaises(TypeError, setslice, a, 0, 5, [1, 2, 3, 4, 3.14])
self.assertRaises(TypeError, setitem, a, slice(0, 5),
[1, 2, 3, 4, 3.14])
# ValueError: Can only assign sequence of same size
self.assertRaises(ValueError, setslice, a, 0, 5, range(32))
self.assertRaises(ValueError, setitem, a, slice(0, 5), range(32))
def test_char_ptr(self):
s = "abcdefghijklmnopqrstuvwxyz"
dll = CDLL(_ctypes_test.__file__)
dll.my_strdup.restype = POINTER(c_char)
dll.my_free.restype = None
res = dll.my_strdup(s)
self.assertEqual(res[:len(s)], s)
self.assertEqual(res[:3], s[:3])
self.assertEqual(res[:len(s):], s)
self.assertEqual(res[len(s)-1:-1:-1], s[::-1])
self.assertEqual(res[len(s)-1:5:-7], s[:5:-7])
self.assertEqual(res[0:-1:-1], s[0::-1])
import operator
self.assertRaises(ValueError, operator.getitem,
res, slice(None, None, None))
self.assertRaises(ValueError, operator.getitem,
res, slice(0, None, None))
self.assertRaises(ValueError, operator.getitem,
res, slice(None, 5, -1))
self.assertRaises(ValueError, operator.getitem,
res, slice(-5, None, None))
self.assertRaises(TypeError, operator.setslice,
res, 0, 5, u"abcde")
self.assertRaises(TypeError, operator.setitem,
res, slice(0, 5), u"abcde")
dll.my_free(res)
dll.my_strdup.restype = POINTER(c_byte)
res = dll.my_strdup(s)
self.assertEqual(res[:len(s)], range(ord("a"), ord("z")+1))
self.assertEqual(res[:len(s):], range(ord("a"), ord("z")+1))
dll.my_free(res)
def test_char_ptr_with_free(self):
dll = CDLL(_ctypes_test.__file__)
s = "abcdefghijklmnopqrstuvwxyz"
class allocated_c_char_p(c_char_p):
pass
dll.my_free.restype = None
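# errcheck lets ctypes post-process every foreign call: the hook below copies
# the returned C string out and frees the strdup'd buffer before the caller
# ever sees the pointer.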
def errcheck(result, func, args):
retval = result.value
dll.my_free(result)
return retval
dll.my_strdup.restype = allocated_c_char_p
dll.my_strdup.errcheck = errcheck
try:
res = dll.my_strdup(s)
self.assertEqual(res, s)
finally:
del dll.my_strdup.errcheck
def test_char_array(self):
s = "abcdefghijklmnopqrstuvwxyz\0"
p = (c_char * 27)(*s)
self.assertEqual(p[:], s)
self.assertEqual(p[::], s)
self.assertEqual(p[::-1], s[::-1])
self.assertEqual(p[5::-2], s[5::-2])
self.assertEqual(p[2:5:-3], s[2:5:-3])
try:
c_wchar
except NameError:
pass
else:
def METHOD_NAME(self):
s = u"abcdefghijklmnopqrstuvwxyz\0"
dll = CDLL(_ctypes_test.__file__)
dll.my_wcsdup.restype = POINTER(c_wchar)
dll.my_wcsdup.argtypes = POINTER(c_wchar),
dll.my_free.restype = None
res = dll.my_wcsdup(s)
self.assertEqual(res[:len(s)], s)
self.assertEqual(res[:len(s):], s)
self.assertEqual(res[len(s)-1:-1:-1], s[::-1])
self.assertEqual(res[len(s)-1:5:-7], s[:5:-7])
import operator
self.assertRaises(TypeError, operator.setslice,
res, 0, 5, u"abcde")
self.assertRaises(TypeError, operator.setitem,
res, slice(0, 5), u"abcde")
dll.my_free(res)
if sizeof(c_wchar) == sizeof(c_short):
dll.my_wcsdup.restype = POINTER(c_short)
elif sizeof(c_wchar) == sizeof(c_int):
dll.my_wcsdup.restype = POINTER(c_int)
elif sizeof(c_wchar) == sizeof(c_long):
dll.my_wcsdup.restype = POINTER(c_long)
else:
return
res = dll.my_wcsdup(s)
tmpl = range(ord("a"), ord("z")+1)
self.assertEqual(res[:len(s)-1], tmpl)
self.assertEqual(res[:len(s)-1:], tmpl)
self.assertEqual(res[len(s)-2:-1:-1], tmpl[::-1])
self.assertEqual(res[len(s)-2:5:-7], tmpl[:5:-7])
dll.my_free(res)
################################################################
if __name__ == "__main__":
unittest.main()
|
3,945 |
list
|
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(**kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-02-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-02-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/providers/Microsoft.DataBoxEdge/operations")
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class Operations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.databoxedge.v2021_02_01.DataBoxEdgeManagementClient`'s
:attr:`operations` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = METHOD_NAME(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def METHOD_NAME(self, **kwargs: Any) -> Iterable["_models.Operation"]:
"""List all the supported operations.
List all the supported operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Operation or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.databoxedge.v2021_02_01.models.Operation]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-02-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-02-01"))
cls: ClsType[_models.OperationsList] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
api_version=api_version,
template_url=self.METHOD_NAME.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("OperationsList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
METHOD_NAME.metadata = {"url": "/providers/Microsoft.DataBoxEdge/operations"}
|
3,946 |
test disconnect proxy port to block
|
import pytest
from gaphor import UML
from gaphor.diagram.connectors import Connector
from gaphor.diagram.tests.fixtures import allow, connect, disconnect
from gaphor.SysML import sysml
from gaphor.SysML.blocks.block import BlockItem
from gaphor.SysML.blocks.connectors import BlockProperyProxyPortConnector
from gaphor.SysML.blocks.proxyport import ProxyPortItem
from gaphor.SysML.blocks.property import PropertyItem
from gaphor.UML.deployments import ConnectorItem
@pytest.fixture
def block_item(diagram, element_factory):
return diagram.create(BlockItem, subject=element_factory.create(sysml.Block))
@pytest.fixture
def property_item(diagram, element_factory):
type = element_factory.create(sysml.Block)
prop = diagram.create(PropertyItem, subject=element_factory.create(sysml.Property))
prop.subject.type = type
return prop
@pytest.fixture
def proxy_port_item(diagram):
return diagram.create(ProxyPortItem)
def connected_proxy_port_item(diagram, element_factory):
proxy_port_item = diagram.create(ProxyPortItem)
block_item = diagram.create(BlockItem, subject=element_factory.create(sysml.Block))
connector = Connector(block_item, proxy_port_item)
connector.connect(proxy_port_item.handles()[0], block_item.ports()[0])
return proxy_port_item
@pytest.fixture
def head_proxy_port_item(diagram, element_factory):
return connected_proxy_port_item(diagram, element_factory)
@pytest.fixture
def tail_proxy_port_item(diagram, element_factory):
return connected_proxy_port_item(diagram, element_factory)
@pytest.fixture
def other_proxy_port_item(diagram, element_factory):
return connected_proxy_port_item(diagram, element_factory)
@pytest.fixture
def connector_item(diagram):
return diagram.create(ConnectorItem)
def test_connection_is_allowed(block_item, proxy_port_item):
connector = Connector(block_item, proxy_port_item)
assert isinstance(connector, BlockProperyProxyPortConnector)
assert connector.allow(proxy_port_item.handles()[0], block_item.ports()[0])
def test_connect_proxy_port_to_block(block_item, proxy_port_item):
connector = Connector(block_item, proxy_port_item)
connected = connector.connect(proxy_port_item.handles()[0], block_item.ports()[0])
assert connected
assert proxy_port_item.subject
assert proxy_port_item.subject.encapsulatedClassifier is block_item.subject
assert proxy_port_item.subject in block_item.subject.ownedPort
def METHOD_NAME(block_item, proxy_port_item):
connector = Connector(block_item, proxy_port_item)
connector.connect(proxy_port_item.handles()[0], block_item.ports()[0])
connector.disconnect(proxy_port_item.handles()[0])
assert proxy_port_item.subject is None
assert proxy_port_item.diagram
def test_connect_proxy_port_to_property(property_item, proxy_port_item):
connector = Connector(property_item, proxy_port_item)
connected = connector.connect(
proxy_port_item.handles()[0], property_item.ports()[0]
)
assert connected
assert proxy_port_item.subject
assert proxy_port_item.subject.encapsulatedClassifier is property_item.subject.type
assert proxy_port_item.subject in property_item.subject.type.ownedPort
def test_allow_connector_to_proxy_port(
connector_item: ConnectorItem, head_proxy_port_item: ProxyPortItem
):
assert allow(connector_item, connector_item.handles()[0], head_proxy_port_item)
def test_connect_connector_on_one_end_to_proxy_port(
connector_item: ConnectorItem, head_proxy_port_item: ProxyPortItem
):
connect(connector_item, connector_item.handles()[0], head_proxy_port_item)
assert connector_item.subject is None
def test_connect_connector_on_both_ends_to_proxy_port(
connector_item: ConnectorItem,
head_proxy_port_item: ProxyPortItem,
tail_proxy_port_item: ProxyPortItem,
):
connect(connector_item, connector_item.handles()[0], head_proxy_port_item)
connect(connector_item, connector_item.handles()[1], tail_proxy_port_item)
assert connector_item.subject
assert head_proxy_port_item.subject in connector_item.subject.end[:].role
assert tail_proxy_port_item.subject in connector_item.subject.end[:].role
def test_disconnect_connector_from_proxy_port(
connector_item: ConnectorItem,
head_proxy_port_item: ProxyPortItem,
tail_proxy_port_item: ProxyPortItem,
element_factory,
):
connect(connector_item, connector_item.handles()[0], head_proxy_port_item)
connect(connector_item, connector_item.handles()[1], tail_proxy_port_item)
disconnect(connector_item, connector_item.handles()[0])
assert not connector_item.subject
assert element_factory.lselect(UML.Connector) == []
assert element_factory.lselect(UML.ConnectorEnd) == []
assert head_proxy_port_item.subject in element_factory.select(UML.Port)
assert tail_proxy_port_item.subject in element_factory.select(UML.Port)
|
3,947 |
put
|
import time
import logging
import functools
from typing import Dict
import tornado.web
from opentelemetry import trace
import numpy as np
from libertem.io.dataset import load, detect, get_dataset_cls
from .base import CORSMixin, log_message
from libertem.common.async_utils import sync_to_async
from .messages import Message
from .state import SharedState
log = logging.getLogger(__name__)
tracer = trace.get_tracer(__name__)
def prime_numba_cache(ds):
dtypes = (np.float32, None)
for dtype in dtypes:
roi = np.zeros(ds.shape.nav, dtype=bool).reshape((-1,))
roi[max(-ds._meta.sync_offset, 0)] = True
from libertem.udf.sum import SumUDF
from libertem.udf.raw import PickUDF
from libertem.io.corrections.corrset import CorrectionSet
from libertem.io.dataset.base import Negotiator
# need to have at least one UDF; here we run for both sum and pick
# to reduce the initial latency when switching to pick mode
udfs = [SumUDF(), PickUDF()]
neg = Negotiator()
if ds.supports_correction():
corr_dtypes = (np.float32, None)
else:
corr_dtypes = (None, )
for udf in udfs:
for corr_dtype in corr_dtypes:
if corr_dtype is not None:
corrections = CorrectionSet(dark=np.zeros(ds.shape.sig, dtype=corr_dtype))
else:
corrections = None
found_first_tile = False
for p in ds.get_partitions():
if found_first_tile:
break
p.set_corrections(corrections)
tiling_scheme = neg.get_scheme(
udfs=[udf],
dataset=ds,
approx_partition_shape=p.shape,
read_dtype=dtype,
roi=roi,
corrections=corrections,
)
for t in p.get_tiles(tiling_scheme=tiling_scheme, roi=roi):
found_first_tile = True
break
class DataSetDetailHandler(CORSMixin, tornado.web.RequestHandler):
def initialize(self, state: SharedState, event_registry):
self.state = state
self.dataset_state = state.dataset_state
self.event_registry = event_registry
async def delete(self, uuid):
try:
self.dataset_state[uuid]
except KeyError:
self.set_status(404, "dataset with uuid %s not found" % uuid)
return
await self.dataset_state.remove(uuid)
msg = Message(self.state).delete_dataset(uuid)
log_message(msg)
self.event_registry.broadcast_event(msg)
self.write(msg)
async def prime_numba_caches(self, ds):
executor = self.state.executor_state.get_executor()
log.info("starting warmup")
t0 = time.time()
# first: make sure the jited functions used for I/O are compiled
# by running a single-core workload on each host:
await executor.run_each_host(functools.partial(prime_numba_cache, ds=ds))
t1 = time.time()
# second: make sure each worker *process* has the jited functions
# loaded from the cache
# XXX doesn't seem to be needed actually!
# await executor.run_each_worker(functools.partial(prime_numba_cache, ds=ds))
# t2 = time.time()
log.info("warmup done, took %.3fs", (t1 - t0))
async def METHOD_NAME(self, uuid):
request_data: Dict = tornado.escape.json_decode(self.request.body)
params = request_data['dataset']['params']
params["type"] = ds_type = params["type"].upper()
cls = get_dataset_cls(ds_type)
ConverterCls = cls.get_msg_converter()
converter = ConverterCls()
try:
dataset_params = converter.to_python(params)
executor = self.state.executor_state.get_executor()
ds = await load(filetype=cls, executor=executor, enable_async=True, **dataset_params)
with tracer.start_as_current_span("prime_numba_caches"):
await self.prime_numba_caches(ds)
self.dataset_state.register(
uuid=uuid,
dataset=ds,
params=request_data['dataset'],
converted=dataset_params,
)
details = await self.dataset_state.serialize(dataset_id=uuid)
msg = Message(self.state).create_dataset(dataset=uuid, details=details)
log_message(msg)
self.write(msg)
self.event_registry.broadcast_event(msg)
except Exception as e:
if uuid in self.dataset_state:
await self.dataset_state.remove(uuid)
msg = Message(self.state).create_dataset_error(uuid, str(e))
log_message(msg, exception=True)
self.write(msg)
return
class DataSetDetectHandler(tornado.web.RequestHandler):
def initialize(self, state: SharedState, event_registry):
self.state = state
self.event_registry = event_registry
async def get(self):
path = self.request.arguments['path'][0].decode("utf8")
executor = self.state.executor_state.get_executor()
detected_params = await sync_to_async(
detect, path=path, executor=executor.ensure_sync()
)
if not detected_params:
msg = Message(self.state).dataset_detect_failed(path=path)
log_message(msg)
self.write(msg)
return
params = detected_params["parameters"]
info = {}
if "info" in detected_params:
info = detected_params["info"]
params.update({"type": detected_params["type"].upper()})
info.update({"type": detected_params["type"].upper()})
msg = Message(self.state).dataset_detect(params=params, info=info)
log_message(msg)
self.write(msg)
|
3,948 |
process pick points
|
from __future__ import absolute_import, division, print_function
# TODO:
# - cached scenes
from crys3d import hklview
import cctbx.miller.display
import wx
from math import sqrt
import sys
class hklview_2d (wx.Panel, cctbx.miller.display.render_2d) :
def __init__ (self, *args, **kwds) :
wx.Panel.__init__(self, *args, **kwds)
font = wx.Font(14, wx.MODERN, wx.NORMAL, wx.NORMAL)
self.SetFont(font)
self.Bind(wx.EVT_PAINT, self.OnPaint)
self.Bind(wx.EVT_LEFT_DOWN, self.OnLeftClick, self)
self.Bind(wx.EVT_LEFT_UP, self.OnLeftUp, self)
self.Bind(wx.EVT_MOTION, self.OnMouseMotion, self)
self.scene = None
self.miller_array = None
self.parent = self.GetParent()
self.settings = self.parent.settings
self.was_dragged = False
self.initLeft = None, None
self._points_2d = []
self._radii_2d = []
self._clicked = None
def GetSize (self) :
return wx.Panel.GetSize(self)
# XXX silent keyword 'zoom=False' is for compatibility with view_3d.py
def set_miller_array (self, array, zoom=False, merge=None) :
self.miller_array = array
self.merge = merge
if (array is not None) :
self.construct_reciprocal_space(merge=merge)
def construct_reciprocal_space (self, merge=None) :
self.scene = hklview.scene(miller_array=self.miller_array,
merge=merge,
settings=self.settings)
self._clicked = None
self.setup_colors()
def update_settings (self) :
self.construct_reciprocal_space(merge=self.merge)
self.Refresh()
def get_color (self, c) :
return (int(c[0]*255), int(c[1]*255), int(c[2]*255))
def draw_line (self, canvas, x1, y1, x2, y2) :
gc = canvas
x_axis = gc.CreatePath()
x_axis.MoveToPoint(x1, y1)
x_axis.AddLineToPoint(x2, y2)
x_axis.CloseSubpath()
gc.SetPen(wx.Pen(self.get_color(self._foreground)))
gc.PushState()
gc.StrokePath(x_axis)
gc.PopState()
def draw_text (self, canvas, text, x, y) :
gc = canvas
gc.SetPen(wx.Pen(self.get_color(self._foreground)))
gc.DrawText(text, x, y)
def draw_open_circle (self, canvas, x, y, radius, color=None) :
gc = canvas
path = gc.CreatePath()
path.AddCircle(0, 0, radius)
path.CloseSubpath()
gc.PushState()
gc.Translate(x,y)
gc.SetBrush(wx.TRANSPARENT_BRUSH)
if (color is None) :
color = self._foreground
pen = wx.Pen(self.get_color(color))
gc.SetPen(pen)
gc.StrokePath(path)
gc.PopState()
def draw_filled_circle (self, canvas, x, y, radius, color) :
gc = canvas
path = gc.CreatePath()
path.AddCircle(0, 0, radius)
path.CloseSubpath()
gc.PushState()
gc.Translate(x,y)
if (color is None) :
color = self._foreground
pen = wx.Pen(self.get_color(color))
brush = wx.Brush(self.get_color(color))
gc.SetPen(pen)
gc.SetBrush(brush)
gc.FillPath(path)
gc.PopState()
def paint (self, gc) :
font = self.GetFont()
font.SetFamily(wx.FONTFAMILY_MODERN)
if (self.settings.black_background) :
gc.SetFont(gc.CreateFont(font, (255,255,255)))
else :
gc.SetFont(gc.CreateFont(font, (0,0,0)))
self.render(gc)
def save_screen_shot (self, **kwds) :
pass
def METHOD_NAME (self, x, y) :
context = wx.ClientDC( self )
w, h = self.GetClientSize()
if wx.VERSION[0] > 3:
bitmap = wx.Bitmap( w, h, -1 )
else:
# https://discuss.wxpython.org/t/wx-bitmap-x-y-and-wx-bitmap-create-x-y-vs-wx-emptybitmap-x-y/29463
bitmap = wx.EmptyBitmap(w, h, -1)
memory = wx.MemoryDC(bitmap)
memory.SelectObject(bitmap)
memory.Blit(0, 0, w, h, context, 0, 0)
memory.SelectObject(wx.NullBitmap)
if (wx.Platform == '__WXMAC__') :
pixelData = wx.AlphaPixelData(bitmap)
pixelAccessor = pixelData.GetPixels()
pixelAccessor.MoveTo(pixelData, x, y)
c = pixelAccessor.Get()
else :
c = memory.GetPixel(x, y)
bg = self.GetBackgroundColour()
self._clicked = None
min_dist = sys.maxsize
closest_hkl = None
for k, (x2,y2) in enumerate(self._points_2d) :
dist = sqrt((x2-x)**2 + (y2-y)**2)
if (dist <= (self._radii_2d[k] + 2)) :
self._clicked = k
break
hkl = d_min = value = None
if (self._clicked is not None) :
self.GetParent().update_clicked(self._clicked)
self.Refresh()
# placeholder - mimics gltbx.wx_viewer.wxGLWindow.fit_into_viewport
def fit_into_viewport (self) :
pass
# mimics gltbx.wx_viewer.wxGLWindow.save_screen_shot
def save_screen_shot (self, file_name, extensions=None) :
rect = self.GetRect()
if wx.VERSION[0] > 3:
bitmap = wx.Bitmap(rect.width, rect.height)
else:
# https://discuss.wxpython.org/t/wx-bitmap-x-y-and-wx-bitmap-create-x-y-vs-wx-emptybitmap-x-y/29463
bitmap = wx.EmptyBitmap(rect.width, rect.height)
memory_dc = wx.MemoryDC()
memory_dc.SelectObject(bitmap)
#memory_dc.SetBackgroundMode(wx.TRANSPARENT)
if (self.settings.black_background) :
memory_dc.SetBackground(wx.BLACK_BRUSH)
else :
memory_dc.SetBackground(wx.WHITE_BRUSH)
memory_dc.Clear()
gc = wx.GraphicsContext.Create(memory_dc)
self.paint(gc)
bitmap.SaveFile(file_name, wx.BITMAP_TYPE_PNG)
def OnPaint (self, event) :
if (self.scene is None) :
return
if (self.settings.black_background) :
self.SetBackgroundColour((0,0,0))
else :
self.SetBackgroundColour((255,255,255))
dc = wx.AutoBufferedPaintDCFactory(self)
if (self.settings.black_background) :
dc.SetBackground(wx.BLACK_BRUSH)
else :
dc.SetBackground(wx.WHITE_BRUSH)
dc.Clear()
gc = wx.GraphicsContext.Create(dc)
self.paint(gc)
def OnLeftClick (self, evt) :
self.initLeft = evt.GetX(), evt.GetY()
def OnLeftUp (self, evt) :
x = evt.GetX()
y = evt.GetY()
if (not self.was_dragged) :
if (x == self.initLeft[0]) and (y == self.initLeft[1]) :
self.METHOD_NAME(x,y)
self.was_dragged = False
def OnMouseMotion (self, evt) :
if (not evt.Dragging()) :
return
elif (evt.LeftIsDown()) :
self.was_dragged = True
def OnChar (self, evt) :
pass
|
3,949 |
determine validation permission
|
"""empty message
Revision ID: c40e1fdf6b70
Revises: 84c793a951b2
Create Date: 2020-02-04 22:23:22.457001
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "c40e1fdf6b70"
down_revision = "84c793a951b2"
branch_labels = None
depends_on = None
class Determiner:
def determine_mapping_permission(self, val, reverse=False):
# restrict_mapping_level_to_project=True => LEVEL = 1
# restrict_mapping_level_to_project=False => ANY = 0
permissions = {True: 1, False: 0}
if reverse:
return list(permissions.keys())[list(permissions.values()).index(val)]
return permissions.get(val)
def METHOD_NAME(self, val, reverse=False):
# (restrict_validation_role=True, restrict_validation_level_intermediate=True) => TEAMS_LEVEL = 3
# (restrict_validation_role=True, restrict_validation_level_intermediate=False) => TEAMS = 2
# (restrict_validation_role=False, restrict_validation_level_intermediate=True) => LEVEL = 1
# (restrict_validation_role=False, restrict_validation_level_intermediate=False) => ANY = 0
permissions = {
"True,True": 3,
"True,False": 2,
"False,True": 1,
"False,False": 0,
}
if reverse:
return list(permissions.keys())[list(permissions.values()).index(val)]
return permissions.get(val)
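# Hypothetical usage sketch (illustration only, not executed by this migration):
# the forward lookup maps the old boolean flags onto the new permission integers,
# and reverse=True maps an integer back to the flag combination.
#
#     d = Determiner()
#     d.determine_mapping_permission(True)        # -> 1 (LEVEL)
#     d.METHOD_NAME("True,False")                 # -> 2 (TEAMS)
#     d.METHOD_NAME(1, reverse=True)              # -> "False,True"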
def upgrade():
conn = op.get_bind()
conn.execute(sa.text("ALTER TABLE projects ADD mapping_permission Integer;"))
conn.execute(sa.text("ALTER TABLE projects ADD validation_permission Integer;"))
fetch_all_projects = "select id, restrict_mapping_level_to_project, \
restrict_validation_role, restrict_validation_level_intermediate from projects;"
all_projects = conn.execute(sa.text(fetch_all_projects))
for project in all_projects:
mapping_permission = None
validation_permission = None
project_id = project[0]
mapping_restriction = project[1]
validation_role_restriction = project[2]
validation_level_restriction = project[3]
# Map existing restrictions to V4 permission integers
d = Determiner()
mapping_permission = d.determine_mapping_permission(mapping_restriction)
validation_restriction = (
str(validation_role_restriction) + "," + str(validation_level_restriction)
)
validation_permission = d.METHOD_NAME(
validation_restriction
)
update_query = (
"update projects set mapping_permission = '"
+ str(mapping_permission)
+ "', validation_permission = '"
+ str(validation_permission)
+ "' where id = "
+ str(project_id)
)
op.execute(update_query)
op.drop_column("projects", "restrict_mapping_level_to_project")
op.drop_column("projects", "restrict_validation_role")
op.drop_column("projects", "restrict_validation_level_intermediate")
def downgrade():
conn = op.get_bind()
conn.execute(
sa.text("ALTER TABLE projects ADD restrict_mapping_level_to_project boolean;")
)
conn.execute(sa.text("ALTER TABLE projects ADD restrict_validation_role boolean;"))
conn.execute(
sa.text(
"ALTER TABLE projects ADD restrict_validation_level_intermediate boolean;"
)
)
fetch_all_projects = (
"select id, mapping_permission, validation_permission from projects;"
)
all_projects = conn.execute(sa.text(fetch_all_projects))
for project in all_projects:
project_id = project[0]
mapping_permission = project[1]
validation_permission = project[2]
mapping_restriction = False
validation_role_restriction = None
validation_level_restriction = None
# Reverse map V4 permission integers to V3 restrictions
d = Determiner()
try:
mapping_restriction = d.determine_mapping_permission(
mapping_permission, True
)
except Exception:
mapping_restriction = False
validation_restriction = d.METHOD_NAME(
validation_permission, True
).split(",")
validation_role_restriction = validation_restriction[0]
validation_level_restriction = validation_restriction[1]
update_query = (
"update projects set restrict_mapping_level_to_project = '"
+ str(mapping_restriction)
+ "', restrict_validation_role = '"
+ str(validation_role_restriction)
+ "', restrict_validation_level_intermediate = '"
+ str(validation_level_restriction)
+ "' where id = "
+ str(project_id)
)
op.execute(update_query)
op.drop_column("projects", "mapping_permission")
op.drop_column("projects", "validation_permission")
|
3,950 |
get alazar config
|
import unittest
with_alazar = True
def get_pulse():
from qupulse.pulses import TablePulseTemplate as TPT, SequencePulseTemplate as SPT, RepetitionPulseTemplate as RPT
ramp = TPT(identifier='ramp', channels={'out', 'trigger'})
ramp.add_entry(0, 'start', channel='out')
ramp.add_entry('duration', 'stop', 'linear', channel='out')
ramp.add_entry(0, 1, channel='trigger')
ramp.add_entry('duration', 1, 'hold', channel='trigger')
ramp.add_measurement_declaration('meas', 0, 'duration')
base = SPT([(ramp, dict(start='min', stop='max', duration='tau/3'), dict(meas='A')),
(ramp, dict(start='max', stop='max', duration='tau/3'), dict(meas='B')),
(ramp, dict(start='max', stop='min', duration='tau/3'), dict(meas='C'))], {'min', 'max', 'tau'})
repeated = RPT(base, 'n')
root = SPT([repeated, repeated, repeated], {'min', 'max', 'tau', 'n'})
return root
def METHOD_NAME():
from atsaverage import alazar
from atsaverage.config import ScanlineConfiguration, CaptureClockConfiguration, EngineTriggerConfiguration,\
TRIGInputConfiguration, InputConfiguration
trig_level = int((5 + 0.4) / 10. * 255)
assert 0 <= trig_level < 256
config = ScanlineConfiguration()
config.triggerInputConfiguration = TRIGInputConfiguration(triggerRange=alazar.TriggerRangeID.etr_5V)
config.triggerConfiguration = EngineTriggerConfiguration(triggerOperation=alazar.TriggerOperation.J,
triggerEngine1=alazar.TriggerEngine.J,
triggerSource1=alazar.TriggerSource.external,
triggerSlope1=alazar.TriggerSlope.positive,
triggerLevel1=trig_level,
triggerEngine2=alazar.TriggerEngine.K,
triggerSource2=alazar.TriggerSource.disable,
triggerSlope2=alazar.TriggerSlope.positive,
triggerLevel2=trig_level)
config.captureClockConfiguration = CaptureClockConfiguration(source=alazar.CaptureClockType.internal_clock,
samplerate=alazar.SampleRateID.rate_100MSPS)
config.inputConfiguration = 4*[InputConfiguration(input_range=alazar.InputRangeID.range_1_V)]
config.totalRecordSize = 0
assert config.totalRecordSize == 0
return config
def get_operations():
from atsaverage.operations import Downsample
return [Downsample(identifier='DS_A', maskID='A'),
Downsample(identifier='DS_B', maskID='B'),
Downsample(identifier='DS_C', maskID='C'),
Downsample(identifier='DS_D', maskID='D')]
def get_window(card):
from atsaverage.gui import ThreadedStatusWindow
window = ThreadedStatusWindow(card)
window.start()
return window
class TaborTests(unittest.TestCase):
@unittest.skip
def test_all(self):
from qupulse.hardware.feature_awg.tabor import TaborChannelTuple, TaborDevice
#import warnings
tawg = TaborDevice(r'USB0::0x168C::0x2184::0000216488::INSTR')
tchannelpair = TaborChannelTuple(tawg, (1, 2), 'TABOR_AB')
tawg.paranoia_level = 2
#warnings.simplefilter('error', Warning)
from qupulse.hardware.setup import HardwareSetup, PlaybackChannel, MarkerChannel
hardware_setup = HardwareSetup()
hardware_setup.set_channel('TABOR_A', PlaybackChannel(tchannelpair, 0))
hardware_setup.set_channel('TABOR_B', PlaybackChannel(tchannelpair, 1))
hardware_setup.set_channel('TABOR_A_MARKER', MarkerChannel(tchannelpair, 0))
hardware_setup.set_channel('TABOR_B_MARKER', MarkerChannel(tchannelpair, 1))
if with_alazar:
from qupulse.hardware.dacs.alazar import AlazarCard
import atsaverage.server
if not atsaverage.server.Server.default_instance.running:
atsaverage.server.Server.default_instance.start(key=b'guest')
import atsaverage.core
alazar = AlazarCard(atsaverage.core.getLocalCard(1, 1))
alazar.register_mask_for_channel('A', 0)
alazar.register_mask_for_channel('B', 0)
alazar.register_mask_for_channel('C', 0)
alazar.config = METHOD_NAME()
alazar.register_operations('test', get_operations())
window = get_window(atsaverage.core.getLocalCard(1, 1))
hardware_setup.register_dac(alazar)
repeated = get_pulse()
from qupulse.pulses.sequencing import Sequencer
sequencer = Sequencer()
sequencer.push(repeated,
parameters=dict(n=1000, min=-0.5, max=0.5, tau=192*3),
channel_mapping={'out': 'TABOR_A', 'trigger': 'TABOR_A_MARKER'},
window_mapping=dict(A='A', B='B', C='C'))
instruction_block = sequencer.build()
hardware_setup.register_program('test', instruction_block)
if with_alazar:
from atsaverage.masks import PeriodicMask
m = PeriodicMask()
m.identifier = 'D'
m.begin = 0
m.end = 1
m.period = 1
m.channel = 0
alazar._registered_programs['test'].masks.append(m)
hardware_setup.arm_program('test')
d = 1
|
3,951 |
calculate xpos
|
"""
This example uses the Coinbase open API to collect the current exchange rates.
Use Switch A to change to a different base exchange currency.
"""
import WIFI_CONFIG
from network_manager import NetworkManager
import uasyncio
import urequests
import time
import math
from stellar import StellarUnicorn
from picographics import PicoGraphics, DISPLAY_STELLAR_UNICORN as DISPLAY
import gc
URL = 'https://api.coinbase.com/v2/exchange-rates?currency={0}'
currencies = {"Bitcoin": "BTC", "Ethereun": "ETH", "Pound": "GBP", "Dollar": "USD", "Dogecoin": "DOGE"}
currency_keys = list(currencies.keys())
ref_currency_name = ""
currency_name = ""
currency_symbol = ""
currency_rate = ""
rate_keys = []
# display options
line_1_line = -1
line_2_line = 4
line_3_line = 9
ref_currency_index = 0
cycles_per_sequence = 200
su = StellarUnicorn()
graphics = PicoGraphics(DISPLAY)
# for Handling the wifi connection
def status_handler(mode, status, ip):
# reports wifi connection status
print(mode, status, ip)
print('Connecting to wifi...')
if status is not None:
if status:
print('Wifi connection successful!')
else:
print('Wifi connection failed!')
try:
network_manager = NetworkManager(WIFI_CONFIG.COUNTRY, status_handler=status_handler)
uasyncio.get_event_loop().run_until_complete(network_manager.client(WIFI_CONFIG.SSID, WIFI_CONFIG.PSK))
except Exception as e:
print(f'Wifi connection failed! {e}')
def get_data(currency_selected):
graphics.set_pen(graphics.create_pen(20, 20, 20))
graphics.clear()
graphics.set_pen(graphics.create_pen(100, 100, 100))
graphics.text("Get", 0, 0, scale=1)
graphics.text("data", 0, 7, scale=1)
su.update(graphics)
gc.collect()
# open the json file
print('Requesting URL:')
print(URL.format(currencies[currency_selected]))
r = urequests.get(URL.format(currencies[currency_selected]))
gc.collect()
# open the json data
data_obj = r.json()
print('Data obtained!')
r.close()
return data_obj
def METHOD_NAME(length, cycle):
cycle_phase = math.cos(math.pi * cycle / (cycles_per_sequence / 2))
pos_x = int((-(length / 2) * 10) - (length / 2) * 10 * cycle_phase)
return pos_x
def update_display(cycle):
graphics.set_pen(graphics.create_pen(20, 20, 20))
graphics.clear()
graphics.set_pen(graphics.create_pen(100, 0, 0))
graphics.text(ref_currency_name, METHOD_NAME((len(ref_currency_name)), cycle), line_1_line, scale=1)
graphics.set_pen(graphics.create_pen(100, 100, 0))
graphics.text(currency_symbol, METHOD_NAME((len(currency_symbol)), cycle), line_2_line, scale=1)
graphics.set_pen(graphics.create_pen(0, 100, 100))
graphics.text(currency_rate, METHOD_NAME((len(currency_rate)), cycle), line_3_line, scale=1)
def update_base_currency(index):
fetched_data = 0
global rates, rate_keys, currency_symbol, currency_rate, ref_currency_name
fetched_data = get_data(currency_keys[index])
rates = fetched_data['data']['rates']
rate_keys = list(rates.keys())
currency_symbol = rate_keys[index]
currency_rate = str(rates[rate_keys[index]])
ref_currency_name = "{0}-{1}".format(currency_keys[index], currencies[currency_keys[index]])
gc.collect()
update_base_currency(ref_currency_index)
update_display(0)
su.update(graphics)
cycle_count = 0
symbol_index = 0
print("Display {0} {1}".format(currency_symbol, currency_rate))
while 1:
if cycle_count > 4 * cycles_per_sequence:
cycle_count = 0
symbol_index += 1
if symbol_index > len(currency_keys):
symbol_index = 0
print("Display {0} {1}".format(currency_symbol, currency_rate))
currency_symbol = rate_keys[symbol_index]
currency_rate = rates[rate_keys[symbol_index]]
if (su.is_pressed(StellarUnicorn.SWITCH_A)):
ref_currency_index += 1
if (ref_currency_index > len(currency_keys)):
ref_currency_index = 0
update_base_currency(ref_currency_index)
if (su.is_pressed(StellarUnicorn.SWITCH_B)):
cycle_count = 0
symbol_index += 1
if symbol_index > len(rate_keys):
symbol_index = 0
currency_symbol = rate_keys[symbol_index]
currency_rate = rates[rate_keys[symbol_index]]
update_display(cycle_count)
su.update(graphics)
cycle_count += 1
time.sleep(0.1)
|
3,952 |
test show with default args
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2023, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations # isort:skip
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from unittest.mock import MagicMock, Mock, patch
# Bokeh imports
from bokeh.application.application import Application
from bokeh.io.doc import curdoc
from bokeh.io.output import output_notebook
from bokeh.io.state import State, curstate
from bokeh.models import ColumnDataSource, GlyphRenderer, Plot
# Module under test
import bokeh.io.showing as bis # isort:skip
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
@patch('bokeh.io.showing._show_with_state')
def METHOD_NAME(mock__show_with_state: MagicMock) -> None:
curstate().reset()
default_kwargs = dict(browser=None, new="tab", notebook_handle=False)
p = Plot()
bis.show(p, **default_kwargs)
assert mock__show_with_state.call_count == 1
assert mock__show_with_state.call_args[0] == (p, curstate(), None, "tab")
assert mock__show_with_state.call_args[1] == {'notebook_handle': False}
assert curdoc().roots == []
@patch('bokeh.io.showing._show_with_state')
def test_show_with_explicit_args(mock__show_with_state: MagicMock) -> None:
curstate().reset()
kwargs = dict(browser="browser", new="new", notebook_handle=True)
p = Plot()
bis.show(p, **kwargs)
assert mock__show_with_state.call_count == 1
assert mock__show_with_state.call_args[0] == (p, curstate(), "browser", "new")
assert mock__show_with_state.call_args[1] == {'notebook_handle': True}
assert curdoc().roots == []
@patch('bokeh.io.showing.run_notebook_hook')
def test_show_with_app(mock_run_notebook_hook: MagicMock, ipython) -> None:
curstate().reset()
app = Application()
output_notebook()
bis.show(app, notebook_url="baz")
assert curstate().notebook_type == "jupyter"
assert mock_run_notebook_hook.call_count == 1
assert mock_run_notebook_hook.call_args[0][0] == curstate().notebook_type
assert mock_run_notebook_hook.call_args[0][1:] == ("app", app, curstate(), "baz")
assert mock_run_notebook_hook.call_args[1] == {}
@patch('bokeh.io.showing._show_with_state')
def test_show_does_not_add_obj_to_curdoc(m) -> None:
curstate().reset()
assert curstate().document.roots == []
p = Plot()
bis.show(p)
assert curstate().document.roots == []
p = Plot()
bis.show(p)
assert curstate().document.roots == []
@pytest.mark.parametrize('obj', [1, 2.3, None, "str", GlyphRenderer(data_source=ColumnDataSource())])
def test_show_with_bad_object(obj) -> None:
with pytest.raises(ValueError):
bis.show(obj)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
@patch('bokeh.io.showing.run_notebook_hook')
@patch('bokeh.io.showing._show_file_with_state')
@patch('bokeh.io.showing.get_browser_controller')
def test__show_with_state_with_notebook(
mock_get_browser_controller: MagicMock,
mock__show_file_with_state: MagicMock,
mock_run_notebook_hook: MagicMock) -> None:
mock_get_browser_controller.return_value = "controller"
s = State()
p = Plot()
s.output_notebook()
bis._show_with_state(p, s, "browser", "new")
assert s.notebook_type == "jupyter"
assert mock_run_notebook_hook.call_count == 1
assert mock_run_notebook_hook.call_args[0] == ("jupyter", "doc", p, s, False)
assert mock_run_notebook_hook.call_args[1] == {}
assert mock__show_file_with_state.call_count == 0
s.output_file("foo.html")
bis._show_with_state(p, s, "browser", "new")
assert s.notebook_type == "jupyter"
assert mock_run_notebook_hook.call_count == 2
assert mock_run_notebook_hook.call_args[0] == ("jupyter", "doc", p, s, False)
assert mock_run_notebook_hook.call_args[1] == {}
assert mock__show_file_with_state.call_count == 1
assert mock__show_file_with_state.call_args[0] == (p, s, "new", "controller")
assert mock__show_file_with_state.call_args[1] == {}
@patch('bokeh.io.notebook.get_comms')
@patch('bokeh.io.notebook.show_doc')
@patch('bokeh.io.showing._show_file_with_state')
@patch('bokeh.io.showing.get_browser_controller')
def test__show_with_state_with_no_notebook(
mock_get_browser_controller: MagicMock,
mock__show_file_with_state: MagicMock,
mock_show_doc: MagicMock,
mock_get_comms: MagicMock):
mock_get_browser_controller.return_value = "controller"
mock_get_comms.return_value = "comms"
s = State()
s.output_file("foo.html")
bis._show_with_state("obj", s, "browser", "new")
assert s.notebook_type is None
assert mock_show_doc.call_count == 0
assert mock__show_file_with_state.call_count == 1
assert mock__show_file_with_state.call_args[0] == ("obj", s, "new", "controller")
assert mock__show_file_with_state.call_args[1] == {}
@patch('os.path.abspath')
@patch('bokeh.io.showing.save')
def test(mock_save: MagicMock, mock_abspath: MagicMock):
controller = Mock()
mock_save.return_value = "savepath"
s = State()
s.output_file("foo.html")
bis._show_file_with_state("obj", s, "window", controller)
assert mock_save.call_count == 1
assert mock_save.call_args[0] == ("obj",)
assert mock_save.call_args[1] == {"state": s}
assert controller.open.call_count == 1
assert controller.open.call_args[0] == ("file://savepath",)
assert controller.open.call_args[1] == {"new": 1}
bis._show_file_with_state("obj", s, "tab", controller)
assert mock_save.call_count == 2
assert mock_save.call_args[0] == ("obj",)
assert mock_save.call_args[1] == {"state": s}
assert controller.open.call_count == 2
assert controller.open.call_args[0] == ("file://savepath",)
assert controller.open.call_args[1] == {"new": 2}
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
3,953 |
set enabled
|
from abc import ABC, abstractmethod
from decimal import ROUND_HALF_EVEN, ROUND_UP, Decimal
from System.Drawing import (
Color,
Point,
Size,
SystemColors,
)
from travertino.size import at_least
from toga_winforms.colors import native_color
class Scalable:
SCALE_DEFAULT_ROUNDING = ROUND_HALF_EVEN
def init_scale(self, native):
self.scale = native.CreateGraphics().DpiX / 96
# Convert CSS pixels to native pixels
def scale_in(self, value, rounding=SCALE_DEFAULT_ROUNDING):
return self.scale_round(value * self.scale, rounding)
# Convert native pixels to CSS pixels
def scale_out(self, value, rounding=SCALE_DEFAULT_ROUNDING):
if isinstance(value, at_least):
return at_least(self.scale_out(value.value, rounding))
else:
return self.scale_round(value / self.scale, rounding)
def scale_round(self, value, rounding):
return int(Decimal(value).to_integral(rounding))
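    # A minimal usage sketch (assumed values, not from this module): on a 144-DPI
    # screen init_scale() sets self.scale to 1.5, so for a widget using this mixin
    #
    #     widget.scale_in(10)                     # -> 15 native pixels
    #     widget.scale_out(15)                    # -> 10 CSS pixels
    #     widget.scale_in(7)                      # -> 10 (10.5 rounds half-to-even)
    #     widget.scale_in(7, rounding=ROUND_UP)   # -> 11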
class Widget(ABC, Scalable):
# In some widgets, attempting to set a background color with any alpha value other
# than 1 raises "System.ArgumentException: Control does not support transparent
# background colors". Those widgets should set this attribute to False.
_background_supports_alpha = True
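    # For instance, a hypothetical backend widget wrapping such a control would
    # simply override the class attribute (sketch only, not an existing class here):
    #
    #     class DateInputImpl(Widget):
    #         _background_supports_alpha = False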
def __init__(self, interface):
self.interface = interface
self.interface._impl = self
self._container = None
self.native = None
self.create()
self.init_scale(self.native)
self.interface.style.reapply()
@abstractmethod
def create(self):
...
def set_app(self, app):
# No special handling required
pass
def set_window(self, window):
# No special handling required
pass
@property
def container(self):
return self._container
@container.setter
def container(self, container):
if self._container:
self._container.remove_content(self)
self._container = container
if container:
container.add_content(self)
for child in self.interface.children:
child._impl.container = container
self.refresh()
def get_tab_index(self):
return self.native.TabIndex
def set_tab_index(self, tab_index):
self.native.TabIndex = tab_index
def get_enabled(self):
return self.native.Enabled
def METHOD_NAME(self, value):
self.native.Enabled = value
def focus(self):
self.native.Focus()
# APPLICATOR
def set_bounds(self, x, y, width, height):
self.native.Size = Size(*map(self.scale_in, (width, height)))
self.native.Location = Point(*map(self.scale_in, (x, y)))
def set_alignment(self, alignment):
# By default, alignment can't be changed
pass
def set_hidden(self, hidden):
self.native.Visible = not hidden
def set_font(self, font):
self.native.Font = font._impl.native
def set_color(self, color):
if color is None:
self.native.ForeColor = SystemColors.WindowText
else:
self.native.ForeColor = native_color(color)
def set_background_color(self, color):
if not hasattr(self, "_default_background"):
self._default_background = self.native.BackColor
if color is None:
self.native.BackColor = self._default_background
else:
win_color = native_color(color)
if (win_color != Color.Empty) and (not self._background_supports_alpha):
win_color = Color.FromArgb(255, win_color.R, win_color.G, win_color.B)
self.native.BackColor = win_color
# INTERFACE
def add_child(self, child):
child.container = self.container
def insert_child(self, index, child):
self.add_child(child)
def remove_child(self, child):
child.container = None
def refresh(self):
intrinsic = self.interface.intrinsic
intrinsic.width = intrinsic.height = None
self.rehint()
assert intrinsic.width is not None
assert intrinsic.height is not None
intrinsic.width = self.scale_out(intrinsic.width, ROUND_UP)
intrinsic.height = self.scale_out(intrinsic.height, ROUND_UP)
@abstractmethod
def rehint(self):
...
|
3,954 |
test fact
|
"""
:codeauthor: Rahul Handay <[email protected]>
"""
import errno
import os
import pytest
import salt.modules.puppet as puppet
import salt.utils.args
import salt.utils.files
from salt.exceptions import CommandExecutionError
from tests.support.mock import MagicMock, mock_open, patch
@pytest.fixture
def configure_loader_modules():
return {puppet: {}}
@pytest.fixture
def puppet_config():
_puppet_config = """
---
confdir: "/etc/puppet"
rundir: "/var/run/puppetlabs"
vardir: "/var/lib/puppet"
"""
yield _puppet_config
def test_run(puppet_config):
"""
Test to execute a puppet run
"""
mock_empty_lst = []
mock = MagicMock(return_value={"A": "B"})
with patch.object(salt.utils.args, "clean_kwargs", mock):
cmd_run_all_mock = MagicMock(return_value={"retcode": 0})
cmd_run_mock = MagicMock(side_effect=[puppet_config, mock_empty_lst])
with patch.dict(
puppet.__salt__, {"cmd.run_all": cmd_run_all_mock, "cmd.run": cmd_run_mock}
):
ret = puppet.run()
assert ret
def test_noop():
"""
Test to execute a puppet noop run
"""
mock = MagicMock(return_value={"stderr": "A", "stdout": "B"})
with patch.object(puppet, "run", mock):
assert puppet.noop() == {"stderr": "A", "stdout": "B"}
def test_enable(puppet_config):
"""
Test to enable the puppet agent
"""
mock_empty_lst = []
cmd_run_mock = MagicMock(
side_effect=[puppet_config, puppet_config, puppet_config, mock_empty_lst]
)
with patch.dict(puppet.__salt__, {"cmd.run": cmd_run_mock}):
mock = MagicMock(return_value=True)
with patch.object(os.path, "isfile", mock):
mock = MagicMock(return_value=True)
with patch.object(os, "remove", mock):
assert puppet.enable()
with patch.object(os, "remove", MagicMock(side_effect=IOError)):
pytest.raises(CommandExecutionError, puppet.enable)
assert not puppet.enable()
def test_disable(puppet_config):
"""
Test to disable the puppet agent
"""
cmd_run_mock = MagicMock(return_value=puppet_config)
with patch.dict(puppet.__salt__, {"cmd.run": cmd_run_mock}):
mock = MagicMock(side_effect=[True, False])
with patch.object(os.path, "isfile", mock):
assert not puppet.disable()
with patch("salt.utils.files.fopen", mock_open()):
assert puppet.disable()
try:
with patch("salt.utils.files.fopen", mock_open()) as m_open:
m_open.side_effect = IOError(13, "Permission denied:", "/file")
pytest.raises(CommandExecutionError, puppet.disable)
except StopIteration:
pass
def test_status(puppet_config):
"""
Test to display puppet agent status
"""
cmd_run_mock = MagicMock(return_value=puppet_config)
with patch.dict(puppet.__salt__, {"cmd.run": cmd_run_mock}):
mock = MagicMock(side_effect=[True])
with patch.object(os.path, "isfile", mock):
assert puppet.status() == "Administratively disabled"
mock = MagicMock(side_effect=[False, True])
with patch.object(os.path, "isfile", mock):
with patch("salt.utils.files.fopen", mock_open(read_data="1")):
mock = MagicMock(return_value=True)
with patch.object(os, "kill", mock):
assert puppet.status() == "Applying a catalog"
mock = MagicMock(side_effect=[False, True])
with patch.object(os.path, "isfile", mock):
with patch("salt.utils.files.fopen", mock_open()):
mock = MagicMock(return_value=True)
with patch.object(os, "kill", mock):
assert puppet.status() == "Stale lockfile"
mock = MagicMock(side_effect=[False, False, True])
with patch.object(os.path, "isfile", mock):
with patch("salt.utils.files.fopen", mock_open(read_data="1")):
mock = MagicMock(return_value=True)
with patch.object(os, "kill", mock):
assert puppet.status() == "Idle daemon"
mock = MagicMock(side_effect=[False, False, True])
with patch.object(os.path, "isfile", mock):
with patch("salt.utils.files.fopen", mock_open()):
mock = MagicMock(return_value=True)
with patch.object(os, "kill", mock):
assert puppet.status() == "Stale pidfile"
mock = MagicMock(side_effect=[False, False, False])
with patch.object(os.path, "isfile", mock):
assert puppet.status() == "Stopped"
def test_summary(puppet_config):
"""
Test to show a summary of the last puppet agent run
"""
cmd_run_mock = MagicMock(return_value=puppet_config)
with patch.dict(puppet.__salt__, {"cmd.run": cmd_run_mock}):
with patch("salt.utils.files.fopen", mock_open(read_data="resources: 1")):
assert puppet.summary() == {"resources": 1}
permission_error = IOError(errno.EACCES, "Permission denied:", "/file")
with patch(
"salt.utils.files.fopen", mock_open(read_data=permission_error)
) as m_open:
pytest.raises(CommandExecutionError, puppet.summary)
def test_plugin_sync(puppet_config):
"""
    Test to run a plugin sync between the puppet master and agent
"""
cmd_run_mock = MagicMock(return_value=puppet_config)
with patch.dict(puppet.__salt__, {"cmd.run": cmd_run_mock}):
mock_lst = MagicMock(side_effect=[False, True])
with patch.dict(puppet.__salt__, {"cmd.run": mock_lst}):
assert puppet.plugin_sync() == ""
assert puppet.plugin_sync()
def test_facts():
"""
Test to run facter and return the results
"""
mock = MagicMock(return_value={"retcode": 0, "stdout": "1\n2"})
with patch.dict(puppet.__salt__, {"cmd.run_all": mock}):
mock = MagicMock(side_effect=[["a", "b"], ["c", "d"]])
with patch.object(puppet, "_format_fact", mock):
assert puppet.facts() == {"a": "b", "c": "d"}
def METHOD_NAME():
"""
Test to run facter for a specific fact
"""
mock = MagicMock(
side_effect=[
{"retcode": 0, "stdout": False},
{"retcode": 0, "stdout": True},
]
)
with patch.dict(puppet.__salt__, {"cmd.run_all": mock}):
assert puppet.fact("salt") == ""
assert puppet.fact("salt")
|
3,955 |
recover chunk
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
from curvesnapshot_python import curvesnapshot
from config import config
from logger.logger import *
class CurveSnapshot:
def __init__(self):
        rc = curvesnapshot.Init(config.client_conf)
        if rc != 0:
            print("init CurveSnapshot client fail! rc=%s" % rc)
            logger.error("init CurveSnapshot client fail! rc=%s" % rc)
            raise AssertionError
        logger.info("CurveSnapshot init success.")
def create_snapshot(self, file_path, user_name, password):
user = curvesnapshot.CUserInfo_t()
user.owner = user_name
user.password = password
seq = curvesnapshot.type_uInt64_t()
rc = curvesnapshot.CreateSnapShot(file_path, user, seq)
if rc != 0:
print ("create_snapshot fail! rc=%s" % rc)
logger.error("create_snapshot fail! rc=%s" % rc)
return rc
else:
return seq
def get_snapshot(self, file_path, user_name, password, seq):
user = curvesnapshot.CUserInfo_t()
user.owner = user_name
user.password = password
finfo = curvesnapshot.CFInfo_t()
rc = curvesnapshot.GetSnapShot(file_path, user, seq, finfo)
if rc != 0:
print ("get_snapshot fail! rc=%s" % rc)
logger.error("get_snapshot fail! rc=%s" % rc)
return rc
else:
logger.info("get_sanpshot_info , file snapshot info.status = %s, owner = %s, filename = %s, "
"length = %s, chunksize = %s, seqnum = %s, segmentsize = %s" % (
finfo.filestatus, finfo.owner, finfo.filename, finfo.length.value, finfo.chunksize.value,
finfo.seqnum.value, finfo.segmentsize.value))
return finfo
def get_snapshot_SegmentInfo(self, file_path, user_name, password, seq, offset):
user = curvesnapshot.CUserInfo_t()
user.owner = user_name
user.password = password
segInfo = curvesnapshot.CSegmentInfo_t()
rc = curvesnapshot.GetSnapshotSegmentInfo(file_path, user, seq, offset, segInfo)
if rc != 0:
logger.error("get_snapshot_SegmentInfo fail! rc=%s" % rc)
return rc
else:
return segInfo
def get_chunk_Info(self, chunkidinfo):
info = curvesnapshot.CChunkInfoDetail_t()
rc = curvesnapshot.GetChunkInfo(chunkidinfo, info)
if rc != 0:
print ("get_chunk_Info fail! rc=%s" % rc)
logger.error("get_chunk_Info fail! rc=%s" % rc)
return rc
else:
return info
def read_chunk_snapshot(self, idinfo, seq, offset, len, buf):
content = curvesnapshot.ReadChunkSnapshot(idinfo, seq, offset, len, buf)
return content
def delete_chunk_snapshot_or_correct_sn(self, idinfo, correctseq):
rc = curvesnapshot.DeleteChunkSnapshotOrCorrectSn(idinfo, correctseq)
if rc != 0:
print ("delete_chunk_snapshot_or_correct_sn fail! rc=%s" % rc)
logger.error("delete_chunk_snapshot_or_correct_sn fail! rc=%s" % rc)
return rc
def check_snapshot_status(self, file_path, user_name, password, seq):
user = curvesnapshot.CUserInfo_t()
user.owner = user_name
user.password = password
filestatus = curvesnapshot.type_uInt32_t()
rc = curvesnapshot.CheckSnapShotStatus(file_path, user, seq, filestatus)
if rc != 0:
print ("check_snapshot_status fail! rc=%s" % rc)
logger.error("check_snapshot_status fail! rc=%s" % rc)
return rc
else:
return filestatus
def delete_snapshot(self, file_path, user_name, password, seq):
user = curvesnapshot.CUserInfo_t()
user.owner = user_name
user.password = password
rc = curvesnapshot.DeleteSnapShot(file_path, user, seq)
if rc != 0:
print ("delete_snapshot fail! rc=%s" % rc)
logger.error("delete_snapshot fail! rc=%s" % rc)
return rc
def create_clone_chunk(self, file_path, chunkinfo, seq, correctseq, chunksize):
rc = curvesnapshot.CreateCloneChunk(file_path, chunkinfo, seq, correctseq, chunksize)
if rc != 0:
print ("create_clone_chunk fail! rc=%s" % rc)
logger.error("create_clone_chunk fail! rc=%s" % rc)
return rc
def METHOD_NAME(self, chunkinfo, offset, len):
rc = curvesnapshot.RecoverChunk(chunkinfo, offset, len)
if rc != 0:
print ("recover_chunk fail! rc=%s" % rc)
logger.error("recover_chunk fail! rc=%s" % rc)
return rc
def libcurve_uninit():
rc = curvesnapshot.UnInit()
    if rc is not None:
        print("CurveSnapshot uninit fail! rc=%s" % rc)
        logger.error("CurveSnapshot uninit fail! rc=%s" % rc)
        return rc
    else:
        return 0
|
3,956 |
get output dir
|
"""
Run PyTorch cpu benchmarking.
"""
import json
import os
import re
import sys
import time
from datetime import datetime
from pathlib import Path
from typing import List, Dict, Optional
REPO_PATH = Path(__file__).absolute().parent.parent.parent
USERBENCHMARK_OUTPUT_PREFIX = ".userbenchmark"
class add_path():
def __init__(self, path):
self.path = path
def __enter__(self):
sys.path.insert(0, self.path)
def __exit__(self, exc_type, exc_value, traceback):
try:
sys.path.remove(self.path)
except ValueError:
pass
def list_metrics() -> List[str]:
return ["latencies", "throughputs", "cpu_peak_mem"]
def parse_str_to_list(candidates):
if isinstance(candidates, list):
return candidates
candidates = list(map(lambda x: x.strip(), candidates.split(",")))
return candidates
def validate(candidates, choices: List[str]):
"""Validate the candidates provided by the user is valid"""
if isinstance(candidates, List):
for candidate in candidates:
assert candidate in choices, f"Specified {candidate}, but not in available list: {choices}."
else:
assert candidates in choices, f"Specified {candidates}, but not in available list: {choices}."
return candidates
def METHOD_NAME(bm_name, test_date=None):
current_dir = Path(__file__).parent.absolute()
bm_out_dir = current_dir.parent.parent.joinpath(USERBENCHMARK_OUTPUT_PREFIX, bm_name)
test_date = test_date if test_date else datetime.fromtimestamp(time.time()).strftime("%Y%m%d%H%M%S")
output_dir = bm_out_dir.joinpath("cpu-" + test_date)
output_dir.mkdir(exist_ok=True, parents=True)
return output_dir
def get_output_json(bm_name, metrics):
import torch
return {
"name": bm_name,
"environ": {"pytorch_git_version": torch.version.git_version},
"metrics": metrics,
}
def dump_output(bm_name, output, output_dir=None, fname=None):
output_dir = output_dir if output_dir else METHOD_NAME(bm_name)
fname = fname if fname else "metrics-{}.json".format(os.getpid())
full_fname = os.path.join(output_dir, fname)
with open(full_fname, "w") as f:
json.dump(output, f, indent=4)
def get_run(test_dir: Path):
run = {}
testdir_name = test_dir.name
regex = "(.*)-(.*)"
g = re.match(regex, testdir_name).groups()
run["model"] = g[0]
run["test"] = g[1]
run["results"] = []
ins_jsons = filter(lambda x: x.is_file(), test_dir.iterdir())
for ins_json in ins_jsons:
with open(ins_json, "r") as ij:
run["results"].append(json.load(ij))
return run
def get_runs(work_dir: Path):
runs = []
for subdir in filter(lambda x: x.is_dir(), work_dir.iterdir()):
run = get_run(subdir)
runs.append(run)
return runs
def add_test_results(runs, result_metrics):
# metrics name examples:
# timm_regnet-eval_latency
# timm_regnet-eval_cmem
for run in runs:
run_base_name = f"{run['model']}-{run['test']}"
ins_number = len(run["results"])
assert ins_number
latency_metric = "latency" in run["results"][0]["metrics"]
throughput_metric = "throughput" in run["results"][0]["metrics"]
cmem_metric = "cpu_peak_mem" in run["results"][0]["metrics"]
latency_sum = 0
throughput_sum = 0
cmem_sum = 0
for ins_res in run["results"]:
if latency_metric:
latency_sum += ins_res["metrics"]["latency"]
if throughput_metric:
throughput_sum += ins_res["metrics"]["throughput"]
if cmem_metric:
cmem_sum += ins_res["metrics"]["cpu_peak_mem"]
if latency_metric:
result_metrics[f"{run_base_name}_latency"] = latency_sum / ins_number
if throughput_metric:
result_metrics[f"{run_base_name}_throughput"] = throughput_sum
if cmem_metric:
result_metrics[f"{run_base_name}_cmem"] = cmem_sum / ins_number
return result_metrics
def analyze(result_dir):
result_dir = Path(result_dir)
assert result_dir.is_dir(), f"Expected directory {str(result_dir)} doesn't exist."
result_metrics = {}
runs = get_runs(result_dir)
cpu_train = list(filter(lambda x: x["test"] == "train", runs))
if len(cpu_train):
add_test_results(cpu_train, result_metrics)
cpu_eval = list(filter(lambda x: x["test"] == "eval", runs))
if len(cpu_eval):
add_test_results(cpu_eval, result_metrics)
return result_metrics
|
3,957 |
test output
|
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
r"""Tests the text output of Google C++ Mocking Framework.
To update the golden file:
gmock_output_test.py --build_dir=BUILD/DIR --gengolden
where BUILD/DIR contains the built gmock_output_test_ file.
gmock_output_test.py --gengolden
gmock_output_test.py
"""
from io import open # pylint: disable=redefined-builtin, g-importing-member
import os
import re
import sys
import gmock_test_utils
# The flag for generating the golden file
GENGOLDEN_FLAG = '--gengolden'
PROGRAM_PATH = gmock_test_utils.GetTestExecutablePath('gmock_output_test_')
COMMAND = [PROGRAM_PATH, '--gtest_stack_trace_depth=0', '--gtest_print_time=0']
GOLDEN_NAME = 'gmock_output_test_golden.txt'
GOLDEN_PATH = os.path.join(gmock_test_utils.GetSourceDir(), GOLDEN_NAME)
def ToUnixLineEnding(s):
"""Changes all Windows/Mac line endings in s to UNIX line endings."""
return s.replace('\r\n', '\n').replace('\r', '\n')
def RemoveReportHeaderAndFooter(output):
"""Removes Google Test result report's header and footer from the output."""
output = re.sub(r'.*gtest_main.*\n', '', output)
output = re.sub(r'\[.*\d+ tests.*\n', '', output)
output = re.sub(r'\[.* test environment .*\n', '', output)
output = re.sub(r'\[=+\] \d+ tests .* ran.*', '', output)
output = re.sub(r'.* FAILED TESTS\n', '', output)
return output
def RemoveLocations(output):
"""Removes all file location info from a Google Test program's output.
Args:
output: the output of a Google Test program.
Returns:
output with all file location info (in the form of
'DIRECTORY/FILE_NAME:LINE_NUMBER: 'or
'DIRECTORY\\FILE_NAME(LINE_NUMBER): ') replaced by
'FILE:#: '.
"""
return re.sub(r'.*[/\\](.+)(\:\d+|\(\d+\))\:', 'FILE:#:', output)
def NormalizeErrorMarker(output):
"""Normalizes the error marker, which is different on Windows vs on Linux."""
return re.sub(r' error: ', ' Failure\n', output)
def RemoveMemoryAddresses(output):
"""Removes memory addresses from the test output."""
return re.sub(r'@\w+', '@0x#', output)
def RemoveTestNamesOfLeakedMocks(output):
"""Removes the test names of leaked mock objects from the test output."""
return re.sub(r'\(used in test .+\) ', '', output)
def GetLeakyTests(output):
"""Returns a list of test names that leak mock objects."""
# findall() returns a list of all matches of the regex in output.
# For example, if '(used in test FooTest.Bar)' is in output, the
# list will contain 'FooTest.Bar'.
return re.findall(r'\(used in test (.+)\)', output)
def GetNormalizedOutputAndLeakyTests(output):
"""Normalizes the output of gmock_output_test_.
Args:
output: The test output.
Returns:
A tuple (the normalized test output, the list of test names that have
leaked mocks).
"""
output = ToUnixLineEnding(output)
output = RemoveReportHeaderAndFooter(output)
output = NormalizeErrorMarker(output)
output = RemoveLocations(output)
output = RemoveMemoryAddresses(output)
return (RemoveTestNamesOfLeakedMocks(output), GetLeakyTests(output))
def GetShellCommandOutput(cmd):
"""Runs a command in a sub-process, and returns its STDOUT in a string."""
return gmock_test_utils.Subprocess(cmd, capture_stderr=False).output
def GetNormalizedCommandOutputAndLeakyTests(cmd):
"""Runs a command and returns its normalized output and a list of leaky tests.
Args:
cmd: the shell command.
"""
# Disables exception pop-ups on Windows.
os.environ['GTEST_CATCH_EXCEPTIONS'] = '1'
return GetNormalizedOutputAndLeakyTests(GetShellCommandOutput(cmd))
class GMockOutputTest(gmock_test_utils.TestCase):
def METHOD_NAME(self):
(output, leaky_tests) = GetNormalizedCommandOutputAndLeakyTests(COMMAND)
golden_file = open(GOLDEN_PATH, 'rb')
golden = golden_file.read().decode('utf-8')
golden_file.close()
# The normalized output should match the golden file.
self.assertEquals(golden, output)
# The raw output should contain 2 leaked mock object errors for
# test GMockOutputTest.CatchesLeakedMocks.
self.assertEquals(['GMockOutputTest.CatchesLeakedMocks',
'GMockOutputTest.CatchesLeakedMocks'],
leaky_tests)
if __name__ == '__main__':
if sys.argv[1:] == [GENGOLDEN_FLAG]:
(output, _) = GetNormalizedCommandOutputAndLeakyTests(COMMAND)
golden_file = open(GOLDEN_PATH, 'wb')
golden_file.write(output)
golden_file.close()
# Suppress the error "googletest was imported but a call to its main()
# was never detected."
os._exit(0)
else:
gmock_test_utils.Main()
|
3,958 |
tear down
|
##############################################################################
#
# Copyright (c) 2003 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
import doctest
import tempfile
import unittest
import transaction
import ZODB.config
import ZODB.tests.util
from ZODB.POSException import ReadOnlyError
class ConfigTestBase(ZODB.tests.util.TestCase):
def _opendb(self, s):
return ZODB.config.databaseFromString(s)
def METHOD_NAME(self):
ZODB.tests.util.TestCase.METHOD_NAME(self)
if getattr(self, "storage", None) is not None:
self.storage.cleanup()
def _test(self, s):
db = self._opendb(s)
try:
self.storage = db._storage
# Do something with the database to make sure it works
cn = db.open()
rt = cn.root()
rt["test"] = 1
transaction.commit()
finally:
db.close()
class ZODBConfigTest(ConfigTestBase):
def test_map_config1(self):
self._test(
"""
<zodb>
<mappingstorage/>
</zodb>
""")
def test_map_config2(self):
self._test(
"""
<zodb>
<mappingstorage/>
cache-size 1000
</zodb>
""")
def test_file_config1(self):
path = tempfile.mktemp()
self._test(
"""
<zodb>
<filestorage>
path %s
</filestorage>
</zodb>
""" % path)
def test_file_config2(self):
path = tempfile.mktemp()
# first pass to actually create database file
self._test(
"""
<zodb>
<filestorage>
path %s
</filestorage>
</zodb>
""" % path)
# write operations must be disallowed on read-only access
cfg = """
<zodb>
<filestorage>
path %s
create false
read-only true
</filestorage>
</zodb>
""" % path
self.assertRaises(ReadOnlyError, self._test, cfg)
def test_demo_config(self):
cfg = """
<zodb unused-name>
<demostorage>
name foo
<mappingstorage/>
</demostorage>
</zodb>
"""
self._test(cfg)
def database_xrefs_config():
r"""
>>> db = ZODB.config.databaseFromString(
... "<zodb>\n<mappingstorage>\n</mappingstorage>\n</zodb>\n")
>>> db.xrefs
True
>>> db = ZODB.config.databaseFromString(
... "<zodb>\nallow-implicit-cross-references true\n"
... "<mappingstorage>\n</mappingstorage>\n</zodb>\n")
>>> db.xrefs
True
>>> db = ZODB.config.databaseFromString(
... "<zodb>\nallow-implicit-cross-references false\n"
... "<mappingstorage>\n</mappingstorage>\n</zodb>\n")
>>> db.xrefs
False
"""
def multi_databases():
    r"""If there are multiple zodb sections -> multidatabase
>>> db = ZODB.config.databaseFromString('''
... <zodb>
... <mappingstorage>
... </mappingstorage>
... </zodb>
... <zodb Foo>
... <mappingstorage>
... </mappingstorage>
... </zodb>
... <zodb>
... database-name Bar
... <mappingstorage>
... </mappingstorage>
... </zodb>
... ''')
>>> sorted(db.databases)
['', 'Bar', 'foo']
>>> db.database_name
''
>>> db.databases[db.database_name] is db
True
>>> db.databases['foo'] is not db
True
>>> db.databases['Bar'] is not db
True
>>> db.databases['Bar'] is not db.databases['foo']
True
Can't have repeats:
>>> ZODB.config.databaseFromString('''
... <zodb 1>
... <mappingstorage>
... </mappingstorage>
... </zodb>
... <zodb 1>
... <mappingstorage>
... </mappingstorage>
... </zodb>
... <zodb 1>
... <mappingstorage>
... </mappingstorage>
... </zodb>
... ''') # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
ZConfig.ConfigurationSyntaxError:
section names must not be re-used within the same container:'1' (line 9)
>>> ZODB.config.databaseFromString('''
... <zodb>
... <mappingstorage>
... </mappingstorage>
... </zodb>
... <zodb>
... <mappingstorage>
... </mappingstorage>
... </zodb>
... ''') # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
ValueError: database_name '' already in databases
"""
def test_suite():
suite = unittest.TestSuite()
suite.addTest(doctest.DocTestSuite(
setUp=ZODB.tests.util.setUp,
METHOD_NAME=ZODB.tests.util.METHOD_NAME,
checker=ZODB.tests.util.checker))
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(
ZODBConfigTest))
return suite
|
3,959 |
feature engineering expand basic
|
import logging
from functools import reduce
from typing import Dict
import talib.abstract as ta
from pandas import DataFrame
from freqtrade.strategy import DecimalParameter, IntParameter, IStrategy
logger = logging.getLogger(__name__)
class freqai_test_multimodel_strat(IStrategy):
"""
Test strategy - used for testing freqAI multimodel functionalities.
    Do not use in production.
"""
minimal_roi = {"0": 0.1, "240": -1}
plot_config = {
"main_plot": {},
"subplots": {
"prediction": {"prediction": {"color": "blue"}},
"target_roi": {
"target_roi": {"color": "brown"},
},
"do_predict": {
"do_predict": {"color": "brown"},
},
},
}
process_only_new_candles = True
stoploss = -0.05
use_exit_signal = True
startup_candle_count: int = 300
can_short = False
linear_roi_offset = DecimalParameter(
0.00, 0.02, default=0.005, space="sell", optimize=False, load=True
)
max_roi_time_long = IntParameter(0, 800, default=400, space="sell", optimize=False, load=True)
def feature_engineering_expand_all(self, dataframe: DataFrame, period: int,
metadata: Dict, **kwargs):
dataframe["%-rsi-period"] = ta.RSI(dataframe, timeperiod=period)
dataframe["%-mfi-period"] = ta.MFI(dataframe, timeperiod=period)
dataframe["%-adx-period"] = ta.ADX(dataframe, timeperiod=period)
return dataframe
def METHOD_NAME(self, dataframe: DataFrame, metadata: Dict, **kwargs):
dataframe["%-pct-change"] = dataframe["close"].pct_change()
dataframe["%-raw_volume"] = dataframe["volume"]
dataframe["%-raw_price"] = dataframe["close"]
return dataframe
def feature_engineering_standard(self, dataframe: DataFrame, metadata: Dict, **kwargs):
dataframe["%-day_of_week"] = dataframe["date"].dt.dayofweek
dataframe["%-hour_of_day"] = dataframe["date"].dt.hour
return dataframe
def set_freqai_targets(self, dataframe: DataFrame, metadata: Dict, **kwargs):
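# Targets (explanatory comment added for clarity): "&-s_close" is the mean close
# over the next label_period_candles candles, expressed relative to the current
# close; "&-s_range" is the max-minus-min spread of close over that same future
# window. FreqAI treats "&-"-prefixed columns as prediction targets.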
dataframe["&-s_close"] = (
dataframe["close"]
.shift(-self.freqai_info["feature_parameters"]["label_period_candles"])
.rolling(self.freqai_info["feature_parameters"]["label_period_candles"])
.mean()
/ dataframe["close"]
- 1
)
dataframe["&-s_range"] = (
dataframe["close"]
.shift(-self.freqai_info["feature_parameters"]["label_period_candles"])
.rolling(self.freqai_info["feature_parameters"]["label_period_candles"])
.max()
-
dataframe["close"]
.shift(-self.freqai_info["feature_parameters"]["label_period_candles"])
.rolling(self.freqai_info["feature_parameters"]["label_period_candles"])
.min()
)
return dataframe
def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
self.freqai_info = self.config["freqai"]
dataframe = self.freqai.start(dataframe, metadata, self)
dataframe["target_roi"] = dataframe["&-s_close_mean"] + dataframe["&-s_close_std"] * 1.25
dataframe["sell_roi"] = dataframe["&-s_close_mean"] - dataframe["&-s_close_std"] * 1.25
return dataframe
def populate_entry_trend(self, df: DataFrame, metadata: dict) -> DataFrame:
enter_long_conditions = [df["do_predict"] == 1, df["&-s_close"] > df["target_roi"]]
if enter_long_conditions:
df.loc[
reduce(lambda x, y: x & y, enter_long_conditions), ["enter_long", "enter_tag"]
] = (1, "long")
enter_short_conditions = [df["do_predict"] == 1, df["&-s_close"] < df["sell_roi"]]
if enter_short_conditions:
df.loc[
reduce(lambda x, y: x & y, enter_short_conditions), ["enter_short", "enter_tag"]
] = (1, "short")
return df
def populate_exit_trend(self, df: DataFrame, metadata: dict) -> DataFrame:
exit_long_conditions = [df["do_predict"] == 1, df["&-s_close"] < df["sell_roi"] * 0.25]
if exit_long_conditions:
df.loc[reduce(lambda x, y: x & y, exit_long_conditions), "exit_long"] = 1
exit_short_conditions = [df["do_predict"] == 1, df["&-s_close"] > df["target_roi"] * 0.25]
if exit_short_conditions:
df.loc[reduce(lambda x, y: x & y, exit_short_conditions), "exit_short"] = 1
return df
|
3,960 |
select
|
"""0MQ polling related functions and classes."""
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
from typing import Any, Dict, List, Optional, Tuple
from zmq.backend import zmq_poll
from zmq.constants import POLLERR, POLLIN, POLLOUT
# -----------------------------------------------------------------------------
# Polling related methods
# -----------------------------------------------------------------------------
class Poller:
"""A stateful poll interface that mirrors Python's built-in poll."""
sockets: List[Tuple[Any, int]]
_map: Dict
def __init__(self) -> None:
self.sockets = []
self._map = {}
def __contains__(self, socket: Any) -> bool:
return socket in self._map
def register(self, socket: Any, flags: int = POLLIN | POLLOUT):
"""p.register(socket, flags=POLLIN|POLLOUT)
Register a 0MQ socket or native fd for I/O monitoring.
register(s,0) is equivalent to unregister(s).
Parameters
----------
socket : zmq.Socket or native socket
A zmq.Socket or any Python object having a ``fileno()``
method that returns a valid file descriptor.
flags : int
The events to watch for. Can be POLLIN, POLLOUT or POLLIN|POLLOUT.
If `flags=0`, socket will be unregistered.
"""
if flags:
if socket in self._map:
idx = self._map[socket]
self.sockets[idx] = (socket, flags)
else:
idx = len(self.sockets)
self.sockets.append((socket, flags))
self._map[socket] = idx
elif socket in self._map:
# unregister sockets registered with no events
self.unregister(socket)
else:
# ignore new sockets with no events
pass
def modify(self, socket, flags=POLLIN | POLLOUT):
"""Modify the flags for an already registered 0MQ socket or native fd."""
self.register(socket, flags)
def unregister(self, socket: Any):
"""Remove a 0MQ socket or native fd for I/O monitoring.
Parameters
----------
socket : Socket
The socket instance to stop polling.
"""
idx = self._map.pop(socket)
self.sockets.pop(idx)
# shift indices after deletion
for socket, flags in self.sockets[idx:]:
self._map[socket] -= 1
def poll(self, timeout: Optional[int] = None) -> List[Tuple[Any, int]]:
"""Poll the registered 0MQ or native fds for I/O.
If there are currently events ready to be processed, this function will return immediately.
Otherwise, this function will return as soon the first event is available or after timeout
milliseconds have elapsed.
Parameters
----------
timeout : int
The timeout in milliseconds. If None, no `timeout` (infinite). This
is in milliseconds to be compatible with ``select.poll()``.
Returns
-------
events : list of tuples
The list of events that are ready to be processed.
This is a list of tuples of the form ``(socket, event_mask)``, where the 0MQ Socket
or integer fd is the first element, and the poll event mask (POLLIN, POLLOUT) is the second.
It is common to call ``events = dict(poller.poll())``,
which turns the list of tuples into a mapping of ``socket : event_mask``.
"""
if timeout is None or timeout < 0:
timeout = -1
elif isinstance(timeout, float):
timeout = int(timeout)
return zmq_poll(self.sockets, timeout=timeout)
def METHOD_NAME(rlist: List, wlist: List, xlist: List, timeout: Optional[float] = None):
"""select(rlist, wlist, xlist, timeout=None) -> (rlist, wlist, xlist)
Return the result of poll as a lists of sockets ready for r/w/exception.
This has the same interface as Python's built-in ``select.select()`` function.
Parameters
----------
timeout : float, int, optional
The timeout in seconds. If None, no timeout (infinite). This is in seconds to be
compatible with ``select.select()``.
rlist : list of sockets/FDs
sockets/FDs to be polled for read events
wlist : list of sockets/FDs
sockets/FDs to be polled for write events
xlist : list of sockets/FDs
sockets/FDs to be polled for error events
Returns
-------
(rlist, wlist, xlist) : tuple of lists of sockets (length 3)
Lists correspond to sockets available for read/write/error events respectively.
"""
if timeout is None:
timeout = -1
# Convert from sec -> ms for zmq_poll.
# zmq_poll accepts 3.x style timeout in ms
timeout = int(timeout * 1000.0)
if timeout < 0:
timeout = -1
sockets = []
for s in set(rlist + wlist + xlist):
flags = 0
if s in rlist:
flags |= POLLIN
if s in wlist:
flags |= POLLOUT
if s in xlist:
flags |= POLLERR
sockets.append((s, flags))
return_sockets = zmq_poll(sockets, timeout)
rlist, wlist, xlist = [], [], []
for s, flags in return_sockets:
if flags & POLLIN:
rlist.append(s)
if flags & POLLOUT:
wlist.append(s)
if flags & POLLERR:
xlist.append(s)
return rlist, wlist, xlist
# -----------------------------------------------------------------------------
# Symbols to export
# -----------------------------------------------------------------------------
__all__ = ['Poller', 'select']
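# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# module). It exercises the Poller API defined above with a pair of inproc
# PAIR sockets; the socket types, endpoint name, and timeout are assumptions
# made purely for this demo. The module-level select-style helper defined
# above wraps the same zmq_poll call with a select.select()-compatible
# signature.
if __name__ == "__main__":
    import zmq

    ctx = zmq.Context.instance()
    a = ctx.socket(zmq.PAIR)
    b = ctx.socket(zmq.PAIR)
    a.bind("inproc://poller-demo")
    b.connect("inproc://poller-demo")

    poller = Poller()
    poller.register(a, POLLIN)            # watch `a` for readability only
    poller.register(b, POLLIN | POLLOUT)  # watch `b` for both directions

    b.send(b"ping")
    events = dict(poller.poll(timeout=100))  # {socket: event_mask}, timeout in ms
    assert events.get(a) == POLLIN
    assert events.get(b, 0) & POLLOUT

    a.close()
    b.close()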
|
3,961 |
get node address
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the Apache 2.0 License.
import infra.remote
import docker
import re
import os
import pathlib
import grp
import infra.github
import time
from loguru import logger as LOG
def is_docker_env():
"""Returns true if the process executing _this_ code already runs inside Docker"""
return os.path.isfile("/.dockerenv")
def is_azure_devops_env():
return "SYSTEM_TEAMFOUNDATIONCOLLECTIONURI" in os.environ
def map_azure_devops_docker_workspace_dir(workspace_dir):
return workspace_dir.replace("__w", "/mnt/vss/_work")
# Docker image name prefix
# To update when runtime images are pushed to ACR
MICROSOFT_REGISTRY_NAME = "mcr.microsoft.com"
DOCKER_IMAGE_NAME_PREFIX = "ccf/app/run"
# Network name
AZURE_DEVOPS_CONTAINER_NETWORK_ENV_VAR = "AGENT_CONTAINERNETWORK"
DOCKER_NETWORK_NAME_LOCAL = "ccf_test_docker_network"
# Identifier for all CCF test containers
CCF_TEST_CONTAINERS_LABEL = "ccf_test"
NODE_STARTUP_WRAPPER_SCRIPT = "docker_wrap.sh"
CONTAINER_IP_REPLACE_STR = "CONTAINER_IP"
def kernel_has_sgx_builtin():
with open("/proc/cpuinfo", "r", encoding="utf-8") as cpu_info:
f = re.compile("^flags.*sgx.*")
for line in cpu_info:
if f.match(line):
return True
return False
class DockerRemote(infra.remote.LocalRemote):
def _stop_container(self, container):
while True:
try:
container.stop()
container.remove()
LOG.info(f"Stopped container {container.name}")
break
except docker.errors.NotFound:
break
except docker.errors.APIError:
# Container may already be in the process of being cleaned up
time.sleep(0.5)
continue
@staticmethod
def make_host(host):
# Bind local RPC address to 0.0.0.0, so that it can be accessed from outside the container
for _, rpc_interface in host.rpc_interfaces.items():
rpc_interface.host = "0.0.0.0"
rpc_interface.public_host = CONTAINER_IP_REPLACE_STR
return host
@staticmethod
def METHOD_NAME(*args, **kwargs):
return CONTAINER_IP_REPLACE_STR
def __init__(
self,
*args,
host=None,
label=None,
local_node_id=None,
version=None,
binary_dir=".",
node_container_image=None,
**kwargs,
):
self.docker_client = docker.DockerClient()
self.container_ip = None # Assigned when container is started
self.host = host
self.binary_dir = binary_dir
# Sanitise container name, replacing illegal characters with underscores
self.container_name = f"{label}_{local_node_id}"
self.container_name = re.sub(r"[^a-zA-Z0-9_.-]", "_", self.container_name)
# Create network to connect all containers to (for n2n communication, etc.).
# In a Docker environment, use existing network (either the one provided by
# ADO or the one already created by the runner).
# Otherwise, create network on the fly.
if is_docker_env() and is_azure_devops_env():
network_name = os.environ[AZURE_DEVOPS_CONTAINER_NETWORK_ENV_VAR]
else:
network_name = DOCKER_NETWORK_NAME_LOCAL
try:
self.network = self.docker_client.networks.get(network_name)
except docker.errors.NotFound:
LOG.debug(f"Creating network {network_name}")
self.network = self.docker_client.networks.create(network_name)
# Stop and delete existing container(s)
if local_node_id == 0:
for c in self.docker_client.containers.list(
all=True, filters={"label": [CCF_TEST_CONTAINERS_LABEL, label]}
):
self._stop_container(c)
LOG.debug(
f'Network {self.network.name} [{self.network.attrs["IPAM"]["Config"][0]["Gateway"]}]'
)
# Group and device for kernel sgx builtin support (or not)
if kernel_has_sgx_builtin():
gid = grp.getgrnam("sgx_prv").gr_gid
devices = (
["/dev/sgx/enclave", "/dev/sgx/provision"]
if os.path.isdir("/dev/sgx")
else None
)
else:
gid = os.getgid()
devices = ["/dev/sgx"] if os.path.isdir("/dev/sgx") else None
# Mount workspace volume
cwd = str(pathlib.Path().resolve())
cwd_host = (
map_azure_devops_docker_workspace_dir(cwd) if is_azure_devops_env() else cwd
)
# Deduce container tag from node version
repo = infra.github.Repository()
if node_container_image is None:
node_container_image = (
f"{MICROSOFT_REGISTRY_NAME}/{DOCKER_IMAGE_NAME_PREFIX}:"
)
if version is not None:
node_container_image += version
else:
suffix = "sgx" if os.path.exists("/dev/sgx") else "virtual-clang15"
node_container_image += f"{infra.github.strip_release_tag_name(repo.get_latest_dev_tag())}-{suffix}"
try:
self.docker_client.images.get(node_container_image)
except docker.errors.ImageNotFound:
LOG.info(f"Pulling image {node_container_image}")
self.docker_client.images.pull(node_container_image)
super().__init__(*args, host=host, **kwargs)
self.command = (
f'./{NODE_STARTUP_WRAPPER_SCRIPT} "{super().get_cmd(include_dir=False)}"'
)
self.container = self.docker_client.containers.create(
node_container_image,
volumes={cwd_host: {"bind": cwd, "mode": "rw"}},
devices=devices,
command=self.command,
name=self.container_name,
init=True,
labels=[label, CCF_TEST_CONTAINERS_LABEL],
publish_all_ports=True,
user=f"{os.getuid()}:{gid}",
working_dir=self.root,
detach=True,
auto_remove=True,
)
self.network.connect(self.container)
LOG.debug(f"Created container {self.container_name} [{node_container_image}]")
def setup(self, use_links=False):
src_path = os.path.join(self.binary_dir, NODE_STARTUP_WRAPPER_SCRIPT)
super().setup(use_links=use_links)
super().cp(src_path, self.root)
def start(self):
LOG.info(self.command)
self.container.start()
self.container.reload() # attrs are cached
self.container_ip = self.container.attrs["NetworkSettings"]["Networks"][
self.network.name
]["IPAddress"]
for _, rpc_interface in self.host.rpc_interfaces.items():
rpc_interface.public_host = self.container_ip
self.hostname = self.container_ip
LOG.debug(f"Started container {self.container_name} [{self.container_ip}]")
def stop(self):
try:
self.container.stop()
LOG.info(f"Stopped container {self.container.name}")
except docker.errors.NotFound:
pass
def suspend(self):
self.container.pause()
def resume(self):
self.container.unpause()
def check_done(self):
try:
self.container.reload()
LOG.debug(self.container.attrs["State"])
return self.container.attrs["State"]["Status"] != "running"
except docker.errors.NotFound:
return True
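# Hedged usage note (added for illustration; not part of the original module):
# DockerRemote instances are normally created by the CCF test infrastructure
# (infra.remote / infra.network) rather than directly. Conceptually the
# lifecycle driven by that infrastructure is:
#   remote = DockerRemote(..., host=host, label="ccf-test", local_node_id=0)
#   remote.setup()   # copies the docker_wrap.sh startup script into the workspace
#   remote.start()   # starts the container and patches RPC interfaces with its IP
#   remote.stop() / remote.suspend() / remote.resume() / remote.check_done()
# The positional arguments elided with "..." are forwarded to
# infra.remote.LocalRemote and are not documented here.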
|
3,962 |
get hostname from dn
|
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import re
POD_REGEX = re.compile('pod-([0-9]+)')
BD_REGEX = re.compile('/BD-([^/]+)/')
APP_REGEX = re.compile('/ap-([^/]+)/')
CEP_REGEX = re.compile('/cep-([^/]+)/')
EPG_REGEX = re.compile('/epg-([^/]+)/')
IP_REGEX = re.compile('/ip-([^/]+)/')
NODE_REGEX = re.compile('node-([0-9]+)')
def parse_capacity_tags(dn):
"""
This parses tags from a dn designator. They look like this:
topology/pod-1/node-101/sys/phys-[eth1/6]/CDeqptMacsectxpkts5min
"""
tags = []
pod = get_pod_from_dn(dn)
if pod:
tags.append("fabric_pod_id:{}".format(pod))
node = get_node_from_dn(dn)
if node:
tags.append("node_id:{}".format(node))
return tags
def get_pod_from_dn(dn):
"""
This parses the pod from a dn designator. They look like this:
topology/pod-1/node-101/sys/phys-[eth1/6]/CDeqptMacsectxpkts5min
"""
return _get_value_from_dn(POD_REGEX, dn)
def get_bd_from_dn(dn):
"""
This parses the bd from the dn designator. They look like this:
uni/tn-DataDog/BD-DataDog-BD1
"""
return _get_value_from_dn(BD_REGEX, dn)
def get_app_from_dn(dn):
"""
This parses the app from the dn designator. They look like this:
uni/tn-DataDog/ap-DtDg-AP1-EcommerceApp/epg-DtDg-Ecomm/HDl2IngrPktsAg1h
"""
return _get_value_from_dn(APP_REGEX, dn)
def get_cep_from_dn(dn):
"""
This parses the cep from the dn designator. They look like this:
uni/tn-DataDog/ap-DtDg-AP1-EcommerceApp/epg-DtDg-Ecomm/cep-00:50:56:9E:FB:48
"""
return _get_value_from_dn(CEP_REGEX, dn)
def get_epg_from_dn(dn):
"""
This parses the epg from the dn designator. They look like this:
uni/tn-DataDog/ap-DtDg-AP1-EcommerceApp/epg-DtDg-Ecomm/HDl2IngrPktsAg1h
"""
return _get_value_from_dn(EPG_REGEX, dn)
def get_ip_from_dn(dn):
"""
This parses the ip from the dn designator. They look like this:
uni/tn-DataDog/ap-DtDg-AP1-EcommerceApp/epg-DtDg-Ecomm/cep-00:50:56:9D:91:B5/ip-[10.10.10.17]
"""
return _get_value_from_dn(IP_REGEX, dn)
def get_node_from_dn(dn):
"""
This parses the node from a dn designator. They look like this:
topology/pod-1/node-101/sys/phys-[eth1/6]/CDeqptMacsectxpkts5min
"""
return _get_value_from_dn(NODE_REGEX, dn)
def _get_value_from_dn(regex, dn):
if not dn:
return None
v = regex.search(dn)
if v:
return v.group(1)
else:
return None
def get_event_tags_from_dn(dn):
"""
This grabs the event tags from the dn designator. They look like this:
uni/tn-DataDog/ap-DtDg-AP1-EcommerceApp/epg-DtDg-Ecomm/HDl2IngrPktsAg1h
"""
tags = []
node = get_node_from_dn(dn)
if node:
tags.append("node:" + node)
app = get_app_from_dn(dn)
if app:
tags.append("app:" + app)
bd = get_bd_from_dn(dn)
if bd:
tags.append("bd:" + bd)
cep = get_cep_from_dn(dn)
if cep:
tags.append("mac:" + cep)
ip = get_ip_from_dn(dn)
if ip:
tags.append("ip:" + ip)
epg = get_epg_from_dn(dn)
if epg:
tags.append("epg:" + epg)
return tags
def METHOD_NAME(dn):
"""
This parses the hostname from a dn designator. They look like this:
topology/pod-1/node-101/sys/phys-[eth1/6]/CDeqptMacsectxpkts5min
"""
pod = get_pod_from_dn(dn)
node = get_node_from_dn(dn)
if pod and node:
return "pod-{}-node-{}".format(pod, node)
else:
return None
def get_fabric_hostname(obj):
"""
This grabs the hostname from the object
The object looks something like this:
{
"dn": "topology/pod-1/node-101/sys/phys-[eth1/6]/CDeqptMacsectxpkts5min"
...
}
"""
attrs = get_attributes(obj)
dn = attrs['dn']
return METHOD_NAME(dn)
def get_attributes(obj):
"""
the json objects look like this:
{
"objType": {
"attributes": {
...
}
}
}
It always has the attributes nested below the object type
This helper provides a way of getting at the attributes
"""
if not obj or type(obj) is not dict:
return {}
keys = list(obj.keys())
if len(keys) > 0:
key = keys[0]
else:
return {}
key_obj = obj.get(key, {})
if type(key_obj) is not dict:
# if the object is not a dict
# it is probably already scoped to attributes
return obj
if key != "attributes":
attrs = key_obj.get('attributes')
if type(attrs) is not dict:
# if the attributes doesn't exist,
# it is probably already scoped to attributes
return obj
else:
# if the attributes exist, we return the value, except if it's not a dict type
attrs = key_obj
if type(attrs) is not dict:
return obj
return attrs
def check_metric_can_be_zero(metric_name, metric_value, json_attributes):
"""
When a counter is reset, don't send a zero because it will look bad on the graphs
This checks if the zero makes sense or not
"""
if "last" in metric_name.lower():
return True
if not metric_value:
return False
try:
if metric_value == 0 or metric_value == "0" or metric_value == "0.000000" or float(metric_value) == 0.0:
if not json_attributes or not json_attributes.get('cnt'):
return False
if json_attributes.get('cnt') == "0" or json_attributes.get('cnt') == 0:
return False
except ValueError:
return False
return True
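if __name__ == "__main__":
    # Hedged, illustrative sketch (not part of the original module): it
    # exercises the dn-parsing helpers above using the sample dn strings from
    # their docstrings, plus the zero-metric heuristic.
    capacity_dn = "topology/pod-1/node-101/sys/phys-[eth1/6]/CDeqptMacsectxpkts5min"
    assert parse_capacity_tags(capacity_dn) == ["fabric_pod_id:1", "node_id:101"]

    event_dn = "uni/tn-DataDog/ap-DtDg-AP1-EcommerceApp/epg-DtDg-Ecomm/HDl2IngrPktsAg1h"
    assert get_event_tags_from_dn(event_dn) == ["app:DtDg-AP1-EcommerceApp", "epg:DtDg-Ecomm"]

    # "last"-style gauges may legitimately be zero; counters whose cnt attribute
    # is 0 are treated as resets and dropped.
    assert check_metric_can_be_zero("pktsLast", "0", {}) is True
    assert check_metric_can_be_zero("pkts5min", "0", {"cnt": "0"}) is False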
|
3,963 |
get add identity
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from federatedml.secureprotol.gmpy_math import invert, is_prime, powmod, tonelli, legendre
from federatedml.secureprotol.number_theory.field.base_galois_field import GaloisFieldElement, GaloisFieldArithmetic
class IntegersModuloPrimeElement(GaloisFieldElement):
"""
A realization of GF: integers modulo a prime
"""
def __init__(self, val, arithmetic=None):
"""
:param val: int
:param arithmetic: IntegersModuloPrimeArithmetic
"""
super(IntegersModuloPrimeElement, self).__init__()
if arithmetic is not None:
# might need rectification
self.val = arithmetic.rectify(val)
else:
# need no rectification
self.val = val
class IntegersModuloPrimeArithmetic(GaloisFieldArithmetic):
"""
For the finite field - integers modulo a prime
"""
def __init__(self, mod):
add_identity = IntegersModuloPrimeElement(0)
mul_identity = IntegersModuloPrimeElement(1)
super(IntegersModuloPrimeArithmetic, self).__init__(add_identity, mul_identity)
self.mod = mod # mod base
self._check_mod_prime()
def rectify(self, a):
"""
Rectify an out-of-range element back to this field
:param a: int
:return: int
"""
return a % self.mod
def add(self, a, b):
"""
:param a: IntegersModuloPrimeElement
:param b: IntegersModuloPrimeElement
:return: IntegersModuloPrimeElement
"""
if not isinstance(a, IntegersModuloPrimeElement) or not isinstance(b, IntegersModuloPrimeElement):
raise TypeError("Addition only supports IntegersModuloPrimeElement objects")
return IntegersModuloPrimeElement((a.val + b.val) % self.mod)
def neg(self, a):
"""
:param a: IntegersModuloPrimeElement
:return: IntegersModuloPrimeElement
"""
if not isinstance(a, IntegersModuloPrimeElement):
raise TypeError("Negative only supports IntegersModuloPrimeElement objects")
return IntegersModuloPrimeElement(self.mod - a.val)
def sub(self, a, b):
"""
:param a: IntegersModuloPrimeElement
:param b: IntegersModuloPrimeElement
:return: IntegersModuloPrimeElement
"""
return self.add(a, self.neg(b))
def mul(self, a, b):
"""
:param a: IntegersModuloPrimeElement
:param b: IntegersModuloPrimeElement
:return: IntegersModuloPrimeElement
"""
if isinstance(a, IntegersModuloPrimeElement) and isinstance(b, IntegersModuloPrimeElement):
return IntegersModuloPrimeElement((a.val * b.val) % self.mod)
elif isinstance(a, IntegersModuloPrimeElement) and isinstance(b, int):
if b == 0:
return self.add_identity
elif b < 0:
raise ValueError("Scalars in multiplication must be non-negative")
else:
return IntegersModuloPrimeElement((a.val * b) % self.mod)
elif isinstance(a, int) and isinstance(b, IntegersModuloPrimeElement):
if a == 0:
return self.add_identity
elif a < 0:
raise ValueError("Scalars in multiplication must be non-negative")
else:
return IntegersModuloPrimeElement((a * b.val) % self.mod)
else:
raise TypeError("Multiplication only supports two IntegersModuloPrimeElement objects" +
"one int plus one object")
def invert(self, a):
"""
:param a: IntegersModuloPrimeElement
:return: IntegersModuloPrimeElement
"""
if not isinstance(a, IntegersModuloPrimeElement):
raise TypeError("Invert only supports IntegersModuloPrimeElement objects")
return IntegersModuloPrimeElement(invert(a.val, self.mod))
def div(self, a, b):
"""
:param a: IntegersModuloPrimeElement
:return: IntegersModuloPrimeElement
"""
if not isinstance(a, IntegersModuloPrimeElement) or not isinstance(b, IntegersModuloPrimeElement):
raise TypeError("Division only supports IntegersModuloPrimeElement objects")
return self.mul(a, self.invert(b))
def pow(self, a, e):
"""
:param a: IntegersModuloPrimeElement
:param e: int
:return: IntegersModuloPrimeElement
"""
if not isinstance(a, IntegersModuloPrimeElement) or not isinstance(e, int):
raise TypeError("Power only supports IntegersModuloPrimeElement to the int's")
if e == 0:
return self.mul_identity
elif e < 0:
raise ValueError("Exponents in power must be non-negative")
else:
return IntegersModuloPrimeElement(powmod(a.val, e, self.mod))
def sqrt(self, a):
"""
sqrt(a) found by the Tonelli–Shanks algorithm
:param a: IntegersModuloPrimeElement
:return: (-1, -1) if a is not a quadratic residue, otherwise the two square roots (root, -root),
where root < self.mod / 2
"""
if not isinstance(a, IntegersModuloPrimeElement):
raise TypeError("Square root only supports an object")
if self.is_a_quadratic_residue(a):
root_raw = tonelli(a.val, self.mod)
root_raw_other = self.mod - root_raw
if root_raw < root_raw_other:
return IntegersModuloPrimeElement(root_raw), IntegersModuloPrimeElement(root_raw_other)
else:
return IntegersModuloPrimeElement(root_raw_other), IntegersModuloPrimeElement(root_raw)
else:
return -1, -1
def is_a_quadratic_residue(self, a):
"""
Check if a is a quadratic residue
:param a: IntegersModuloPrimeElement
:return:
"""
if not isinstance(a, IntegersModuloPrimeElement):
raise ValueError("Only check an object")
return legendre(a.val, self.mod) == 1
def is_positive(self, a):
"""
Check if a is positive in this field, i.e., if a < self.mod / 2
:param a: IntegersModuloPrimeElement
:return:
"""
return a.val < self.mod / 2
def _check_mod_prime(self):
if not is_prime(self.mod):
raise ValueError("Galois fields take only prime orders")
def METHOD_NAME(self):
return self.add_identity
def get_mul_identity(self):
return self.mul_identity
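if __name__ == "__main__":
    # Hedged usage sketch (added for illustration; not part of the original
    # module). The prime 7 and the sample values are assumptions chosen only
    # to exercise the arithmetic defined above.
    gf = IntegersModuloPrimeArithmetic(7)
    a = IntegersModuloPrimeElement(3)
    b = IntegersModuloPrimeElement(5)
    assert gf.add(a, b).val == 1      # (3 + 5) % 7
    assert gf.mul(a, b).val == 1      # (3 * 5) % 7
    assert gf.pow(a, 6).val == 1      # Fermat's little theorem: 3^6 = 1 (mod 7)
    root, other_root = gf.sqrt(IntegersModuloPrimeElement(2))
    assert (root.val, other_root.val) == (3, 4)   # 3^2 = 4^2 = 2 (mod 7)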
|
3,964 |
test tensor train cross 2
|
import tensorly as tl
import pytest
import numpy as np
import itertools
from .._tt_cross import tensor_train_cross
from ....tt_tensor import tt_to_tensor
from tensorly.testing import assert_
skip_if_backend = pytest.mark.skipif(
tl.get_backend() in ("tensorflow", "jax", "cupy"),
reason=f"Operation not supported in {tl.get_backend()}",
)
@skip_if_backend
def test_tensor_train_cross_1():
"""Test for tensor-train"""
## Test 1
# Create tensor with random elements
d = 3
n = 4
tensor = np.arange(n**d, dtype=float).reshape((n,) * d)
tensor = tl.tensor(tensor)
tensor_shape = tensor.shape
# Find TT decomposition of the tensor
rank = [1, 3, 3, 1]
factors = tensor_train_cross(
tensor, rank, tol=1e-5, n_iter_max=10, random_state=1234
)
assert len(factors) == d, "Number of factors should be 4, currently has " + str(
len(factors)
)
# Check that the ranks are correct and that the second mode of each factor
# has the correct number of elements
r_prev_iteration = 1
for k in range(d):
(r_prev_k, n_k, r_k) = factors[k].shape
assert tensor_shape[k] == n_k, (
"Mode 1 of factor "
+ str(k)
+ "needs "
+ str(tensor_shape[k])
+ " dimensions, currently has "
+ str(n_k)
)
assert r_prev_k == r_prev_iteration, " Incorrect ranks of factors "
r_prev_iteration = r_k
@skip_if_backend
def METHOD_NAME():
"""Test for tensor-train"""
rng = tl.check_random_state(1234)
## Test 2
# Create tensor with random elements
tensor = tl.tensor(rng.random_sample([3, 4, 5, 6, 2, 10]))
# Find TT decomposition of the tensor
rank = [1, 2, 2, 3, 2, 2, 1]
factors = tensor_train_cross(tensor, rank, random_state=rng)
for k in range(6):
(r_prev, n_k, r_k) = factors[k].shape
first_error_message = (
"TT rank " + str(k) + " is greater than the maximum allowed "
)
first_error_message += str(r_prev) + " > " + str(rank[k])
assert r_prev <= rank[k], first_error_message
first_error_message = (
"TT rank " + str(k + 1) + " is greater than the maximum allowed "
)
first_error_message += str(r_k) + " > " + str(rank[k + 1])
assert r_k <= rank[k + 1], first_error_message
@skip_if_backend
@pytest.mark.skipif(
tl.get_backend() in ("mxnet"),
reason=f"MXNet bug in advanced indexing (Issue 18919).",
)
def test_tensor_train_cross_3():
"""Test for tensor-train"""
rng = tl.check_random_state(1234)
## Test 3
tol = 10e-5
tensor = tl.tensor(rng.random_sample([3, 3, 3]))
factors = tensor_train_cross(tensor, (1, 3, 3, 1), random_state=rng)
reconstructed_tensor = tt_to_tensor(factors)
error = tl.norm(reconstructed_tensor - tensor, 2)
error /= tl.norm(tensor, 2)
assert_(error < tol, "norm 2 of reconstruction higher than tol")
@skip_if_backend
def test_tensor_train_cross_4():
"""Test for tensor-train"""
# TEST 4
# Random tensor is not really compress-able. Test on a tensor as values of a function
def getEquispaceGrid(n_dim, rng, subdivisions):
"""
Returns a grid of equally-spaced points in the specified number of dimensions
n_dim : The number of dimensions to construct the tensor grid in
rng : The maximum dimension coordinate (grid starts at 0)
subdivisions: Number of subdivisions of the grid to construct
"""
return np.array(
[
np.array(range(subdivisions + 1)) * rng * 1.0 / subdivisions
for i in range(n_dim)
]
)
def evaluateGrid(grid, fcn):
"""
Loops over a grid in specified order and computes the specified function at each
point in the grid, returning a list of computed values.
"""
d, n = grid.shape
values = np.zeros(len(grid[0]) ** len(grid))
idx = 0
for permutation in itertools.product(range(len(grid[0])), repeat=len(grid)):
pt = np.array([grid[i][permutation[i]] for i in range(len(permutation))])
values[idx] = fcn(pt)
idx += 1
return values.reshape((n,) * d)
def func(X):
return sum(X) ** 3
tol = 1e-3
n = 10
d = 4
rng = 1
grid = getEquispaceGrid(d, rng, n)
value = evaluateGrid(grid, func)
value = tl.tensor(value)
# Find TT decomposition of the tensor
rank = [1, 4, 4, 4, 1]
factors = tensor_train_cross(value, rank, tol=tol, random_state=rng)
approx = tt_to_tensor(factors)
error = tl.norm(approx - value, 2)
error /= tl.norm(value, 2)
print(error)
assert_(error < 1e-5, "norm 2 of reconstruction higher than tol")
|
3,965 |
unsqueeze batch
|
#!/usr/bin/env python3
from abc import abstractmethod
from typing import List, Optional, Tuple, Union
import torch
from jaxtyping import Float
from torch import Tensor
from ..utils.getitem import _is_noop_index, _noop_index
from ._linear_operator import IndexType, LinearOperator
from .dense_linear_operator import to_linear_operator
class BlockLinearOperator(LinearOperator):
"""
An abstract LinearOperator class for block tensors.
Subclasses will determine how the different blocks are laid out
(e.g. block diagonal, sum over blocks, etc.)
BlockLinearOperators represent the groups of blocks as a batched Tensor.
The :attr:`block_dim` attribute specifies which dimension of the base LinearOperator
specifies the blocks.
For example, with `block_dim=-3`, a `k x n x n` tensor represents `k` `n x n` blocks.
A `b x k x n x n` tensor represents `k` `b x n x n` blocks.
Args:
- :attr:`base_linear_op` (LinearOperator or Tensor):
Must be at least 3 dimensional.
- :attr:`block_dim` (int):
The dimension that specifies blocks.
"""
def __init__(self, base_linear_op, block_dim=-3):
if base_linear_op.dim() < 3:
raise RuntimeError(
"base_linear_op must be a batch matrix (i.e. at least 3 dimensions - got "
"{}".format(base_linear_op.dim())
)
# Make sure block_dim is negative
block_dim = block_dim if block_dim < 0 else (block_dim - base_linear_op.dim())
# Everything is MUCH easier to write if the last batch dimension is the block dimension
# I.e. block_dim = -3
# We'll permute the dimensions if this is not the case
if block_dim != -3:
positive_block_dim = base_linear_op.dim() + block_dim
base_linear_op = base_linear_op._permute_batch(
*range(positive_block_dim),
*range(positive_block_dim + 1, base_linear_op.dim() - 2),
positive_block_dim,
)
super(BlockLinearOperator, self).__init__(to_linear_operator(base_linear_op))
self.base_linear_op = base_linear_op
@abstractmethod
def _add_batch_dim(self, other):
raise NotImplementedError
def _expand_batch(
self: Float[LinearOperator, "... M N"], batch_shape: Union[torch.Size, List[int]]
) -> Float[LinearOperator, "... M N"]:
batch_shape = torch.Size((*batch_shape, self.base_linear_op.size(-3)))
res = self.__class__(self.base_linear_op._expand_batch(batch_shape))
return res
def _getitem(self, row_index: IndexType, col_index: IndexType, *batch_indices: IndexType) -> LinearOperator:
# First the easy case: just batch indexing
if _is_noop_index(row_index) and _is_noop_index(col_index):
return self.__class__(self.base_linear_op._getitem(row_index, col_index, *batch_indices, _noop_index))
# If either of the dimensions are indices, it's too complicated - go with the base case
if not isinstance(row_index, slice) or not isinstance(col_index, slice):
# It's too complicated to deal with tensor indices in this case - we'll use the super method
return super()._getitem(row_index, col_index, *batch_indices)
# Now we know that row_index and col_index are both slices
num_blocks = self.num_blocks
num_rows, num_cols = self.matrix_shape
row_start, row_end, row_step = row_index.start or 0, row_index.stop or num_rows, row_index.step
col_start, col_end, col_step = col_index.start or 0, col_index.stop or num_cols, col_index.step
# If we have a step, it's too complicated - go with the base case
if row_step is not None or col_step is not None:
return super()._getitem(row_index, col_index, *batch_indices)
# Let's make sure that the slice dimensions perfectly correspond with the number of
# outputs per input that we have
# Otherwise - its too complicated. We'll go with the base case
if (row_start % num_blocks) or (col_start % num_blocks) or (row_end % num_blocks) or (col_end % num_blocks):
return super()._getitem(row_index, col_index, *batch_indices)
# Otherwise - let's divide the slices by the number of outputs per input
row_index = slice(row_start // num_blocks, row_end // num_blocks, None)
col_index = slice(col_start // num_blocks, col_end // num_blocks, None)
# Now we can try the super call!
new_base_linear_op = self.base_linear_op._getitem(row_index, col_index, *batch_indices)
# Now construct a kernel with those indices
return self.__class__(new_base_linear_op, block_dim=-3)
def _matmul(
self: Float[LinearOperator, "*batch M N"],
rhs: Union[Float[torch.Tensor, "*batch2 N C"], Float[torch.Tensor, "*batch2 N"]],
) -> Union[Float[torch.Tensor, "... M C"], Float[torch.Tensor, "... M"]]:
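# Explanatory comment (added): fold the block dimension of `rhs` into a batch
# dimension (subclass-specific, via _add_batch_dim), run a batched matmul
# against the base operator, then undo the reshape with _remove_batch_dim so
# the blocks land back in the output's row dimension.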
isvector = rhs.ndimension() == 1
if isvector:
rhs = rhs.unsqueeze(1)
rhs = self._add_batch_dim(rhs)
res = self.base_linear_op._matmul(rhs)
res = self._remove_batch_dim(res)
if isvector:
res = res.squeeze(-1)
return res
def _bilinear_derivative(self, left_vecs: Tensor, right_vecs: Tensor) -> Tuple[Optional[Tensor], ...]:
if left_vecs.ndim == 1:
left_vecs = left_vecs.unsqueeze(-1)
right_vecs = right_vecs.unsqueeze(-1)
# deal with left_vecs having batch dimensions
elif left_vecs.size(-1) != right_vecs.size(-1):
left_vecs = left_vecs.unsqueeze(-1)
left_vecs = self._add_batch_dim(left_vecs)
right_vecs = self._add_batch_dim(right_vecs)
res = self.base_linear_op._bilinear_derivative(left_vecs, right_vecs)
return res
def _permute_batch(self, *dims: int) -> LinearOperator:
if torch.is_tensor(self.base_linear_op):
base_linear_op = self.base_linear_op.permute(*dims, -3, -2, -1)
else:
base_linear_op = self.base_linear_op._permute_batch(*dims, self.base_linear_op.dim() - 3)
res = self.__class__(base_linear_op)
return res
def METHOD_NAME(self, dim: int) -> LinearOperator:
if torch.is_tensor(self.base_linear_op):
base_linear_op = self.base_linear_op.unsqueeze(dim)
else:
base_linear_op = self.base_linear_op.METHOD_NAME(dim)
res = self.__class__(base_linear_op)
return res
@abstractmethod
def _remove_batch_dim(self, other):
raise NotImplementedError
def _mul_constant(
self: Float[LinearOperator, "*batch M N"], other: Union[float, torch.Tensor]
) -> Float[LinearOperator, "*batch M N"]:
# We're using a custom method here - the constant mul is applied to the base_lazy tensor
# This preserves the block structure
from .constant_mul_linear_operator import ConstantMulLinearOperator
return self.__class__(ConstantMulLinearOperator(self.base_linear_op, other))
def _transpose_nonbatch(self: Float[LinearOperator, "*batch M N"]) -> Float[LinearOperator, "*batch N M"]:
base_op = self.base_linear_op
if isinstance(base_op, LinearOperator):
new_base_op = base_op._transpose_nonbatch()
else:
new_base_op = base_op.transpose(-1, -2)
return self.__class__(new_base_op)
def zero_mean_mvn_samples(
self: Float[LinearOperator, "*batch N N"], num_samples: int
) -> Float[Tensor, "num_samples *batch N"]:
res = self.base_linear_op.zero_mean_mvn_samples(num_samples)
res = self._remove_batch_dim(res.unsqueeze(-1)).squeeze(-1)
return res
|
3,966 |
increment event duration
|
# Copyright 2022 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for jax.monitoring and jax._src.monitoring.
Verify that callbacks are registered/unregistered and invoked correctly to record
events.
"""
from absl.testing import absltest
from jax import monitoring
from jax._src import monitoring as jax_src_monitoring
class MonitoringTest(absltest.TestCase):
def test_record_event(self):
events = []
counters = {} # Map event names to frequency.
def increment_event_counter(event):
if event not in counters:
counters[event] = 0
counters[event] += 1
# Test that we can register multiple callbacks.
monitoring.register_event_listener(events.append)
monitoring.register_event_listener(increment_event_counter)
monitoring.record_event("test_unique_event")
monitoring.record_event("test_common_event")
monitoring.record_event("test_common_event")
self.assertListEqual(events, ["test_unique_event",
"test_common_event", "test_common_event"])
self.assertDictEqual(counters, {"test_unique_event": 1,
"test_common_event": 2})
def test_record_event_durations(self):
durations = {} # Map event names to frequency.
def METHOD_NAME(event, duration):
if event not in durations:
durations[event] = 0.
durations[event] += duration
monitoring.register_event_duration_secs_listener(METHOD_NAME)
monitoring.record_event_duration_secs("test_short_event", 1)
monitoring.record_event_duration_secs("test_short_event", 2)
monitoring.record_event_duration_secs("test_long_event", 10)
self.assertDictEqual(durations, {"test_short_event": 3,
"test_long_event": 10})
def test_unregister_exist_callback_success(self):
original_duration_listeners = jax_src_monitoring.get_event_duration_listeners()
callback = lambda event, durations: None
self.assertNotIn(callback, original_duration_listeners)
monitoring.register_event_duration_secs_listener(callback)
self.assertIn(callback, jax_src_monitoring.get_event_duration_listeners())
# Verify that original listeners list is not modified by register function.
self.assertNotEqual(original_duration_listeners,
jax_src_monitoring.get_event_duration_listeners())
jax_src_monitoring._unregister_event_duration_listener_by_callback(callback)
self.assertEqual(original_duration_listeners,
jax_src_monitoring.get_event_duration_listeners())
def test_unregister_not_exist_callback_fail(self):
callback = lambda event, durations: None
self.assertNotIn(callback,
jax_src_monitoring.get_event_duration_listeners())
with self.assertRaises(AssertionError):
jax_src_monitoring._unregister_event_duration_listener_by_callback(
callback)
def test_unregister_callback_index_in_range_success(self):
original_duration_listeners = jax_src_monitoring.get_event_duration_listeners()
callback = lambda event, durations: None
self.assertNotIn(callback, original_duration_listeners)
monitoring.register_event_duration_secs_listener(callback)
self.assertIn(callback, jax_src_monitoring.get_event_duration_listeners())
# Verify that original listeners list is not modified by register function.
self.assertNotEqual(original_duration_listeners,
jax_src_monitoring.get_event_duration_listeners())
jax_src_monitoring._unregister_event_duration_listener_by_index(-1)
self.assertEqual(original_duration_listeners,
jax_src_monitoring.get_event_duration_listeners())
def test_unregister_callback_index_out_of_range_fail(self):
size = len(jax_src_monitoring.get_event_duration_listeners())
# Verify index >= size raises AssertionError.
with self.assertRaises(AssertionError):
jax_src_monitoring._unregister_event_duration_listener_by_index(size)
# Verify index < -size raises AssertionError.
with self.assertRaises(AssertionError):
jax_src_monitoring._unregister_event_duration_listener_by_index(-size - 1)
def test_get_event_duration_listeners_returns_a_copy(self):
original_duration_listeners = jax_src_monitoring.get_event_duration_listeners()
callback = lambda event, durations: None
original_duration_listeners.append(callback)
self.assertNotIn(callback, jax_src_monitoring.get_event_duration_listeners())
self.assertNotEqual(original_duration_listeners,
jax_src_monitoring.get_event_duration_listeners())
def test_unregister_exist_event_callback_success(self):
original_event_listeners = jax_src_monitoring.get_event_listeners()
callback = lambda event: None
self.assertNotIn(callback, original_event_listeners)
monitoring.register_event_listener(callback)
self.assertIn(callback, jax_src_monitoring.get_event_listeners())
# Verify that original listeners list is not modified by register function.
self.assertNotEqual(original_event_listeners,
jax_src_monitoring.get_event_listeners())
jax_src_monitoring._unregister_event_listener_by_callback(callback)
self.assertEqual(original_event_listeners,
jax_src_monitoring.get_event_listeners())
def test_unregister_not_exist_event_callback_fail(self):
callback = lambda event: None
self.assertNotIn(callback, jax_src_monitoring.get_event_listeners())
with self.assertRaises(AssertionError):
jax_src_monitoring._unregister_event_listener_by_callback(callback)
if __name__ == "__main__":
absltest.main()
|
3,967 |
test detect jboss fuse potential ent
|
"""Test the product fuse."""
from django.test import TestCase
from api.models import ServerInformation
from fingerprinter.jboss_fuse import detect_jboss_fuse, get_version
class ProductFuseTest(TestCase):
"""Tests Product Fuse class."""
def setUp(self):
"""Create test case setup."""
self.server_id = ServerInformation.create_or_retrieve_server_id()
def test_detect_jboss_fuse_present(self):
"""Test the detect_jboss_fuse method."""
source = {
"server_id": self.server_id,
"source_name": "source1",
"source_type": "network",
}
facts = {
"eap_home_bin": {"opt/fuse/": ["jboss-fuse.jar"]},
"jboss_activemq_ver": ["redhat-630187"],
}
product = detect_jboss_fuse(source, facts)
expected = {
"name": "JBoss Fuse",
"presence": "present",
"version": ["Fuse-6.3.0"],
"metadata": {
"server_id": self.server_id,
"source_name": "source1",
"source_type": "network",
"raw_fact_key": "eap_home_bin/jboss_activemq_ver",
},
}
self.assertEqual(product, expected)
def test_detect_jboss_fuse_potential_init(self):
"""Test the detect_jboss_fuse method."""
source = {
"server_id": self.server_id,
"source_name": "source1",
"source_type": "network",
}
facts = {"jboss_fuse_systemctl_unit_files": ["jboss_fuse_init"]}
product = detect_jboss_fuse(source, facts)
expected = {
"name": "JBoss Fuse",
"presence": "potential",
"metadata": {
"server_id": self.server_id,
"source_name": "source1",
"source_type": "network",
"raw_fact_key": "jboss_fuse_systemctl_unit_files",
},
}
self.assertEqual(product, expected)
def test_detect_jboss_fuse_potential_sub(self):
"""Test the detect_jboss_fuse method."""
source = {
"server_id": self.server_id,
"source_name": "source1",
"source_type": "network",
}
facts = {"subman_consumed": [{"name": "JBoss Fuse Sub"}]}
product = detect_jboss_fuse(source, facts)
expected = {
"name": "JBoss Fuse",
"presence": "potential",
"metadata": {
"server_id": self.server_id,
"source_name": "source1",
"source_type": "network",
"raw_fact_key": "subman_consumed",
},
}
self.assertEqual(product, expected)
def METHOD_NAME(self):
"""Test the detect_jboss_fuse method."""
source = {
"server_id": self.server_id,
"source_name": "source1",
"source_type": "satellite",
}
facts = {"entitlements": [{"name": "JBoss Fuse Sub"}]}
product = detect_jboss_fuse(source, facts)
expected = {
"name": "JBoss Fuse",
"presence": "potential",
"metadata": {
"server_id": self.server_id,
"source_name": "source1",
"source_type": "satellite",
"raw_fact_key": "entitlements",
},
}
self.assertEqual(product, expected)
def test_detect_jboss_fuse_absent(self):
"""Test the detect_jboss_fuse method."""
source = {
"server_id": self.server_id,
"source_name": "source1",
"source_type": "satellite",
}
facts = {"entitlements": [{"name": "Satellite Sub"}]}
product = detect_jboss_fuse(source, facts)
expected = {
"name": "JBoss Fuse",
"presence": "absent",
"metadata": {
"server_id": self.server_id,
"source_name": "source1",
"source_type": "satellite",
"raw_fact_key": None,
},
}
self.assertEqual(product, expected)
def test_detect_fuse_present(self):
"""Test the detect_jboss_fuse method."""
source = {
"server_id": self.server_id,
"source_name": "source1",
"source_type": "network",
}
facts = {
"eap_home_bin": {"opt/fuse/": ["jboss-fuse.jar"]},
"fuse_camel_version": ["redhat-630187"],
"jboss_fuse_on_eap_activemq_ver": [
{"homedir": "/foo/bin", "version": ["redhat-630187"]}
],
"jboss_cxf_ver": ["redhat-630187"],
}
product = detect_jboss_fuse(source, facts)
expected = {
"name": "JBoss Fuse",
"presence": "present",
"version": ["Fuse-6.3.0"],
"metadata": {
"server_id": self.server_id,
"source_name": "source1",
"source_type": "network",
"raw_fact_key": "eap_home_bin/fuse_camel_version/"
"jboss_cxf_ver/"
"jboss_fuse_on_eap_activemq_ver",
},
}
self.assertEqual(product, expected)
def test_detect_activemq_fuse_absent(self):
"""Test the detect_jboss_fuse method with activemq version found."""
source = {
"server_id": self.server_id,
"source_name": "source1",
"source_type": "network",
}
facts = {
"jboss_fuse_on_eap_activemq_ver": [
{"homedir": "/foo/bin", "version": ["redhat-630187"]}
]
}
product = detect_jboss_fuse(source, facts)
expected = {
"name": "JBoss Fuse",
"presence": "absent",
"metadata": {
"server_id": self.server_id,
"source_name": "source1",
"source_type": "network",
"raw_fact_key": None,
},
}
self.assertEqual(product, expected)
def test_get_version(self):
"""Test the get_version method."""
eap_camel = [{"homedir": "/foo/bin", "version": ["redhat-620133"]}]
versions = get_version(eap_camel)
expected = ["redhat-620133"]
self.assertEqual(versions, expected)
|
3,968 |
test list
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""This module contains code to test SageMaker ``Actions``"""
from __future__ import absolute_import
import datetime
import logging
import time
import pytest
from sagemaker.lineage import action
from sagemaker.lineage.query import LineageQueryDirectionEnum
def test_create_delete(action_obj):
# fixture does create and then delete, this test ensures it happens at least once
assert action_obj.action_arn
def test_create_delete_with_association(action_obj_with_association):
# fixture does create and then delete, this test ensures it happens at least once
assert action_obj_with_association.action_arn
def test_save(action_obj, sagemaker_session):
action_obj.description = "updated integration test description"
action_obj.status = "Completed"
action_obj.properties = {"k3": "v3"}
action_obj.properties_to_remove = ["k1"]
action_obj.save()
loaded = action.Action.load(
action_name=action_obj.action_name, sagemaker_session=sagemaker_session
)
assert "updated integration test description" == loaded.description
assert "Completed" == loaded.status
assert {"k3": "v3"} == loaded.properties
def test_load(action_obj, sagemaker_session):
assert action_obj.action_name
logging.info(f"loading {action_obj.action_name}")
loaded = action.Action.load(
action_name=action_obj.action_name, sagemaker_session=sagemaker_session
)
assert action_obj.action_arn == loaded.action_arn
def METHOD_NAME(action_objs, sagemaker_session):
slack = datetime.timedelta(minutes=1)
now = datetime.datetime.now(datetime.timezone.utc)
action_names = [actn.action_name for actn in action_objs]
for sort_order in ["Ascending", "Descending"]:
action_names_listed = [
action_listed.action_name
for action_listed in action.Action.list(
created_after=now - slack,
created_before=now + slack,
sort_by="CreationTime",
sort_order=sort_order,
sagemaker_session=sagemaker_session,
)
if action_listed.action_name in action_names
]
if sort_order == "Descending":
action_names_listed = action_names_listed[::-1]
assert action_names == action_names_listed
# sanity check
assert action_names
@pytest.mark.timeout(30)
def test_tag(action_obj, sagemaker_session):
tag = {"Key": "foo", "Value": "bar"}
action_obj.set_tag(tag)
while True:
actual_tags = sagemaker_session.sagemaker_client.list_tags(
ResourceArn=action_obj.action_arn
)["Tags"]
if actual_tags:
break
time.sleep(5)
# When sagemaker-client-config endpoint-url is passed as argument to hit some endpoints,
# length of actual tags will be greater than 1
assert len(actual_tags) > 0
assert actual_tags[0] == tag
@pytest.mark.timeout(30)
def test_tags(action_obj, sagemaker_session):
tags = [{"Key": "foo1", "Value": "bar1"}]
action_obj.set_tags(tags)
while True:
actual_tags = sagemaker_session.sagemaker_client.list_tags(
ResourceArn=action_obj.action_arn
)["Tags"]
if actual_tags:
break
time.sleep(5)
# When sagemaker-client-config endpoint-url is passed as argument to hit some endpoints,
# length of actual tags will be greater than 1
assert len(actual_tags) > 0
assert [actual_tags[-1]] == tags
@pytest.mark.skip("data inconsistency P61661075")
def test_upstream_artifacts(static_model_deployment_action):
artifacts_from_query = static_model_deployment_action.artifacts(
direction=LineageQueryDirectionEnum.ASCENDANTS
)
assert len(artifacts_from_query) > 0
for artifact in artifacts_from_query:
assert "artifact" in artifact.artifact_arn
@pytest.mark.skip("data inconsistency P61661075")
def test_downstream_artifacts(static_approval_action):
artifacts_from_query = static_approval_action.artifacts(
direction=LineageQueryDirectionEnum.DESCENDANTS
)
assert len(artifacts_from_query) > 0
for artifact in artifacts_from_query:
assert "artifact" in artifact.artifact_arn
@pytest.mark.skip("data inconsistency P61661075")
def test_datasets(static_approval_action, static_dataset_artifact, sagemaker_session):
try:
sagemaker_session.sagemaker_client.add_association(
SourceArn=static_dataset_artifact.artifact_arn,
DestinationArn=static_approval_action.action_arn,
AssociationType="ContributedTo",
)
except Exception:
print("Source and Destination association already exists.")
time.sleep(3)
artifacts_from_query = static_approval_action.datasets()
assert len(artifacts_from_query) > 0
for artifact in artifacts_from_query:
assert "artifact" in artifact.artifact_arn
assert artifact.artifact_type == "DataSet"
try:
sagemaker_session.sagemaker_client.delete_association(
SourceArn=static_dataset_artifact.artifact_arn,
DestinationArn=static_approval_action.action_arn,
)
except Exception:
pass
@pytest.mark.skip("data inconsistency P61661075")
def test_endpoints(static_approval_action):
endpoint_contexts_from_query = static_approval_action.endpoints()
assert len(endpoint_contexts_from_query) > 0
for endpoint in endpoint_contexts_from_query:
assert endpoint.context_type == "Endpoint"
assert "endpoint" in endpoint.context_arn
|
3,969 |
out prompt tokens
|
#!/usr/bin/env python
"""An example of how to embed an IPython shell into a running program.
Please see the documentation in the IPython.Shell module for more details.
The accompanying file embed_class_short.py has quick code fragments for
embedding which you can cut and paste in your code once you understand how
things work.
The code in this file is deliberately extra-verbose, meant for learning."""
# The basics to get you going:
# IPython injects get_ipython into builtins, so you can know if you have nested
# copies running.
# Try running this code both at the command line and from inside IPython (with
# %run example-embed.py)
from IPython.terminal.prompts import Prompts, Token
class CustomPrompt(Prompts):
def in_prompt_tokens(self, cli=None):
return [
(Token.Prompt, 'In <'),
(Token.PromptNum, str(self.shell.execution_count)),
(Token.Prompt, '>: '),
]
def METHOD_NAME(self):
return [
(Token.OutPrompt, 'Out<'),
(Token.OutPromptNum, str(self.shell.execution_count)),
(Token.OutPrompt, '>: '),
]
from traitlets.config.loader import Config
try:
get_ipython
except NameError:
nested = 0
cfg = Config()
cfg.TerminalInteractiveShell.prompts_class=CustomPrompt
else:
print("Running nested copies of IPython.")
print("The prompts for the nested copy have been modified")
cfg = Config()
nested = 1
# First import the embeddable shell class
from IPython.terminal.embed import InteractiveShellEmbed
# Now create an instance of the embeddable shell. The first argument is a
# string with options exactly as you would type them if you were starting
# IPython at the system command line. Any parameters you want to define for
# configuration can thus be specified here.
ipshell = InteractiveShellEmbed(config=cfg,
banner1 = 'Dropping into IPython',
exit_msg = 'Leaving Interpreter, back to program.')
# Make a second instance, you can have as many as you want.
ipshell2 = InteractiveShellEmbed(config=cfg,
banner1 = 'Second IPython instance.')
print('\nHello. This is printed from the main controller program.\n')
# You can then call ipshell() anywhere you need it (with an optional
# message):
ipshell('***Called from top level. '
'Hit Ctrl-D to exit interpreter and continue program.\n'
'Note that if you use %kill_embedded, you can fully deactivate\n'
'This embedded instance so it will never turn on again')
print('\nBack in caller program, moving along...\n')
#---------------------------------------------------------------------------
# More details:
# InteractiveShellEmbed instances don't print the standard system banner and
# messages. The IPython banner (which actually may contain initialization
# messages) is available as get_ipython().banner in case you want it.
# InteractiveShellEmbed instances print the following information every time they
# start:
# - A global startup banner.
# - A call-specific header string, which you can use to indicate where in the
# execution flow the shell is starting.
# They also print an exit message every time they exit.
# Both the startup banner and the exit message default to None, and can be set
# either at the instance constructor or at any other time by setting the
# banner and exit_msg attributes.
# The shell instance can be also put in 'dummy' mode globally or on a per-call
# basis. This gives you fine control for debugging without having to change
# code all over the place.
# The code below illustrates all this.
# This is how the global banner and exit_msg can be reset at any point
ipshell.banner2 = 'Entering interpreter - New Banner'
ipshell.exit_msg = 'Leaving interpreter - New exit_msg'
def foo(m):
s = 'spam'
ipshell('***In foo(). Try %whos, or print s or m:')
print('foo says m = ',m)
def bar(n):
s = 'eggs'
ipshell('***In bar(). Try %whos, or print s or n:')
print('bar says n = ',n)
# Some calls to the above functions which will trigger IPython:
print('Main program calling foo("eggs")\n')
foo('eggs')
# The shell can be put in 'dummy' mode where calls to it silently return. This
# allows you, for example, to globally turn off debugging for a program with a
# single call.
ipshell.dummy_mode = True
print('\nTrying to call IPython which is now "dummy":')
ipshell()
print('Nothing happened...')
# The global 'dummy' mode can still be overridden for a single call
print('\nOverriding dummy mode manually:')
ipshell(dummy=False)
# Reactivate the IPython shell
ipshell.dummy_mode = False
print('You can even have multiple embedded instances:')
ipshell2()
print('\nMain program calling bar("spam")\n')
bar('spam')
print('Main program finished. Bye!')
|
3,970 |
test create endpoint no wait
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import pytest
from mock import MagicMock, Mock
import sagemaker
MODEL_NAME = "mymodelname"
ENDPOINT_CONFIG_NAME = "myendpointconfigname"
ENDPOINT_NAME = "myendpointname"
ROLE = "myimrole"
EXPANDED_ROLE = "arn:aws:iam::111111111111:role/ExpandedRole"
IMAGE = "myimage"
FULL_CONTAINER_DEF = {"Environment": {}, "Image": IMAGE, "ModelDataUrl": "s3://mybucket/mymodel"}
VPC_CONFIG = {"Subnets": ["subnet-foo"], "SecurityGroups": ["sg-foo"]}
INITIAL_INSTANCE_COUNT = 1
INSTANCE_TYPE = "ml.c4.xlarge"
ACCELERATOR_TYPE = "ml.eia.medium"
REGION = "us-west-2"
@pytest.fixture()
def sagemaker_session():
boto_mock = MagicMock(name="boto_session", region_name=REGION)
ims = sagemaker.Session(boto_session=boto_mock)
ims.expand_role = Mock(return_value=EXPANDED_ROLE)
return ims
def test_create_model(sagemaker_session):
returned_name = sagemaker_session.create_model(
name=MODEL_NAME, role=ROLE, container_defs=FULL_CONTAINER_DEF, vpc_config=VPC_CONFIG
)
assert returned_name == MODEL_NAME
sagemaker_session.sagemaker_client.create_model.assert_called_once_with(
ModelName=MODEL_NAME,
PrimaryContainer=FULL_CONTAINER_DEF,
ExecutionRoleArn=EXPANDED_ROLE,
VpcConfig=VPC_CONFIG,
)
def test_create_model_expand_primary_container(sagemaker_session):
sagemaker_session.create_model(name=MODEL_NAME, role=ROLE, container_defs=IMAGE)
_1, _2, create_model_kwargs = sagemaker_session.sagemaker_client.create_model.mock_calls[0]
assert create_model_kwargs["PrimaryContainer"] == {"Environment": {}, "Image": IMAGE}
def test_create_endpoint_config(sagemaker_session):
returned_name = sagemaker_session.create_endpoint_config(
name=ENDPOINT_CONFIG_NAME,
model_name=MODEL_NAME,
initial_instance_count=INITIAL_INSTANCE_COUNT,
instance_type=INSTANCE_TYPE,
)
assert returned_name == ENDPOINT_CONFIG_NAME
expected_pvs = [
{
"ModelName": MODEL_NAME,
"InitialInstanceCount": INITIAL_INSTANCE_COUNT,
"InstanceType": INSTANCE_TYPE,
"InitialVariantWeight": 1,
"VariantName": "AllTraffic",
}
]
sagemaker_session.sagemaker_client.create_endpoint_config.assert_called_once_with(
EndpointConfigName=ENDPOINT_CONFIG_NAME, ProductionVariants=expected_pvs, Tags=[]
)
def test_create_endpoint_config_with_accelerator(sagemaker_session):
returned_name = sagemaker_session.create_endpoint_config(
name=ENDPOINT_CONFIG_NAME,
model_name=MODEL_NAME,
initial_instance_count=INITIAL_INSTANCE_COUNT,
instance_type=INSTANCE_TYPE,
accelerator_type=ACCELERATOR_TYPE,
)
assert returned_name == ENDPOINT_CONFIG_NAME
expected_pvs = [
{
"ModelName": MODEL_NAME,
"InitialInstanceCount": INITIAL_INSTANCE_COUNT,
"InstanceType": INSTANCE_TYPE,
"InitialVariantWeight": 1,
"VariantName": "AllTraffic",
"AcceleratorType": ACCELERATOR_TYPE,
}
]
sagemaker_session.sagemaker_client.create_endpoint_config.assert_called_once_with(
EndpointConfigName=ENDPOINT_CONFIG_NAME, ProductionVariants=expected_pvs, Tags=[]
)
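# With wait=False the call below is expected to return right after the
# create_endpoint request, without polling the endpoint status (contrast with
# test_create_endpoint_wait further down, which asserts wait_for_endpoint runs).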
def METHOD_NAME(sagemaker_session):
returned_name = sagemaker_session.create_endpoint(
endpoint_name=ENDPOINT_NAME, config_name=ENDPOINT_CONFIG_NAME, wait=False
)
assert returned_name == ENDPOINT_NAME
sagemaker_session.sagemaker_client.create_endpoint.assert_called_once_with(
EndpointName=ENDPOINT_NAME, EndpointConfigName=ENDPOINT_CONFIG_NAME, Tags=[]
)
def test_create_endpoint_wait(sagemaker_session):
sagemaker_session.wait_for_endpoint = Mock()
returned_name = sagemaker_session.create_endpoint(
endpoint_name=ENDPOINT_NAME, config_name=ENDPOINT_CONFIG_NAME
)
assert returned_name == ENDPOINT_NAME
sagemaker_session.sagemaker_client.create_endpoint.assert_called_once_with(
EndpointName=ENDPOINT_NAME, EndpointConfigName=ENDPOINT_CONFIG_NAME, Tags=[]
)
sagemaker_session.wait_for_endpoint.assert_called_once_with(ENDPOINT_NAME)
|
3,971 |
random element
|
"""
Ring of pari objects
AUTHORS:
- William Stein (2004): Initial version.
- Simon King (2011-08-24): Use UniqueRepresentation, element_class and
proper initialisation of elements.
"""
# ****************************************************************************
# Copyright (C) 2004 William Stein <[email protected]>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
# ****************************************************************************
import sage.libs.pari.all as pari
import sage.rings.ring as ring
from sage.structure.element import RingElement
from sage.structure.richcmp import richcmp
from sage.misc.fast_methods import Singleton
class Pari(RingElement):
"""
Element of Pari pseudo-ring.
"""
def __init__(self, x, parent=None):
"""
EXAMPLES::
sage: R = PariRing()
sage: f = R('x^3 + 1/2')
sage: f
x^3 + 1/2
sage: type(f)
<class 'sage.rings.pari_ring.PariRing_with_category.element_class'>
sage: loads(f.dumps()) == f
True
"""
if parent is None:
parent = _inst
RingElement.__init__(self, parent)
self.__x = pari.pari(x)
def __repr__(self):
"""
EXAMPLES::
sage: R = PariRing()
sage: a = R(3); a
3
"""
return str(self.__x)
def _add_(self, other):
"""
EXAMPLES::
sage: R = PariRing()
sage: b = R(11)
sage: a = R(3)
sage: a + b
14
"""
return self.__class__(self.__x + other.__x, parent=_inst)
def _sub_(self, other):
"""
EXAMPLES::
sage: R = PariRing()
sage: a = R(3)
sage: b = R(11)
sage: b - a
8
"""
return self.__class__(self.__x - other.__x, parent=_inst)
def _mul_(self, other):
"""
EXAMPLES::
sage: R = PariRing()
sage: a = R(3)
sage: b = R(11)
sage: b * a
33
"""
return self.__class__(self.__x * other.__x, parent=_inst)
def _div_(self, other):
"""
EXAMPLES::
sage: R = PariRing()
sage: a = R(3)
sage: b = R(11)
sage: b / a
11/3
"""
        return self.__class__(self.__x * (~other.__x), parent=_inst)
def __neg__(self):
"""
EXAMPLES::
sage: R = PariRing()
sage: a = R(3)
sage: -a
-3
"""
return self.__class__(-self.__x, parent=_inst)
def __pow__(self, other):
"""
EXAMPLES::
sage: R = PariRing()
sage: a = R(3)
sage: a^2
9
"""
        if other not in PariRing():
other = Pari(other)
return self.__class__(self.__x ** other.__x, parent=_inst)
def __invert__(self):
"""
EXAMPLES::
sage: R = PariRing()
sage: a = R(3)
sage: ~a
1/3
"""
return self.__class__(~self.__x, parent=_inst)
def _richcmp_(self, other, op):
"""
EXAMPLES::
sage: R = PariRing()
sage: a = R(3)
sage: b = R(11)
sage: a < b
True
sage: a == b
False
sage: a > b
False
"""
return richcmp(self.__x, other.__x, op)
def __int__(self):
return int(self.__x)
class PariRing(Singleton, ring.Ring):
"""
EXAMPLES::
sage: R = PariRing(); R
Pseudoring of all PARI objects.
sage: loads(R.dumps()) is R
True
"""
Element = Pari
def __init__(self):
ring.Ring.__init__(self, self)
def __repr__(self):
return 'Pseudoring of all PARI objects.'
def _element_constructor_(self, x):
if isinstance(x, Pari):
return x
return self.element_class(x, parent=self)
def is_field(self, proof=True):
return False
def characteristic(self):
raise RuntimeError("Not defined.")
def METHOD_NAME(self, x=None, y=None, distribution=None):
"""
Return a random integer in Pari.
.. NOTE::
The given arguments are passed to ``ZZ.random_element(...)``.
INPUT:
        - `x`, `y` -- optional integers that are the lower and upper bounds
for the result. If only `x` is provided, then the result is
between 0 and `x-1`, inclusive. If both are provided, then the
result is between `x` and `y-1`, inclusive.
- `distribution` -- optional string, so that ``ZZ`` can make sense
of it as a probability distribution.
EXAMPLES::
sage: R = PariRing()
sage: R.random_element().parent() is R
True
sage: R(5) <= R.random_element(5,13) < R(13)
True
sage: R.random_element(distribution="1/n").parent() is R
True
"""
from sage.rings.integer_ring import ZZ
return self(ZZ.METHOD_NAME(x, y, distribution))
def zeta(self):
"""
Return -1.
EXAMPLES::
sage: R = PariRing()
sage: R.zeta()
-1
"""
return self(-1)
_inst = PariRing()
|
3,972 |
properties
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetPolicyResult',
'AwaitableGetPolicyResult',
'get_policy',
'get_policy_output',
]
@pulumi.output_type
class GetPolicyResult:
"""
Policy model.
"""
def __init__(__self__, id=None, name=None, METHOD_NAME=None, system_data=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if METHOD_NAME and not isinstance(METHOD_NAME, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", METHOD_NAME)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Gets or sets the Id of the resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
Gets or sets the name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def METHOD_NAME(self) -> 'outputs.PolicyModelPropertiesResponse':
"""
Policy model properties.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.PolicyModelResponseSystemData':
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> str:
"""
Gets or sets the type of the resource.
"""
return pulumi.get(self, "type")
class AwaitableGetPolicyResult(GetPolicyResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetPolicyResult(
id=self.id,
name=self.name,
METHOD_NAME=self.METHOD_NAME,
system_data=self.system_data,
type=self.type)
def get_policy(policy_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
vault_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPolicyResult:
"""
Gets the details of the policy.
Azure REST API version: 2021-02-16-preview.
:param str policy_name: The policy name.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str vault_name: The vault name.
"""
__args__ = dict()
__args__['policyName'] = policy_name
__args__['resourceGroupName'] = resource_group_name
__args__['vaultName'] = vault_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:datareplication:getPolicy', __args__, opts=opts, typ=GetPolicyResult).value
return AwaitableGetPolicyResult(
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
METHOD_NAME=pulumi.get(__ret__, 'properties'),
system_data=pulumi.get(__ret__, 'system_data'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_policy)
def get_policy_output(policy_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
vault_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetPolicyResult]:
"""
Gets the details of the policy.
Azure REST API version: 2021-02-16-preview.
:param str policy_name: The policy name.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str vault_name: The vault name.
"""
...
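# Illustrative usage only (the resource names below are placeholders, not part
# of this module): inside a Pulumi program one might write
#
#     policy = get_policy(policy_name="my-policy",
#                         resource_group_name="my-rg",
#                         vault_name="my-vault")
#     pulumi.export("policyId", policy.id)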
|
3,973 |
get entity type
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper Field model classes for Ontology explorer."""
import re
from typing import List, Optional
from yamlformat.validator.entity_type_lib import EntityType
FQ_FIELD_NAME = re.compile(
r'(^[a-z]+[a-z0-9]*(?:_[a-z]+[a-z0-9]*)*)((?:_[0-9]+)+)?$')
class StandardField(object):
"""A class to represent a generic field without increment or optionality.
Args:
namespace_name: a field's defined namespace as a string.
    standard_field_name: the un-incremented name of the field as a string;
must be lower-case and properly formatted.
increment: [Optional] a field's enumerated value suffixed onto the
field name.
Attributes:
namespace: the name of the namespace as a string
name: the field name as a string.
increment: a field's enumerated value suffixed onto the field name.
returns: An instance of the StandardField class.
"""
def __init__(self,
namespace_name: str,
standard_field_name: str,
increment: Optional[str] = ''):
super().__init__()
if not FQ_FIELD_NAME.match(standard_field_name + increment):
raise ValueError(
f'{namespace_name}/{standard_field_name}{increment} format error')
else:
self._namespace = namespace_name
self._name = standard_field_name
self._increment = increment
def __hash__(self):
return hash((self._namespace, self._name, self._increment))
def __eq__(self, other):
try:
namespace_eq = self._namespace == other.GetNamespaceName()
name_eq = self._name == other.GetStandardFieldName()
increment_eq = self._increment == other.GetIncrement()
return name_eq and namespace_eq and increment_eq
    except AttributeError as ae:
      print(ae)
      return NotImplemented
def __repr__(self):
return f'{self._name}{self._increment}'
def GetNamespaceName(self) -> str:
"""Returns namespace variable as a string."""
return self._namespace
def GetStandardFieldName(self) -> str:
"""Returns the unqualified field name.
without any increment as a string
"""
return self._name
def GetIncrement(self) -> str:
"""Returns the EntityType Field's increment as a string."""
return self._increment
class EntityTypeField(StandardField):
"""A class to represent a field assigned to a type and extends StandardField.
Attributes:
namespace_name: a field's defined namespace as a string.
standard_field_name: the name of the field as a string.
increment: the increment of the field under a type as a string.
    is_optional: optionality of the field relative to its Entity Type as a
boolean.
Returns:
An instance of the EntityTypeField class.
"""
def __init__(self,
namespace_name: str,
standard_field_name: str,
is_optional: bool,
increment: Optional[str] = ''):
super().__init__(namespace_name, standard_field_name, increment)
self._is_optional = is_optional
def __hash__(self):
return hash(
(self._namespace, self._name, self._increment, self._is_optional))
def __eq__(self, other):
if isinstance(other, StandardField):
return super().__eq__(other)
elif not isinstance(other, self.__class__):
raise TypeError(
          f'{str(other)} and {str(self)} must be EntityTypeField objects')
else:
standard_eq = super().__eq__(other)
optional_eq = self._is_optional == other.IsOptional()
return standard_eq and optional_eq
def __str__(self):
standard_str = super().__str__()
optionality = 'optional' if self._is_optional else 'required'
return f'{standard_str}: {optionality}'
def IsOptional(self) -> bool:
"""Returns the optionality of the field as a boolean."""
return self._is_optional
class Match(object):
"""A data container class to hold the information about a match.
A match is between EntityTypeField objects and an EntityType object.
Attributes:
field_list: list of StandardField objects.
entity_type: An instance of EntityType class.
match_score: an integer in [0, 100] representing the closeness of match
between field_list and entity_type.
Returns:
An instance of the Match class
"""
def __init__(self, field_list: List[StandardField], entity_type: EntityType,
match_score: float):
"""Init.
Args:
field_list: a list of StandardField objects
entity_type: an entity type which implements a subset of field_list
match_score: the closeness of a match between field_list and entity type,
integer in [0, 100].
"""
super().__init__()
self._field_list = field_list
self._entity_type = entity_type
self._match_score = match_score
def __eq__(self, other):
field_eq = self._field_list == other.GetFieldList()
type_eq = self._entity_type == other.METHOD_NAME()
match_eq = self._match_score == other.GetMatchScore()
return field_eq and type_eq and match_eq
def __repr__(self):
return f'{self._entity_type.typename} -- score:{str(self._match_score)}'
def GetFieldList(self) -> List[EntityTypeField]:
"""Returns the list of EntityTypeField objects for a match."""
return self._field_list
def METHOD_NAME(self) -> EntityType:
"""Returns the entity type for a match."""
return self._entity_type
def GetMatchScore(self) -> int:
"""Returns the score in [0, 100] for a match."""
return self._match_score
def StandardizeField(field: EntityTypeField) -> StandardField:
return StandardField(field.GetNamespaceName(),
field.GetStandardFieldName(),
field.GetIncrement())
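if __name__ == '__main__':
  # Minimal illustration (the namespace and field name below are made up, not
  # taken from the ontology): build an EntityTypeField, derive its standardized
  # form, and compare the two.
  typed_field = EntityTypeField('HVAC', 'zone_air_temperature_sensor',
                                is_optional=False, increment='_1')
  plain_field = StandardizeField(typed_field)
  print(typed_field)   # zone_air_temperature_sensor_1: required
  print(plain_field)   # zone_air_temperature_sensor_1
  # Equality against a StandardField ignores optionality.
  print(typed_field == plain_field)  # True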
|
3,974 |
read config event
|
"""
Copyright 2018 Grid Singularity
This file is part of Grid Singularity Exchange.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from enum import Enum
from logging import getLogger
from typing import Dict, Optional, TYPE_CHECKING
from cached_property import cached_property
from gsy_e.gsy_e_core.exceptions import GSyException
from gsy_e.gsy_e_core.util import TaggedLogWrapper
log = getLogger(__name__)
if TYPE_CHECKING:
from gsy_e.models.area import Area
from gsy_e.models.strategy.state import StateInterface
class AssetType(Enum):
"""Enum class for defining the asset type"""
PRODUCER = 0
CONSUMER = 1
PROSUMER = 2
UNDEFINED = 3
class AreaBehaviorBase:
    """Base class used by area-behaviour-defining classes such as `BaseStrategy`."""
def __init__(self):
# `area` is the area we trade in
self.area: Optional["Area"] = None
# `owner` is the area of which we are the strategy, usually a child of `area`
self.owner: Optional["Area"] = None
@cached_property
def _log(self):
return TaggedLogWrapper(log, f"{self.owner.name}:{self.__class__.__name__}")
@property
def log(self):
"""Select the appropriate logger for the strategy logs"""
if not self.owner:
log.warning("Logging without area in %s, using default logger",
self.__class__.__name__)
return log
return self._log
def event_on_disabled_area(self) -> None:
"""Override to execute actions on disabled areas on every market cycle"""
def METHOD_NAME(self) -> None:
"""Override to deal with events that update the SimulationConfig object"""
def _read_or_rotate_profiles(self, reconfigure: bool = False) -> None:
"""Override to define how the strategy will read or rotate its profiles"""
raise NotImplementedError
def deactivate(self) -> None:
"""Handles deactivate event"""
def area_reconfigure_event(self, *args, **kwargs) -> None:
"""Reconfigure the strategy properties at runtime using the provided arguments.
This method is triggered when the strategy is updated while the simulation is
running. The update can happen via live events (triggered by the user) or scheduled events.
"""
raise NotImplementedError
@property
def state(self) -> "StateInterface":
"""Get the state class of the strategy. Needs to be implemented by all strategies"""
raise NotImplementedError
def get_state(self) -> Dict:
"""Retrieve the current state object of the strategy in dict format."""
try:
return self.state.get_state()
except AttributeError as ex:
raise GSyException(
"Strategy does not have a state. "
"State is required to support save state functionality.") from ex
def restore_state(self, saved_state: Dict) -> None:
"""Restore the current state object of the strategy from dict format."""
try:
self.state.restore_state(saved_state)
except AttributeError as ex:
raise GSyException(
"Strategy does not have a state. "
"State is required to support load state functionality.") from ex
@property
def asset_type(self):
"""Return the asset type of the strategy. Should be implemented by all children."""
raise NotImplementedError
@staticmethod
def deserialize_args(constructor_args: Dict) -> Dict:
"""Deserialize the constructor arguments."""
return constructor_args
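if __name__ == "__main__":
    # Illustrative sketch only, not part of gsy_e: a concrete behaviour has to
    # override the hooks that raise NotImplementedError; real strategies also
    # implement the `state` and `asset_type` properties.
    class _NoopBehaviour(AreaBehaviorBase):
        def _read_or_rotate_profiles(self, reconfigure: bool = False) -> None:
            pass

        def area_reconfigure_event(self, *args, **kwargs) -> None:
            pass

    behaviour = _NoopBehaviour()
    # With no owner area set, the `log` property falls back to the module logger.
    behaviour.log.info("noop behaviour created")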
|
3,975 |
load
|
"""AppArmor control for host."""
from __future__ import annotations
import logging
from pathlib import Path
import shutil
from awesomeversion import AwesomeVersion
from ..coresys import CoreSys, CoreSysAttributes
from ..exceptions import DBusError, HostAppArmorError
from ..resolution.const import UnsupportedReason
from ..utils.apparmor import validate_profile
from .const import HostFeature
_LOGGER: logging.Logger = logging.getLogger(__name__)
class AppArmorControl(CoreSysAttributes):
"""Handle host AppArmor controls."""
    def __init__(self, coresys: CoreSys):
        """Initialize host AppArmor handling."""
self.coresys: CoreSys = coresys
self._profiles: set[str] = set()
@property
def available(self) -> bool:
"""Return True if AppArmor is available on host."""
return (
HostFeature.OS_AGENT in self.sys_host.features
and UnsupportedReason.APPARMOR not in self.sys_resolution.unsupported
)
@property
    def version(self) -> AwesomeVersion | None:
        """Return the host's AppArmor version."""
return self.sys_dbus.agent.apparmor.version
def exists(self, profile_name: str) -> bool:
"""Return True if a profile exists."""
return profile_name in self._profiles
def _get_profile(self, profile_name: str) -> Path:
"""Get a profile from AppArmor store."""
if profile_name not in self._profiles:
raise HostAppArmorError(
f"Can't find {profile_name} for removing", _LOGGER.error
)
return Path(self.sys_config.path_apparmor, profile_name)
async def METHOD_NAME(self) -> None:
"""Load available profiles."""
for content in self.sys_config.path_apparmor.iterdir():
if not content.is_file():
continue
self._profiles.add(content.name)
_LOGGER.info("Loading AppArmor Profiles: %s", self._profiles)
# Load profiles
if self.available:
for profile_name in self._profiles:
try:
await self._load_profile(profile_name)
except HostAppArmorError:
pass
else:
_LOGGER.warning("AppArmor is not enabled on host")
    async def load_profile(self, profile_name: str, profile_file: Path) -> None:
        """Load or update a new or existing profile in AppArmor."""
if not validate_profile(profile_name, profile_file):
raise HostAppArmorError(
f"AppArmor profile '{profile_name}' is not valid", _LOGGER.error
)
# Copy to AppArmor folder
dest_profile: Path = Path(self.sys_config.path_apparmor, profile_name)
try:
await self.sys_run_in_executor(shutil.copyfile, profile_file, dest_profile)
except OSError as err:
raise HostAppArmorError(
f"Can't copy {profile_file}: {err}", _LOGGER.error
) from err
# Load profiles
_LOGGER.info("Adding/updating AppArmor profile: %s", profile_name)
self._profiles.add(profile_name)
if not self.available:
return
await self._load_profile(profile_name)
    async def remove_profile(self, profile_name: str) -> None:
        """Remove an AppArmor profile."""
profile_file: Path = self._get_profile(profile_name)
# Unload if apparmor is enabled
if self.available:
await self._unload_profile(profile_name)
try:
await self.sys_run_in_executor(profile_file.unlink)
except OSError as err:
raise HostAppArmorError(
f"Can't remove profile: {err}", _LOGGER.error
) from err
_LOGGER.info("Removing AppArmor profile: %s", profile_name)
self._profiles.remove(profile_name)
    async def backup_profile(self, profile_name: str, backup_file: Path) -> None:
        """Back up a profile into a new file."""
profile_file: Path = self._get_profile(profile_name)
try:
await self.sys_run_in_executor(shutil.copy, profile_file, backup_file)
except OSError as err:
raise HostAppArmorError(
f"Can't backup profile {profile_name}: {err}", _LOGGER.error
) from err
async def _load_profile(self, profile_name: str) -> None:
"""Load a profile on the host."""
try:
await self.sys_dbus.agent.apparmor.load_profile(
self.sys_config.path_extern_apparmor.joinpath(profile_name),
self.sys_config.path_extern_apparmor_cache,
)
except DBusError as err:
raise HostAppArmorError(
f"Can't load profile {profile_name}: {err!s}", _LOGGER.error
) from err
async def _unload_profile(self, profile_name: str) -> None:
"""Unload a profile on the host."""
try:
await self.sys_dbus.agent.apparmor.unload_profile(
self.sys_config.path_extern_apparmor.joinpath(profile_name),
self.sys_config.path_extern_apparmor_cache,
)
except DBusError as err:
raise HostAppArmorError(
f"Can't unload profile {profile_name}: {err!s}", _LOGGER.error
) from err
|
3,976 |
check nonlinear
|
# Implementation of the TCN proposed in
# Luo et al. "Conv-TasNet: Surpassing ideal time–frequency
# magnitude masking for speech separation."
#
# The code is based on:
# https://github.com/kaituoxu/Conv-TasNet/blob/master/src/conv_tasnet.py
#
import torch
import torch.nn as nn
EPS = torch.finfo(torch.get_default_dtype()).eps
class TemporalConvNet(nn.Module):
def __init__(self, N, B, H, P, X, R, norm_type="gLN", causal=False):
"""Basic Module of tasnet.
Args:
N: Number of filters in autoencoder
B: Number of channels in bottleneck 1 * 1-conv block
H: Number of channels in convolutional blocks
P: Kernel size in convolutional blocks
X: Number of convolutional blocks in each repeat
R: Number of repeats
norm_type: BN, gLN, cLN
causal: causal or non-causal
"""
super().__init__()
# Components
# [M, N, K] -> [M, N, K]
layer_norm = ChannelwiseLayerNorm(N)
# [M, N, K] -> [M, B, K]
bottleneck_conv1x1 = nn.Conv1d(N, B, 1, bias=False)
# [M, B, K] -> [M, B, K]
repeats = []
for r in range(R):
blocks = []
for x in range(X):
dilation = 2**x
padding = (P - 1) * dilation if causal else (P - 1) * dilation // 2
blocks += [
TemporalBlock(
B,
H,
P,
stride=1,
padding=padding,
dilation=dilation,
norm_type=norm_type,
causal=causal,
)
]
repeats += [nn.Sequential(*blocks)]
temporal_conv_net = nn.Sequential(*repeats)
# Put together (except mask_conv1x1, modified from the original code)
self.network = nn.Sequential(layer_norm, bottleneck_conv1x1, temporal_conv_net)
def forward(self, mixture_w):
"""Keep this API same with TasNet.
Args:
mixture_w: [M, N, K], M is batch size
Returns:
bottleneck_feature: [M, B, K]
"""
return self.network(mixture_w) # [M, N, K] -> [M, B, K]
class TemporalBlock(nn.Module):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
norm_type="gLN",
causal=False,
):
super().__init__()
# [M, B, K] -> [M, H, K]
conv1x1 = nn.Conv1d(in_channels, out_channels, 1, bias=False)
prelu = nn.PReLU()
norm = chose_norm(norm_type, out_channels)
# [M, H, K] -> [M, B, K]
dsconv = DepthwiseSeparableConv(
out_channels,
in_channels,
kernel_size,
stride,
padding,
dilation,
norm_type,
causal,
)
# Put together
self.net = nn.Sequential(conv1x1, prelu, norm, dsconv)
def forward(self, x):
"""Forward.
Args:
x: [M, B, K]
Returns:
[M, B, K]
"""
residual = x
out = self.net(x)
        # TODO(Jing): when P = 3 this works fine, but when P = 2 we may need to pad?
return out + residual # look like w/o F.relu is better than w/ F.relu
# return F.relu(out + residual)
class DepthwiseSeparableConv(nn.Module):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
norm_type="gLN",
causal=False,
):
super().__init__()
# Use `groups` option to implement depthwise convolution
# [M, H, K] -> [M, H, K]
depthwise_conv = nn.Conv1d(
in_channels,
in_channels,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=in_channels,
bias=False,
)
if causal:
chomp = Chomp1d(padding)
prelu = nn.PReLU()
norm = chose_norm(norm_type, in_channels)
# [M, H, K] -> [M, B, K]
pointwise_conv = nn.Conv1d(in_channels, out_channels, 1, bias=False)
# Put together
if causal:
self.net = nn.Sequential(depthwise_conv, chomp, prelu, norm, pointwise_conv)
else:
self.net = nn.Sequential(depthwise_conv, prelu, norm, pointwise_conv)
def forward(self, x):
"""Forward.
Args:
x: [M, H, K]
Returns:
result: [M, B, K]
"""
return self.net(x)
class Chomp1d(nn.Module):
"""To ensure the output length is the same as the input."""
def __init__(self, chomp_size):
super().__init__()
self.chomp_size = chomp_size
def forward(self, x):
"""Forward.
Args:
x: [M, H, Kpad]
Returns:
[M, H, K]
"""
return x[:, :, : -self.chomp_size].contiguous()
def METHOD_NAME(nonlinear_type):
    if nonlinear_type not in ["softmax", "relu"]:
        raise ValueError("Unsupported nonlinear type")
def chose_norm(norm_type, channel_size):
"""The input of normalization will be (M, C, K), where M is batch size.
C is channel size and K is sequence length.
"""
if norm_type == "gLN":
return GlobalLayerNorm(channel_size)
elif norm_type == "cLN":
return ChannelwiseLayerNorm(channel_size)
elif norm_type == "BN":
# Given input (M, C, K), nn.BatchNorm1d(C) will accumulate statics
# along M and K, so this BN usage is right.
return nn.BatchNorm1d(channel_size)
else:
raise ValueError("Unsupported normalization type")
class ChannelwiseLayerNorm(nn.Module):
"""Channel-wise Layer Normalization (cLN)."""
def __init__(self, channel_size):
super().__init__()
self.gamma = nn.Parameter(torch.Tensor(1, channel_size, 1)) # [1, N, 1]
self.beta = nn.Parameter(torch.Tensor(1, channel_size, 1)) # [1, N, 1]
self.reset_parameters()
def reset_parameters(self):
self.gamma.data.fill_(1)
self.beta.data.zero_()
def forward(self, y):
"""Forward.
Args:
y: [M, N, K], M is batch size, N is channel size, K is length
Returns:
cLN_y: [M, N, K]
"""
mean = torch.mean(y, dim=1, keepdim=True) # [M, 1, K]
var = torch.var(y, dim=1, keepdim=True, unbiased=False) # [M, 1, K]
cLN_y = self.gamma * (y - mean) / torch.pow(var + EPS, 0.5) + self.beta
return cLN_y
class GlobalLayerNorm(nn.Module):
"""Global Layer Normalization (gLN)."""
def __init__(self, channel_size):
super().__init__()
self.gamma = nn.Parameter(torch.Tensor(1, channel_size, 1)) # [1, N, 1]
self.beta = nn.Parameter(torch.Tensor(1, channel_size, 1)) # [1, N, 1]
self.reset_parameters()
def reset_parameters(self):
self.gamma.data.fill_(1)
self.beta.data.zero_()
def forward(self, y):
"""Forward.
Args:
y: [M, N, K], M is batch size, N is channel size, K is length
Returns:
gLN_y: [M, N, K]
"""
mean = y.mean(dim=1, keepdim=True).mean(dim=2, keepdim=True) # [M, 1, 1]
var = (
(torch.pow(y - mean, 2)).mean(dim=1, keepdim=True).mean(dim=2, keepdim=True)
)
gLN_y = self.gamma * (y - mean) / torch.pow(var + EPS, 0.5) + self.beta
return gLN_y
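if __name__ == "__main__":
    # Quick shape check with made-up sizes (these are not the hyper-parameters
    # from the Conv-TasNet paper): a batch of M=2 inputs with N=16 channels
    # and K=100 frames should come out with B=8 bottleneck channels and the
    # same length.
    tcn = TemporalConvNet(N=16, B=8, H=16, P=3, X=2, R=2, norm_type="gLN")
    mixture_w = torch.randn(2, 16, 100)  # [M, N, K]
    bottleneck = tcn(mixture_w)          # [M, B, K]
    print(bottleneck.shape)              # torch.Size([2, 8, 100])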
|
3,977 |
get author
|
"""
Module that holds the base abstractions for Sematic's plug-in system.
Plug-ins are classes that inherit from the AbstractPlugin abstract base class.
Plug-ins are imported at runtime based on user's or server's settings stored in
their corresponding yaml files.
"""
# Standard Library
import abc
import enum
import logging
import sys
from importlib import import_module
from typing import Tuple, Type, final
# Sematic
from sematic.utils.exceptions import MissingPluginError
logger = logging.getLogger(__name__)
SEMATIC_PLUGIN_AUTHOR = "github.com/sematic-ai"
class PluginScope(enum.Enum):
"""
Enum of available plugin scopes.
At this time plug-ins are supported for artifact storage and authentication.
This enum is expected to be updated as more plug-in scopes are supported.
"""
# Storage plug-in scope for artifact data, future pickles, etc.
STORAGE = "STORAGE"
# Server-side authentication plug-in scope
AUTH = "AUTH"
# Plug-in scope for external resources
EXTERNAL_RESOURCE = "EXTERNAL_RESOURCE"
# Event external publishing plug-in scope
PUBLISH = "PUBLISH"
# Metrics storage
METRICS_STORAGE = "METRICS_STORAGE"
# Source code container image building plugin
BUILD = "BUILD"
class AbstractPluginSettingsVar(enum.Enum):
"""
Abstract base class for lists of settings vars
"""
pass
PluginVersion = Tuple[int, int, int]
class AbstractPlugin(abc.ABC):
"""
Abstract base class for plugins.
All plug-ins must inherit from this class.
"""
@staticmethod
@abc.abstractmethod
def METHOD_NAME() -> str:
"""
The plug-in's author.
Can be an arbitrary string containing contact info (e.g. GitHub profile,
email address, etc.)
"""
pass
@staticmethod
@abc.abstractmethod
def get_version() -> PluginVersion:
"""
Plug-in version: MAJOR.MINOR.PATCH
increment PATCH for bug fixes
increment MINOR for new functionalities
increment MAJOR for breaking API changes (0 means unstable)
"""
pass
@classmethod
def get_settings_vars(cls) -> Type[AbstractPluginSettingsVar]:
"""
Returns the Settings var enum for this plug-in.
The class must inherit from `AbstractPluginSettingsVar` and list all
available settings for this plug-in.
"""
return AbstractPluginSettingsVar
@final
@classmethod
def get_name(cls) -> str:
"""
The plug-in's name.
This is used as a key to store plug-in specific settings in settings
YAML files.
"""
return cls.__name__
@final
@classmethod
def get_path(cls) -> str:
"""
Full import path of the module.
Can be used in server-returned payloads to tell client code what plug-in
        to use (e.g. server-prescribed upload locations).
"""
return ".".join([cls.__module__, cls.__name__])
def import_plugin(plugin_import_path: str) -> Type[AbstractPlugin]:
"""
The internal API to import a plug-in based on its import path.
Parameters
----------
plugin_import_path: str
fully-qualified import path: some.module.PluginClass
Raises
------
MissingPluginError
The requested plug-in cannot be found.
"""
try:
split_import_path = plugin_import_path.split(".")
import_path, plugin_name = (
".".join(split_import_path[:-1]),
split_import_path[-1],
)
except (AttributeError, IndexError):
raise ValueError(f"Incorrect plugin import path: {plugin_import_path}")
try:
first_import = import_path not in sys.modules
# module imports are cached so this is idempotent
module = import_module(import_path)
plugin: Type[AbstractPlugin] = getattr(module, plugin_name)
if first_import:
logger.info(
"Imported plugin %s, version %s",
plugin.get_path(),
plugin.get_version(),
)
except (ImportError, AttributeError):
raise MissingPluginError(plugin_import_path)
return plugin
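# Illustrative only -- the dotted path below is a hypothetical plug-in, not one
# shipped with Sematic:
#
#     storage_cls = import_plugin("my_company.plugins.S3StoragePlugin")
#     print(storage_cls.get_name(), storage_cls.get_version())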
|
3,978 |
test asyncio yield from
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# pylint: skip-file
from __future__ import absolute_import, division, print_function
from salt.ext.tornado import gen
from salt.ext.tornado.testing import AsyncTestCase, gen_test
from salt.ext.tornado.test.util import unittest, skipBefore33, skipBefore35, exec_test
try:
from salt.ext.tornado.platform.asyncio import asyncio
except ImportError:
asyncio = None
else:
from salt.ext.tornado.platform.asyncio import AsyncIOLoop, to_asyncio_future
# This is used in dynamically-evaluated code, so silence pyflakes.
to_asyncio_future
@unittest.skipIf(asyncio is None, "asyncio module not present")
class AsyncIOLoopTest(AsyncTestCase):
def get_new_ioloop(self):
io_loop = AsyncIOLoop()
asyncio.set_event_loop(io_loop.asyncio_loop)
return io_loop
def test_asyncio_callback(self):
# Basic test that the asyncio loop is set up correctly.
asyncio.get_event_loop().call_soon(self.stop)
self.wait()
@gen_test
def test_asyncio_future(self):
# Test that we can yield an asyncio future from a tornado coroutine.
# Without 'yield from', we must wrap coroutines in ensure_future,
# which was introduced during Python 3.4, deprecating the prior "async".
if hasattr(asyncio, 'ensure_future'):
ensure_future = asyncio.ensure_future
else:
# async is a reserved word in Python 3.7
ensure_future = getattr(asyncio, "async")
x = yield ensure_future(
asyncio.get_event_loop().run_in_executor(None, lambda: 42))
self.assertEqual(x, 42)
@skipBefore33
@gen_test
def METHOD_NAME(self):
# Test that we can use asyncio coroutines with 'yield from'
# instead of asyncio.async(). This requires python 3.3 syntax.
namespace = exec_test(globals(), locals(), """
@gen.coroutine
def f():
event_loop = asyncio.get_event_loop()
x = yield from event_loop.run_in_executor(None, lambda: 42)
return x
""")
result = yield namespace['f']()
self.assertEqual(result, 42)
@skipBefore35
def test_asyncio_adapter(self):
# This test demonstrates that when using the asyncio coroutine
# runner (i.e. run_until_complete), the to_asyncio_future
# adapter is needed. No adapter is needed in the other direction,
# as demonstrated by other tests in the package.
@gen.coroutine
def tornado_coroutine():
yield gen.Task(self.io_loop.add_callback)
raise gen.Return(42)
native_coroutine_without_adapter = exec_test(globals(), locals(), """
async def native_coroutine_without_adapter():
return await tornado_coroutine()
""")["native_coroutine_without_adapter"]
native_coroutine_with_adapter = exec_test(globals(), locals(), """
async def native_coroutine_with_adapter():
return await to_asyncio_future(tornado_coroutine())
""")["native_coroutine_with_adapter"]
# Use the adapter, but two degrees from the tornado coroutine.
native_coroutine_with_adapter2 = exec_test(globals(), locals(), """
async def native_coroutine_with_adapter2():
return await to_asyncio_future(native_coroutine_without_adapter())
""")["native_coroutine_with_adapter2"]
# Tornado supports native coroutines both with and without adapters
self.assertEqual(
self.io_loop.run_sync(native_coroutine_without_adapter),
42)
self.assertEqual(
self.io_loop.run_sync(native_coroutine_with_adapter),
42)
self.assertEqual(
self.io_loop.run_sync(native_coroutine_with_adapter2),
42)
# Asyncio only supports coroutines that yield asyncio-compatible
# Futures.
with self.assertRaises(RuntimeError):
asyncio.get_event_loop().run_until_complete(
native_coroutine_without_adapter())
self.assertEqual(
asyncio.get_event_loop().run_until_complete(
native_coroutine_with_adapter()),
42)
self.assertEqual(
asyncio.get_event_loop().run_until_complete(
native_coroutine_with_adapter2()),
42)
|
3,979 |
analyze params
|
from typing import TYPE_CHECKING, Dict
from slither.core.declarations.custom_error import CustomError
from slither.core.declarations.custom_error_contract import CustomErrorContract
from slither.core.declarations.custom_error_top_level import CustomErrorTopLevel
from slither.core.variables.local_variable import LocalVariable
from slither.solc_parsing.declarations.caller_context import CallerContextExpression
from slither.solc_parsing.variables.local_variable import LocalVariableSolc
if TYPE_CHECKING:
from slither.solc_parsing.slither_compilation_unit_solc import SlitherCompilationUnitSolc
from slither.core.compilation_unit import SlitherCompilationUnit
# Part of the code was copied from the function parsing
# In the long term we should refactor these two classes to merge the duplicated code
class CustomErrorSolc(CallerContextExpression):
def __init__(
self,
custom_error: CustomError,
custom_error_data: dict,
slither_parser: "SlitherCompilationUnitSolc",
) -> None:
self._slither_parser: "SlitherCompilationUnitSolc" = slither_parser
self._custom_error = custom_error
custom_error.name = custom_error_data["name"]
self._params_was_analyzed = False
if not self._slither_parser.is_compact_ast:
custom_error_data = custom_error_data["attributes"]
self._custom_error_data = custom_error_data
def METHOD_NAME(self) -> None:
# Can be re-analyzed due to inheritance
if self._params_was_analyzed:
return
self._params_was_analyzed = True
if self._slither_parser.is_compact_ast:
params = self._custom_error_data["parameters"]
else:
children = self._custom_error_data[self.get_children("children")]
            # It used to be
            # params = children[0]
            # returns = children[1]
            # But from Solidity 0.6.3 to 0.6.10 (inclusive),
            # a comment above a function might be added to the children
child_iter = iter(
[child for child in children if child[self.get_key()] == "ParameterList"]
)
params = next(child_iter)
if params:
self._parse_params(params)
@property
def is_compact_ast(self) -> bool:
return self._slither_parser.is_compact_ast
def get_key(self) -> str:
return self._slither_parser.get_key()
def get_children(self, key: str) -> str:
if self._slither_parser.is_compact_ast:
return key
return "children"
def _parse_params(self, params: Dict) -> None:
assert params[self.get_key()] == "ParameterList"
if self._slither_parser.is_compact_ast:
params = params["parameters"]
else:
params = params[self.get_children("children")]
for param in params:
assert param[self.get_key()] == "VariableDeclaration"
local_var = self._add_param(param)
self._custom_error.add_parameters(local_var.underlying_variable)
self._custom_error.set_solidity_sig()
def _add_param(self, param: Dict) -> LocalVariableSolc:
local_var = LocalVariable()
local_var.set_offset(param["src"], self._slither_parser.compilation_unit)
local_var_parser = LocalVariableSolc(local_var, param)
if isinstance(self._custom_error, CustomErrorTopLevel):
local_var_parser.analyze(self)
else:
assert isinstance(self._custom_error, CustomErrorContract)
local_var_parser.analyze(self)
# see https://solidity.readthedocs.io/en/v0.4.24/types.html?highlight=storage%20location#data-location
if local_var.location == "default":
local_var.set_location("memory")
return local_var_parser
@property
def underlying_custom_error(self) -> CustomError:
return self._custom_error
@property
def slither_parser(self) -> "SlitherCompilationUnitSolc":
return self._slither_parser
@property
def compilation_unit(self) -> "SlitherCompilationUnit":
return self._custom_error.compilation_unit
|
3,980 |
bleed channel
|
"""
Simple bleed trail implementation to apply to "eimages" just prior to
the camera readout code.
"""
import numpy as np
__all__ = ['bleed_eimage', 'bleed_channel', 'find_channels_with_saturation']
def find_channels_with_saturation(eimage, full_well):
"""Helper function to find which channels (x values) have at least one saturated pixel.
Parameters
----------
eimage: numpy array
The "eimage" containing the image data in units of electrons per pixel.
full_well: int
The pixel full well/saturation value in electrons.
Returns
-------
set: A set of integers giving all x values with a saturated pixel.
"""
# Find channels (by x-index) with signal above full_well.
return set(np.where(eimage > full_well)[1])
def bleed_eimage(eimage, full_well, midline_stop=True):
"""
Apply the bleed charge algorithm to an eimage.
Parameters
----------
eimage: numpy array
The "eimage" containing the image data in units of electrons per
pixel. This is the image prior to electronic readout (i.e.,
conversion to ADU, addition of bias, dark current, crosstalk, etc.).
For LSST CCDs, the eimages have parallel transfer directions along
the x-axis, hence channels correspond to rows in eimages.
full_well: int
The pixel full well/saturation value in electrons.
midline_stop: bool [True]
Flag to treat the midline of the sensor as a bleed stop.
Returns
-------
numpy array: This is the input eimage object with the charge
bleeding applied.
"""
# Find channels (by x-index) with signal above full_well.
channels = find_channels_with_saturation(eimage, full_well)
# Apply bleeding to each channel.
for xpix in channels:
if midline_stop:
ymid = eimage.shape[0]//2
eimage[:ymid, xpix] = METHOD_NAME(eimage[:ymid, xpix], full_well)
eimage[ymid:, xpix] = METHOD_NAME(eimage[ymid:, xpix], full_well)
else:
eimage[:, xpix] = METHOD_NAME(eimage[:, xpix], full_well)
return eimage
def METHOD_NAME(channel, full_well):
"""
Redistribute charge along a channel for all pixels above full well.
Parameters
----------
channel: numpy.array of pixel values
1D array of pixel values in units of electrons.
full_well: int
The pixel full well/saturation value in electrons.
Returns
-------
numpy.array of pixel values: The channel of pixel data with
bleeding applied. This is a new object, i.e., the input
numpy.array is unaltered.
"""
# Find contiguous sets of pixels that lie above full well, and
# build a list of end point pairs identifying each set.
my_channel = channel.copy()
# Add 0 at start and end, so saturated points are known to be all internal.
padded = np.concatenate([[0], my_channel, [0]])
# Find places where full well condition changes (either true to false or false to true).
end_points, = np.diff(padded > full_well).nonzero()
# Pairs of these are now the first saturated pixel in a run then the
# first subsequent unsaturated pixel. Logically, they have to alternate.
# Reshape these into array of (start, end) pairs.
end_points = end_points.reshape(-1,2)
# Loop over end point pairs.
for y0, y1 in end_points:
excess_charge = sum(my_channel[y0:y1]) - (y1 - y0)*full_well
my_channel[y0:y1] = full_well
bleed_charge = BleedCharge(my_channel, excess_charge, full_well)
for dy in range(0, max(y0, len(my_channel) - y1)):
if bleed_charge(y0 - dy - 1) or bleed_charge(y1 + dy):
break
return my_channel
class BleedCharge:
"Class to manage charge redistribution along a channel."
def __init__(self, imarr, excess_charge, full_well):
"""
Parameters
----------
imarr: numpy.array
1D numpy array containing the channel of pixel data.
excess_charge: float
The remaining charge above full-well to be distributed
to the specified pixels.
full_well: int
The full well value, i.e., the maximum charge any pixel
can contain.
"""
self.imarr = imarr
self.excess_charge = excess_charge
self.full_well = full_well
def __call__(self, ypix):
"""
Parameters
----------
ypix: int
Index of the pixel to which charge will be redistributed.
If it is already at full_well, do nothing.
Returns
-------
bool: True if all excess charge has been redistributed.
"""
if 0 <= ypix < len(self.imarr):
# The normal case: Add excess charge up to the full well and reduce this
# amount from the total excess charge to be redistributed.
bled_charge = min(self.full_well - self.imarr[ypix],
self.excess_charge)
self.imarr[ypix] += bled_charge
self.excess_charge -= bled_charge
elif ypix < 0:
# Off the bottom end, the charge escapes into the electronics.
# We can reduce the excess charge by one full-well-worth.
# These electrons are not added to any pixel though.
self.excess_charge -= self.full_well
else:
# Electrons do not escape off the top end, so excess charge is not reduced
# when trying to bleed past the end of the channel.
pass
return self.excess_charge == 0
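if __name__ == "__main__":
    # Tiny synthetic example (the values are made up, not a real LSST full
    # well): two saturated pixels above a full well of 10 electrons bleed
    # their excess into neighbouring pixels; total charge is conserved here
    # because nothing reaches the ends of the channel.
    channel = np.array([0., 0., 25., 25., 0., 0.])
    bled = METHOD_NAME(channel, full_well=10)
    print(bled)        # [10. 10. 10. 10. 10.  0.]
    print(bled.sum())  # 50.0 == channel.sum()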
|
3,981 |
run with profile
|
from decimal import Decimal
import os
import random
from sqlalchemy import __version__
from sqlalchemy import Column
from sqlalchemy import create_engine
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import Numeric
from sqlalchemy import String
from sqlalchemy.orm import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy.orm import Session
Base = declarative_base()
class Employee(Base):
__tablename__ = "employee"
id = Column(Integer, primary_key=True)
name = Column(String(100), nullable=False)
type = Column(String(50), nullable=False)
__mapper_args__ = {"polymorphic_on": type}
class Boss(Employee):
__tablename__ = "boss"
id = Column(Integer, ForeignKey("employee.id"), primary_key=True)
golf_average = Column(Numeric)
__mapper_args__ = {"polymorphic_identity": "boss"}
class Grunt(Employee):
__tablename__ = "grunt"
id = Column(Integer, ForeignKey("employee.id"), primary_key=True)
savings = Column(Numeric)
employer_id = Column(Integer, ForeignKey("boss.id"))
employer = relationship(
"Boss", backref="employees", primaryjoin=Boss.id == employer_id
)
__mapper_args__ = {"polymorphic_identity": "grunt"}
if os.path.exists("orm2010.db"):
os.remove("orm2010.db")
# use a file based database so that cursor.execute() has some
# palpable overhead.
engine = create_engine("sqlite:///orm2010.db")
Base.metadata.create_all(engine)
sess = Session(engine)
def runit_persist(status, factor=1, query_runs=5):
num_bosses = 100 * factor
num_grunts = num_bosses * 100
bosses = [
Boss(name="Boss %d" % i, golf_average=Decimal(random.randint(40, 150)))
for i in range(num_bosses)
]
sess.add_all(bosses)
status("Added %d boss objects" % num_bosses)
grunts = [
Grunt(
name="Grunt %d" % i,
savings=Decimal(random.randint(5000000, 15000000) / 100),
)
for i in range(num_grunts)
]
status("Added %d grunt objects" % num_grunts)
while grunts:
# this doesn't associate grunts with bosses evenly,
# just associates lots of them with a relatively small
# handful of bosses
batch_size = 100
batch_num = (num_grunts - len(grunts)) / batch_size
boss = sess.query(Boss).filter_by(name="Boss %d" % batch_num).first()
for grunt in grunts[0:batch_size]:
grunt.employer = boss
grunts = grunts[batch_size:]
sess.commit()
status("Associated grunts w/ bosses and committed")
def runit_query_runs(status, factor=1, query_runs=5):
# do some heavier reading
for i in range(query_runs):
status("Heavy query run #%d" % (i + 1))
report = []
# load all the Grunts, print a report with their name, stats,
# and their bosses' stats.
for grunt in sess.query(Grunt):
report.append(
(
grunt.name,
grunt.savings,
grunt.employer.name,
grunt.employer.golf_average,
)
)
sess.close() # close out the session
def METHOD_NAME(runsnake=False, dump=False):
import cProfile
import pstats
filename = "orm2010.profile"
if os.path.exists("orm2010.profile"):
os.remove("orm2010.profile")
def status(msg):
print(msg)
cProfile.runctx(
# "runit_persist(status)",
"runit_persist(status); runit_query_runs(status)",
globals(),
locals(),
filename,
)
stats = pstats.Stats(filename)
counts_by_methname = {key[2]: stats.stats[key][0] for key in stats.stats}
print("SQLA Version: %s" % __version__)
print("Total calls %d" % stats.total_calls)
print("Total cpu seconds: %.2f" % stats.total_tt)
print(
"Total execute calls: %d"
% counts_by_methname[
"<method 'execute' of 'sqlite3.Cursor' " "objects>"
]
)
print(
"Total executemany calls: %d"
% counts_by_methname.get(
"<method 'executemany' of 'sqlite3.Cursor' " "objects>", 0
)
)
if dump:
# stats.sort_stats("nfl")
stats.sort_stats("cumtime", "calls")
stats.print_stats()
# stats.print_callers()
if runsnake:
os.system("runsnake %s" % filename)
def run_with_time(factor):
import time
now = time.time()
def status(msg):
print("%d - %s" % (time.time() - now, msg))
runit_persist(status, factor)
print("Total time: %d" % (time.time() - now))
runit_query_runs(status, factor)
print("Total time: %d" % (time.time() - now))
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--profile",
action="store_true",
help="run shorter test suite w/ cprofilng",
)
parser.add_argument(
"--dump",
action="store_true",
help="dump full call profile (implies --profile)",
)
parser.add_argument(
"--runsnake",
action="store_true",
help="invoke runsnakerun (implies --profile)",
)
parser.add_argument(
"--factor",
type=int,
default=10,
help="scale factor, a multiple of how many records to work with. "
"defaults to 10",
)
args = parser.parse_args()
args.profile = args.profile or args.dump or args.runsnake
if args.profile:
METHOD_NAME(runsnake=args.runsnake, dump=args.dump)
else:
run_with_time(args.factor)
|
3,982 |
worker ready connect
|
# This file is a part of IntelOwl https://github.com/intelowlproject/IntelOwl
# See the file 'LICENSE' for copying permission.
from __future__ import absolute_import, unicode_literals
import datetime
import logging
import typing
from celery import shared_task, signals
from celery.worker.consumer import Consumer
from celery.worker.control import control_command
from django.conf import settings
from django.db.models import Q
from django.utils.module_loading import import_string
from django.utils.timezone import now
from django_celery_beat.models import PeriodicTask
from api_app.choices import Status
from intel_owl import secrets
from intel_owl.celery import DEFAULT_QUEUE, app, get_queue_name
logger = logging.getLogger(__name__)
@control_command(
args=[("plugin_path", str)],
)
def update_plugin(state, plugin_path):
plugin = import_string(plugin_path)
plugin.update()
@shared_task(soft_time_limit=300)
def execute_ingestor(config_pk: str):
from api_app.ingestors_manager.classes import Ingestor
from api_app.ingestors_manager.models import IngestorConfig
config: IngestorConfig = IngestorConfig.objects.get(pk=config_pk)
if config.disabled:
logger.info(f"Not executing ingestor {config.name} because disabled")
else:
class_ = config.python_class
obj: Ingestor = class_(config=config, runtime_configuration={})
obj.start()
logger.info(f"Executing ingestor {config.name}")
@shared_task(soft_time_limit=10000)
def remove_old_jobs():
"""
    This removes old jobs to avoid filling the database.
    The retention period can be modified.
"""
from api_app.models import Job
logger.info("started remove_old_jobs")
retention_days = int(secrets.get_secret("OLD_JOBS_RETENTION_DAYS", 14))
date_to_check = now() - datetime.timedelta(days=retention_days)
old_jobs = Job.objects.filter(finished_analysis_time__lt=date_to_check)
num_jobs_to_delete = old_jobs.count()
logger.info(f"found {num_jobs_to_delete} old jobs to delete")
old_jobs.delete()
logger.info("finished remove_old_jobs")
return num_jobs_to_delete
@shared_task(soft_time_limit=120)
def check_stuck_analysis(minutes_ago: int = 25, check_pending: bool = False):
"""
    In case the analysis is stuck for whatever reason,
    we should force the status to "failed"
    to avoid special exceptions.
    We can just run this function as a cron job for cleanup.
"""
from api_app.models import Job
logger.info("started check_stuck_analysis")
query = Q(status=Job.Status.RUNNING.value)
if check_pending:
query |= Q(status=Job.Status.PENDING.value)
difference = now() - datetime.timedelta(minutes=minutes_ago)
running_jobs = Job.objects.filter(query).filter(
received_request_time__lte=difference
)
logger.info(f"checking if {running_jobs.count()} jobs are stuck")
jobs_id_stuck = []
for running_job in running_jobs:
        logger.error(
            f"found stuck analysis, job_id:{running_job.id}. "
            f"Setting the job status to {Job.Status.FAILED.value}"
)
jobs_id_stuck.append(running_job.id)
running_job.status = Job.Status.FAILED.value
running_job.finished_analysis_time = now()
running_job.process_time = running_job.calculate_process_time()
running_job.save(
update_fields=["status", "finished_analysis_time", "process_time"]
)
logger.info("finished check_stuck_analysis")
return jobs_id_stuck
@shared_task(soft_time_limit=150)
def update(config_pk: str):
from api_app.analyzers_manager.models import AnalyzerConfig
from intel_owl.celery import broadcast
analyzer_config = AnalyzerConfig.objects.get(pk=config_pk)
class_ = analyzer_config.python_class
if hasattr(class_, "_update") and callable(class_._update): # noqa
if settings.NFS:
update_plugin(None, analyzer_config.python_complete_path)
else:
broadcast(
update_plugin,
queue=analyzer_config.queue,
arguments={"plugin_path": analyzer_config.python_complete_path},
)
return True
logger.error(f"Unable to update {analyzer_config.python_complete_path}")
return False
@shared_task(soft_time_limit=100)
def update_notifications_with_releases():
from django.core import management
management.call_command(
"changelog_notification",
".github/CHANGELOG.md",
"INTELOWL",
"--number-of-releases",
"1",
)
@app.task(name="job_set_final_status", soft_time_limit=20)
def job_set_final_status(job_id: int):
from api_app.models import Job
job = Job.objects.get(pk=job_id)
# execute some callbacks
job.set_final_status()
@app.task(name="job_set_pipeline_status", soft_time_limit=20)
def job_set_pipeline_status(job_id: int, status: str):
from api_app.models import Job
job = Job.objects.get(pk=job_id)
if status not in Status.running_statuses() + Status.partial_statuses():
logger.error(f"Unable to set job status to {status}")
else:
job.status = status
job.save(update_fields=["status"])
@app.task(name="job_pipeline", soft_time_limit=100)
def job_pipeline(
job_id: int,
):
from api_app.models import Job
job = Job.objects.get(pk=job_id)
job.execute()
@app.task(name="run_plugin", soft_time_limit=500)
def run_plugin(
job_id: int,
plugin_path: str,
plugin_config_pk: str,
runtime_configuration: dict,
task_id: int,
):
from api_app.classes import Plugin
plugin_class: typing.Type[Plugin] = import_string(plugin_path)
config = plugin_class.config_model.objects.get(pk=plugin_config_pk)
plugin = plugin_class(
config=config,
job_id=job_id,
runtime_configuration=runtime_configuration,
task_id=task_id,
)
plugin.start()
# startup
@signals.worker_ready.connect
def METHOD_NAME(*args, sender: Consumer = None, **kwargs):
logger.info(f"worker {sender.hostname} ready")
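    # Worker hostnames are expected to look like "<worker name>_<queue name>",
    # so everything after the first underscore is the queue this worker serves.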
queue = sender.hostname.split("_", maxsplit=1)[1]
logger.info(f"Updating repositories inside {queue}")
if settings.REPO_DOWNLOADER_ENABLED and queue == get_queue_name(DEFAULT_QUEUE):
for task in PeriodicTask.objects.filter(
enabled=True, queue=queue, task="intel_owl.tasks.update"
):
config_pk = task.kwargs["config_pk"]
logger.info(f"Updating {config_pk}")
update(config_pk)
# set logger
@signals.setup_logging.connect
def config_loggers(*args, **kwargs):
from logging.config import dictConfig
dictConfig(settings.LOGGING)
|
3,983 |
test api store addons addon version
|
"""Test Store API."""
import asyncio
from unittest.mock import MagicMock, PropertyMock, patch
from aiohttp.test_utils import TestClient
import pytest
from supervisor.addons.addon import Addon
from supervisor.arch import CpuArch
from supervisor.const import AddonState
from supervisor.coresys import CoreSys
from supervisor.docker.addon import DockerAddon
from supervisor.docker.const import ContainerState
from supervisor.docker.interface import DockerInterface
from supervisor.docker.monitor import DockerContainerStateEvent
from supervisor.store.addon import AddonStore
from supervisor.store.repository import Repository
from tests.common import load_json_fixture
from tests.const import TEST_ADDON_SLUG
REPO_URL = "https://github.com/awesome-developer/awesome-repo"
@pytest.mark.asyncio
async def test_api_store(
api_client: TestClient,
store_addon: AddonStore,
repository: Repository,
caplog: pytest.LogCaptureFixture,
):
"""Test /store REST API."""
resp = await api_client.get("/store")
result = await resp.json()
assert result["data"]["addons"][-1]["slug"] == store_addon.slug
assert result["data"]["repositories"][-1]["slug"] == repository.slug
assert (
f"Add-on {store_addon.slug} not supported on this platform" not in caplog.text
)
@pytest.mark.asyncio
async def test_api_store_addons(api_client: TestClient, store_addon: AddonStore):
"""Test /store/addons REST API."""
print("test")
resp = await api_client.get("/store/addons")
result = await resp.json()
print(result)
assert result["data"][-1]["slug"] == store_addon.slug
@pytest.mark.asyncio
async def test_api_store_addons_addon(api_client: TestClient, store_addon: AddonStore):
"""Test /store/addons/{addon} REST API."""
resp = await api_client.get(f"/store/addons/{store_addon.slug}")
result = await resp.json()
assert result["data"]["slug"] == store_addon.slug
@pytest.mark.asyncio
async def METHOD_NAME(
api_client: TestClient, store_addon: AddonStore
):
"""Test /store/addons/{addon}/{version} REST API."""
resp = await api_client.get(f"/store/addons/{store_addon.slug}/1.0.0")
result = await resp.json()
assert result["data"]["slug"] == store_addon.slug
@pytest.mark.asyncio
async def test_api_store_repositories(api_client: TestClient, repository: Repository):
"""Test /store/repositories REST API."""
resp = await api_client.get("/store/repositories")
result = await resp.json()
assert result["data"][-1]["slug"] == repository.slug
@pytest.mark.asyncio
async def test_api_store_repositories_repository(
api_client: TestClient, repository: Repository
):
"""Test /store/repositories/{repository} REST API."""
resp = await api_client.get(f"/store/repositories/{repository.slug}")
result = await resp.json()
assert result["data"]["slug"] == repository.slug
async def test_api_store_add_repository(api_client: TestClient, coresys: CoreSys):
"""Test POST /store/repositories REST API."""
with patch("supervisor.store.repository.Repository.load", return_value=None), patch(
"supervisor.store.repository.Repository.validate", return_value=True
):
response = await api_client.post(
"/store/repositories", json={"repository": REPO_URL}
)
assert response.status == 200
assert REPO_URL in coresys.store.repository_urls
assert isinstance(coresys.store.get_from_url(REPO_URL), Repository)
async def test_api_store_remove_repository(
api_client: TestClient, coresys: CoreSys, repository: Repository
):
"""Test DELETE /store/repositories/{repository} REST API."""
response = await api_client.delete(f"/store/repositories/{repository.slug}")
assert response.status == 200
assert repository.source not in coresys.store.repository_urls
assert repository.slug not in coresys.store.repositories
async def test_api_store_update_healthcheck(
api_client: TestClient,
coresys: CoreSys,
install_addon_ssh: Addon,
container: MagicMock,
tmp_supervisor_data,
path_extern,
):
"""Test updating an addon with healthcheck waits for health status."""
coresys.hardware.disk.get_disk_free_space = lambda x: 5000
container.status = "running"
container.attrs["Config"] = {"Healthcheck": "exists"}
install_addon_ssh.path_data.mkdir()
await install_addon_ssh.load()
with patch(
"supervisor.store.data.read_json_or_yaml_file",
return_value=load_json_fixture("addon-config-add-image.json"),
):
await coresys.store.data.update()
assert install_addon_ssh.need_update is True
state_changes: list[AddonState] = []
async def container_events():
nonlocal state_changes
await asyncio.sleep(0.01)
await install_addon_ssh.container_state_changed(
DockerContainerStateEvent(
name=f"addon_{TEST_ADDON_SLUG}",
state=ContainerState.STOPPED,
id="abc123",
time=1,
)
)
state_changes.append(install_addon_ssh.state)
await install_addon_ssh.container_state_changed(
DockerContainerStateEvent(
name=f"addon_{TEST_ADDON_SLUG}",
state=ContainerState.RUNNING,
id="abc123",
time=1,
)
)
state_changes.append(install_addon_ssh.state)
await install_addon_ssh.container_state_changed(
DockerContainerStateEvent(
name=f"addon_{TEST_ADDON_SLUG}",
state=ContainerState.HEALTHY,
id="abc123",
time=1,
)
)
async def container_events_task(*args, **kwargs):
asyncio.create_task(container_events())
with patch.object(DockerAddon, "run", new=container_events_task), patch.object(
DockerInterface, "install"
), patch.object(DockerAddon, "is_running", return_value=False), patch.object(
CpuArch, "supported", new=PropertyMock(return_value=["amd64"])
):
resp = await api_client.post(f"/store/addons/{TEST_ADDON_SLUG}/update")
assert state_changes == [AddonState.STOPPED, AddonState.STARTUP]
assert install_addon_ssh.state == AddonState.STARTED
assert resp.status == 200
|
3,984 |
get kernel
|
"""
pysteps.nowcasts.lagrangian_probability
=======================================
Implementation of the local Lagrangian probability nowcasting technique
described in :cite:`GZ2004`.
.. autosummary::
:toctree: ../generated/
forecast
"""
import numpy as np
from scipy.signal import convolve
from pysteps.nowcasts import extrapolation
def forecast(
precip,
velocity,
timesteps,
threshold,
extrap_method="semilagrangian",
extrap_kwargs=None,
slope=5,
):
"""
    Generate a probability nowcast by a local Lagrangian approach. The output is
the probability of exceeding a given intensity threshold, i.e.
P(precip>=threshold).
Parameters
----------
precip: array_like
Two-dimensional array of shape (m,n) containing the input precipitation
field.
velocity: array_like
Array of shape (2,m,n) containing the x- and y-components of the
advection field. The velocities are assumed to represent one time step
between the inputs.
timesteps: int or list of floats
Number of time steps to forecast or a sorted list of time steps for which
the forecasts are computed (relative to the input time step).
The number of time steps has to be a positive integer.
The elements of the list are required to be in ascending order.
threshold: float
Intensity threshold for which the exceedance probabilities are computed.
slope: float, optional
The slope of the relationship between optimum scale and lead time in
pixels / timestep. Germann and Zawadzki (2004) found the optimal slope
to be equal to 1 km / minute.
Returns
-------
out: ndarray
Three-dimensional array of shape (num_timesteps, m, n) containing a time
        series of nowcast exceedance probabilities. The time series starts from
t0 + timestep, where timestep is taken from the advection field velocity.
References
----------
Germann, U. and I. Zawadzki, 2004:
Scale Dependence of the Predictability of Precipitation from Continental
Radar Images. Part II: Probability Forecasts.
Journal of Applied Meteorology, 43(1), 74-89.
"""
# Compute deterministic extrapolation forecast
if isinstance(timesteps, int) and timesteps > 0:
timesteps = np.arange(1, timesteps + 1)
elif not isinstance(timesteps, list):
raise ValueError(f"invalid value for argument 'timesteps': {timesteps}")
precip_forecast = extrapolation.forecast(
precip,
velocity,
timesteps,
extrap_method,
extrap_kwargs,
)
# Ignore missing values
nanmask = np.isnan(precip_forecast)
precip_forecast[nanmask] = threshold - 1
valid_pixels = (~nanmask).astype(float)
# Compute exceedance probabilities using a neighborhood approach
precip_forecast = (precip_forecast >= threshold).astype(float)
for i, timestep in enumerate(timesteps):
scale = int(timestep * slope)
if scale == 0:
continue
kernel = METHOD_NAME(scale)
kernel_sum = convolve(
valid_pixels[i, ...],
kernel,
mode="same",
)
precip_forecast[i, ...] = convolve(
precip_forecast[i, ...],
kernel,
mode="same",
)
precip_forecast[i, ...] /= kernel_sum
precip_forecast = np.clip(precip_forecast, 0, 1)
precip_forecast[nanmask] = np.nan
return precip_forecast
def METHOD_NAME(size):
"""
Generate a circular kernel.
Parameters
----------
size : int
Size of the circular kernel (its diameter). For size < 5, the kernel is
a square instead of a circle.
Returns
-------
2-D array with kernel values
"""
middle = max((int(size / 2), 1))
if size < 5:
return np.ones((size, size), dtype=np.float32)
else:
xx, yy = np.mgrid[:size, :size]
circle = (xx - middle) ** 2 + (yy - middle) ** 2
return np.asarray(circle <= (middle**2), dtype=np.float32)
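# Illustrative usage sketch (not part of pysteps): the field, velocity and
# threshold values below are made up purely to show the call signature of
# forecast() and the shape of its output.
if __name__ == "__main__":
    rng = np.random.default_rng(42)
    toy_precip = rng.random((100, 100))  # synthetic rain rates in [0, 1)
    # Uniform advection of one pixel per timestep in the x direction.
    toy_velocity = np.stack([np.ones_like(toy_precip), np.zeros_like(toy_precip)])
    probs = forecast(toy_precip, toy_velocity, timesteps=3, threshold=0.5)
    print(probs.shape)  # (3, 100, 100), exceedance probabilities in [0, 1]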
|
3,985 |
test settings visible when signed in
|
import json
from datetime import datetime, timedelta
from unittest import mock
from rest_framework.test import APIClient
from kitsune.questions.tests import (
AnswerFactory,
AnswerVoteFactory,
QuestionFactory,
SolutionAnswerFactory,
)
from kitsune.sumo.templatetags.jinja_helpers import urlparams
from kitsune.sumo.tests import TestCase
from kitsune.sumo.urlresolvers import reverse
from kitsune.users import api
from kitsune.users.tests import ProfileFactory, UserFactory
class UsernamesTests(TestCase):
"""Test the usernames API method."""
url = reverse("users.api.usernames", locale="en-US")
def setUp(self):
self.u = UserFactory(username="testUser")
self.client.login(username=self.u.username, password="testpass")
def tearDown(self):
self.client.logout()
def test_no_query(self):
res = self.client.get(self.url)
self.assertEqual(200, res.status_code)
self.assertEqual(b"[]", res.content)
def test_query_old(self):
res = self.client.get(urlparams(self.url, term="a"))
self.assertEqual(200, res.status_code)
data = json.loads(res.content)
self.assertEqual(0, len(data))
def test_query_current(self):
res = self.client.get(urlparams(self.url, term=self.u.username[0]))
self.assertEqual(200, res.status_code)
data = json.loads(res.content)
self.assertEqual(1, len(data))
def test_post(self):
res = self.client.post(self.url)
self.assertEqual(405, res.status_code)
def test_logged_out(self):
self.client.logout()
res = self.client.get(self.url, HTTP_X_REQUESTED_WITH="XMLHttpRequest")
self.assertEqual(403, res.status_code)
class TestUserSerializer(TestCase):
def setUp(self):
self.request = mock.Mock()
self.data = {
"username": "bobb",
"display_name": "Bobbert the Seventh",
"password": "testpass",
"email": "[email protected]",
}
def test_helpfulness(self):
u = UserFactory()
p = u.profile
a1 = AnswerFactory(creator=u)
a2 = AnswerFactory(creator=u)
AnswerVoteFactory(answer=a1, helpful=True)
AnswerVoteFactory(answer=a2, helpful=True)
AnswerVoteFactory(answer=a2, helpful=True)
# Some red herrings.
AnswerVoteFactory(creator=u)
AnswerVoteFactory(answer=a1, helpful=False)
serializer = api.ProfileSerializer(instance=p)
self.assertEqual(serializer.data["helpfulness"], 3)
def test_counts(self):
u = UserFactory()
p = u.profile
q = QuestionFactory(creator=u)
AnswerFactory(creator=u)
q.solution = AnswerFactory(question=q, creator=u)
q.save()
serializer = api.ProfileSerializer(instance=p)
self.assertEqual(serializer.data["question_count"], 1)
self.assertEqual(serializer.data["answer_count"], 2)
self.assertEqual(serializer.data["solution_count"], 1)
def test_last_answer_date(self):
p = ProfileFactory()
u = p.user
AnswerFactory(creator=u)
serializer = api.ProfileSerializer(instance=p)
self.assertEqual(serializer.data["last_answer_date"], u.answers.last().created)
class TestUserView(TestCase):
def setUp(self):
self.client = APIClient()
def test_usernames_with_periods(self):
u = UserFactory(username="something.something")
url = reverse("user-detail", args=[u.username])
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.data["username"], u.username)
def test_cant_delete(self):
p = ProfileFactory()
self.client.force_authenticate(user=p.user)
url = reverse("user-detail", args=[p.user.username])
res = self.client.delete(url)
self.assertEqual(res.status_code, 405)
def test_weekly_solutions(self):
eight_days_ago = datetime.now() - timedelta(days=8)
        # The first answer is a solution, but it is too old.
        # The second answer is not a solution.
SolutionAnswerFactory(created=eight_days_ago)
AnswerFactory()
res = self.client.get(reverse("user-weekly-solutions"))
self.assertEqual(res.status_code, 200)
self.assertEqual(len(res.data), 0)
        # Check that the data about the contributors is shown correctly
user_info_list = [] # Info list with username and their number of solutions
top_answer_number = 15
for i in range(12):
user = UserFactory()
SolutionAnswerFactory.create_batch(top_answer_number, creator=user)
user_info_list.append((user.username, top_answer_number))
top_answer_number -= 1
res = self.client.get(reverse("user-weekly-solutions"))
self.assertEqual(res.status_code, 200)
# Check only 10 users information is present there
self.assertEqual(len(res.data), 10)
# Create a list of the data with only the ``username`` and ``weekly_solutions``
data_list = [(data["username"], data["weekly_solutions"]) for data in res.data]
# Check only top 10 contributor information is in the API
top_ten = user_info_list[:10]
self.assertEqual(sorted(top_ten), sorted(data_list))
def test_email_visible_when_signed_in(self):
p = ProfileFactory()
url = reverse("user-detail", args=[p.user.username])
self.client.force_authenticate(user=p.user)
res = self.client.get(url)
self.assertEqual(res.data["email"], p.user.email)
def test_email_not_visible_when_signed_out(self):
p = ProfileFactory()
url = reverse("user-detail", args=[p.user.username])
res = self.client.get(url)
assert "email" not in res.data
def METHOD_NAME(self):
p = ProfileFactory()
p.settings.create(name="foo", value="bar")
url = reverse("user-detail", args=[p.user.username])
self.client.force_authenticate(user=p.user)
res = self.client.get(url)
self.assertEqual(res.data["settings"], [{"name": "foo", "value": "bar"}])
def test_settings_not_visible_when_signed_out(self):
p = ProfileFactory()
p.settings.create(name="foo", value="bar")
url = reverse("user-detail", args=[p.user.username])
res = self.client.get(url)
assert "settings" not in res.data
def test_is_active(self):
p = ProfileFactory()
url = reverse("user-detail", args=[p.user.username])
res = self.client.get(url)
assert "is_active" in res.data
|
3,986 |
reshapefunc
|
"""Defines the GLPluginInterface class, which is a unified base class for
plugins to the klampt.vis module.
"""
from typing import Callable,List,Tuple
from ..model.typing import Vector3
class GLPluginInterface:
"""Users can add their own hooks into the visualizer by overloading this
class's methods. Each method should return True if the user event was
processed. A return value of True stops cascading
events to a parent interface.
Attributes:
window (QtGLWindow or GLUTWindow): the window to which this plugin
is attached. May be None if not attached to an OpenGL window.
view (GLViewport): the viewport that is currently being used for this
plugin.
actions (list): internally used.
"""
def __init__(self):
self.window = None
self.view = None
self.actions = []
def initialize(self) -> bool:
"""Called by backend after the GL context is created but before the event loop starts."""
return True
def displayfunc(self) -> bool:
return False
def display(self) -> bool:
return False
def display_screen(self) -> bool:
return False
def METHOD_NAME(self,w,h) -> bool:
return False
def keyboardfunc(self,c,x,y) -> bool:
return False
def keyboardupfunc(self,c,x,y) -> bool:
return False
def mousefunc(self,button,state,x,y) -> bool:
return False
def motionfunc(self,x,y,dx,dy) -> bool:
return False
def idle(self) -> bool:
return True
def eventfunc(self,type,args="") -> bool:
"""Generic hook for other events, e.g., button presses, from the GUI"""
return False
def closefunc(self) -> bool:
return False
def add_action(self,callback : Callable, short_name : str, key : str,description : str=None) -> None:
"""Defines a new generic GUI action. The action will be available in
a menu in Qt or as keyboard commands in GLUT."""
if not callable(callback):
raise ValueError("Invalid callback given to add_action(callback,short_name,key,description=None)")
self.actions.append((callback,short_name,key,description))
#functions to request operations of the backend
def reshape(self,w,h) -> None:
"""Asks to resize the GL window"""
if self.window:
return self.window.reshape(w,h)
def idlesleep(self,seconds) -> None:
"""Asks to sleep the idle function for seconds seconds."""
if self.window:
self.window.idlesleep(seconds)
def modifiers(self) -> List[str]:
"""Retrieves a list of currently pressed keyboard modifiers.
Values can be any combination of 'ctrl', 'shift', 'alt'.
"""
return self.window.modifiers()
def refresh(self) -> None:
"""Asks for a redraw"""
if self.window:
self.window.refresh()
def draw_text(self, point, text : str, size=12, color=None) -> None:
"""Draws text of the given size and color at the point (x,y) or
(x,y,z). The former method is usually called during display_screen.
"""
if self.window:
self.window.draw_text(point,text,size,color)
#3D viewport accessors -- not supported directly through the backend
def click_ray(self,x,y) -> Tuple[Vector3,Vector3]:
"""Returns the world-space ray associated with the camera click at
pixel coordinates x,y."""
if self.view is None:
raise RuntimeError("Can't get click_ray for a GLPluginInterface that's not bound to a GLProgram")
return self.view.click_ray(x,y)
def viewport(self) -> 'Viewport':
"""Returns the :class:`~klampt.robotsim.Viewport` instance associated
with the current GL view. Used for interfacing with C++ widgets.
"""
if self.view is None:
raise RuntimeError("Can't get viewport for a GLPluginInterface that's not bound to a GLProgram")
return self.view.to_viewport()
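# Minimal illustration (not part of the klampt API): a plugin subclass that
# overrides two hooks and registers one action. The action label, key binding
# and print output are made-up examples.
class _ExampleEchoPlugin(GLPluginInterface):
    def initialize(self) -> bool:
        # Actions registered here appear as Qt menu items or GLUT key commands.
        self.add_action(self.refresh, "Redraw", "r", "Ask the backend for a redraw")
        return True
    def keyboardfunc(self, c, x, y) -> bool:
        # Returning True stops the event from cascading to a parent interface.
        print("key pressed:", c, "at", x, y)
        return True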
|
3,987 |
test imgw hydrology api monthly
|
import datetime as dt
from zoneinfo import ZoneInfo
import polars as pl
import pytest
from polars.testing import assert_frame_equal
from wetterdienst.provider.imgw.hydrology.api import ImgwHydrologyRequest, ImgwHydrologyResolution
@pytest.fixture
def df_expected_station():
return pl.DataFrame(
{
"station_id": "150190130",
"from_date": None,
"to_date": None,
"height": None,
"latitude": 50.350278,
"longitude": 19.185556,
"name": "£AGISZA",
"state": None,
},
schema={
"station_id": pl.Utf8,
"from_date": pl.Datetime(time_zone="UTC"),
"to_date": pl.Datetime(time_zone="UTC"),
"height": pl.Float64,
"latitude": pl.Float64,
"longitude": pl.Float64,
"name": pl.Utf8,
"state": pl.Utf8,
},
)
def test_imgw_hydrology_api_daily(df_expected_station):
request = ImgwHydrologyRequest(
parameter="hydrology",
resolution=ImgwHydrologyResolution.DAILY,
start_date="2010-08-01",
).filter_by_station_id("150190130")
assert_frame_equal(request.df, df_expected_station)
values = request.values.all()
df_expected_values = pl.DataFrame(
[
{
"station_id": "150190130",
"dataset": "hydrology",
"parameter": "stage",
"date": dt.datetime(2010, 8, 1, tzinfo=ZoneInfo("UTC")),
"value": 1.64,
"quality": None,
},
{
"station_id": "150190130",
"dataset": "hydrology",
"parameter": "discharge",
"date": dt.datetime(2010, 8, 1, tzinfo=ZoneInfo("UTC")),
"value": 3.62,
"quality": None,
},
{
"station_id": "150190130",
"dataset": "hydrology",
"parameter": "temperature_water",
"date": dt.datetime(2010, 8, 1, tzinfo=ZoneInfo("UTC")),
"value": None,
"quality": None,
},
],
schema={
"station_id": pl.Utf8,
"dataset": pl.Utf8,
"parameter": pl.Utf8,
"date": pl.Datetime(time_zone="UTC"),
"value": pl.Float64,
"quality": pl.Float64,
},
)
assert_frame_equal(values.df, df_expected_values)
def METHOD_NAME(df_expected_station):
request = ImgwHydrologyRequest(
parameter="hydrology",
resolution=ImgwHydrologyResolution.MONTHLY,
start_date="2010-06-01",
).filter_by_station_id("150190130")
assert_frame_equal(request.df, df_expected_station)
values = request.values.all()
df_expected_values = pl.DataFrame(
[
{
"station_id": "150190130",
"dataset": "hydrology",
"parameter": "stage_min",
"date": dt.datetime(2010, 6, 1, tzinfo=ZoneInfo("UTC")),
"value": 1.49,
"quality": None,
},
{
"station_id": "150190130",
"dataset": "hydrology",
"parameter": "stage_mean",
"date": dt.datetime(2010, 6, 1, tzinfo=ZoneInfo("UTC")),
"value": 1.99,
"quality": None,
},
{
"station_id": "150190130",
"dataset": "hydrology",
"parameter": "stage_max",
"date": dt.datetime(2010, 6, 1, tzinfo=ZoneInfo("UTC")),
"value": 2.64,
"quality": None,
},
{
"station_id": "150190130",
"dataset": "hydrology",
"parameter": "discharge_min",
"date": dt.datetime(2010, 6, 1, tzinfo=ZoneInfo("UTC")),
"value": 2.75,
"quality": None,
},
{
"station_id": "150190130",
"dataset": "hydrology",
"parameter": "discharge_mean",
"date": dt.datetime(2010, 6, 1, tzinfo=ZoneInfo("UTC")),
"value": 8.36,
"quality": None,
},
{
"station_id": "150190130",
"dataset": "hydrology",
"parameter": "discharge_max",
"date": dt.datetime(2010, 6, 1, tzinfo=ZoneInfo("UTC")),
"value": 18.3,
"quality": None,
},
{
"station_id": "150190130",
"dataset": "hydrology",
"parameter": "temperature_water_min",
"date": dt.datetime(2010, 6, 1, tzinfo=ZoneInfo("UTC")),
"value": None,
"quality": None,
},
{
"station_id": "150190130",
"dataset": "hydrology",
"parameter": "temperature_water_mean",
"date": dt.datetime(2010, 6, 1, tzinfo=ZoneInfo("UTC")),
"value": None,
"quality": None,
},
{
"station_id": "150190130",
"dataset": "hydrology",
"parameter": "temperature_water_max",
"date": dt.datetime(2010, 6, 1, tzinfo=ZoneInfo("UTC")),
"value": None,
"quality": None,
},
],
schema={
"station_id": pl.Utf8,
"dataset": pl.Utf8,
"parameter": pl.Utf8,
"date": pl.Datetime(time_zone="UTC"),
"value": pl.Float64,
"quality": pl.Float64,
},
)
assert_frame_equal(values.df, df_expected_values)
|
3,988 |
libcurve extend
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
from curvefs_python import curvefs
from config import config
from logger.logger import *
cbdClient = curvefs.CBDClient()
class LibCurve:
def __init__(self):
        rc = cbdClient.Init(config.client_conf)
        if rc != 0:
            print("init client fail! rc=%s" % rc)
            logger.debug("init client fail! rc=%s" % rc)
            raise AssertionError
        logger.info("init success.")
def libcurve_create(self, file_path, user_name, size, pass_word=""):
user_info_t = curvefs.UserInfo_t()
user_info_t.owner = user_name
user_info_t.password = pass_word
rc = cbdClient.Create(file_path, user_info_t, size)
if rc != 0:
# print("create file %s fail! rc=%s" %(file_path,rc))
logger.debug("create file %s fail! rc=%s" % (file_path,rc))
return rc
#raise AssertionError
else:
return rc
def libcurve_open(self, file_path, user_name, pass_word=""):
user_info_t = curvefs.UserInfo_t()
user_info_t.owner = user_name
user_info_t.password = pass_word
fd = cbdClient.Open(file_path, user_info_t)
logger.info("fd=%s" % fd)
return fd
def libcurve_write(self, fd, buf, offset, length):
rc = cbdClient.Write(fd, buf, offset, length)
if rc < 0:
logger.debug("write error, rc=%s" % rc)
return rc
raise AssertionError
else:
return rc
def libcurve_read(self, fd, buf, offset, length):
content = cbdClient.Read(fd, buf, offset, length)
#logger.debug(content)
return content
def libcurve_statfs(self, file_name, user_name, pass_word=""):
user_info_t = curvefs.UserInfo_t()
user_info_t.owner = user_name
user_info_t.password = pass_word
file_info = curvefs.FileInfo_t()
rc = cbdClient.StatFile(file_name, user_info_t, file_info)
if rc == 0:
return file_info
else:
logger.debug("statfs file %s fail! rc=%s" % (file_name,rc))
return rc
raise AssertionError
def METHOD_NAME(self, file_path, user_name, new_size, pass_word=""):
user_info_t = curvefs.UserInfo_t()
user_info_t.owner = user_name
user_info_t.password = pass_word
rc = cbdClient.Extend(file_path, user_info_t, new_size)
if rc != 0:
logger.info("extend file fail. rc=%s" %rc)
return rc
#raise AssertionError
else:
return rc
def libcurve_close(self, fd):
rc = cbdClient.Close(fd)
if rc != 0:
logger.info("close file fail! rc=%s" % rc)
return rc
#raise AssertionError
else:
return rc
def libcurve_rename(self, user_name, old_path, new_path, pass_word=""):
user_info_t = curvefs.UserInfo_t()
user_info_t.owner = user_name
user_info_t.password = pass_word
rc = cbdClient.Rename(user_info_t, old_path, new_path)
if rc != 0:
logger.info("rename file fail! rc=%s" % rc)
return rc
raise AssertionError
else:
return rc
def libcurve_delete(self, filepath, user_name, pass_word=""):
user_info_t = curvefs.UserInfo_t()
user_info_t.owner = user_name
user_info_t.password = pass_word
rc = cbdClient.Unlink(filepath, user_info_t)
if rc != 0:
#print "delete file fail! rc=%s" % rc
logger.debug("delete file %s fail! rc=%s" % (filepath,rc))
# logger.info("delete file fail! rc=%s" % rc)
return rc
#raise AssertionError
else:
return rc
def libcurve_rmdir(self, dirpath, user_name, pass_word=""):
user_info_t = curvefs.UserInfo_t()
user_info_t.owner = user_name
user_info_t.password = pass_word
rc = cbdClient.Rmdir(dirpath, user_info_t)
if rc != 0:
#print "delete dir fail! rc=%s" % rc
logger.info("delete dir fail! rc=%s" % rc)
return rc
#raise AssertionError
else:
return rc
def libcurve_mkdir(self, dirpath, user_name, pass_word=""):
user_info_t = curvefs.UserInfo_t()
user_info_t.owner = user_name
user_info_t.password = pass_word
rc = cbdClient.Mkdir(dirpath, user_info_t)
if rc != 0:
#print "mkdir fail! rc=%s" % rc
logger.info("mkdir fail! rc=%s" % rc)
return rc
#raise AssertionError
else:
return rc
    def libcurve_uninit(self):
        rc = cbdClient.UnInit()
        if rc is not None:
            print("uninit fail! rc=%s" % rc)
            logger.debug("uninit file fail! rc=%s" % rc)
            return rc
else:
return 0
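# Typical call sequence (illustrative only; the volume path, user name and
# sizes are made-up values and error handling is omitted):
#
#   lc = LibCurve()
#   lc.libcurve_create("/test-vol", "userA", 10 * 1024 ** 3)
#   fd = lc.libcurve_open("/test-vol", "userA")
#   lc.libcurve_write(fd, "x" * 4096, 0, 4096)
#   data = lc.libcurve_read(fd, "", 0, 4096)
#   lc.libcurve_close(fd)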
|
3,989 |
error
|
#!/usr/bin/env python
# -*- mode: python; coding: utf-8 -*-
# All files in doxy-coverage are Copyright 2014 Alvaro Lopez Ortega.
#
# Authors:
# * Alvaro Lopez Ortega <[email protected]>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
__author__ = "Alvaro Lopez Ortega"
__email__ = "[email protected]"
__copyright__ = "Copyright (C) 2014 Alvaro Lopez Ortega"
import os
import sys
import argparse
import xml.etree.ElementTree as ET
import functools
# Defaults
ACCEPTABLE_COVERAGE = 80
# Global
ns = None
def METHOD_NAME(*objs):
print("ERROR: ", *objs, end='\n', file=sys.stderr)
def FATAL(*objs):
METHOD_NAME (*objs)
sys.exit((1,0)[ns.noerror])
def parse_file(fullpath):
tree = ET.parse(fullpath)
sourcefile = None
definitions = {}
for definition in tree.findall("./compounddef//memberdef"):
# Should it be documented
if (definition.get('kind') == 'function' and
definition.get('static') == 'yes'):
continue
# Is the definition documented?
documented = False
for k in ('briefdescription', 'detaileddescription', 'inbodydescription'):
if definition.findall("./%s/"%(k)):
documented = True
break
# Name
d_def = definition.find('./definition')
d_nam = definition.find('./name')
if not sourcefile:
l = definition.find('./location')
if l is not None:
sourcefile = l.get('file')
if d_def is not None:
name = d_def.text
elif d_nam is not None:
name = d_nam.text
else:
name = definition.get('id')
# Aggregate
definitions[name] = documented
if not sourcefile:
sourcefile = fullpath
return (sourcefile, definitions)
def parse(path):
index_fp = os.path.join (path, "index.xml")
if not os.path.exists (index_fp):
FATAL ("Documentation not present. Exiting.", index_fp)
tree = ET.parse(index_fp)
files = {}
for entry in tree.findall('compound'):
		if entry.get('kind') in ('dir',):
continue
file_fp = os.path.join (path, "%s.xml" %(entry.get('refid')))
#print ('parsing file: ', entry.get('refid'))
tmp = parse_file (file_fp)
files[tmp[0]] = tmp[1]
return files
def report (files, exclude_dirs):
def get_coverage (f):
defs = files[f]
if not defs:
return 100
doc_yes = len([d for d in defs.values() if d])
doc_no = len([d for d in defs.values() if not d])
return (doc_yes * 100.0 / (doc_yes + doc_no))
	def file_cmp (a,b):
		# Three-way comparison on coverage (replacement for Python 2's cmp()).
		ca, cb = get_coverage(a), get_coverage(b)
		return (ca > cb) - (ca < cb)
	files_sorted = sorted(files.keys(), key=functools.cmp_to_key(file_cmp), reverse=True)
total_yes = 0
total_no = 0
for f in files_sorted:
skip = False
for exclude_dir in exclude_dirs:
if exclude_dir in f:
skip = True
break
if skip:
continue
defs = files[f]
if not defs:
continue
doc_yes = len([d for d in defs.values() if d])
doc_no = len([d for d in defs.values() if not d])
doc_per = doc_yes * 100.0 / (doc_yes + doc_no)
total_yes += doc_yes
total_no += doc_no
print ('%3d%% - %s - (%d of %d)'%(doc_per, f, doc_yes, (doc_yes + doc_no)))
#defs_sorted = defs.keys()
defs_sorted = list(defs.keys())
defs_sorted.sort()
#sorted(defs_sorted)
for d in defs_sorted:
if not defs[d]:
print ("\t", d)
total_all = total_yes + total_no
total_per = total_yes * 100 / total_all
print()
print("%d%% API documentation coverage" %(total_per))
return (ns.threshold - total_per, 0)[total_per > ns.threshold]
def main():
# Arguments
parser = argparse.ArgumentParser()
parser.add_argument ("dir", action="store", help="Path to Doxygen's XML doc directory")
parser.add_argument ("--noerror", action="store_true", help="Do not return error code after execution")
parser.add_argument ("--threshold", action="store", help="Min acceptable coverage percentage (Default: %s)"%(ACCEPTABLE_COVERAGE), default=ACCEPTABLE_COVERAGE, type=int)
parser.add_argument("--excludedirs", nargs='+', help="List of directories to be excluded from coverage analysis", type=str, default=[])
global ns
ns = parser.parse_args()
if not ns:
FATAL ("ERROR: Couldn't parse parameters")
# Parse
files = parse (ns.dir)
# Print report
err = report (files, ns.excludedirs)
if ns.noerror:
return
sys.exit(err)
if __name__ == "__main__":
main()
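# Example invocation (path, threshold and excluded directories are
# illustrative):
#
#   python doxy-coverage.py build/doxygen/xml --threshold 75 --excludedirs tests third_party
#
# The script exits with a non-zero status when coverage falls below the
# threshold, unless --noerror is given.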
|
3,990 |
validate file extension
|
# coding=utf-8
"""Csv download model definition.
"""
from datetime import datetime
from django.db import models
from django.conf import settings
from django.core.exceptions import ValidationError
from bims.tasks.email_csv import send_csv_via_email
from bims.download.csv_download import (
send_rejection_csv,
send_new_csv_notification
)
def METHOD_NAME(value):
import os
ext = os.path.splitext(value.name)[1]
valid_extensions = ['.csv']
if ext not in valid_extensions:
raise ValidationError('File not supported!')
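# For illustration: with this validator attached, an upload named "records.xlsx"
# is rejected with ValidationError('File not supported!'), while "records.csv"
# passes and can be stored in the request_file field defined below.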
class DownloadRequestPurpose(models.Model):
name = models.CharField(
max_length=512,
blank=False,
null=False
)
description = models.TextField(
help_text='Optional',
null=True,
blank=True
)
order = models.IntegerField(
null=False,
blank=False
)
def __str__(self):
return self.name
class Meta:
ordering = ('order',)
class DownloadRequest(models.Model):
"""Download request model
"""
CSV = 'CSV'
CHART = 'CHART'
TABLE = 'TABLE'
IMAGE = 'IMAGE'
RESOURCE_TYPE_CHOICES = [
(CSV, 'Csv'),
(CHART, 'Chart'),
(TABLE, 'Table'),
(IMAGE, 'Image')
]
requester = models.ForeignKey(
settings.AUTH_USER_MODEL,
models.CASCADE,
related_name='download_request_requester',
blank=True,
null=True,
)
request_date = models.DateTimeField(
default=datetime.now
)
resource_type = models.CharField(
max_length=10,
choices=RESOURCE_TYPE_CHOICES,
default=CSV
)
resource_name = models.CharField(
max_length=256,
default='',
blank=True
)
taxon = models.ForeignKey(
'bims.Taxonomy',
null=True,
blank=True,
on_delete=models.SET_NULL
)
location_site = models.ForeignKey(
'bims.LocationSite',
null=True,
blank=True,
on_delete=models.SET_NULL
)
survey = models.ForeignKey(
'bims.Survey',
null=True,
blank=True,
on_delete=models.SET_NULL
)
purpose = models.ForeignKey(
'bims.DownloadRequestPurpose',
null=True,
blank=True,
on_delete=models.CASCADE
)
dashboard_url = models.TextField(
null=True,
blank=True
)
request_file = models.FileField(
upload_to='request-files/',
help_text='Only csv file',
null=True,
max_length=300,
validators=[METHOD_NAME]
)
notes = models.TextField(
null=True,
blank=True
)
processing = models.BooleanField(
default=True
)
approved = models.BooleanField(
default=False
)
request_category = models.CharField(
max_length=256,
default=''
)
rejected = models.BooleanField(
default=False
)
rejection_message = models.TextField(
null=True,
blank=True
)
progress = models.CharField(
max_length=255,
null=True,
blank=True
)
def get_formatted_name(self):
"""Return author formated full name, e.g. Maupetit J"""
if not self.requester:
return '-'
if self.requester.first_name or self.requester.last_name:
return '%s %s' % (
self.requester.first_name, self.requester.last_name)
return self.requester.username
# noinspection PyClassicStyleClass
class Meta:
"""Meta class for project."""
app_label = 'bims'
verbose_name_plural = 'Download requests'
verbose_name = 'Download request'
ordering = ('-request_date',)
def __str__(self):
return '{requester} - {date} - {category}'.format(
requester=self.requester,
date=self.request_date.strftime('%H:%M:%S'),
category=self.request_category
)
def save(self, *args, **kwargs):
old_obj = None
if (
not self.requester or
self.resource_type != DownloadRequest.CSV
):
super(DownloadRequest, self).save(*args, **kwargs)
return
if self.id:
old_obj = DownloadRequest.objects.get(id=self.id)
if old_obj and not self.processing and not self.rejected:
if self.approved and self.approved != old_obj.approved:
# send email
send_csv_via_email.delay(
self.requester.id,
self.request_file.path,
self.request_category,
approved=True
)
elif self.rejected and not self.approved and not self.processing:
send_rejection_csv(
self.requester,
self.rejection_message
)
super(DownloadRequest, self).save(*args, **kwargs)
|
3,991 |
neighbors
|
from __future__ import annotations
import heapq
import itertools
import math
import random
from river import base
class Vertex(base.Base):
_isolated: set[Vertex] = set()
def __init__(self, item, uuid: int) -> None:
self.item = item
self.uuid = uuid
self.edges: dict[Vertex, float] = {}
self.r_edges: dict[Vertex, float] = {}
self.flags: set[Vertex] = set()
self.worst_edge: Vertex | None = None
def __hash__(self) -> int:
return self.uuid
def __eq__(self, other) -> bool:
if not isinstance(other, Vertex):
raise NotImplementedError
return self.uuid == other.uuid
def __lt__(self, other) -> bool:
if not isinstance(other, Vertex):
raise NotImplementedError
return self.uuid < other.uuid
def farewell(self):
for rn in list(self.r_edges):
rn.rem_edge(self)
for n in list(self.edges):
self.rem_edge(n)
self.flags = None
self.worst_edge = None
Vertex._isolated.discard(self)
def fill(self, METHOD_NAME: list[Vertex], dists: list[float]):
for n, dist in zip(METHOD_NAME, dists):
self.edges[n] = dist
self.flags.add(n)
n.r_edges[self] = dist
# Neighbors are ordered by distance
self.worst_edge = n
def add_edge(self, vertex: Vertex, dist):
self.edges[vertex] = dist
self.flags.add(vertex)
vertex.r_edges[self] = dist
if self.worst_edge is None or self.edges[self.worst_edge] < dist:
self.worst_edge = vertex
def rem_edge(self, vertex: Vertex):
self.edges.pop(vertex)
vertex.r_edges.pop(self)
self.flags.discard(vertex)
if self.has_neighbors():
if vertex == self.worst_edge:
self.worst_edge = max(self.edges, key=self.edges.get) # type: ignore
else:
self.worst_edge = None
if not self.has_rneighbors():
Vertex._isolated.add(self)
def push_edge(self, node: Vertex, dist: float, max_edges: int) -> int:
if self.is_neighbor(node) or node == self:
return 0
if len(self.edges) >= max_edges:
if self.worst_edge is None or self.edges.get(self.worst_edge, math.inf) <= dist:
return 0
self.rem_edge(self.worst_edge)
self.add_edge(node, dist)
return 1
def is_neighbor(self, vertex):
return vertex in self.edges or vertex in self.r_edges
def get_edge(self, vertex: Vertex):
if vertex in self.edges:
return self, vertex, self.edges[vertex]
return vertex, self, self.r_edges[vertex]
def has_neighbors(self) -> bool:
return len(self.edges) > 0
def has_rneighbors(self) -> bool:
return len(self.r_edges) > 0
@property
def sample_flags(self):
return list(map(lambda n: n in self.flags, self.edges.keys()))
@sample_flags.setter
def sample_flags(self, sampled):
self.flags -= set(sampled)
def METHOD_NAME(self) -> tuple[list[Vertex], list[float]]:
res = tuple(map(list, zip(*((node, dist) for node, dist in self.edges.items()))))
return res if len(res) > 0 else ([], []) # type: ignore
def r_neighbors(self) -> tuple[list[Vertex], list[float]]:
res = tuple(map(list, zip(*((vertex, dist) for vertex, dist in self.r_edges.items()))))
return res if len(res) > 0 else ([], []) # type: ignore
def all_neighbors(self):
return set.union(set(self.edges.keys()), set(self.r_edges.keys()))
def is_isolated(self):
return len(self.edges) == 0 and len(self.r_edges) == 0
def prune(self, prune_prob: float, prune_trigger: int, rng: random.Random):
if prune_prob == 0:
return
total_degree = len(self.edges) + len(self.r_edges)
if total_degree <= prune_trigger:
return
# To avoid tie in distances
counter = itertools.count()
edge_pool: list[tuple[float, int, Vertex, bool]] = []
for n, dist in self.edges.items():
heapq.heappush(edge_pool, (dist, next(counter), n, True))
for rn, dist in self.r_edges.items():
heapq.heappush(edge_pool, (dist, next(counter), rn, False))
# Start with the best undirected edge
selected: list[Vertex] = [heapq.heappop(edge_pool)[2]]
while len(edge_pool) > 0:
c_dist, _, c, c_isdir = heapq.heappop(edge_pool)
discarded = False
for s in selected:
if s.is_neighbor(c) and rng.random() < prune_prob:
orig, dest, dist = s.get_edge(c)
if dist < c_dist:
if c_isdir:
self.rem_edge(c)
else:
c.rem_edge(self)
discarded = True
break
else:
orig.rem_edge(dest)
if not discarded:
selected.append(c)
|
3,992 |
get value text
|
# (C) Copyright 2004-2023 Enthought, Inc., Austin, TX
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only under
# the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
"""
**WARNING**
This demo might not work as expected and some documented features might be
missing.
-------------------------------------------------------------------------------
Another demo showing how to use a TabularEditor to create a multi-select list
box. This demo creates a reusable StringListEditor class and uses that instead
of defining the editor as part of the demo class.
This approach greatly simplifies the actual demo class and shows how to
construct a reusable Traits UI-based editor that can be used in other
applications.
"""
# Issue related to the demo warning: enthought/traitsui#960
from traits.api import HasPrivateTraits, List, Str, Property, observe
from traits.etsconfig.api import ETSConfig
from traitsui.api import (
BasicEditorFactory,
HGroup,
Item,
TabularAdapter,
TabularEditor,
View,
)
if ETSConfig.toolkit == 'wx':
from traitsui.wx.ui_editor import UIEditor
else:
from traitsui.qt.ui_editor import UIEditor
# -- Define the reusable StringListEditor class and its helper classes --------
# Define the tabular adapter used by the Traits UI string list editor:
class MultiSelectAdapter(TabularAdapter):
# The columns in the table (just the string value):
columns = [('Value', 'value')]
# The text property used for the 'value' column:
value_text = Property()
def METHOD_NAME(self):
return self.item
# Define the actual Traits UI string list editor:
class _StringListEditor(UIEditor):
# Indicate that the editor is scrollable/resizable:
scrollable = True
# The list of available editor choices:
choices = List(Str)
# The list of currently selected items:
selected = List(Str)
# The traits UI view used by the editor:
traits_view = View(
Item(
'choices',
show_label=False,
editor=TabularEditor(
show_titles=False,
selected='selected',
editable=False,
multi_select=True,
adapter=MultiSelectAdapter(),
),
),
id='string_list_editor',
resizable=True,
)
def init_ui(self, parent):
self.sync_value(self.factory.choices, 'choices', 'from', is_list=True)
self.selected = self.value
return self.edit_traits(parent=parent, kind='subpanel')
@observe('selected')
def _selected_modified(self, event):
self.value = self.selected
# Define the StringListEditor class used by client code:
class StringListEditor(BasicEditorFactory):
# The editor implementation class:
klass = _StringListEditor
# The extended trait name containing the editor's set of choices:
choices = Str()
# -- Define the demo class ----------------------------------------------------
class MultiSelect(HasPrivateTraits):
"""This class demonstrates using the StringListEditor to select a set
of string values from a set of choices.
"""
# The list of choices to select from:
choices = List(Str)
# The currently selected list of choices:
selected = List(Str)
# A dummy result so that we can display the selection using the same
# StringListEditor:
result = List(Str)
# A traits view showing the list of choices on the left-hand side, and
# the currently selected choices on the right-hand side:
traits_view = View(
HGroup(
Item(
'selected',
show_label=False,
editor=StringListEditor(choices='choices'),
),
Item(
'result',
show_label=False,
editor=StringListEditor(choices='selected'),
),
),
width=0.20,
height=0.25,
)
# Create the demo:
demo = MultiSelect(
choices=[
'one',
'two',
'three',
'four',
'five',
'six',
'seven',
'eight',
'nine',
'ten',
],
selected=['two', 'five', 'nine'],
)
# Run the demo (if invoked from the command line):
if __name__ == '__main__':
demo.configure_traits()
|
3,993 |
check permissions
|
import uuid
import os
import csv
from sqlalchemy.exc import IntegrityError
from flask import request, current_app
from flask_restful import Resource
from flask_jwt_extended import jwt_required
from zou.app.mixin import ArgsMixin
from zou.app import app
from zou.app.utils import permissions
from zou.app.services import user_service, projects_service
class ImportRowException(Exception):
message = ""
line_number = 0
def __init__(self, message, line_number):
Exception.__init__(self, message)
self.message = message
self.line_number = line_number
class RowException(Exception):
message = ""
def __init__(self, message):
Exception.__init__(self, message)
self.message = message
class BaseCsvImportResource(Resource, ArgsMixin):
@jwt_required()
def post(self):
"""
Import persons as csv.
---
tags:
- Import
consumes:
- multipart/form-data
properties:
- in: formData
name: file
type: file
required: true
responses:
201:
description: Persons imported
400:
description: Format error
"""
uploaded_file = request.files["file"]
file_name = "%s.csv" % uuid.uuid4()
file_path = os.path.join(app.config["TMP_DIR"], file_name)
uploaded_file.save(file_path)
self.is_update = self.get_bool_parameter("update")
try:
result = self.run_import(file_path)
return result, 201
except ImportRowException as e:
current_app.logger.error(
"Import row %s failed: %s" % (e.line_number, e.message)
)
return self.format_row_error(e), 400
except csv.Error as e:
current_app.logger.error("Import failed: %s" % e)
return self.format_error(e), 400
def format_row_error(self, exception):
return {
"error": True,
"message": exception.message,
"line_number": exception.line_number,
}
def format_error(self, exception):
return {"error": True, "message": str(exception)}
def run_import(self, file_path):
result = []
self.METHOD_NAME()
self.prepare_import()
with open(file_path) as csvfile:
reader = csv.DictReader(csvfile, dialect=self.get_dialect(csvfile))
for row in reader:
row = self.import_row(row)
result.append(row)
return result
def get_dialect(self, csvfile):
sniffer = csv.Sniffer()
dialect = sniffer.sniff(csvfile.read())
csvfile.seek(0)
return dialect
def prepare_import(self):
pass
def METHOD_NAME(self):
return permissions.check_manager_permissions()
def import_row(self):
pass
def add_to_cache_if_absent(self, cache, retrieve_function, name):
if name not in cache:
cache[name] = retrieve_function(name)
return cache[name]
def get_id_from_cache(self, cache, name):
cached_object = cache[name]
if isinstance(cached_object, dict):
return cached_object["id"]
else:
return cached_object.id
class BaseCsvProjectImportResource(BaseCsvImportResource, ArgsMixin):
@jwt_required()
def post(self, project_id, **kwargs):
uploaded_file = request.files["file"]
file_name = "%s.csv" % uuid.uuid4()
file_path = os.path.join(app.config["TMP_DIR"], file_name)
uploaded_file.save(file_path)
self.is_update = self.get_bool_parameter("update")
try:
result = self.run_import(project_id, file_path, **kwargs)
return result, 201
except ImportRowException as e:
current_app.logger.error(
"Import row %s failed: %s" % (e.line_number, e.message)
)
return self.format_row_error(e), 400
except csv.Error as e:
current_app.logger.error("Import failed: %s" % e)
return self.format_error(e), 400
def run_import(self, project_id, file_path, **kwargs):
result = []
self.check_project_permissions(project_id)
self.prepare_import(project_id, **kwargs)
with open(file_path) as csvfile:
reader = csv.DictReader(csvfile, dialect=self.get_dialect(csvfile))
line_number = 1
for row in reader:
try:
row = self.import_row(row, project_id, **kwargs)
result.append(row)
except IntegrityError as e:
raise ImportRowException(e._message(), line_number)
except RowException as e:
raise ImportRowException(e.message, line_number)
except KeyError as e:
raise ImportRowException(
"A columns is missing: %s" % e.args, line_number
)
except Exception as e:
raise ImportRowException(str(e), line_number)
line_number += 1
return result
def check_project_permissions(self, project_id):
return user_service.check_manager_project_access(project_id)
def import_row(self, project_id, **kwargs):
pass
def get_descriptor_field_map(self, project_id, entity_type):
descriptor_map = {}
descriptors = projects_service.get_metadata_descriptors(project_id)
for descriptor in descriptors:
if descriptor["entity_type"] == entity_type:
descriptor_map[descriptor["name"]] = descriptor["field_name"]
return descriptor_map
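# Sketch of a concrete importer (hypothetical, not part of this module): a
# subclass only needs to implement import_row(); the base classes handle the
# upload, permission checks and per-line error reporting.
#
#   class MyCsvImportResource(BaseCsvImportResource):
#       def import_row(self, row):
#           # "row" is one csv.DictReader dict keyed by the CSV header names.
#           name = row["Name"]
#           ...  # create or update the matching record here
#           return row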
|
3,994 |
test list inside union default
|
# (C) Copyright 2005-2023 Enthought, Inc., Austin, TX
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only under
# the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
import unittest
from traits.api import (
Bytes, DefaultValue, Float, HasTraits, Instance, Int, List, Str,
TraitError, TraitType, Type, Union)
class CustomClass(HasTraits):
value = Int
class CustomStrType(TraitType):
#: The default value type to use.
default_value_type = DefaultValue.constant
#: The default value.
default_value = "a string value"
def validate(self, obj, name, value):
if not isinstance(value, Str):
return value
self.error(obj, name, value)
class TestUnion(unittest.TestCase):
def test_union_incompatible_trait(self):
with self.assertRaises(ValueError) as exception_context:
Union(Str(), "none")
self.assertEqual(
str(exception_context.exception),
"Union trait declaration expects a trait type or an instance of "
"trait type or None, but got 'none' instead"
)
def test_list_trait_types(self):
class TestClass(HasTraits):
int_or_str_type = Union(Type, Int, Str)
TestClass(int_or_str_type=3)
TestClass(int_or_str_type="3.5")
with self.assertRaises(TraitError):
TestClass(int_or_str_type=3.5)
with self.assertRaises(TraitError):
TestClass(int_or_str_type=Int(3))
def test_malformed_declaration(self):
with self.assertRaises(ValueError):
class TestClass(HasTraits):
a = Union(int, Float)
TestClass(a=2.4)
with self.assertRaises(ValueError):
class TestClass(HasTraits):
a = Union([1, 2], Float)
TestClass(a=2.4)
def test_list_trait_instances(self):
class TestClass(HasTraits):
float_or_str_obj = Union(Instance(Float), Instance(Str))
TestClass(float_or_str_obj=Float(3.5))
TestClass(float_or_str_obj=Str("3.5"))
with self.assertRaises(TraitError):
TestClass(float_or_str_obj=Float)
with self.assertRaises(TraitError):
TestClass(float_or_str_obj=3.5)
def test_union_with_none(self):
class TestClass(HasTraits):
int_or_none = Union(None, Int)
TestClass(int_or_none=None)
def test_union_unspecified_arguments(self):
class TestClass(HasTraits):
none = Union()
TestClass(none=None)
def test_default_value(self):
class TestClass(HasTraits):
atr = Union(Int(3), Float(4.1), Str("Something"))
self.assertEqual(TestClass().atr, 3)
class TestClass(HasTraits):
atr = Union(
Int(3), Float(4.1), Str("Something"),
default_value="XYZ",
)
self.assertEqual(TestClass().atr, "XYZ")
class TestClass(HasTraits):
atr = Union()
self.assertEqual(TestClass().atr, None)
class TestClass(HasTraits):
atr = Union(None)
self.assertEqual(TestClass().atr, None)
def test_default_raise_error(self):
# If 'default' is defined, it could be caused by migration from
# ``Either``. Raise an error to aid migrations from ``Either``
# to ``Union``
with self.assertRaises(ValueError) as exception_context:
Union(Int(), Float(), default=1.0)
self.assertEqual(
str(exception_context.exception),
"Union default value should be set via 'default_value', not "
"'default'."
)
def test_inner_traits(self):
class TestClass(HasTraits):
atr = Union(Float, Int, Str)
obj = TestClass()
t1, t2, t3 = obj.trait('atr').inner_traits
self.assertEqual(type(t1.trait_type), Float)
self.assertEqual(type(t2.trait_type), Int)
self.assertEqual(type(t3.trait_type), Str)
def test_union_user_defined_class(self):
class TestClass(HasTraits):
obj = Union(Instance(CustomClass), Int)
TestClass(obj=CustomClass(value=5))
TestClass(obj=5)
with self.assertRaises(TraitError):
TestClass(obj=CustomClass)
def test_union_user_defined_type(self):
class TestClass(HasTraits):
type_value = Union(CustomStrType, Int)
TestClass(type_value="new string")
def test_notification(self):
class TestClass(HasTraits):
union_attr = Union(Int)
shadow_union_trait = None
def _union_attr_changed(self, new):
self.shadow_union_trait = new
obj = TestClass(union_attr=-1)
obj.union_attr = 1
self.assertEqual(obj.shadow_union_trait, 1)
def test_extending_union_trait(self):
class UnionAllowStr(Union):
def validate(self, obj, name, value):
if isinstance(value, str):
return value
return super().validate(obj, name, value)
class TestClass(HasTraits):
s = UnionAllowStr(Int, Float)
TestClass(s="sdf")
def METHOD_NAME(self):
class HasUnionWithList(HasTraits):
foo = Union(List(Int), Str)
has_union = HasUnionWithList()
value = has_union.foo
self.assertIsInstance(value, list)
with self.assertRaises(TraitError):
value.append("not an integer")
def test_constant_default(self):
# Exercise the branch where the default is constant.
class HasUnionWithList(HasTraits):
foo = Union(Int(23), Float)
nested = Union(Union(Str(), Bytes()), Union(Int(), Float(), None))
has_union = HasUnionWithList()
value = has_union.foo
self.assertEqual(value, 23)
self.assertEqual(
has_union.trait("foo").default_value(),
(DefaultValue.constant, 23),
)
self.assertEqual(
has_union.trait("nested").default_value(),
(DefaultValue.constant, ""),
)
|
3,995 |
update params
|
"""Training algorithm track submission functions for LibriSpeech."""
import functools
from typing import Dict, Iterator, List, Tuple
from absl import logging
from flax import jax_utils
import jax
from jax import lax
import jax.numpy as jnp
import numpy as np
import optax
from algorithmic_efficiency import spec
_GRAD_CLIP_EPS = 1e-6
def get_batch_size(workload_name):
# Return the global batch size.
del workload_name
return 256
def get_learning_rate(step, hyperparams):
warmup_steps = hyperparams.warmup_steps
if step < warmup_steps:
current_lr = (step * hyperparams.base_lr) / warmup_steps
else:
decay_factor = (1 + np.cos(step / hyperparams.training_steps * np.pi)) * 0.5
current_lr = hyperparams.base_lr * decay_factor
return current_lr
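# Worked example (illustrative numbers): with base_lr=1e-3, warmup_steps=1000
# and training_steps=100000, step 500 gives 500 * 1e-3 / 1000 = 5e-4 on the
# linear warmup, while step 50000 gives 1e-3 * (1 + cos(0.5 * pi)) * 0.5 = 5e-4
# on the cosine decay branch.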
def optimizer(hyperparameters: spec.Hyperparameters, num_train_examples: int):
opt_init_fn, opt_update_fn = optax.inject_hyperparams(optax.adamw)(
b1=hyperparameters.beta1,
b2=hyperparameters.beta2,
eps=hyperparameters.epsilon,
weight_decay=hyperparameters.weight_decay,
learning_rate=0.0)
return opt_init_fn, opt_update_fn
def init_optimizer_state(workload: spec.Workload,
model_params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
rng: spec.RandomState) -> spec.OptimizerState:
del model_state
del rng
params_zeros_like = jax.tree_map(lambda s: jnp.zeros(s.shape_tuple),
workload.param_shapes)
opt_init_fn, opt_update_fn = optimizer(hyperparameters,
workload.num_train_examples)
optimizer_state = opt_init_fn(params_zeros_like)
return jax_utils.replicate(optimizer_state), opt_update_fn
def l2_regularization(params, l2_decay_rank_threshold):
"""Computes the squared l2 norm of the given parameters.
This function will only filter for parameters with
rank >= l2_decay_rank_threshold. So if this threshold is set to 2, then all
1d (and lower) parameter arrays, including all bias and batch norm params,
will be ignored in this computation.
Args:
params: Pytree containing parameters.
l2_decay_rank_threshold: The calculation will only include parameters with
param.ndim >= l2_decay_rank_threshold. Set to 2 to ignore all bias and
batch_norm params in the model.
Returns:
weight_l2: the squared l2 norm of all params matching the threshold.
"""
weight_penalty_params = jax.tree_util.tree_leaves(params)
weight_l2 = sum(
jnp.sum(x**2)
for x in weight_penalty_params
if x.ndim >= l2_decay_rank_threshold)
return weight_l2
@functools.partial(
jax.pmap,
axis_name='batch',
in_axes=(None, None, 0, 0, 0, None, 0, 0, None),
static_broadcasted_argnums=(0, 1))
def pmapped_train_step(workload,
opt_update_fn,
model_state,
optimizer_state,
current_param_container,
hyperparameters,
batch,
rng,
lr):
optimizer_state.hyperparams['learning_rate'] = lr
def _loss_fn(params):
"""loss function used for training."""
(logits, logit_paddings), new_model_state = workload.model_fn(
params,
batch,
model_state,
mode=spec.ForwardPassMode.TRAIN,
rng=rng,
update_batch_norm=True)
loss_dict = workload.loss_fn(batch['targets'], (logits, logit_paddings))
summed_loss = loss_dict['summed']
n_valid_examples = loss_dict['n_valid_examples']
return summed_loss, (n_valid_examples, new_model_state)
grad_fn = jax.value_and_grad(_loss_fn, has_aux=True)
(summed_loss, (n_valid_examples, new_model_state)), grad = grad_fn(
current_param_container)
# Get correct global mean loss and grad.
(summed_loss, n_valid_examples, grad) = lax.psum(
(summed_loss, n_valid_examples, grad), axis_name='batch')
loss = summed_loss / n_valid_examples
grad = jax.tree_map(lambda x: x / n_valid_examples, grad)
grad_clip = hyperparameters.grad_clip
grad_norm = jnp.sqrt(l2_regularization(grad, 0))
scaled_grad = jax.tree_map(
lambda x: x / (grad_norm + _GRAD_CLIP_EPS) * grad_clip, grad)
grad = jax.lax.cond(grad_norm > grad_clip,
lambda _: scaled_grad,
lambda _: grad,
None)
updates, new_optimizer_state = opt_update_fn(grad, optimizer_state,
current_param_container)
updated_params = optax.apply_updates(current_param_container, updates)
return new_model_state, new_optimizer_state, updated_params, loss, grad_norm
def METHOD_NAME(workload: spec.Workload,
current_param_container: spec.ParameterContainer,
current_params_types: spec.ParameterTypeTree,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
batch: Dict[str, spec.Tensor],
loss_type: spec.LossType,
optimizer_state: spec.OptimizerState,
eval_results: List[Tuple[int, float]],
global_step: int,
rng: spec.RandomState) -> spec.UpdateReturn:
"""Return (updated_optimizer_state, updated_params)."""
del current_params_types
del eval_results
del loss_type
lr = get_learning_rate(global_step, hyperparameters)
optimizer_state, opt_update_fn = optimizer_state
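# Give each local device its own RNG key for the pmapped train step.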
per_device_rngs = jax.random.split(rng, jax.local_device_count())
outputs = pmapped_train_step(workload,
opt_update_fn,
model_state,
optimizer_state,
current_param_container,
hyperparameters,
batch,
per_device_rngs,
lr)
new_model_state, new_optimizer_state, new_params, loss, grad_norm = outputs
if global_step <= 1000 or global_step % 100 == 0:
logging.info('%d) loss = %0.3f, grad_norm = %0.3f lr = %0.6f',
global_step,
loss.mean(),
grad_norm.mean(),
lr)
if workload.metrics_logger is not None:
workload.metrics_logger.append_scalar_metrics(
{
'train_step_ctc_loss': loss.mean(),
'grad_norm': grad_norm.mean(),
'learning_rate': lr,
},
global_step)
return (new_optimizer_state, opt_update_fn), new_params, new_model_state
# Not allowed to update the model parameters, hyperparameters, global step, or
# optimizer state.
def data_selection(workload: spec.Workload,
input_queue: Iterator[Dict[str, spec.Tensor]],
optimizer_state: spec.OptimizerState,
current_param_container: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
global_step: int,
rng: spec.RandomState) -> Dict[str, spec.Tensor]:
"""Select data from the infinitely repeating, pre-shuffled input queue.
Each element of the queue is a batch of training examples and labels.
"""
del workload
del optimizer_state
del current_param_container
del model_state
del hyperparameters
del global_step
del rng
return next(input_queue)
|
3,996 |
periodical task
|
# -*- coding: utf-8 -*-
import threading
from functools import wraps
from ..helpers import Periodical, is_sequence
from .plugin import BasePlugin
def threaded(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
return self.pyload.adm.start_thread(func, self, *args, **kwargs)
return wrapper
# NOTE: Performance penalty compared to the original 'Expose' class decorator :(
def expose(func):
"""
Used for decoration to declare rpc services.
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
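# Register the RPC service on the first call only, then delegate to the wrapped function.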
if not wrapper._exposed:
self.pyload.adm.add_rpc(func.__module__, func.__name__, func.__doc__)
wrapper._exposed = True
return func(self, *args, **kwargs)
wrapper._exposed = False
return wrapper
class BaseAddon(BasePlugin):
__name__ = "BaseAddon"
__type__ = "addon"
__version__ = "0.56"
__status__ = "stable"
__description__ = """Base addon plugin"""
__license__ = "GPLv3"
__authors__ = [("Walter Purcaro", "[email protected]")]
def __init__(self, core, manager):
self._init(core)
#: `AddonManager`
self.m = self.manager = manager
self.lock = threading.Lock()
#: Automatically register event listeners for functions; the attribute will be deleted afterwards, don't use it yourself
self.event_map = {}
#: Callback of periodical job task, used by AddonManager
self.periodical = Periodical(self, self.METHOD_NAME)
self.cb = self.periodical.cb # TODO: Recheck in 0.6.x
self.init()
self._init_events() # TODO: Remove in 0.6.x
self.init_events()
@property
def activated(self):
"""
Checks if addon is activated.
"""
return self.config.get("enabled")
# TODO: Remove in 0.6.x
def _log(self, level, plugintype, pluginname, args, kwargs):
plugintype = "addon" if plugintype == "addon" else plugintype
return super()._log(level, plugintype, pluginname, args, kwargs)
# TODO: Remove in 0.6.x
def _init_events(self):
event_map = {
"all_downloads_finished": "all_downloads_finished",
"all_downloads_processed": "all_downloads_processed",
"config_changed": "config_changed",
"download_processed": "download_processed",
"download_start": "download_start",
"links_added": "links_added",
"package_deleted": "package_deleted",
"package_failed": "package_failed",
"package_processed": "package_processed",
}
for event, funcs in event_map.items():
self.m.add_event(event, getattr(self, funcs))
def init_events(self):
if self.event_map:
for event, funcs in self.event_map.items():
if not is_sequence(funcs):
funcs = [funcs]
for fn in funcs:
self.m.add_event(event, getattr(self, fn))
#: Delete for various reasons
self.event_map = None
def METHOD_NAME(self):
raise NotImplementedError
#: Deprecated method, use `activated` property instead (Remove in 0.6.x)
def is_activated(self):
return self.activated
def deactivate(self):
"""
Called when addon was deactivated.
"""
pass
#: Deprecated method, use `deactivate` instead (Remove in 0.6.x)
def unload(self):
self.db.store("info", self.info)
return self.deactivate()
def activate(self):
"""
Called when addon was activated.
"""
pass
#: Deprecated method, use `activate` instead (Remove in 0.6.x)
def core_ready(self):
self.db.retrieve("info", self.info)
return self.activate()
def exit(self):
"""
Called by core.shutdown just before pyLoad exit.
"""
pass
#: Deprecated method, use `exit` instead (Remove in 0.6.x)
def core_exiting(self):
self.unload() # TODO: Fix in 0.6.x
return self.exit()
def config_changed(self, category, option, value, section):
pass
def all_downloads_finished(self):
pass
def all_downloads_processed(self):
pass
def links_added(self, urls, pypack):
pass
def download_preparing(self, pyfile):
pass
#: Deprecated method, use `download_preparing` instead (Remove in 0.6.x)
# def download_preparing(self, pyfile):
# if pyfile.plugin.req is not None: # TODO: Remove in 0.6.x
# return self.download_preparing(pyfile)
def download_start(self, pyfile, url, filename):
pass
def download_processed(self, pyfile):
pass
def download_finished(self, pyfile):
pass
def download_failed(self, pyfile):
pass
def package_processed(self, pypack):
pass
def package_deleted(self, pid):
pass
def package_failed(self, pypack):
pass
def package_finished(self, pypack):
pass
def before_reconnect(self, ip):
pass
def after_reconnect(self, ip, old_ip):
pass
def captcha_task(self, task):
"""
New captcha task for the plugin; it MUST set the handler and timeout or it will be
ignored.
"""
pass
def captcha_correct(self, task):
pass
def captcha_invalid(self, task):
pass
|
3,997 |
constructor
|
import copy
import inspect
from typing import Any, Callable, Dict, Generic, Optional, Type, TypeVar, Union, cast
from .det_hash import CustomDetHash, DetHashWithVersion
from .params import Params
T = TypeVar("T")
class Lazy(Generic[T], CustomDetHash):
"""
This class is for use when constructing objects using :class:`~tango.common.FromParams`,
when an argument to a constructor has a `sequential dependency` with another argument to the same
constructor.
For example, in a ``Trainer`` class you might want to take a ``Model`` and an ``Optimizer`` as arguments,
but the ``Optimizer`` needs to be constructed using the parameters from the ``Model``. You can give
the type annotation ``Lazy[Optimizer]`` to the optimizer argument, then inside the constructor
call ``optimizer.construct(parameters=model.parameters)``.
This is only recommended for use when you have registered a ``@classmethod`` as the constructor
for your class, instead of using ``__init__``. Having a ``Lazy[]`` type annotation on an argument
to an ``__init__`` method makes your class completely dependent on being constructed using the
``FromParams`` pipeline, which is not a good idea.
The actual implementation here is incredibly simple; the logic that handles the lazy
construction is actually found in ``FromParams``, where we have a special case for a ``Lazy`` type
annotation.
Examples
--------
::
@classmethod
def my_constructor(
cls,
some_object: Lazy[MyObject],
optional_object: Lazy[MyObject] = None,
# or:
# optional_object: Optional[Lazy[MyObject]] = None,
optional_object_with_default: Optional[Lazy[MyObject]] = Lazy(MyObjectDefault),
required_object_with_default: Lazy[MyObject] = Lazy(MyObjectDefault),
) -> MyClass:
obj1 = some_object.construct()
obj2 = None if optional_object is None else optional_object.construct()
obj3 = None if optional_object_with_default is None else optional_object_with_default.construct()
obj4 = required_object_with_default.construct()
"""
def __init__(
self,
METHOD_NAME: Union[Type[T], Callable[..., T]],
params: Optional[Params] = None,
constructor_extras: Optional[Dict[str, Any]] = None,
**kwargs,
) -> None:
self._constructor = METHOD_NAME
self._params = params or Params({})
self._constructor_extras = constructor_extras or {}
self._constructor_extras.update(kwargs)
@property
def METHOD_NAME(self) -> Callable[..., T]:
from tango.common.from_params import FromParams
if inspect.isclass(self._constructor) and issubclass(self._constructor, FromParams):
def constructor_to_use(**kwargs):
return self._constructor.from_params( # type: ignore[union-attr]
copy.deepcopy(self._params),
**kwargs,
)
return constructor_to_use
else:
return self._constructor
def construct(self, **kwargs) -> T:
"""
Call the constructor to create an instance of ``T``.
"""
# If there are duplicate keys between self._constructor_extras and kwargs,
# this will overwrite the ones in self._constructor_extras with what's in kwargs.
constructor_kwargs = {**self._constructor_extras, **kwargs}
return self.METHOD_NAME(**constructor_kwargs)
def det_hash_object(self) -> Any:
from tango.common.from_params import FromParams
class_to_construct: Union[Type[T], Callable[..., T]] = self._constructor
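# For FromParams targets, resolve the concrete registered subclass so the
# hash can include its VERSION when it defines one.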
if isinstance(class_to_construct, type) and issubclass(class_to_construct, FromParams):
params = copy.deepcopy(self._params)
if params is None:
params = Params({})
elif isinstance(params, str):
params = Params({"type": params})
elif isinstance(params, dict):
params = Params(params)
elif not isinstance(params, Params):
return None
from tango.common import Registrable
if issubclass(class_to_construct, Registrable):
as_registrable = cast(Type[Registrable], class_to_construct)
if "type" in params and params["type"] not in as_registrable.list_available():
as_registrable.search_modules(params["type"])
# Resolve the subclass and constructor.
from .from_params import is_base_registrable
if is_base_registrable(class_to_construct) or "type" in params:
default_to_first_choice = as_registrable.default_implementation is not None
choice = params.pop_choice(
"type",
choices=as_registrable.list_available(),
default_to_first_choice=default_to_first_choice,
)
subclass_or_factory_func, _ = as_registrable.resolve_class_name(choice)
if inspect.isclass(subclass_or_factory_func):
class_to_construct = subclass_or_factory_func
else:
# We have a function that returns an instance of the class.
factory_func = cast(Callable[..., T], subclass_or_factory_func)
return_type = inspect.signature(factory_func).return_annotation
if return_type != inspect.Signature.empty:
class_to_construct = return_type
if isinstance(class_to_construct, type) and issubclass(
class_to_construct, DetHashWithVersion
):
return class_to_construct.VERSION, self
else:
return self
|
3,998 |
check host name
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from .db import Database
from .configFileOps import configFileOps
from .serviceConfig import serviceCfgBase
from .cloudException import CloudRuntimeException, CloudInternalException
from .utilities import bash
import os
class cloudManagementConfig(serviceCfgBase):
def __init__(self, syscfg):
super(cloudManagementConfig, self).__init__(syscfg)
self.serviceName = "CloudStack Management Server"
def config(self):
def METHOD_NAME():
ret = bash("hostname --fqdn")
if not ret.isSuccess():
raise CloudInternalException("Cannot get hostname, 'hostname --fqdn failed'")
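# "mycloud" deployments get a custom components spec, local-storage defaults
# and a 443 -> 8250 DNAT rule; "HttpsServer" mode only adds a 443 -> 8443 redirect.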
if self.syscfg.env.svrMode == "mycloud":
cfo = configFileOps("/usr/share/cloudstack-management/conf/environment.properties", self)
cfo.addEntry("cloud-stack-components-specification", "components-cloudzones.xml")
cfo.save()
cfo = configFileOps("/usr/share/cloudstack-management/conf/db.properties", self)
dbHost = cfo.getEntry("db.cloud.host")
dbPort = cfo.getEntry("db.cloud.port")
dbUser = cfo.getEntry("db.cloud.username")
dbPass = cfo.getEntry("db.cloud.password")
if dbPass.strip() == "":
dbPass = None
dbName = cfo.getEntry("db.cloud.name")
db = Database(dbUser, dbPass, dbHost, dbPort, dbName)
try:
db.testConnection()
except CloudRuntimeException as e:
raise e
except:
raise CloudInternalException("Failed to connect to Mysql server")
try:
statement = """ UPDATE configuration SET value='%s' WHERE name='%s'"""
db.execute(statement%('true','use.local.storage'))
db.execute(statement%('20','max.template.iso.size'))
statement = """ UPDATE vm_template SET url='%s',checksum='%s' WHERE id='%s' """
db.execute(statement%('https://rightscale-cloudstack.s3.amazonaws.com/kvm/RightImage_CentOS_5.4_x64_v5.6.28.qcow2.bz2', '90fcd2fa4d3177e31ff296cecb9933b7', '4'))
statement="""UPDATE disk_offering set use_local_storage=1"""
db.execute(statement)
except Exception as e:
raise e
#add DNAT 443 to 8250
if not bash("iptables-save |grep PREROUTING | grep 8250").isSuccess():
bash("iptables -A PREROUTING -t nat -p tcp --dport 443 -j REDIRECT --to-port 8250 ")
elif self.syscfg.env.svrMode == "HttpsServer":
if not bash("iptables-save |grep PREROUTING | grep 8443").isSuccess():
bash("iptables -A PREROUTING -t nat -p tcp --dport 443 -j REDIRECT --to-port 8443")
bash("touch /var/run/cloudstack-management.pid")
bash("chown cloud.cloud /var/run/cloudstack-management.pid")
METHOD_NAME()
bash("mkdir -p /var/lib/cloudstack/")
bash("chown cloud:cloud -R /var/lib/cloudstack/")
#set max process per account is unlimited
if os.path.exists("/etc/security/limits.conf"):
cfo = configFileOps("/etc/security/limits.conf")
cfo.add_lines("cloud soft nproc -1\n")
cfo.add_lines("cloud hard nproc -1\n")
cfo.save()
if self.syscfg.env.noStart == False:
self.syscfg.svo.stopService("cloudstack-management")
if self.syscfg.svo.enableService("cloudstack-management"):
return True
else:
raise CloudRuntimeException("Failed to configure %s, please see the /var/log/cloudstack/management/setupManagement.log for detail"%self.serviceName)
else:
print("Configured successfully, but not starting management server.")
return True
|
3,999 |
test read earlinet get file list error
|
from __future__ import annotations
from pathlib import Path
import numpy as np
import pytest
from pyaerocom import VerticalProfile, const
from pyaerocom.io.read_earlinet import ReadEarlinet
from tests.conftest import TEST_RTOL
ROOT: str = const.OBSLOCS_UNGRIDDED["Earlinet-test"]
TEST_FILES: list[str] = [
f"{ROOT}/ev/ev1008192050.e532",
f"{ROOT}/ev/ev1009162031.e532",
f"{ROOT}/ev/ev1012131839.e532",
f"{ROOT}/ev/ev1011221924.e532",
f"{ROOT}/ev/ev1105122027.e532",
f"{ROOT}/ms/ms1005242029.e355",
]
def test_all_files_exist():
for file in TEST_FILES:
assert Path(file).exists()
@pytest.mark.parametrize(
"num,vars_to_retrieve",
[
(0, "ec532aer"),
(0, ["ec532aer", "zdust"]),
(0, ReadEarlinet.PROVIDES_VARIABLES),
(1, ReadEarlinet.PROVIDES_VARIABLES),
(2, ReadEarlinet.PROVIDES_VARIABLES),
(3, ReadEarlinet.PROVIDES_VARIABLES),
(4, ReadEarlinet.PROVIDES_VARIABLES),
(5, ReadEarlinet.PROVIDES_VARIABLES),
],
)
def test_ReadEarlinet_read_file(num: int, vars_to_retrieve: list[str]):
read = ReadEarlinet()
read.files = paths = TEST_FILES
stat = read.read_file(paths[num], vars_to_retrieve)
assert "data_level" in stat
assert "wavelength_det" in stat
assert "has_zdust" in stat
assert "eval_method" in stat
if num != 0:
return
assert "ec532aer" in stat.var_info
assert stat.var_info["ec532aer"]["unit_ok"]
assert stat.var_info["ec532aer"]["err_read"]
assert stat.var_info["ec532aer"]["outliers_removed"]
ec532aer = stat.ec532aer
assert isinstance(ec532aer, VerticalProfile)
assert len(ec532aer.data) == 253
assert np.sum(np.isnan(ec532aer.data)) == 216
assert np.nanmean(ec532aer.data) == pytest.approx(4.463068618148296, rel=TEST_RTOL)
assert np.nanstd(ec532aer.data) == pytest.approx(1.8529271228530515, rel=TEST_RTOL)
assert np.nanmean(ec532aer.data_err) == pytest.approx(4.49097234883772, rel=TEST_RTOL)
assert np.nanstd(ec532aer.data_err) == pytest.approx(0.8332285038985179, rel=TEST_RTOL)
assert np.min(ec532aer.altitude) == pytest.approx(331.29290771484375, rel=TEST_RTOL)
assert np.max(ec532aer.altitude) == pytest.approx(7862.52490234375, rel=TEST_RTOL)
@pytest.mark.parametrize(
"vars_to_retrieve,error",
[
("invalidvar", "invalidvar is not supported"),
("od550aer", "od550aer is not supported"),
],
)
def test_ReadEarlinet_read_file_error(vars_to_retrieve: str, error: str):
read = ReadEarlinet()
read.files = paths = TEST_FILES
with pytest.raises(ValueError) as e:
read.read_file(paths[0], vars_to_retrieve)
assert str(e.value) == error
def test_ReadEarlinet_read():
read = ReadEarlinet()
read.files = TEST_FILES
data = read.read(vars_to_retrieve="ec532aer")
assert len(data.metadata) == 5
assert data.shape == (786, 12)
assert np.nanmin(data._data[:, data._DATAINDEX]) == pytest.approx(-0.440742, rel=TEST_RTOL)
assert np.nanmean(data._data[:, data._DATAINDEX]) == pytest.approx(24.793547, rel=TEST_RTOL)
assert np.nanmax(data._data[:, data._DATAINDEX]) == pytest.approx(167.90787, rel=TEST_RTOL)
merged = data.to_station_data("Evora", freq="monthly")
assert np.nanmin(merged.ec532aer) == pytest.approx(0.220322, rel=TEST_RTOL)
assert np.nanmean(merged.ec532aer) == pytest.approx(23.093238, rel=TEST_RTOL)
assert np.nanmax(merged.ec532aer) == pytest.approx(111.478665, rel=TEST_RTOL)
@pytest.mark.parametrize(
"vars_to_retrieve,pattern,num",
[
(None, None, 5),
(["ec355aer"], None, 1),
(["zdust"], None, 6),
(["bsc355aer"], None, 0),
(["bsc532aer"], None, 0),
(None, "*ev*", 5),
(None, "*xy*", 0),
],
)
def test_ReadEarlinet_get_file_list(
vars_to_retrieve: list[str] | None, pattern: str | None, num: int
):
reader = ReadEarlinet("Earlinet-test")
files = reader.get_file_list(vars_to_retrieve, pattern)
assert len(files) == num
def METHOD_NAME():
reader = ReadEarlinet("Earlinet-test")
with pytest.raises(NotImplementedError) as e:
reader.get_file_list(pattern="*e.v*")
assert str(e.value) == "filetype delimiter . not supported"
def test_ReadEarlinet__get_exclude_filelist():
reader = ReadEarlinet("Earlinet-test")
reader.EXCLUDE_CASES.append("onefile.txt")
files = reader.get_file_list(reader.PROVIDES_VARIABLES)
assert len(files) == 5
|