id (int64, 0 to 300k) | label (stringlengths 1 to 74, ⌀) | text (stringlengths 4k to 8k) |
---|---|---|
4,400 |
get query by info id
|
# Copyright (C) 2010-2015 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
# ToDo upgrade
# Deprecation warnings in 7.15.0 pre-releases
# https://github.com/elastic/elasticsearch-py/issues/1698
import datetime
import logging
from typing import Iterable, List
from lib.cuckoo.common.config import Config
repconf = Config("reporting")
if repconf.elasticsearchdb.enabled:
from elasticsearch import Elasticsearch
elastic_handler = Elasticsearch(
hosts=[repconf.elasticsearchdb.host],
port=repconf.elasticsearchdb.get("port", 9200),
http_auth=(repconf.elasticsearchdb.get("username"), repconf.elasticsearchdb.get("password")),
use_ssl=repconf.elasticsearchdb.get("use_ssl", False),
verify_certs=repconf.elasticsearchdb.get("verify_certs", False),
timeout=120,
)
ANALYSIS_INDEX_PREFIX = f"{repconf.elasticsearchdb.index}-analysis-"
CALLS_INDEX_PREFIX = f"{repconf.elasticsearchdb.index}-calls-"
SCROLL_SIZE = 5000
SCROLL_TIME = "5m"
log = logging.getLogger(__name__)
ANALYSIS_INDEX_MAPPING_SETTINGS = {
"mappings": {
"properties": {
"info": {
"properties": {
"started": {"type": "date"},
"machine": {"properties": {"started_on": {"type": "date"}, "shutdown_on": {"type": "date"}}},
}
},
"network": {"properties": {"dead_hosts": {"type": "keyword"}}},
"target": {"properties": {"file": {"properties": {"tlsh": {"type": "keyword"}}}}},
"dropped": {"properties": {"tlsh": {"type": "keyword"}}},
"CAPE": {"properties": {"payloads": {"properties": {"tlsh": {"type": "keyword"}}}}},
}
},
"settings": {
"index.blocks.read_only_allow_delete": "false",
"index.priority": "1",
"index.query.default_field": ["*"],
"index.refresh_interval": "1s",
"index.write.wait_for_active_shards": "1",
"index.routing.allocation.include._tier_preference": "data_content",
"index.number_of_replicas": "1",
"index.mapping.total_fields.limit": 20000,
"index.mapping.depth.limit": 1000,
},
}
def get_daily_analysis_index() -> str:
return f"{ANALYSIS_INDEX_PREFIX}{datetime.datetime.now().strftime('%Y.%m.%d')}"
def daily_analysis_index_exists() -> bool:
return elastic_handler.indices.exists(index=get_daily_analysis_index())
def get_daily_calls_index() -> str:
return f"{CALLS_INDEX_PREFIX}{datetime.datetime.now().strftime('%Y.%m.%d')}"
def daily_calls_index_exists() -> bool:
return elastic_handler.indices.exists(index=get_daily_calls_index())
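# Hedged example (not in the original module): a sketch of how the daily analysis index
# could be created with ANALYSIS_INDEX_MAPPING_SETTINGS if it does not exist yet. The
# elasticsearch-py 7.x call `indices.create(index=..., body=...)` is assumed here.
def ensure_daily_analysis_index() -> str:
    index = get_daily_analysis_index()
    if not daily_analysis_index_exists():
        elastic_handler.indices.create(index=index, body=ANALYSIS_INDEX_MAPPING_SETTINGS)
    return index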
def METHOD_NAME(task_id: str) -> dict:
return {"match": {"info.id": task_id}}
def get_analysis_index() -> str:
return f"{ANALYSIS_INDEX_PREFIX}*"
def get_calls_index():
return f"{CALLS_INDEX_PREFIX}*"
def delete_analysis_and_related_calls(task_id: str):
analyses = elastic_handler.search(index=get_analysis_index(), query=METHOD_NAME(task_id))["hits"]["hits"]
if analyses:
log.debug("Deleting analysis data for Task %s" % task_id)
for analysis in analyses:
analysis = analysis["_source"]
for process in analysis["behavior"].get("processes", []):
for call in process["calls"]:
elastic_handler.delete_by_query(index=get_calls_index(), body={"query": {"match": {"_id": call}}})
elastic_handler.delete_by_query(index=get_analysis_index(), body={"query": METHOD_NAME(task_id)})
log.debug("Deleted previous ElasticsearchDB data for Task %s" % task_id)
def scroll(scroll_id: str) -> dict:
return elastic_handler.scroll(scroll_id=scroll_id, scroll=SCROLL_TIME)
def scroll_docs(index: str, query: dict, timeout: int = 600, _source: Iterable[str] = ()) -> dict:
return elastic_handler.search(
index=index, body=query, scroll=SCROLL_TIME, size=SCROLL_SIZE, request_timeout=timeout, _source=_source
)
def all_docs(index: str, query: dict, _source: Iterable[str] = ()) -> List[dict]:
# Scroll documents
result_scroll = scroll_docs(index=index, query=query, _source=_source)
if "_scroll_id" not in result_scroll:
return []
hits = []
while len(result_scroll["hits"]["hits"]) > 0:
# Process the current batch of hits, then fetch the next scroll page
hits.extend(result_scroll["hits"]["hits"])
result_scroll = scroll(result_scroll["_scroll_id"])
elastic_handler.clear_scroll(scroll_id=result_scroll["_scroll_id"])
return hits
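# Usage sketch (illustrative, not part of the original module): pull every analysis
# document stored for a task id across all daily analysis indices, using the helpers above.
def fetch_analyses_for_task(task_id: str) -> List[dict]:
    # scroll_docs() expects a full request body, so wrap the match clause in "query"
    return all_docs(index=get_analysis_index(), query={"query": METHOD_NAME(task_id)})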
|
4,401 |
test invalid path
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from twisted.internet import defer
from twisted.trial import unittest
from buildbot.schedulers.forcesched import ForceScheduler
from buildbot.test import fakedb
from buildbot.test.reactor import TestReactorMixin
from buildbot.test.util import www
from buildbot.www.authz import endpointmatchers
class EndpointBase(TestReactorMixin, www.WwwTestMixin, unittest.TestCase):
def setUp(self):
self.setup_test_reactor()
self.master = self.make_master(url='h:/a/b/')
self.db = self.master.db
self.matcher = self.makeMatcher()
self.matcher.setAuthz(self.master.authz)
self.insertData()
def makeMatcher(self):
raise NotImplementedError()
def assertMatch(self, match):
self.assertTrue(match is not None)
def assertNotMatch(self, match):
self.assertTrue(match is None)
def insertData(self):
self.db.insert_test_data([
fakedb.SourceStamp(id=13, branch='secret'),
fakedb.Build(
id=15, buildrequestid=16, masterid=1, workerid=2, builderid=21),
fakedb.BuildRequest(id=16, buildsetid=17),
fakedb.Buildset(id=17),
fakedb.BuildsetSourceStamp(id=20, buildsetid=17, sourcestampid=13),
fakedb.Builder(id=21, name="builder"),
])
class ValidEndpointMixin:
@defer.inlineCallbacks
def METHOD_NAME(self):
ret = yield self.matcher.match(("foo", "bar"))
self.assertNotMatch(ret)
class AnyEndpointMatcher(EndpointBase):
def makeMatcher(self):
return endpointmatchers.AnyEndpointMatcher(role="foo")
@defer.inlineCallbacks
def test_nominal(self):
ret = yield self.matcher.match(("foo", "bar"))
self.assertMatch(ret)
class AnyControlEndpointMatcher(EndpointBase):
def makeMatcher(self):
return endpointmatchers.AnyControlEndpointMatcher(role="foo")
@defer.inlineCallbacks
def test_default_action(self):
ret = yield self.matcher.match(("foo", "bar"))
self.assertMatch(ret)
@defer.inlineCallbacks
def test_get(self):
ret = yield self.matcher.match(("foo", "bar"), action="GET")
self.assertNotMatch(ret)
@defer.inlineCallbacks
def test_other_action(self):
ret = yield self.matcher.match(("foo", "bar"), action="foo")
self.assertMatch(ret)
class ViewBuildsEndpointMatcherBranch(EndpointBase, ValidEndpointMixin):
def makeMatcher(self):
return endpointmatchers.ViewBuildsEndpointMatcher(branch="secret", role="agent")
@defer.inlineCallbacks
def test_build(self):
ret = yield self.matcher.match(("builds", "15"))
self.assertMatch(ret)
test_build.skip = "ViewBuildsEndpointMatcher is not implemented yet"
class StopBuildEndpointMatcherBranch(EndpointBase, ValidEndpointMixin):
def makeMatcher(self):
return endpointmatchers.StopBuildEndpointMatcher(builder="builder", role="owner")
@defer.inlineCallbacks
def test_build(self):
ret = yield self.matcher.match(("builds", "15"), "stop")
self.assertMatch(ret)
@defer.inlineCallbacks
def test_build_no_match(self):
self.matcher.builder = "foo"
ret = yield self.matcher.match(("builds", "15"), "stop")
self.assertNotMatch(ret)
@defer.inlineCallbacks
def test_build_no_builder(self):
self.matcher.builder = None
ret = yield self.matcher.match(("builds", "15"), "stop")
self.assertMatch(ret)
class ForceBuildEndpointMatcherBranch(EndpointBase, ValidEndpointMixin):
def makeMatcher(self):
return endpointmatchers.ForceBuildEndpointMatcher(builder="builder", role="owner")
def insertData(self):
super().insertData()
self.master.allSchedulers = lambda: [
ForceScheduler(name="sched1", builderNames=["builder"])]
@defer.inlineCallbacks
def test_build(self):
ret = yield self.matcher.match(("builds", "15"), "stop")
self.assertNotMatch(ret)
@defer.inlineCallbacks
def test_forcesched(self):
ret = yield self.matcher.match(("forceschedulers", "sched1"), "force")
self.assertMatch(ret)
@defer.inlineCallbacks
def test_noforcesched(self):
ret = yield self.matcher.match(("forceschedulers", "sched2"), "force")
self.assertNotMatch(ret)
@defer.inlineCallbacks
def test_forcesched_builder_no_match(self):
self.matcher.builder = "foo"
ret = yield self.matcher.match(("forceschedulers", "sched1"), "force")
self.assertNotMatch(ret)
@defer.inlineCallbacks
def test_forcesched_nobuilder(self):
self.matcher.builder = None
ret = yield self.matcher.match(("forceschedulers", "sched1"), "force")
self.assertMatch(ret)
class EnableSchedulerEndpointMatcher(EndpointBase, ValidEndpointMixin):
def makeMatcher(self):
return endpointmatchers.EnableSchedulerEndpointMatcher(role="agent")
@defer.inlineCallbacks
def test_build(self):
ret = yield self.matcher.match(("builds", "15"), "stop")
self.assertNotMatch(ret)
@defer.inlineCallbacks
def test_scheduler_enable(self):
ret = yield self.matcher.match(("schedulers", "15"), "enable")
self.assertMatch(ret)
|
4,402 |
tz from env
|
from __future__ import with_statement
import os
import re
import sys
import pytz
import subprocess
_systemconfig_tz = re.compile(r'^Time Zone: (.*)$(?m)')
def METHOD_NAME(tzenv):
if tzenv[0] == ':':
tzenv = tzenv[1:]
# TZ specifies a file
if os.path.exists(tzenv):
with open(tzenv, 'rb') as tzfile:
return pytz.tzfile.build_tzinfo('local', tzfile)
# TZ specifies a zoneinfo zone.
try:
tz = pytz.timezone(tzenv)
# That worked, so we return this:
return tz
except pytz.UnknownTimeZoneError:
raise pytz.UnknownTimeZoneError(
"tzlocal() does not support non-zoneinfo timezones like %s. \n"
"Please use a timezone in the form of Continent/City" % tzenv)
def _get_localzone(_root='/'):
"""Tries to find the local timezone configuration.
This method prefers finding the timezone name and passing that to pytz,
over passing in the localtime file, as in the later case the zoneinfo
name is unknown.
The parameter _root makes the function look for files like /etc/localtime
beneath the _root directory. This is primarily used by the tests.
In normal usage you call the function without parameters.
"""
tzenv = os.environ.get('TZ')
if tzenv:
return METHOD_NAME(tzenv)
# This is actually a pretty reliable way to test for the local time
# zone on operating systems like OS X. On OS X especially this is the
# only one that actually works.
try:
link_dst = os.readlink('/etc/localtime')
except OSError:
pass
else:
pos = link_dst.find('/zoneinfo/')
if pos >= 0:
zone_name = link_dst[pos + 10:]
try:
return pytz.timezone(zone_name)
except pytz.UnknownTimeZoneError:
pass
# If we are on OS X now we are pretty sure that the rest of the
# code will fail and just fall through until it hits the reading
# of /etc/localtime and using it without name. At this point we
# can invoke systemconfig which internally invokes ICU. ICU itself
# does the same thing we do (readlink + compare file contents) but
# since it knows where the zone files are that should be a bit
# better than reimplementing the logic here.
if sys.platform == 'darwin':
c = subprocess.Popen(['systemsetup', '-gettimezone'],
stdout=subprocess.PIPE)
sys_result = c.communicate()[0]
c.wait()
tz_match = _systemconfig_tz.search(sys_result)
if tz_match is not None:
zone_name = tz_match.group(1)
try:
return pytz.timezone(zone_name)
except pytz.UnknownTimeZoneError:
pass
# Now look for distribution specific configuration files
# that contain the timezone name.
tzpath = os.path.join(_root, 'etc/timezone')
if os.path.exists(tzpath):
with open(tzpath, 'rb') as tzfile:
data = tzfile.read()
# Issue #3 in tzlocal was that /etc/timezone was a zoneinfo file.
# That's a misconfiguration, but we need to handle it gracefully:
if data[:5] != 'TZif2':
etctz = data.strip().decode()
# Get rid of host definitions and comments:
if ' ' in etctz:
etctz, dummy = etctz.split(' ', 1)
if '#' in etctz:
etctz, dummy = etctz.split('#', 1)
return pytz.timezone(etctz.replace(' ', '_'))
# CentOS has a ZONE setting in /etc/sysconfig/clock,
# OpenSUSE has a TIMEZONE setting in /etc/sysconfig/clock and
# Gentoo has a TIMEZONE setting in /etc/conf.d/clock
# We look through these files for a timezone:
zone_re = re.compile('\s*ZONE\s*=\s*\"')
timezone_re = re.compile('\s*TIMEZONE\s*=\s*\"')
end_re = re.compile('\"')
for filename in ('etc/sysconfig/clock', 'etc/conf.d/clock'):
tzpath = os.path.join(_root, filename)
if not os.path.exists(tzpath):
continue
with open(tzpath, 'rt') as tzfile:
data = tzfile.readlines()
for line in data:
# Look for the ZONE= setting.
match = zone_re.match(line)
if match is None:
# No ZONE= setting. Look for the TIMEZONE= setting.
match = timezone_re.match(line)
if match is not None:
# Some setting existed
line = line[match.end():]
etctz = line[:end_re.search(line).start()]
# We found a timezone
return pytz.timezone(etctz.replace(' ', '_'))
# No explicit setting existed. Use localtime
for filename in ('etc/localtime', 'usr/local/etc/localtime'):
tzpath = os.path.join(_root, filename)
if not os.path.exists(tzpath):
continue
with open(tzpath, 'rb') as tzfile:
return pytz.tzfile.build_tzinfo('local', tzfile)
raise pytz.UnknownTimeZoneError('Can not find any timezone configuration')
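# Illustrative usage (not part of the original module): the _root parameter lets tests
# point at a fake filesystem layout instead of the real /etc.
if __name__ == '__main__':
    print(_get_localzone())  # resolve from TZ, /etc/localtime, /etc/timezone, ...
    # print(_get_localzone(_root='/tmp/fake_root'))  # hypothetical test root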
|
4,403 |
test close expiry incidents without expiring tokens
|
from datetime import datetime, timedelta
from io import StringIO
import pytz
from django.conf import settings
from django.core.management import call_command
from django.test import TestCase
from rest_framework.authtoken.models import Token
from argus.dev.management.commands.check_token_expiry import (
close_expiry_incidents_without_expiring_tokens,
find_expiring_tokens,
get_tokens_without_expiry_incident,
)
from argus.incident.factories import SourceSystemFactory
from argus.incident.models import Incident, IncidentTagRelation, Tag, create_token_expiry_incident
from argus.util.testing import connect_signals, disconnect_signals
class CheckTokenExpiryTests(TestCase):
def setUp(self):
disconnect_signals()
self.source_system1 = SourceSystemFactory()
self.source_system2 = SourceSystemFactory()
self.user1 = self.source_system1.user
self.user2 = self.source_system2.user
self.expiring_token = Token.objects.create(user=self.user1)
self.expiring_token.created = self.expiring_token.created - timedelta(days=100)
self.expiring_token.save()
self.expiring_token_expiration_date = self.expiring_token.created + timedelta(
days=settings.AUTH_TOKEN_EXPIRES_AFTER_DAYS
)
self.expiry_incident = create_token_expiry_incident(self.expiring_token, self.expiring_token_expiration_date)
self.current_token = Token.objects.create(user=self.user2)
self.current_token_expiration_date = self.current_token.created + timedelta(
days=settings.AUTH_TOKEN_EXPIRES_AFTER_DAYS
)
def tearDown(self):
connect_signals()
def call_command(self, *args, **kwargs):
out = StringIO()
call_command(
"check_token_expiry",
*args,
stdout=out,
stderr=StringIO(),
**kwargs,
)
return out.getvalue()
def test_find_token_expiry_finds_only_expiring_token(self):
self.assertEqual(
find_expiring_tokens(5),
[
(
self.expiring_token,
self.expiring_token_expiration_date,
)
],
)
def test_create_token_expiry_incident_creates_incident(self):
self.assertTrue(create_token_expiry_incident(self.current_token, self.current_token_expiration_date))
def test_create_token_expiry_incident_raises_error_if_no_given_token(self):
with self.assertRaises(ValueError):
create_token_expiry_incident(None, None)
def test_get_tokens_without_expiry_incident_returns_no_tokens_if_all_have_expiry_incident(self):
self.assertEqual(
get_tokens_without_expiry_incident([(self.expiring_token, self.expiring_token_expiration_date)]), []
)
def test_get_tokens_without_expiry_incident_returns_token_with_closed_expiry_incident(self):
current_token_expiry_incident = create_token_expiry_incident(
self.current_token, self.current_token_expiration_date
)
current_token_expiry_incident.set_closed(actor=self.user1)
self.assertEqual(
get_tokens_without_expiry_incident([(self.current_token, self.current_token_expiration_date)]),
[(self.current_token, self.current_token_expiration_date)],
)
def test_get_tokens_without_expiry_incident_returns_all_tokens_if_no_open_expiry_incident(self):
self.expiry_incident.set_closed(actor=self.user1)
self.assertEqual(
get_tokens_without_expiry_incident(
[
(self.expiring_token, self.expiring_token_expiration_date),
(self.current_token, self.current_token_expiration_date),
]
),
[
(self.expiring_token, self.expiring_token_expiration_date),
(self.current_token, self.current_token_expiration_date),
],
)
def test_get_tokens_without_expiry_incident_ignores_other_tokens_expiry_incidents(self):
self.assertTrue(get_tokens_without_expiry_incident([(self.current_token, self.current_token_expiration_date)]))
def test_close_expiry_incidents_without_expiring_tokens_does_not_close_connected_incidents(self):
close_expiry_incidents_without_expiring_tokens([(self.expiring_token, self.expiring_token_expiration_date)])
self.assertTrue(Incident.objects.get(pk=self.expiry_incident.pk).open)
def METHOD_NAME(self):
close_expiry_incidents_without_expiring_tokens([])
self.assertFalse(Incident.objects.get(pk=self.expiry_incident.pk).open)
def test_expiry_incident_is_closed_when_token_deleted(self):
self.expiring_token.delete()
self.assertFalse(Incident.objects.get(pk=self.expiry_incident.pk).open)
def test_expiry_incident_is_closed_when_token_updated(self):
self.expiring_token.created = datetime.now(pytz.utc)
self.expiring_token.save()
self.assertFalse(Incident.objects.get(pk=self.expiry_incident.pk).open)
def test_check_token_expiry_can_handle_days_input(self):
days = settings.AUTH_TOKEN_EXPIRES_AFTER_DAYS + 1
out = self.call_command(f"--days={days}")
self.assertFalse(out)
token_expiry_tag = Tag.objects.get(key="problem_type", value="token_expiry")
source_system_id_tag = Tag.objects.get(key="source_system_id", value=self.current_token.user.source_system.id)
self.assertTrue(token_expiry_tag)
self.assertTrue(source_system_id_tag)
self.assertTrue(
Incident.objects.filter(incident_tag_relations__tag=token_expiry_tag)
.filter(incident_tag_relations__tag=source_system_id_tag)
.exists()
)
|
4,404 |
get value
|
# Project: MXCuBE
# https://github.com/mxcube.
#
# This file is part of MXCuBE software.
#
# MXCuBE is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MXCuBE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with MXCuBE. If not, see <http://www.gnu.org/licenses/>.
"""
Read the state of the hutch from the PSS device server and take actions
when the hutch is entered (1) or interlocked (0).
0 = The hutch has been interlocked and the sample environment should be made
ready for data collection. The actions are extract the detector cover,
move the detector to its previous position, set the MD2 to Centring.
1 = The interlock is cleared and the user is entering the hutch to change
the sample(s). The actions are insert the detector cover, move the
detector to a safe position, set MD2 to sample Transfer.
Example xml file:
<object class = "ESRF.BlissHutchTrigger">
<username>Hutch Trigger</username>
<pss_tango_device>acs:10000/bl/sa-pss/id30-crate02</pss_tango_device>
<polling_interval>5</polling_interval>
<pss_card_ch>9/4</pss_card_ch>
<object href="/bliss" role="controller"/>
<values>{"ENABLED": 1, "DISABLED": 0}</values>
</object>
"""
import logging
from gevent import sleep, spawn
from PyTango.gevent import DeviceProxy
from PyTango import DevFailed
from mxcubecore.HardwareObjects.abstract.AbstractNState import AbstractNState
__copyright__ = """ Copyright © 2010-2020 by the MXCuBE collaboration """
__license__ = "LGPLv3+"
class BlissHutchTrigger(AbstractNState):
"""Read the state of the hutch from the PSS and take actions."""
def __init__(self, name):
super(BlissHutchTrigger, self).__init__(name)
self._bliss_obj = None
self._proxy = None
self.card = None
self.channel = None
self._pss_value = None
self._nominal_value = None
self._polling_interval = None
self._poll_task = None
def init(self):
"""Initialise properties and polling"""
super(BlissHutchTrigger, self).init()
self._bliss_obj = self.get_object_by_role("controller")
tango_device = self.get_property("pss_tango_device")
try:
self._proxy = DeviceProxy(tango_device)
except DevFailed as _traceback:
last_error = _traceback[-1]
msg = f"{self.name()}: {last_error['desc']}"
raise RuntimeError(msg)
pss = self.get_property("pss_card_ch")
try:
self.card, self.channel = map(int, pss.split("/"))
except AttributeError:
msg = f"{self.name()}: cannot find PSS number"
raise RuntimeError(msg)
# polling interval [s]
self._polling_interval = self.get_property("polling_interval", 5)
self._pss_value = self.get_pss_value()
# enable by default
self.update_value(self.VALUES.ENABLED)
self._poll_task = spawn(self._do_polling)
def _do_polling(self):
"""Do the polling of the PSS system"""
while True:
self._update_value(self.get_pss_value())
sleep(self._polling_interval)
def METHOD_NAME(self):
"""The value corresponds to activate/deactivate the hutch trigger
polling.
Returns:
(ValueEnum): Last set value.
"""
return self._nominal_value
def set_value(self, value, timeout=0):
super(BlissHutchTrigger, self).set_value(value, timeout=timeout)
def _set_value(self, value):
"""Set the hutch trigger enable/disable value
Args:
value (ValueEnum): ENABLED/DISABLED.
"""
self._nominal_value = value
self.emit("valueChanged", (value,))
def get_pss_value(self):
"""Get the interlock value
Returns:
(bool): 0 = Hutch interlocked, 1 = Hutch not interlocked.
"""
_ch1 = self._proxy.GetInterlockState([self.card - 1, 2 * (self.channel - 1)])[0]
_ch2 = self._proxy.GetInterlockState(
[self.card - 1, 2 * (self.channel - 1) + 1]
)[0]
return _ch1 & _ch2
def _update_value(self, value=None):
"""Check if the pss value has changed (door opens/closes).
Args:
value (bool): The value
"""
if self._nominal_value == self.VALUES.ENABLED:
if value is None:
value = self.get_pss_value()
if self._pss_value != value:
self._pss_value = value
# now do the action
self.hutch_actions(1 - value)
def hutch_actions(self, enter, **kwargs):
"""Take action as function of the PSS state
Args:
enter(bool): True if entering hutch (interlock state = 1)
"""
msg = "%s hutch" % ("Entering" if enter else "Leaving")
logging.getLogger("user_level_log").info(msg)
self._bliss_obj.hutch_actions(enter, hutch_trigger=True, **kwargs)
|
4,405 |
wait for task
|
"""
hades_logs
----------
This module provides access to Hades' radius logs utilizing its celery
RPC api.
"""
import typing as t
import logging
from celery.exceptions import TimeoutError as CeleryTimeoutError
from flask import Flask
from flask.globals import current_app
from kombu.exceptions import OperationalError
from werkzeug.local import LocalProxy
from .app import HadesCelery
from .exc import HadesConfigError, HadesOperationalError, HadesTimeout
from .parsing import RadiusLogEntry, reduce_radius_logs
_CONFIGURATION_DOCS = """\
This Flask application utilizes the `HadesLogs` extension, \
which needs certain config variables.
A minimal example configuration would look like this:
> app.config['HADES_CELERY_APP_NAME'] = 'hades'
> app.config['HADES_BROKER_URI'] = 'amqp://user:password@rabbitmq_host:5672/vhost'
> app.config['HADES_RESULT_BACKEND_URI'] = 'rpc://user:password@rabbitmq_host:5672/vhost'\
"""
class HadesLogs:
"""The ``HadesLogs`` Flask extension
This extension provides access to the Hades RPC. The core
functionality is provided by :py:meth:`fetch_logs`.
You need to provide the following configuration to
:py:obj:`app.config`:
- 'HADES_CELERY_APP_NAME': The Name of the celery app
- 'HADES_BROKER_URI': The broker URI
- 'HADES_RESULT_BACKEND_URI': The URI of the Result backend
- 'HADES_TIMEOUT' (Optional, default=5): The Timeout to wait
with each task in seconds.
- 'HADES_ROUTING_KEY' (Optional, default=None): The routing
key to use for the celery messages
Usage:
>>> from flask import Flask
>>> from hades_logs import HadesLogs
>>> app = Flask('test')
>>> logs = HadesLogs(app)
>>> logs.fetch_logs(<nasip>, <portid>)
"""
def __init__(self, app: Flask | None = None) -> None:
self.app = app
self.logger = logging.getLogger('hades_logs')
if app is not None:
self.init_app(app)
def init_app(self, app: Flask) -> None:
try:
app_name = app.config['HADES_CELERY_APP_NAME']
broker_uri = app.config['HADES_BROKER_URI']
backend_uri = app.config['HADES_RESULT_BACKEND_URI']
routing_key = app.config.get('HADES_ROUTING_KEY', 'masters.root.agdsn')
except KeyError as e:
self.logger.warning("Missing config key: %s\n%s", e, _CONFIGURATION_DOCS)
raise KeyError(f"Missing config key: {e}") from e
self.timeout = app.config.get('HADES_TIMEOUT', 5)
task_default_exchange = app.config.get(
"HADES_TASK_DEFAULT_EXCHANGE", "hades.rpc-call"
)
result_exchange = app.config.get("HADES_RESULT_EXCHANGE", "hades.rpc-result")
self.celery = HadesCelery(
app_name,
broker=broker_uri,
backend=backend_uri,
task_default_exchange=task_default_exchange,
result_exchange=result_exchange,
routing_key=routing_key,
)
# Gets run only on success
self.logger.info("Initialization complete, registering 'hades_logs' extension")
app.extensions['hades_logs'] = self
def create_task(self, name, *args, **kwargs):
"""Create a Celery task object by name, args and kwargs
``*args`` and ``**kwargs`` are passed to the corresponding
parameters of :py:func:`Celery.signature(name, args, kwargs)`
:param name: The name of the task without the celery app name.
Assembling is done using :py:attr:`self.celery.main`.
:returns: the signature of the task
:rtype: :py:obj:`celery.Signature`
"""
full_task_name = f'{self.celery.main}.{name}'
return self.celery.signature(full_task_name, args=args, kwargs=kwargs)
def fetch_logs(
self, nasipaddress: str, nasportid: str, limit: int = 100, reduced: bool = True
) -> t.Iterator[RadiusLogEntry]:
"""Fetch the auth logs of the given port
:param ipaddr nasipaddress: The IP address of the NAS
:param str nasportid: The port identifier (e.g. `C12`) of the
NAS port
:returns: the result of the task (see
``get_port_auth_attempts`` in hades)
:rtype: iterable (generator if :param:`reduced`)
:raises HadesTimeout: raised when no response arrives in the time window
configured by the `timeout` parameter.
"""
if reduced:
reductor = reduce_radius_logs
else:
def reductor(x):
return x
task = self.create_task(name='get_auth_attempts_at_port',
nas_ip_address=nasipaddress, nas_port_id=nasportid,
limit=limit)
return reductor(RadiusLogEntry(*e) for e in self.METHOD_NAME(task))
def METHOD_NAME(self, task):
self.logger.info("Waiting for task: %s", task)
try:
return task.apply_async().wait(timeout=self.timeout)
except CeleryTimeoutError as e:
raise HadesTimeout("The Hades lookup task has timed out") from e
except OSError as e:
if "timeout" in str(e).lower():
# TODO this is mainly to make this error visible once it occurs (sentry).
# Since timeouts should actually be handled by the branch above,
# I'm not quite sure in what cases an `OSError` would be thrown!
self.logger.error("Hades task timed out with OSError", exc_info=True)
raise HadesTimeout("The Hades lookup task has timed out (from OSError)") from e
else:
raise
except OperationalError as e:
raise HadesOperationalError("OSError when fetching hades logs") from e
def _get_extension():
try:
return current_app.extensions['hades_logs']
except KeyError:
raise HadesConfigError(
"No HadesLogs instance registered to current Flask app"
) from None
hades_logs: HadesLogs = LocalProxy(_get_extension)
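# Usage sketch (illustrative, not part of the original module): wiring the extension into
# a Flask app factory. The URIs below are placeholders taken from _CONFIGURATION_DOCS.
def create_app() -> Flask:
    app = Flask(__name__)
    app.config['HADES_CELERY_APP_NAME'] = 'hades'
    app.config['HADES_BROKER_URI'] = 'amqp://user:password@rabbitmq_host:5672/vhost'
    app.config['HADES_RESULT_BACKEND_URI'] = 'rpc://user:password@rabbitmq_host:5672/vhost'
    HadesLogs(app)
    return app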
|
4,406 |
test parser functions raises unknown arg hook
|
import os
import pytest
from tackle import tackle
from tackle.cli import main
from tackle import exceptions
EXCEPTION_FIXTURES = [
# Check that return string not found caught
('return-str-not-found.yaml', exceptions.FunctionCallException),
# Check that type checking works with no exec method.
('no-exec-type-error.yaml', exceptions.HookParseException),
# Check args are required when they are not supplied.
('field-require.yaml', exceptions.HookParseException),
# Check that type is one of literals.
('field-bad-type.yaml', exceptions.MalformedFunctionFieldException),
# Check when extends is used with a missing base.
('extends-missing.yaml', exceptions.MalformedFunctionFieldException),
# Check when extends is a dict an error is thrown
('extends-dict.yaml', exceptions.MalformedFunctionFieldException),
]
@pytest.mark.parametrize("fixture,exception", EXCEPTION_FIXTURES)
def test_function_raises_exceptions(chdir, fixture, exception):
chdir('exceptions')
with pytest.raises(exception):
tackle(fixture)
def test_function_method_base_validate(chdir):
"""Check that when a method uses a base attribute, that validation still happens."""
chdir('method-fixtures')
with pytest.raises(Exception) as e:
tackle('method-base-validate.yaml')
assert 'string does not match regex' in e.value.message
FIELD_TYPE_EXCEPTION_FIXTURES = [
('str', 'str'),
('dict', 'str'),
('list', 'str'),
('str', 'type'),
('dict', 'type'),
('list', 'type'),
('str', 'default'),
('dict', 'default'),
('list', 'default'),
]
@pytest.mark.parametrize("type_,field_input", FIELD_TYPE_EXCEPTION_FIXTURES)
def test_function_raises_exceptions_field_types(chdir, type_, field_input):
"""Check that a validation error is returned for each type of field definition."""
chdir(os.path.join('fixtures', 'field-type-exceptions'))
with pytest.raises(exceptions.HookParseException):
tackle(f'field-types-{type_}-error-{field_input}.yaml')
def test_parser_functions_exceptions_try_in_default(chdir):
"""
When we have an error in a field's default like hook failure with `try`, we should
catch that.
"""
chdir('exceptions')
with pytest.raises(exceptions.FunctionCallException):
tackle('try-in-default.yaml')
def test_parser_functions_raises_unknown_arg(chdir):
chdir('cli-fixtures')
with pytest.raises(exceptions.UnknownInputArgumentException):
tackle("cli-default-hook-no-context.yaml", 'NOT_HERE')
def test_parser_functions_raises_unknown_kwarg(chdir):
chdir('cli-fixtures')
with pytest.raises(exceptions.UnknownInputArgumentException):
tackle("cli-default-hook-no-context.yaml", NOT_HERE='foo')
def test_parser_functions_raises_unknown_flags(chdir):
chdir('cli-fixtures')
with pytest.raises(exceptions.UnknownInputArgumentException):
tackle("cli-default-hook-no-context.yaml", global_flags=['NOT_HERE'])
def METHOD_NAME(chdir):
chdir('cli-fixtures')
with pytest.raises(exceptions.UnknownInputArgumentException):
tackle("cli-hook-no-context.yaml", 'run', 'NOT_HERE')
def test_parser_functions_raises_unknown_kwarg_hook(chdir):
chdir('cli-fixtures')
with pytest.raises(exceptions.UnknownInputArgumentException):
tackle("cli-hook-no-context.yaml", 'run', NOT_HERE='foo')
def test_parser_functions_raises_unknown_flags_hook(chdir):
chdir('cli-fixtures')
with pytest.raises(exceptions.UnknownInputArgumentException):
tackle("cli-hook-no-context.yaml", 'run', global_flags=['NOT_HERE'])
def test_parser_functions_raises_hook_kwarg_missing(chdir):
chdir('exceptions')
with pytest.raises(exceptions.UnknownInputArgumentException):
tackle("hook-kwarg-missing.yaml", 'foo', baz='bang')
def test_parser_functions_raises_hook_kwarg_missing_default(chdir):
chdir('exceptions')
with pytest.raises(exceptions.UnknownInputArgumentException):
tackle("hook-kwarg-missing-default.yaml", baz='bang')
def test_parser_functions_raises_validation_missing_field(chdir):
chdir('exceptions')
with pytest.raises(exceptions.MalformedFunctionFieldException):
main(["missing-field.yaml", "stuff"])
def test_parser_functions_raises_(chdir):
chdir('exceptions')
with pytest.raises(exceptions.MalformedFunctionFieldException):
main(["str-value.yaml", "stuff"])
|
4,407 |
plugin shutdown
|
# FLEDGE_BEGIN
# See: http://fledge-iot.readthedocs.io/
# FLEDGE_END
""" Async Plugin used for testing purpose """
import asyncio
import copy
import uuid
import logging
import async_ingest
from fledge.common import logger
from fledge.services.south import exceptions
from threading import Thread
from datetime import datetime, timezone, timedelta
__author__ = "Vaibhav Singhal"
__copyright__ = "Copyright (c) 2019 Dianomic Systems"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
c_callback = None
c_ingest_ref = None
loop = None
_task = None
t = None
_DEFAULT_CONFIG = {
'plugin': {
'description': 'Test Async Plugin',
'type': 'string',
'default': 'dummyplugin',
'readonly': 'true'
},
'assetPrefix': {
'description': 'Prefix of asset name',
'type': 'string',
'default': 'test-',
'order': '1',
'displayName': 'Asset Name Prefix'
},
'loudnessAssetName': {
'description': 'Loudness sensor asset name',
'type': 'string',
'default': 'loudness',
'order': '3',
'displayName': 'Loudness Sensor Asset Name'
}
}
_LOGGER = logger.setup(__name__, level=logging.INFO)
def plugin_info():
""" Returns information about the plugin.
Args:
Returns:
dict: plugin information
Raises:
"""
return {
'name': 'TEST Async Plugin',
'version': '2.0.0',
'mode': 'async',
'type': 'south',
'interface': '1.0',
'config': _DEFAULT_CONFIG
}
def plugin_init(config):
""" Initialise the plugin.
Args:
config: JSON configuration document for the South plugin configuration category
Returns:
data: JSON object to be used in future calls to the plugin
Raises:
"""
handle = copy.deepcopy(config)
return handle
def plugin_start(handle):
""" Extracts data from the sensor and returns it in a JSON document as a Python dict.
Available for async mode only.
Args:
handle: handle returned by the plugin initialisation call
Returns:
returns a sensor reading in a JSON document, as a Python dict, if it is available
None - If no reading is available
Raises:
TimeoutError
"""
global _task, loop, t
loop = asyncio.new_event_loop()
_task = asyncio.ensure_future(_start_aiotest(handle), loop=loop)
def run():
global loop
loop.run_forever()
t = Thread(target=run)
t.start()
async def _start_aiotest(handle):
# This plugin adds 6 data points 2 within same min, 2 within same hour and 2 within same day
# this data is useful when testing asset browsing based on timestamps
ts_lst = list()
ts_lst.append(str(datetime.now(timezone.utc).astimezone()))
ts_lst.append(str(datetime.now(timezone.utc).astimezone() - timedelta(seconds=3)))
ts_lst.append(str(datetime.now(timezone.utc).astimezone() - timedelta(minutes=5)))
ts_lst.append(str(datetime.now(timezone.utc).astimezone() - timedelta(minutes=6)))
ts_lst.append(str(datetime.now(timezone.utc).astimezone() - timedelta(hours=3)))
ts_lst.append(str(datetime.now(timezone.utc).astimezone() - timedelta(hours=5)))
i = 1
for user_ts in ts_lst:
try:
data = list()
data.append({
'asset': '{}{}'.format(handle['assetPrefix']['value'], handle['loudnessAssetName']['value']),
'timestamp': user_ts,
'key': str(uuid.uuid4()),
'readings': {"loudness": i}
})
async_ingest.ingest_callback(c_callback, c_ingest_ref, data)
await asyncio.sleep(0.1)
except (Exception, RuntimeError) as ex:
_LOGGER.exception("TEST exception: {}".format(str(ex)))
raise exceptions.DataRetrievalError(ex)
else:
i += 1
def plugin_register_ingest(handle, callback, ingest_ref):
"""Required plugin interface component to communicate to South C server
Args:
handle: handle returned by the plugin initialisation call
callback: C opaque object required to be passed back to C->ingest method
ingest_ref: C opaque object required to be passed back to C->ingest method
"""
global c_callback, c_ingest_ref
c_callback = callback
c_ingest_ref = ingest_ref
def plugin_reconfigure(handle, new_config):
""" Reconfigures the plugin
Args:
handle: handle returned by the plugin initialisation call
new_config: JSON object representing the new configuration category for the category
Returns:
new_handle: new handle to be used in the future calls
"""
_LOGGER.info("Old config for TEST plugin {} \n new config {}".format(handle, new_config))
new_handle = copy.deepcopy(new_config)
return new_handle
def METHOD_NAME(handle):
""" Shutdowns the plugin doing required cleanup, to be called prior to the South plugin service being shut down.
Args:
handle: handle returned by the plugin initialisation call
Returns:
plugin shutdown
"""
_LOGGER.info('TEST plugin shut down.')
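# Hedged note (added for illustration): a fuller shutdown would also stop the event loop
# started in plugin_start and join its thread, roughly:
#   if loop is not None:
#       loop.call_soon_threadsafe(loop.stop)
#   if t is not None:
#       t.join()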
|
4,408 |
season
|
""" class for PhotoInfo exposing SearchInfo data such as labels
"""
from ._constants import _PHOTOS_4_VERSION, search_category_factory
__all__ = ["SearchInfo"]
class SearchInfo:
"""Info about search terms such as machine learning labels that Photos knows about a photo"""
def __init__(self, photo, normalized=False):
"""photo: PhotoInfo object
normalized: if True, all properties return normalized (lower case) results"""
if photo._db._db_version <= _PHOTOS_4_VERSION:
raise NotImplementedError(
"search info not implemented for this database version"
)
self._categories = search_category_factory(photo._db._photos_ver)
self._photo = photo
self._normalized = normalized
self.uuid = photo.uuid
try:
# get search info for this UUID
# there might not be any search info data (e.g. if Photo was missing or photoanalysisd not run yet)
self._db_searchinfo = photo._db._db_searchinfo_uuid[self.uuid]
except KeyError:
self._db_searchinfo = None
@property
def labels(self):
"""return list of labels associated with Photo"""
return self._get_text_for_category(self._categories.LABEL)
@property
def place_names(self):
"""returns list of place names"""
return self._get_text_for_category(self._categories.PLACE_NAME)
@property
def streets(self):
"""returns list of street names"""
return self._get_text_for_category(self._categories.STREET)
@property
def neighborhoods(self):
"""returns list of neighborhoods"""
return self._get_text_for_category(self._categories.NEIGHBORHOOD)
@property
def locality_names(self):
"""returns list of other locality names"""
locality = []
for category in self._categories.ALL_LOCALITY:
locality += self._get_text_for_category(category)
return locality
@property
def city(self):
"""returns city/town"""
city = self._get_text_for_category(self._categories.CITY)
return city[0] if city else ""
@property
def state(self):
"""returns state name"""
state = self._get_text_for_category(self._categories.STATE)
return state[0] if state else ""
@property
def state_abbreviation(self):
"""returns state abbreviation"""
abbrev = self._get_text_for_category(self._categories.STATE_ABBREVIATION)
return abbrev[0] if abbrev else ""
@property
def country(self):
"""returns country name"""
country = self._get_text_for_category(self._categories.COUNTRY)
return country[0] if country else ""
@property
def month(self):
"""returns month name"""
month = self._get_text_for_category(self._categories.MONTH)
return month[0] if month else ""
@property
def year(self):
"""returns year"""
year = self._get_text_for_category(self._categories.YEAR)
return year[0] if year else ""
@property
def bodies_of_water(self):
"""returns list of body of water names"""
return self._get_text_for_category(self._categories.BODY_OF_WATER)
@property
def holidays(self):
"""returns list of holiday names"""
return self._get_text_for_category(self._categories.HOLIDAY)
@property
def activities(self):
"""returns list of activity names"""
return self._get_text_for_category(self._categories.ACTIVITY)
@property
def METHOD_NAME(self):
"""returns season name"""
METHOD_NAME = self._get_text_for_category(self._categories.SEASON)
return METHOD_NAME[0] if METHOD_NAME else ""
@property
def venues(self):
"""returns list of venue names"""
return self._get_text_for_category(self._categories.VENUE)
@property
def venue_types(self):
"""returns list of venue types"""
return self._get_text_for_category(self._categories.VENUE_TYPE)
@property
def media_types(self):
"""returns list of media types (photo, video, panorama, etc)"""
types = []
for category in self._categories.MEDIA_TYPES:
types += self._get_text_for_category(category)
return types
@property
def detected_text(self):
"""Returns text detected in the photo (macOS 13+ / Photos 8+ only)"""
if self._photo._db._photos_ver < 8:
return []
return self._get_text_for_category(self._categories.DETECTED_TEXT)
@property
def text_found(self):
"""Returns True if photos has detected text (macOS 13+ / Photos 8+ only)"""
if self._photo._db._photos_ver < 8:
return []
return self._get_text_for_category(self._categories.TEXT_FOUND)
@property
def camera(self):
"""returns camera name (macOS 13+ / Photos 8+ only)"""
if self._photo._db._photos_ver < 8:
return ""
camera = self._get_text_for_category(self._categories.CAMERA)
return camera[0] if camera else ""
@property
def source(self):
"""returns source of the photo (e.g. "Messages", "Safar", etc) (macOS 13+ / Photos 8+ only)"""
if self._photo._db._photos_ver < 8:
return ""
source = self._get_text_for_category(self._categories.SOURCE)
return source[0] if source else ""
@property
def all(self):
"""return all search info properties in a single list"""
all_ = (
self.labels
+ self.place_names
+ self.streets
+ self.neighborhoods
+ self.locality_names
+ self.bodies_of_water
+ self.holidays
+ self.activities
+ self.venues
+ self.venue_types
+ self.media_types
+ self.detected_text
)
if self.city:
all_ += [self.city]
if self.state:
all_ += [self.state]
if self.state_abbreviation:
all_ += [self.state_abbreviation]
if self.country:
all_ += [self.country]
if self.month:
all_ += [self.month]
if self.year:
all_ += [self.year]
if self.METHOD_NAME:
all_ += [self.METHOD_NAME]
if self.camera:
all_ += [self.camera]
return all_
def asdict(self):
"""return dict of search info"""
return {
"labels": self.labels,
"place_names": self.place_names,
"streets": self.streets,
"neighborhoods": self.neighborhoods,
"city": self.city,
"locality_names": self.locality_names,
"state": self.state,
"state_abbreviation": self.state_abbreviation,
"country": self.country,
"bodies_of_water": self.bodies_of_water,
"month": self.month,
"year": self.year,
"holidays": self.holidays,
"activities": self.activities,
"season": self.METHOD_NAME,
"venues": self.venues,
"venue_types": self.venue_types,
"media_types": self.media_types,
"detected_text": self.detected_text,
"camera": self.camera,
"source": self.source,
}
def _get_text_for_category(self, category):
"""return list of text for a specified category ID"""
if self._db_searchinfo:
content = "normalized_string" if self._normalized else "content_string"
return sorted(
[
rec[content]
for rec in self._db_searchinfo
if rec["category"] == category
]
)
else:
return []
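# Usage sketch (illustrative, not part of the original module): how this class is driven
# from a PhotoInfo object; attribute names outside this file are not verified here.
def print_search_info(photo):
    info = SearchInfo(photo, normalized=True)
    print(photo.uuid, info.labels, info.city, info.country)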
|
4,409 |
pop
|
# coding=utf-8
#
# This file is part of Hypothesis, which may be found at
# https://github.com/HypothesisWorks/hypothesis/
#
# Most of this work is copyright (C) 2013-2019 David R. MacIver
# ([email protected]), but it contains contributions by others. See
# CONTRIBUTING.rst for a full list of people who may hold copyright, and
# consult the git log if you need to determine who owns an individual
# contribution.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
#
# END HEADER
"""A module for miscellaneous useful bits and bobs that don't
obviously belong anywhere else. If you spot a better home for
anything that lives here, please move it."""
from __future__ import absolute_import, division, print_function
from hypothesis.internal.compat import (
array_or_list,
hbytes,
int_to_bytes,
integer_types,
)
def replace_all(buffer, replacements):
"""Substitute multiple replacement values into a buffer.
Replacements is a list of (start, end, value) triples.
"""
result = bytearray()
prev = 0
offset = 0
for u, v, r in replacements:
result.extend(buffer[prev:u])
result.extend(r)
prev = v
offset += len(r) - (v - u)
result.extend(buffer[prev:])
assert len(result) == len(buffer) + offset
return hbytes(result)
ARRAY_CODES = ["B", "H", "I", "L", "Q", "O"]
NEXT_ARRAY_CODE = dict(zip(ARRAY_CODES, ARRAY_CODES[1:]))
class IntList(object):
"""Class for storing a list of non-negative integers compactly.
We store them as the smallest size integer array we can get
away with. When we try to add an integer that is too large,
we upgrade the array to the smallest word size needed to store
the new value."""
__slots__ = ("__underlying",)
def __init__(self, values=()):
for code in ARRAY_CODES:
try:
self.__underlying = array_or_list(code, values)
break
except OverflowError:
pass
else: # pragma: no cover
raise AssertionError("Could not create storage for %r" % (values,))
if isinstance(self.__underlying, list):
for v in self.__underlying:
if v < 0 or not isinstance(v, integer_types):
raise ValueError("Could not create IntList for %r" % (values,))
@classmethod
def of_length(cls, n):
return cls(array_or_list("B", [0]) * n)
def count(self, n):
return self.__underlying.count(n)
def __repr__(self):
return "IntList(%r)" % (list(self),)
def __len__(self):
return len(self.__underlying)
def __getitem__(self, i):
if isinstance(i, slice):
return IntList(self.__underlying[i])
return self.__underlying[i]
def __delitem__(self, i):
del self.__underlying[i]
def __iter__(self):
return iter(self.__underlying)
def __eq__(self, other):
if self is other:
return True
if not isinstance(other, IntList):
return NotImplemented
return self.__underlying == other.__underlying
def __ne__(self, other):
if self is other:
return False
if not isinstance(other, IntList):
return NotImplemented
return self.__underlying != other.__underlying
def append(self, n):
i = len(self)
self.__underlying.append(0)
self[i] = n
def __setitem__(self, i, n):
while True:
try:
self.__underlying[i] = n
return
except OverflowError:
assert n > 0
self.__upgrade()
def extend(self, ls):
for n in ls:
self.append(n)
def __upgrade(self):
code = NEXT_ARRAY_CODE[self.__underlying.typecode]
self.__underlying = array_or_list(code, self.__underlying)
def binary_search(lo, hi, f):
"""Binary searches in [lo , hi) to find
n such that f(n) == f(lo) but f(n + 1) != f(lo).
It is implicitly assumed and will not be checked
that f(hi) != f(lo).
"""
reference = f(lo)
while lo + 1 < hi:
mid = (lo + hi) // 2
if f(mid) == reference:
lo = mid
else:
hi = mid
return lo
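# Worked example (added for illustration): binary_search returns the last n in [lo, hi)
# with f(n) == f(lo). With f(n) = (n * n <= 25) on [0, 11) that is n = 5, since f(5) is
# True like f(0) while f(6) is False.
assert binary_search(0, 11, lambda n: n * n <= 25) == 5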
def uniform(random, n):
"""Returns an hbytes of length n, distributed uniformly at random."""
return int_to_bytes(random.getrandbits(n * 8), n)
class LazySequenceCopy(object):
"""A "copy" of a sequence that works by inserting a mask in front
of the underlying sequence, so that you can mutate it without changing
the underlying sequence. Effectively behaves as if you could do list(x)
in O(1) time. The full list API is not supported yet but there's no reason
in principle it couldn't be."""
def __init__(self, values):
self.__values = values
self.__len = len(values)
self.__mask = None
def __len__(self):
return self.__len
def METHOD_NAME(self):
if len(self) == 0:
raise IndexError("Cannot pop from empty list")
result = self[-1]
self.__len -= 1
if self.__mask is not None:
self.__mask.METHOD_NAME(self.__len, None)
return result
def __getitem__(self, i):
i = self.__check_index(i)
default = self.__values[i]
if self.__mask is None:
return default
else:
return self.__mask.get(i, default)
def __setitem__(self, i, v):
i = self.__check_index(i)
if self.__mask is None:
self.__mask = {}
self.__mask[i] = v
def __check_index(self, i):
n = len(self)
if i < -n or i >= n:
raise IndexError("Index %d out of range [0, %d)" % (i, n))
if i < 0:
i += n
assert 0 <= i < n
return i
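# Usage sketch (added for illustration): mutate a "copy" without touching the underlying
# sequence; METHOD_NAME removes and returns the last visible element.
#   base = [1, 2, 3]
#   view = LazySequenceCopy(base)
#   view[0] = 99
#   assert view[0] == 99 and base[0] == 1   # base list is untouched
#   assert view.METHOD_NAME() == 3 and len(view) == 2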
def clamp(lower, value, upper):
"""Given a value and lower/upper bounds, 'clamp' the value so that
it satisfies lower <= value <= upper."""
return max(lower, min(value, upper))
def swap(ls, i, j):
"""Swap the elements ls[i], ls[j]."""
if i == j:
return
ls[i], ls[j] = ls[j], ls[i]
|
4,410 |
test read emri with pillow
|
# Copyright 2008-2018 pydicom authors. See LICENSE file for details.
import os
import sys
import pytest
import pydicom
from pydicom.filereader import dcmread
from pydicom.data import get_testdata_file
pillow_missing_message = "pillow is not available " "in this test environment"
pillow_present_message = "pillow is being tested"
gdcm_missing_message = "GDCM is not available in this test environment"
numpy_missing_message = "numpy is not available " "in this test environment"
jpeg_ls_missing_message = "jpeg_ls is not available " "in this test environment"
try:
from pydicom.pixel_data_handlers import numpy_handler
HAVE_NP = numpy_handler.HAVE_NP
except ImportError:
HAVE_NP = False
numpy_handler = None
try:
from pydicom.pixel_data_handlers import pillow_handler
HAVE_PIL = pillow_handler.HAVE_PIL
HAVE_JPEG = pillow_handler.HAVE_JPEG
HAVE_JPEG2K = pillow_handler.HAVE_JPEG2K
except ImportError:
HAVE_PIL = False
pillow_handler = None
HAVE_JPEG = False
HAVE_JPEG2K = False
try:
from pydicom.pixel_data_handlers import jpeg_ls_handler
HAVE_JPEGLS = jpeg_ls_handler.HAVE_JPEGLS
except ImportError:
jpeg_ls_handler = None
HAVE_JPEGLS = False
try:
from pydicom.pixel_data_handlers import gdcm_handler
HAVE_GDCM = gdcm_handler.HAVE_GDCM
except ImportError:
gdcm_handler = None
HAVE_GDCM = False
mr_name = get_testdata_file("MR_small.dcm")
jpeg_ls_lossless_name = get_testdata_file("MR_small_jpeg_ls_lossless.dcm")
emri_name = get_testdata_file("emri_small.dcm")
emri_jpeg_ls_lossless = get_testdata_file("emri_small_jpeg_ls_lossless.dcm")
dir_name = os.path.dirname(sys.argv[0])
save_dir = os.getcwd()
class Test_JPEG_LS_Lossless_transfer_syntax:
def setup_method(self, method):
self.jpeg_ls_lossless = dcmread(jpeg_ls_lossless_name)
self.mr_small = dcmread(mr_name)
self.emri_jpeg_ls_lossless = dcmread(emri_jpeg_ls_lossless)
self.emri_small = dcmread(emri_name)
self.original_handlers = pydicom.config.pixel_data_handlers
def teardown_method(self, method):
pydicom.config.pixel_data_handlers = self.original_handlers
@pytest.mark.skipif(not HAVE_NP, reason=numpy_missing_message)
def test_read_mr_with_numpy(self):
pydicom.config.pixel_data_handlers = [numpy_handler]
msg = (
r"Unable to decode pixel data with a transfer syntax UID of "
r"'1.2.840.10008.1.2.4.80' \(JPEG-LS Lossless Image Compression\) "
r"as there are no pixel data handlers available."
)
with pytest.raises(NotImplementedError, match=msg):
self.jpeg_ls_lossless.pixel_array
@pytest.mark.skipif(not HAVE_NP, reason=numpy_missing_message)
def test_read_emri_with_numpy(self):
pydicom.config.pixel_data_handlers = [numpy_handler]
msg = (
r"Unable to decode pixel data with a transfer syntax UID of "
r"'1.2.840.10008.1.2.4.80' \(JPEG-LS Lossless Image Compression\) "
r"as there are no pixel data handlers available."
)
with pytest.raises(NotImplementedError, match=msg):
self.emri_jpeg_ls_lossless.pixel_array
@pytest.mark.skipif(not HAVE_PIL, reason=pillow_missing_message)
def test_read_mr_with_pillow(self):
pydicom.config.pixel_data_handlers = [pillow_handler]
msg = (
r"Unable to decode pixel data with a transfer syntax UID of "
r"'1.2.840.10008.1.2.4.80' \(JPEG-LS Lossless Image Compression\) "
r"as there are no pixel data handlers available."
)
with pytest.raises(NotImplementedError, match=msg):
self.jpeg_ls_lossless.pixel_array
@pytest.mark.skipif(not HAVE_PIL, reason=pillow_missing_message)
def METHOD_NAME(self):
pydicom.config.pixel_data_handlers = [pillow_handler]
msg = (
r"Unable to decode pixel data with a transfer syntax UID of "
r"'1.2.840.10008.1.2.4.80' \(JPEG-LS Lossless Image Compression\) "
r"as there are no pixel data handlers available."
)
with pytest.raises(NotImplementedError, match=msg):
self.emri_jpeg_ls_lossless.pixel_array
@pytest.mark.skipif(not HAVE_GDCM, reason=gdcm_missing_message)
def test_read_mr_with_gdcm(self):
pydicom.config.pixel_data_handlers = [numpy_handler, gdcm_handler]
a = self.jpeg_ls_lossless.pixel_array
b = self.mr_small.pixel_array
a_mean = a.mean()
b_mean = b.mean()
msg = f"using GDCM Decoded pixel data is not all {b_mean} (mean == {a_mean})"
assert a_mean == b_mean, msg
@pytest.mark.skipif(not HAVE_GDCM, reason=gdcm_missing_message)
def test_read_emri_with_gdcm(self):
pydicom.config.pixel_data_handlers = [numpy_handler, gdcm_handler]
a = self.emri_jpeg_ls_lossless.pixel_array
b = self.emri_small.pixel_array
a_mean = a.mean()
b_mean = b.mean()
msg = f"using GDCM Decoded pixel data is not all {b_mean} (mean == {a_mean})"
assert a_mean == b_mean, msg
@pytest.mark.skipif(not HAVE_JPEGLS, reason=jpeg_ls_missing_message)
def test_read_mr_with_jpeg_ls(self):
pydicom.config.pixel_data_handlers = [numpy_handler, jpeg_ls_handler]
a = self.jpeg_ls_lossless.pixel_array
b = self.mr_small.pixel_array
a_mean = a.mean()
b_mean = b.mean()
msg = f"using jpeg_ls decoded pixel data is not all {b_mean} (mean == {a_mean})"
assert a_mean == b_mean, msg
@pytest.mark.skipif(not HAVE_JPEGLS, reason=jpeg_ls_missing_message)
def test_read_emri_with_jpeg_ls(self):
pydicom.config.pixel_data_handlers = [numpy_handler, jpeg_ls_handler]
a = self.emri_jpeg_ls_lossless.pixel_array
b = self.emri_small.pixel_array
a_mean = a.mean()
b_mean = b.mean()
msg = f"using jpeg_ls decoded pixel data is not all {b_mean} (mean == {a_mean})"
assert a_mean == b_mean, msg
def test_read_mr_without_any_handler(self):
pydicom.config.pixel_data_handlers = []
msg = (
r"Unable to decode pixel data with a transfer syntax UID of "
r"'1.2.840.10008.1.2.4.80' \(JPEG-LS Lossless Image Compression\) "
r"as there are no pixel data handlers available."
)
with pytest.raises(NotImplementedError, match=msg):
self.jpeg_ls_lossless.pixel_array
def test_read_emri_without_any_handler(self):
pydicom.config.pixel_data_handlers = []
msg = (
r"Unable to decode pixel data with a transfer syntax UID of "
r"'1.2.840.10008.1.2.4.80' \(JPEG-LS Lossless Image Compression\) "
r"as there are no pixel data handlers available."
)
with pytest.raises(NotImplementedError, match=msg):
self.emri_jpeg_ls_lossless.pixel_array
|
4,411 |
test setpoint
|
#
# This file is part of the PyMeasure package.
#
# Copyright (c) 2013-2023 PyMeasure Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import pytest
from pymeasure.test import expected_protocol
from pymeasure.instruments.hcp import TC038D
# Testing the 'write multiple values' method of the device.
def test_write_multiple_values():
# Communication from manual.
with expected_protocol(
TC038D,
[(b"\x01\x10\x01\x0A\x00\x04\x08\x00\x00\x03\xE8\xFF\xFF\xFC\x18\x8D\xE9",
b"\x01\x10\x01\x0A\x00\x04\xE0\x34")]
) as inst:
inst.write("W,0x010A,1000,-1000")
inst.read()
def test_write_multiple_values_decimal_address():
# Communication from manual.
with expected_protocol(
TC038D,
[(b"\x01\x10\x01\x0A\x00\x04\x08\x00\x00\x03\xE8\xFF\xFF\xFC\x18\x8D\xE9",
b"\x01\x10\x01\x0A\x00\x04\xE0\x34")]
) as inst:
inst.write("W,266,1000,-1000")
inst.read()
def test_write_values_CRC_error():
"""Test whether an invalid response CRC code raises an Exception."""
with expected_protocol(
TC038D,
[(b"\x01\x10\x01\x06\x00\x02\x04\x00\x00\x01A\xbf\xb5",
b"\x01\x10\x01\x06\x00\x02\x01\x02")],
) as inst:
with pytest.raises(ConnectionError):
inst.setpoint = 32.1
def test_write_multiple_handle_wrong_start_address():
"""Test whether the error code (byte 2) of 2 raises the right error."""
with expected_protocol(
TC038D,
[(b"\x01\x10\x01\x06\x00\x02\x04\x00\x00\x01A\xbf\xb5",
b"\x01\x90\x02\x06\x00")],
) as inst:
with pytest.raises(ValueError, match="Wrong start address"):
inst.setpoint = 32.1
# Test the 'read register' method of the device
def test_read_CRC_error():
"""Test whether an invalid response CRC code raises an Exception."""
with expected_protocol(
TC038D,
[(b"\x01\x03\x00\x00\x00\x02\xC4\x0B",
b"\x01\x03\x04\x00\x00\x03\xE8\x01\x02")],
) as inst:
with pytest.raises(ConnectionError):
inst.temperature
def test_read_address_error():
"""Test whether the error code (byte 2) of 2 raises the right error."""
with expected_protocol(
TC038D,
[(b"\x01\x03\x00\x00\x00\x02\xC4\x0B",
b"\x01\x83\x02\01\02")],
) as inst:
with pytest.raises(ValueError, match="start address"):
inst.temperature
def test_read_elements_error():
"""Test whether the error code (byte 2) of 3 raises the right error."""
with expected_protocol(
TC038D,
[(b"\x01\x03\x00\x00\x00\x02\xC4\x0B",
b"\x01\x83\x03\01\02")],
) as inst:
with pytest.raises(ValueError, match="Variable data"):
inst.temperature
def test_read_any_error():
"""Test whether any wrong message (byte 1 is not 3) raises an error."""
with expected_protocol(
TC038D,
[(b"\x01\x03\x00\x00\x00\x02\xC4\x0B",
b"\x01\x43\x05\01\02")],
) as inst:
with pytest.raises(ConnectionError):
inst.temperature
# Test properties
def METHOD_NAME():
with expected_protocol(
TC038D,
[(b"\x01\x03\x01\x06\x00\x02\x25\xf6",
b"\x01\x03\x04\x00\x00\x00\x99:Y")],
) as inst:
assert inst.setpoint == 15.3
def test_setpoint_setter():
with expected_protocol(
TC038D,
[(b"\x01\x10\x01\x06\x00\x02\x04\x00\x00\x01A\xbf\xb5",
b"\x01\x10\x01\x06\x00\x02\xa0\x35")],
) as inst:
inst.setpoint = 32.1
def test_temperature():
# Communication from manual.
# Tests readRegister as well.
with expected_protocol(
TC038D,
[(b"\x01\x03\x00\x00\x00\x02\xC4\x0B",
b"\x01\x03\x04\x00\x00\x03\xE8\xFA\x8D")],
) as inst:
assert inst.temperature == 100
def test_ping():
# Communication from manual.
with expected_protocol(
TC038D,
[(b"\x01\x08\x00\x00\x12\x34\xed\x7c", b"\x01\x08\x00\x00\x12\x34\xed\x7c")],
) as inst:
inst.ping(4660)
|
4,412 |
vswr transformed
|
# NanoVNASaver
#
# A python program to view and export Touchstone data from a NanoVNA
# Copyright (C) 2019, 2020 Rune B. Broberg
# Copyright (C) 2020ff NanoVNA-Saver Authors
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import os
import csv
import logging
from PyQt6 import QtWidgets
import NanoVNASaver.AnalyticTools as at
from NanoVNASaver.Analysis.Base import Analysis, QHLine
from NanoVNASaver.Formatting import format_frequency, format_resistance
from NanoVNASaver.RFTools import reflection_coefficient
logger = logging.getLogger(__name__)
def format_resistence_neg(x):
return format_resistance(x, allow_negative=True)
def METHOD_NAME(z, ratio=49) -> float:
refl = reflection_coefficient(z / ratio)
mag = abs(refl)
return 1 if mag == 1 else (1 + mag) / (1 - mag)
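# Illustrative sketch (not part of the original module): the helper above
# normalises the impedance by the transformer ratio and applies the usual
# VSWR formula (1 + |Γ|) / (1 - |Γ|), where Γ is the reflection coefficient.
# Assuming reflection_coefficient() is referenced to a 50 ohm system, a purely
# resistive 2450 ohm load behind a 49:1 transformer maps to 2450 / 49 = 50 ohm,
# so |Γ| is ~0 and the transformed VSWR is ~1.0:
#
#   vswr = METHOD_NAME(complex(2450, 0), ratio=49)   # ~1.0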
class ResonanceAnalysis(Analysis):
def __init__(self, app):
super().__init__(app)
self.crossings: list[int] = []
self.filename = ""
self._widget = QtWidgets.QWidget()
self.layout = QtWidgets.QFormLayout()
self._widget.setLayout(self.layout)
self.input_description = QtWidgets.QLineEdit("")
self.checkbox_move_marker = QtWidgets.QCheckBox()
self.layout.addRow(QtWidgets.QLabel("<b>Settings</b>"))
self.layout.addRow("Description", self.input_description)
self.layout.addRow(QHLine())
self.layout.addRow(QHLine())
self.results_label = QtWidgets.QLabel("<b>Results</b>")
self.layout.addRow(self.results_label)
def _get_data(self, index):
s11 = self.app.data.s11
my_data = {
"freq": s11[index].freq,
"s11": s11[index].z,
"lambda": s11[index].wavelength,
"impedance": s11[index].impedance(),
"vswr": s11[index].vswr,
}
my_data["vswr_49"] = METHOD_NAME(my_data["impedance"], 49)
my_data["vswr_4"] = METHOD_NAME(my_data["impedance"], 4)
my_data["r"] = my_data["impedance"].real
my_data["x"] = my_data["impedance"].imag
return my_data
def runAnalysis(self):
self.reset()
self.filename = (
os.path.join("/tmp/", f"{self.input_description.text()}.csv")
if self.input_description.text()
else ""
)
results_header = self.layout.indexOf(self.results_label)
logger.debug(
"Results start at %d, out of %d",
results_header,
self.layout.rowCount(),
)
for _ in range(results_header, self.layout.rowCount()):
self.layout.removeRow(self.layout.rowCount() - 1)
self.crossings = sorted(
set(at.zero_crossings([d.phase for d in self.app.data.s11]))
)
logger.debug("Found %d sections ", len(self.crossings))
if not self.crossings:
self.layout.addRow(QtWidgets.QLabel("No resonance found"))
return
self.do_resonance_analysis()
def do_resonance_analysis(self):
extended_data = []
for crossing in self.crossings:
extended_data.append(self._get_data(crossing))
self.layout.addRow(
"Resonance",
QtWidgets.QLabel(
format_frequency(self.app.data.s11[crossing].freq)
),
)
self.layout.addWidget(QHLine())
# Remove the final separator line
self.layout.removeRow(self.layout.rowCount() - 1)
if self.filename and extended_data:
with open(
self.filename, "w", encoding="utf-8", newline=""
) as csvfile:
fieldnames = extended_data[0].keys()
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for row in extended_data:
writer.writerow(row)
|
4,413 |
test seek
|
r"""Test S3 Blob DB
Commands to setup Docker with minio for development/testing
First, [install Docker](https://docs.docker.com/mac/#h_installation).
Initialize a docker machine for minio (skip if you already have one):
docker-machine create --driver virtualbox minio
eval "$(docker-machine env minio)"
Start the minio service:
mkdir -p ~/.minio/{data,conf}
# INSECURE KEY VALUES FOR TESTING ONLY, DO NOT USE IN PRODUCTION!
docker run -p 9988:9000 --name minio1 --detach \
-e "MINIO_ACCESS_KEY=admin-key" \
-e "MINIO_SECRET_KEY=admin-secret" \
-v ~/.minio/data:/data \
-v ~/.minio/conf:/root/.minio \
minio/minio server /data
Test minio connection:
DOCKER_MINIO_HOST=$(echo $DOCKER_HOST | cut -d/ -f3 | cut -d: -f1)
curl -i http://$DOCKER_MINIO_HOST:9988; echo ""
# expected output:
# HTTP/1.1 403 Forbidden
# ...
Finally, add the following to `localsettings.py`:
def _get_s3_params():
# these can change depending on host state
import os, re, subprocess
def run(command, pattern=None, group=1):
out = subprocess.check_output(command.split())
return re.search(pattern, out).group(group) if pattern else out
try:
evars = run("docker-machine env minio")
for match in re.finditer(r'export (.*?)="(.*?)"', evars):
os.environ[match.group(1)] = match.group(2)
host = re.search(r'DOCKER_HOST="tcp://(.*?):', evars).group(1)
port = run("docker port minio1 9000", r':(\d+)')
return {"host": host, "port": port}
except Exception:
return None # docker host is not running
_s3_params = _get_s3_params()
if _s3_params:
S3_BLOB_DB_SETTINGS = {
"url": "http://{host}:{port}".format(**_s3_params),
# NOTE: THESE KEYS ARE INSECURE AND MEANT FOR TESTING ONLY
"access_key": "admin-key",
"secret_key": "admin-secret",
# reduce timeouts to make tests fail faster
"config": {"connect_timeout": 1, "read_timeout": 1}
}
""" # noqa: W605
from io import BytesIO, SEEK_SET, TextIOWrapper
from django.conf import settings
from django.test import TestCase
from corehq.blobs import CODES
from corehq.blobs.s3db import S3BlobDB
from corehq.blobs.util import BlobStream
from corehq.blobs.tests.util import new_meta, TemporaryS3BlobDB
from corehq.blobs.tests.test_fsdb import _BlobDBTests
from corehq.util.test_utils import trap_extra_setup
class TestS3BlobDB(TestCase, _BlobDBTests):
@classmethod
def setUpClass(cls):
super(TestS3BlobDB, cls).setUpClass()
with trap_extra_setup(AttributeError, msg="S3_BLOB_DB_SETTINGS not configured"):
config = settings.S3_BLOB_DB_SETTINGS
cls.db = TemporaryS3BlobDB(config)
@classmethod
def tearDownClass(cls):
cls.db.close()
super(TestS3BlobDB, cls).tearDownClass()
def test_put_from_other_s3_db(self):
# cleanup will be done by self.db
db2 = S3BlobDB(settings.S3_BLOB_DB_SETTINGS)
meta = self.db.put(BytesIO(b"content"), meta=self.new_meta())
with self.db.get(meta=meta) as blob:
meta2 = db2.put(blob, meta=self.new_meta())
self.assertEqual(meta2.content_length, meta.content_length)
with db2.get(meta=meta2) as blob2:
self.assertEqual(blob2.read(), b"content")
class TestS3BlobDBCompressed(TestS3BlobDB):
meta_kwargs = {'compressed_length': -1}
class TestBlobStream(TestCase):
@classmethod
def new_meta(cls, **kwargs):
return new_meta(**kwargs)
@classmethod
def setUpClass(cls):
super(TestBlobStream, cls).setUpClass()
with trap_extra_setup(AttributeError, msg="S3_BLOB_DB_SETTINGS not configured"):
config = settings.S3_BLOB_DB_SETTINGS
cls.db = TemporaryS3BlobDB(config)
cls.meta = cls.db.put(BytesIO(b"bytes"), meta=cls.new_meta())
@classmethod
def tearDownClass(cls):
cls.db.close()
super(TestBlobStream, cls).tearDownClass()
def test_text_io_wrapper(self):
meta = self.db.put(BytesIO(b"x\ny\rz\n"), meta=self.new_meta())
with self.db.get(meta=meta) as fh:
            # universal newline mode: \r -> \n
textio = TextIOWrapper(fh, encoding="utf-8")
self.assertEqual(list(textio), ["x\n", "y\n", "z\n"])
def test_checks(self):
with self.get_blob() as fh:
self.assertTrue(fh.readable())
self.assertFalse(fh.seekable())
self.assertFalse(fh.writable())
self.assertFalse(fh.isatty())
def test_tell(self):
with self.get_blob() as fh:
self.assertEqual(fh.tell(), 0)
self.assertEqual(fh.read(2), b"by")
self.assertEqual(fh.tell(), 2)
def METHOD_NAME(self):
with self.get_blob() as fh:
self.assertEqual(fh.seek(0), 0)
fh.read(2)
self.assertEqual(fh.seek(2, SEEK_SET), 2)
def test_write(self):
with self.get_blob() as fh, self.assertRaises(IOError):
fh.write(b"def")
def test_truncate(self):
with self.get_blob() as fh, self.assertRaises(IOError):
fh.truncate()
def test_fileno(self):
with self.get_blob() as fh, self.assertRaises(IOError):
fh.fileno()
def test_closed(self):
with self.get_blob() as fh:
self.assertFalse(fh.closed)
self.assertTrue(fh.closed)
def test_close(self):
fake = FakeStream()
self.assertEqual(fake.close_calls, 0)
BlobStream(fake, fake, None, 0, 0).close()
self.assertEqual(fake.close_calls, 1)
def test_close_on_exit_context(self):
fake = FakeStream()
self.assertEqual(fake.close_calls, 0)
with BlobStream(fake, fake, None, 0, 0):
pass
self.assertEqual(fake.close_calls, 1)
def get_blob(self):
return self.db.get(meta=self.meta)
class TestBlobStreamCompressed(TestBlobStream):
@classmethod
def new_meta(cls, **kwargs):
# set compressed_length to anything except None
return new_meta(compressed_length=-1, type_code=CODES.form_xml)
class FakeStream(object):
close_calls = 0
def close(self):
self.close_calls += 1
|
4,414 |
test get cleanup statement
|
from datetime import datetime
from dagster import TimeWindow
from dagster._core.storage.db_io_manager import TablePartitionDimension, TableSlice
from dagster_snowflake.snowflake_io_manager import SnowflakeDbClient, _get_cleanup_statement
def test_get_select_statement():
assert (
SnowflakeDbClient.get_select_statement(
TableSlice(database="database_abc", schema="schema1", table="table1")
)
== "SELECT * FROM database_abc.schema1.table1"
)
def test_get_select_statement_columns():
assert (
SnowflakeDbClient.get_select_statement(
TableSlice(
database="database_abc",
schema="schema1",
table="table1",
columns=["apple", "banana"],
)
)
== "SELECT apple, banana FROM database_abc.schema1.table1"
)
def test_get_select_statement_time_partitioned():
assert (
SnowflakeDbClient.get_select_statement(
TableSlice(
database="database_abc",
schema="schema1",
table="table1",
partition_dimensions=[
TablePartitionDimension(
partitions=TimeWindow(datetime(2020, 1, 2), datetime(2020, 2, 3)),
partition_expr="my_timestamp_col",
)
],
columns=["apple", "banana"],
)
)
== "SELECT apple, banana FROM database_abc.schema1.table1 WHERE\nmy_timestamp_col >="
" '2020-01-02 00:00:00' AND my_timestamp_col < '2020-02-03 00:00:00'"
)
def test_get_select_statement_static_partitioned():
assert (
SnowflakeDbClient.get_select_statement(
TableSlice(
database="database_abc",
schema="schema1",
table="table1",
partition_dimensions=[
TablePartitionDimension(partition_expr="my_fruit_col", partitions=["apple"])
],
columns=["apple", "banana"],
)
)
== "SELECT apple, banana FROM database_abc.schema1.table1 WHERE\nmy_fruit_col in ('apple')"
)
def test_get_select_statement_multiple_static_partitions():
assert (
SnowflakeDbClient.get_select_statement(
TableSlice(
database="database_abc",
schema="schema1",
table="table1",
partition_dimensions=[
TablePartitionDimension(
partition_expr="fruit_col", partitions=["apple", "banana"]
)
],
columns=["fruit_col", "other_col"],
)
)
== "SELECT fruit_col, other_col FROM database_abc.schema1.table1 WHERE\nfruit_col in"
" ('apple', 'banana')"
)
def test_get_select_statement_multi_partitioned():
assert (
SnowflakeDbClient.get_select_statement(
TableSlice(
database="database_abc",
schema="schema1",
table="table1",
partition_dimensions=[
TablePartitionDimension(partition_expr="my_fruit_col", partitions=["apple"]),
TablePartitionDimension(
partitions=TimeWindow(datetime(2020, 1, 2), datetime(2020, 2, 3)),
partition_expr="my_timestamp_col",
),
],
)
)
== "SELECT * FROM database_abc.schema1.table1 WHERE\nmy_fruit_col in ('apple')"
" AND\nmy_timestamp_col >= '2020-01-02 00:00:00' AND my_timestamp_col < '2020-02-03"
" 00:00:00'"
)
def METHOD_NAME():
assert (
_get_cleanup_statement(
TableSlice(database="database_abc", schema="schema1", table="table1")
)
== "DELETE FROM database_abc.schema1.table1"
)
def test_get_cleanup_statement_time_partitioned():
assert (
_get_cleanup_statement(
TableSlice(
database="database_abc",
schema="schema1",
table="table1",
partition_dimensions=[
TablePartitionDimension(
partitions=TimeWindow(datetime(2020, 1, 2), datetime(2020, 2, 3)),
partition_expr="my_timestamp_col",
)
],
)
)
== "DELETE FROM database_abc.schema1.table1 WHERE\nmy_timestamp_col >= '2020-01-02"
" 00:00:00' AND my_timestamp_col < '2020-02-03 00:00:00'"
)
def test_get_cleanup_statement_static_partitioned():
assert (
_get_cleanup_statement(
TableSlice(
database="database_abc",
schema="schema1",
table="table1",
partition_dimensions=[
TablePartitionDimension(partition_expr="my_fruit_col", partitions=["apple"])
],
)
)
== "DELETE FROM database_abc.schema1.table1 WHERE\nmy_fruit_col in ('apple')"
)
def test_get_cleanup_statement_multi_partitioned():
assert (
_get_cleanup_statement(
TableSlice(
database="database_abc",
schema="schema1",
table="table1",
partition_dimensions=[
TablePartitionDimension(partition_expr="my_fruit_col", partitions=["apple"]),
TablePartitionDimension(
partitions=TimeWindow(datetime(2020, 1, 2), datetime(2020, 2, 3)),
partition_expr="my_timestamp_col",
),
],
)
)
== "DELETE FROM database_abc.schema1.table1 WHERE\nmy_fruit_col in ('apple')"
" AND\nmy_timestamp_col >= '2020-01-02 00:00:00' AND my_timestamp_col < '2020-02-03"
" 00:00:00'"
)
|
4,415 |
get value in tfconfig
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Cluster Resolvers for TF_CONFIG Environment Variables."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
from tensorflow.python.distribute.cluster_resolver.cluster_resolver import ClusterResolver
from tensorflow.python.training.server_lib import ClusterSpec
from tensorflow.python.util.tf_export import tf_export
_TF_CONFIG_ENV = 'TF_CONFIG'
_SESSION_MASTER_KEY = 'session_master'
_RPC_LAYER_KEY = 'rpc_layer'
_TASK_KEY = 'task'
def format_master_url(master, rpc_layer=None):
if rpc_layer:
return '%s://%s' % (rpc_layer, master)
else:
return master
def _load_tf_config():
return json.loads(os.environ.get(_TF_CONFIG_ENV, '{}'))
def METHOD_NAME(key, default=None):
tf_config = _load_tf_config()
return tf_config[key] if key in tf_config else default
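# Illustrative example (not from the original module): with TF_CONFIG set as
# below, METHOD_NAME('task') returns {'type': 'worker', 'index': 1}, while
# METHOD_NAME('rpc_layer', 'grpc') falls back to the default because the key
# is absent.
#
#   os.environ['TF_CONFIG'] = json.dumps({
#       'cluster': {'worker': ['host0:2222', 'host1:2222']},
#       'task': {'type': 'worker', 'index': 1},
#   })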
@tf_export('distribute.cluster_resolver.TFConfigClusterResolver')
class TFConfigClusterResolver(ClusterResolver):
"""Implementation of a ClusterResolver which reads the TF_CONFIG EnvVar.
This is an implementation of cluster resolvers when using TF_CONFIG to set
information about the cluster. The cluster spec returned will be
initialized from the TF_CONFIG environment variable.
"""
def __init__(self,
task_type=None,
task_id=None,
rpc_layer=None,
environment=None):
"""Creates a new TFConfigClusterResolver.
Args:
task_type: (String, optional) Overrides the task type specified in the
TF_CONFIG environment variable.
task_id: (Integer, optional) Overrides the task index specified in the
TF_CONFIG environment variable.
rpc_layer: (String, optional) Overrides the rpc layer TensorFlow uses.
environment: (String, optional) Overrides the environment TensorFlow
operates in.
"""
self._task_type = task_type
self._task_id = task_id
self._rpc_layer = rpc_layer
self._environment = environment
@property
def task_type(self):
if self._task_type is None:
task_info = METHOD_NAME(_TASK_KEY, {})
return str(task_info['type']) if 'type' in task_info else None
else:
return str(self._task_type)
@property
def task_id(self):
    if self._task_id is None:
task_info = METHOD_NAME(_TASK_KEY, {})
return int(task_info['index']) if 'index' in task_info else None
else:
return int(self._task_id)
@task_type.setter
def task_type(self, task_type):
self._task_type = task_type
@task_id.setter
def task_id(self, task_id):
self._task_id = task_id
@property
def environment(self):
return self._environment
@property
def rpc_layer(self):
if self._rpc_layer is None:
return METHOD_NAME(_RPC_LAYER_KEY)
else:
return self._rpc_layer
@rpc_layer.setter
def rpc_layer(self, rpc_layer):
self._rpc_layer = rpc_layer
def num_accelerators(self,
task_type=None,
task_id=None,
config_proto=None):
task_type = self.task_type if task_type is None else task_type
task_id = self.task_id if task_id is None else task_id
return super(TFConfigClusterResolver, self).num_accelerators(
task_type, task_id, config_proto)
def cluster_spec(self):
"""Returns a ClusterSpec based on the TF_CONFIG environment variable.
Returns:
A ClusterSpec with information from the TF_CONFIG environment variable.
"""
tf_config = _load_tf_config()
if 'cluster' not in tf_config:
return ClusterSpec({})
return ClusterSpec(tf_config['cluster'])
def master(self, task_type=None, task_id=None, rpc_layer=None):
"""Returns the master address to use when creating a TensorFlow session.
Args:
task_type: (String, optional) Overrides and sets the task_type of the
master.
task_id: (Integer, optional) Overrides and sets the task id of the
master.
rpc_layer: (String, optional) Overrides and sets the protocol over which
TensorFlow nodes communicate with each other.
Returns:
The address of the master.
Raises:
RuntimeError: If the task_type or task_id is not specified and the
`TF_CONFIG` environment variable does not contain a task section.
"""
# If `session_master` is set, just use that.
session_master = METHOD_NAME(_SESSION_MASTER_KEY)
if session_master is not None:
return session_master
# Return an empty string if we are the only job in the ClusterSpec.
cluster_spec = self.cluster_spec()
if (not cluster_spec.jobs or
(len(cluster_spec.jobs) == 1 and
len(cluster_spec.job_tasks(cluster_spec.jobs[0])) == 1)):
return ''
    # We try to auto-detect the task type and id, but use the user-supplied ones
    # where available
task_type = task_type if task_type is not None else self.task_type
task_id = task_id if task_id is not None else self.task_id
return format_master_url(cluster_spec.task_address(task_type, task_id),
self.rpc_layer)
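# Illustrative example (hypothetical hosts, matching the illustrative TF_CONFIG
# shown earlier in this file): for task_type='worker', task_id=1 and
# rpc_layer='grpc', cluster_spec().task_address('worker', 1) resolves to
# 'host1:2222' and master() returns 'grpc://host1:2222' via format_master_url().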
|
4,416 |
check int
|
from __future__ import annotations
from typing import TYPE_CHECKING
import os
from pyNastran.gui.utils.qt.checks.utils import (check_locale_float, is_ranged_value,
check_format_str)
if TYPE_CHECKING:
from qtpy.QtWidgets import QLineEdit
QLINE_EDIT_BASIC = 'QLineEdit{background: white;}'
QLINE_EDIT_ERROR = 'QLineEdit{background: red;}'
def check_path(cell: QLineEdit) -> tuple[str, bool]:
"""verifies that the path exists"""
path, passed = check_name_str(cell)
if not passed:
return None, False
if not os.path.exists(path):
cell.setStyleSheet(QLINE_EDIT_ERROR)
return None, False
cell.setStyleSheet(QLINE_EDIT_BASIC)
return path, True
def check_save_path(cell: QLineEdit) -> tuple[str, bool]:
"""verifies that the path is saveable..."""
text, passed = check_name_str(cell)
if not passed:
return None, False
return text, passed
#-------------------------------------------------------------------------------
def METHOD_NAME(cell: QLineEdit) -> tuple[int, bool]:
"""
Colors the cell red if the integer is invalid
Parameters
----------
cell : QLineEdit()
a PyQt/PySide object
Returns
-------
value : int / None
int : the value as a int
None : is_passed=False
is_passed : bool
is this a valid integer
"""
text = cell.text()
try:
value = int(text)
cell.setStyleSheet("QLineEdit{background: white;}")
return value, True
except ValueError:
cell.setStyleSheet("QLineEdit{background: red;}")
return None, False
def check_positive_int_or_blank(cell: QLineEdit) -> tuple[int, bool]:
text = str(cell.text()).strip()
if len(text) == 0:
return None, True
try:
value = int(text)
except ValueError:
cell.setStyleSheet("QLineEdit{background: red;}")
return None, False
if value < 1:
cell.setStyleSheet("QLineEdit{background: red;}")
return None, False
cell.setStyleSheet("QLineEdit{background: white;}")
return value, True
#def check_float(cell):
#text = cell.text()
#try:
#value = eval_float_from_string(text)
#cell.setStyleSheet("QLineEdit{background: white;}")
#return value, True
#except ValueError:
#cell.setStyleSheet("QLineEdit{background: red;}")
#return None, False
def check_float(cell: QLineEdit) -> tuple[float, bool]:
"""
Colors the cell red if the float is invalid
Parameters
----------
cell : QLineEdit()
a PyQt/PySide object
Returns
-------
value : float / None
float : the value as a float
None : is_passed=False
is_passed : bool
is this a valid float
    Examples
    --------
>>> cell = QLineEdit('3.14')
>>> value, is_passed = check_float(cell)
# value=3.14, is_passed=True
>>> cell = QLineEdit('cat')
>>> value, is_passed = check_float(cell)
# value=None, is_passed=False
"""
text = cell.text()
value, is_valid = check_locale_float(text)
if is_valid:
cell.setStyleSheet("QLineEdit{background: white;}")
return value, True
else:
cell.setStyleSheet("QLineEdit{background: red;}")
return None, False
def check_float_ranged(cell: QLineEdit,
min_value=None, max_value=None,
                       min_inclusive=True, max_inclusive=True) -> tuple[float, bool]:
"""
Colors the cell red if the float is invalid or the value is outside
the range [min_value, max_value].
Parameters
----------
cell : QLineEdit()
a PyQt/PySide object
min_value / max_value : float / None
float : the constraint is active
None : the constraint is inactive
    min_inclusive / max_inclusive : bool; default=True
flips [min_value, max_value] to:
- (min_value, max_value)
- [min_value, max_value)
- (min_value, max_value]
Returns
-------
value : float / None
float : the value as a float
None : is_passed=False
is_passed : bool
is this a valid float that meets the range requirements
"""
value, is_passed = check_float(cell)
if not is_passed:
#print("failed %r" % value)
return value, is_passed
is_ranged = is_ranged_value(
value, min_value=min_value, max_value=max_value,
min_inclusive=min_inclusive, max_inclusive=max_inclusive)
color = 'white'
if not is_ranged:
value = None
color = 'red'
cell.setStyleSheet("QLineEdit{background: %s;}" % color)
return value, is_ranged
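# Example usage (sketch, mirroring the doctest style of check_float above):
#
#   >>> cell = QLineEdit('0.5')
#   >>> value, is_passed = check_float_ranged(cell, min_value=0.0, max_value=1.0)
#   # value=0.5, is_passed=True
#
#   >>> cell = QLineEdit('1.0')
#   >>> value, is_passed = check_float_ranged(cell, max_value=1.0, max_inclusive=False)
#   # value=None, is_passed=False (1.0 is rejected when max_inclusive=False)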
#-------------------------------------------------------------------------------
def check_name_str(cell: QLineEdit) -> tuple[str, bool]:
"""
Verifies that the data is string-able.
Parameters
----------
cell : QLineEdit
a QLineEdit containing a string.
"""
cell_value = cell.text()
try:
text = str(cell_value).strip()
except UnicodeEncodeError:
cell.setStyleSheet("QLineEdit{background: red;}")
return None, False
if len(text):
cell.setStyleSheet("QLineEdit{background: white;}")
return text, True
else:
cell.setStyleSheet("QLineEdit{background: red;}")
return None, False
def check_name_length(cell: QLineEdit) -> tuple[str, bool]:
"""
Verifies that the string has at least 1 non-whitespace character.
Parameters
----------
cell : QLineEdit
a QLineEdit containing a string.
"""
cell_value = cell.text()
text = cell_value.strip()
if len(text):
cell.setStyleSheet("QLineEdit{background: white;}")
return text, True
else:
cell.setStyleSheet("QLineEdit{background: red;}")
return None, False
def check_format(cell: QLineEdit) -> tuple[str, bool]:
"""
Checks a QLineEdit string formatter
Parameters
----------
cell : QLineEdit
a QLineEdit containing a string formatter like:
{'%s', '%i', '%d', %f', '%g', '%.3f', '%e'}
Returns
-------
text : str / None
str : the validated text of the QLineEdit
None : the format is invalid
is_valid : bool
The str/None flag to indicate if the string formatter is valid
"""
text = str(cell.text())
text2, is_valid = check_format_str(text)
if is_valid:
cell.setStyleSheet("QLineEdit{background: white;}")
return text2, True
cell.setStyleSheet("QLineEdit{background: red;}")
return None, False
|
4,417 |
tear down
|
from tests.integration.mock.local_node_network import LocalNodeNetwork
from tests.unit.helpers.mock_transactions import get_introduce_motion_tx, get_vote_tx, get_new_currency_tx
from unittest import TestCase
import asyncio
import json
import random
class TestNodeKick(TestCase):
def setUp(self):
self.network = LocalNodeNetwork(
num_of_masternodes=5,
network_await_connect_all_timeout=2
)
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
while not self.network.all_nodes_started:
self.loop.run_until_complete(asyncio.sleep(1))
self.exile = self.network.masternodes[-1]
self.voters = self.network.masternodes[:-1]
self.num_yays_needed = len(self.network.all_nodes) // 2 + 1
self.fund_founder()
def METHOD_NAME(self):
task = asyncio.ensure_future(self.network.stop_all_nodes())
while not task.done():
self.loop.run_until_complete(asyncio.sleep(0.1))
try:
self.loop.run_until_complete(self.loop.shutdown_asyncgens())
self.loop.close()
except RuntimeError:
pass
def await_async_process(self, process, *args, **kwargs):
tasks = asyncio.gather(
process(*args, **kwargs)
)
loop = asyncio.get_event_loop()
loop.run_until_complete(tasks)
def fund_founder(self):
for node in self.network.all_nodes:
node.set_smart_contract_value(
key=f'currency.balances:{self.network.founders_wallet.verifying_key}',
value=1000000
)
def test_updates_state_and_shuts_down_kicked_node(self):
random_voter = random.choice(self.voters)
introduce_motion_remove_member_tx = get_introduce_motion_tx(
policy='masternodes', motion=1, vk=self.exile.vk, wallet=random_voter.node.wallet
)
random_voter.send_tx(json.dumps(introduce_motion_remove_member_tx).encode())
self.network.await_all_nodes_done_processing(block_height=2)
for voter in self.voters[:self.num_yays_needed]:
vote_yay_tx = get_vote_tx(policy='masternodes', obj=['vote_on_motion', True], wallet=voter.node.wallet, nonce=1)
voter.send_tx(json.dumps(vote_yay_tx).encode())
self.network.await_all_nodes_done_processing(block_height=self.num_yays_needed + 2)
expected_members = [voter.vk for voter in self.voters]
for voter in self.voters:
self.assertListEqual(voter.get_smart_contract_value('masternodes.S:members'), expected_members)
self.assertEqual(voter.network.num_of_peers(), len(self.voters) - 1)
self.assertFalse(self.exile.node_is_running)
def test_nodes_clear_results_from_kicked_peer(self):
for node in self.network.all_nodes:
self.await_async_process(node.node.pause_validation_queue)
random_voter = random.choice(self.voters)
introduce_motion_remove_member_tx = get_introduce_motion_tx(
policy='masternodes', motion=1, vk=self.exile.vk, wallet=random_voter.node.wallet
)
random_voter.send_tx(json.dumps(introduce_motion_remove_member_tx).encode())
self.await_async_process(asyncio.sleep, 1)
for voter in self.voters[:self.num_yays_needed]:
vote_yay_tx = get_vote_tx(policy='masternodes', obj=['vote_on_motion', True], wallet=voter.node.wallet, nonce=1)
voter.send_tx(json.dumps(vote_yay_tx).encode())
self.await_async_process(asyncio.sleep, 1)
random_voter.send_tx(
json.dumps(get_new_currency_tx(wallet=self.network.founders_wallet, processor=random_voter.vk, nonce=2)).encode()
)
num_tx_total = self.num_yays_needed + 2
for node in self.network.all_nodes:
while len(node.validation_queue) != num_tx_total:
self.await_async_process(asyncio.sleep, 0.1)
while node.blocks.total_blocks() != num_tx_total - 1:
self.await_async_process(node.validation_queue.process_next)
last_hlc = self.exile.validation_queue[-1]
for node in self.network.all_nodes:
# Assert results from exile node are present before we process last TX which will pass the motion
self.assertIsNotNone(
node.validation_queue.validation_results[last_hlc]['solutions'].get(self.exile.wallet.verifying_key, None)
)
while node.blocks.total_blocks() != num_tx_total:
self.await_async_process(node.validation_queue.process_next)
if node != self.exile:
# Assert last vote TX passed the motion thus removing older results from exile
self.assertIsNone(
node.validation_queue.validation_results[last_hlc]['solutions'].get(self.exile.wallet.verifying_key, None)
)
node.node.unpause_all_queues()
self.network.await_all_nodes_done_processing(block_height=num_tx_total + 1, nodes=self.voters)
expected_members = [voter.vk for voter in self.voters]
for voter in self.voters:
self.assertListEqual(voter.get_smart_contract_value('masternodes.S:members'), expected_members)
self.assertEqual(voter.network.num_of_peers(), len(self.voters) - 1)
self.assertFalse(self.exile.node_is_running)
def test_nodes_drop_tx_if_processor_was_kicked(self):
random_voter = random.choice(self.voters)
introduce_motion_remove_member_tx = get_introduce_motion_tx(
policy='masternodes', motion=1, vk=self.exile.vk, wallet=random_voter.node.wallet
)
random_voter.send_tx(json.dumps(introduce_motion_remove_member_tx).encode())
self.network.await_all_nodes_done_processing(block_height=2)
for voter in self.voters[:self.num_yays_needed]:
vote_yay_tx = get_vote_tx(policy='masternodes', obj=['vote_on_motion', True], wallet=voter.node.wallet, nonce=1)
voter.send_tx(json.dumps(vote_yay_tx).encode())
# This TX shouldn't eventually be processed since it is coming from exile.
self.exile.send_tx(
json.dumps(get_new_currency_tx(wallet=self.network.founders_wallet, processor=self.exile.vk)).encode()
)
num_tx_total = self.num_yays_needed + 1
self.network.await_all_nodes_done_processing(block_height=num_tx_total + 2, nodes=self.voters)
expected_members = [voter.vk for voter in self.voters]
for voter in self.voters:
self.assertListEqual(voter.get_smart_contract_value('masternodes.S:members'), expected_members)
self.assertEqual(voter.network.num_of_peers(), len(self.voters) - 1)
        self.assertFalse(self.exile.node_is_running)
|
4,418 |
test gui video
|
import numpy as np
from sleap import Instance, Skeleton
from sleap.gui.widgets.video import (
QtVideoPlayer,
GraphicsView,
QtInstance,
QtTextWithBackground,
VisibleBoundingBox,
)
from qtpy import QtCore, QtWidgets
from qtpy.QtGui import QColor
def METHOD_NAME(qtbot):
vp = QtVideoPlayer()
vp.show()
qtbot.addWidget(vp)
assert vp.close()
# Click the button 20 times
# for i in range(20):
# qtbot.mouseClick(vp.btn, QtCore.Qt.LeftButton)
def test_gui_video_instances(qtbot, small_robot_mp4_vid, centered_pair_labels):
vp = QtVideoPlayer(small_robot_mp4_vid)
qtbot.addWidget(vp)
test_frame_idx = 63
labeled_frames = centered_pair_labels.labeled_frames
def plot_instances(vp, idx):
for instance in labeled_frames[test_frame_idx].instances:
vp.addInstance(instance=instance)
vp.changedPlot.connect(plot_instances)
vp.view.updatedViewer.emit()
vp.show()
vp.plot()
# Check that all instances are included in viewer
assert len(vp.instances) == len(labeled_frames[test_frame_idx].instances)
# All instances should be selectable
assert vp.selectable_instances == vp.instances
vp.zoomToFit()
# Check that we zoomed correctly
assert vp.view.zoomFactor > 1
vp.instances[0].updatePoints(complete=True)
# Check that node is marked as complete
nodes = [item for item in vp.instances[0].childItems() if hasattr(item, "point")]
assert all((node.point.complete for node in nodes))
# Check that selection via keyboard works
assert vp.view.getSelectionIndex() is None
qtbot.keyClick(vp, QtCore.Qt.Key_1)
assert vp.view.getSelectionIndex() == 0
qtbot.keyClick(vp, QtCore.Qt.Key_2)
assert vp.view.getSelectionIndex() == 1
# Check that updatedSelection signal is emitted
with qtbot.waitSignal(vp.view.updatedSelection, timeout=10):
qtbot.keyClick(vp, QtCore.Qt.Key_1)
# Check that selection by Instance works
for inst in labeled_frames[test_frame_idx].instances:
vp.view.selectInstance(inst)
assert vp.view.getSelectionInstance() == inst
# Check that sequence selection works
with qtbot.waitCallback() as cb:
vp.view.selectInstance(None)
vp.onSequenceSelect(2, cb)
qtbot.keyClick(vp, QtCore.Qt.Key_2)
qtbot.keyClick(vp, QtCore.Qt.Key_1)
inst_1 = vp.selectable_instances[1].instance
inst_0 = vp.selectable_instances[0].instance
assert cb.args[0] == [inst_1, inst_0]
assert vp.close()
def test_getInstancesBoundingRect():
rect = GraphicsView.getInstancesBoundingRect([])
assert rect.isNull()
def test_QtTextWithBackground(qtbot):
scene = QtWidgets.QGraphicsScene()
view = QtWidgets.QGraphicsView()
view.setScene(scene)
txt = QtTextWithBackground()
txt.setDefaultTextColor(QColor("yellow"))
bg_color = txt.getBackgroundColor()
assert bg_color.lightness() == 0
txt.setDefaultTextColor(QColor("black"))
bg_color = txt.getBackgroundColor()
assert bg_color.lightness() == 255
scene.addItem(txt)
qtbot.addWidget(view)
def test_VisibleBoundingBox(qtbot, centered_pair_labels):
vp = QtVideoPlayer(centered_pair_labels.video)
test_idx = 27
for instance in centered_pair_labels.labeled_frames[test_idx].instances:
vp.addInstance(instance)
inst = vp.instances[0]
# Check if type of bounding box is correct
assert type(inst.box) == VisibleBoundingBox
# Scale the bounding box
start_top_left = inst.box.rect().topLeft()
start_bottom_right = inst.box.rect().bottomRight()
initial_width = inst.box.rect().width()
initial_height = inst.box.rect().height()
dx = 5
dy = 10
end_top_left = QtCore.QPointF(start_top_left.x() - dx, start_top_left.y() - dy)
end_bottom_right = QtCore.QPointF(
start_bottom_right.x() + dx, start_bottom_right.y() + dy
)
inst.box.setRect(QtCore.QRectF(end_top_left, end_bottom_right))
# Check if bounding box scaled appropriately
assert inst.box.rect().width() - initial_width == 2 * dx
assert inst.box.rect().height() - initial_height == 2 * dy
|
4,419 |
update
|
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Optional, Sequence, Union
from torch import Tensor
from torchmetrics.functional.regression.spearman import _spearman_corrcoef_compute, _spearman_corrcoef_update
from torchmetrics.metric import Metric
from torchmetrics.utilities import rank_zero_warn
from torchmetrics.utilities.data import dim_zero_cat
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["SpearmanCorrCoef.plot"]
class SpearmanCorrCoef(Metric):
r"""Compute `spearmans rank correlation coefficient`_.
    .. math::
        r_s = \frac{\operatorname{cov}(rg_x, rg_y)}{\sigma_{rg_x} \sigma_{rg_y}}
    where :math:`rg_x` and :math:`rg_y` are the ranks associated with the variables :math:`x` and :math:`y`.
    Spearman's correlation coefficient corresponds to the standard Pearson correlation coefficient calculated
    on the rank variables.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): Predictions from model in float tensor with shape ``(N,d)``
- ``target`` (:class:`~torch.Tensor`): Ground truth values in float tensor with shape ``(N,d)``
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``spearman`` (:class:`~torch.Tensor`): A tensor with the spearman correlation(s)
Args:
num_outputs: Number of outputs in multioutput setting
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example (single output regression):
>>> from torch import tensor
>>> from torchmetrics.regression import SpearmanCorrCoef
>>> target = tensor([3, -0.5, 2, 7])
>>> preds = tensor([2.5, 0.0, 2, 8])
>>> spearman = SpearmanCorrCoef()
>>> spearman(preds, target)
tensor(1.0000)
Example (multi output regression):
>>> from torchmetrics.regression import SpearmanCorrCoef
>>> target = tensor([[3, -0.5], [2, 7]])
>>> preds = tensor([[2.5, 0.0], [2, 8]])
>>> spearman = SpearmanCorrCoef(num_outputs=2)
>>> spearman(preds, target)
tensor([1.0000, 1.0000])
"""
is_differentiable: bool = False
higher_is_better: bool = True
full_state_update: bool = False
plot_lower_bound: float = -1.0
plot_upper_bound: float = 1.0
preds: List[Tensor]
target: List[Tensor]
def __init__(
self,
num_outputs: int = 1,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
rank_zero_warn(
"Metric `SpearmanCorrcoef` will save all targets and predictions in the buffer."
" For large datasets, this may lead to large memory footprint."
)
        if not isinstance(num_outputs, int) or num_outputs < 1:
            raise ValueError(f"Expected argument `num_outputs` to be an int larger than 0, but got {num_outputs}")
self.num_outputs = num_outputs
self.add_state("preds", default=[], dist_reduce_fx="cat")
self.add_state("target", default=[], dist_reduce_fx="cat")
def METHOD_NAME(self, preds: Tensor, target: Tensor) -> None:
"""Update state with predictions and targets."""
preds, target = _spearman_corrcoef_update(preds, target, num_outputs=self.num_outputs)
self.preds.append(preds)
self.target.append(target)
def compute(self) -> Tensor:
"""Compute Spearman's correlation coefficient."""
preds = dim_zero_cat(self.preds)
target = dim_zero_cat(self.target)
return _spearman_corrcoef_compute(preds, target)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import randn
>>> # Example plotting a single value
>>> from torchmetrics.regression import SpearmanCorrCoef
>>> metric = SpearmanCorrCoef()
>>> metric.update(randn(10,), randn(10,))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> from torch import randn
>>> # Example plotting multiple values
>>> from torchmetrics.regression import SpearmanCorrCoef
>>> metric = SpearmanCorrCoef()
>>> values = []
>>> for _ in range(10):
... values.append(metric(randn(10,), randn(10,)))
>>> fig, ax = metric.plot(values)
"""
return self._plot(val, ax)
|
4,420 |
backup resource encryption config cf
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Base Client Factories
def _resource_client_factory(cli_ctx, **_):
from azure.cli.core.profiles import ResourceType
from azure.cli.core.commands.client_factory import get_mgmt_service_client
return get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
def _compute_client_factory(cli_ctx, **_):
from azure.cli.core.profiles import ResourceType
from azure.cli.core.commands.client_factory import get_mgmt_service_client
return get_mgmt_service_client(cli_ctx, ResourceType.MGMT_COMPUTE)
def _common_client_factory(cli_ctx, **_):
from azure.mgmt.recoveryservices import RecoveryServicesClient
from azure.cli.core.commands.client_factory import get_mgmt_service_client
return get_mgmt_service_client(cli_ctx, RecoveryServicesClient)
def _backup_client_factory(cli_ctx, **_):
from azure.mgmt.recoveryservicesbackup.activestamp import RecoveryServicesBackupClient
from azure.cli.core.commands.client_factory import get_mgmt_service_client
return get_mgmt_service_client(cli_ctx, RecoveryServicesBackupClient)
def _backup_passive_client_factory(cli_ctx, **_):
from azure.mgmt.recoveryservicesbackup.passivestamp import RecoveryServicesBackupPassiveClient
from azure.cli.core.commands.client_factory import get_mgmt_service_client
return get_mgmt_service_client(cli_ctx, RecoveryServicesBackupPassiveClient)
# External Deps Client Factories
def virtual_machines_cf(cli_ctx, *_):
return _compute_client_factory(cli_ctx).virtual_machines
def resources_cf(cli_ctx, *_):
return _resource_client_factory(cli_ctx).resources
def resource_groups_cf(cli_ctx, *_):
return _resource_client_factory(cli_ctx).resource_groups
# Internal Deps Client Factories
def vaults_cf(cli_ctx, *_):
return _common_client_factory(cli_ctx).vaults
def registered_identities_cf(cli_ctx, *_):
return _common_client_factory(cli_ctx).registered_identities
def backup_storage_configs_cf(cli_ctx, *_):
return _backup_passive_client_factory(cli_ctx).backup_resource_storage_configs
def backup_storage_configs_non_crr_cf(cli_ctx, *_):
return _backup_client_factory(cli_ctx).backup_resource_storage_configs_non_crr
def backup_status_cf(cli_ctx, *_):
return _backup_client_factory(cli_ctx).backup_status
# Protection Client Factories
def protection_intent_cf(cli_ctx, *_):
return _backup_client_factory(cli_ctx).protection_intent
def protection_policies_cf(cli_ctx, *_):
return _backup_client_factory(cli_ctx).protection_policies
def protection_containers_cf(cli_ctx, *_):
return _backup_client_factory(cli_ctx).protection_containers
def protectable_containers_cf(cli_ctx, *_):
return _backup_client_factory(cli_ctx).protectable_containers
def protection_container_operation_results_cf(cli_ctx, *_):
return _backup_client_factory(cli_ctx).protection_container_operation_results
def protection_container_refresh_operation_results_cf(cli_ctx, *_):
return _backup_client_factory(cli_ctx).protection_container_refresh_operation_results
def protected_items_cf(cli_ctx, *_):
return _backup_client_factory(cli_ctx).protected_items
# Backup Client Factories
def backup_policies_cf(cli_ctx, *_):
return _backup_client_factory(cli_ctx).backup_policies
def backup_protection_containers_cf(cli_ctx, *_):
return _backup_client_factory(cli_ctx).backup_protection_containers
def backup_protection_intent_cf(cli_ctx, *_):
return _backup_client_factory(cli_ctx).backup_protection_intent
def backup_protectable_items_cf(cli_ctx, *_):
return _backup_client_factory(cli_ctx).backup_protectable_items
def backup_protected_items_cf(cli_ctx, *_):
return _backup_client_factory(cli_ctx).backup_protected_items
def backup_protected_items_crr_cf(cli_ctx, *_):
return _backup_passive_client_factory(cli_ctx).backup_protected_items_crr
def backup_operation_statuses_cf(cli_ctx, *_):
return _backup_client_factory(cli_ctx).backup_operation_statuses
def crr_operation_status_cf(cli_ctx, *_):
return _backup_passive_client_factory(cli_ctx).crr_operation_status
def backups_cf(cli_ctx, *_):
return _backup_client_factory(cli_ctx).backups
def backup_jobs_cf(cli_ctx, *_):
return _backup_client_factory(cli_ctx).backup_jobs
def backup_crr_jobs_cf(cli_ctx, *_):
return _backup_passive_client_factory(cli_ctx).backup_crr_jobs
def backup_workload_items_cf(cli_ctx, *_):
return _backup_client_factory(cli_ctx).backup_workload_items
# Job Client Factories
def job_details_cf(cli_ctx, *_):
return _backup_client_factory(cli_ctx).job_details
def backup_crr_job_details_cf(cli_ctx, *_):
return _backup_passive_client_factory(cli_ctx).backup_crr_job_details
def job_cancellations_cf(cli_ctx, *_):
return _backup_client_factory(cli_ctx).job_cancellations
# Recovery Client Factories
def recovery_points_cf(cli_ctx, *_):
return _backup_client_factory(cli_ctx).recovery_points
def recovery_points_recommended_cf(cli_ctx, *_):
return _backup_client_factory(cli_ctx).recovery_points_recommended_for_move
def recovery_points_crr_cf(cli_ctx, *_):
return _backup_passive_client_factory(cli_ctx).recovery_points_crr
def recovery_points_passive_cf(cli_ctx, *_):
return _backup_passive_client_factory(cli_ctx).recovery_points
def restores_cf(cli_ctx, *_):
return _backup_client_factory(cli_ctx).restores
def cross_region_restore_cf(cli_ctx, *_):
return _backup_passive_client_factory(cli_ctx).cross_region_restore
def item_level_recovery_connections_cf(cli_ctx, *_):
return _backup_client_factory(cli_ctx).item_level_recovery_connections
def backup_resource_vault_config_cf(cli_ctx, *_):
return _backup_client_factory(cli_ctx).backup_resource_vault_configs
def METHOD_NAME(cli_ctx, *_):
return _backup_client_factory(cli_ctx).backup_resource_encryption_configs
# Azure Active Directory Client Factories
def aad_properties_cf(cli_ctx, *_):
return _backup_passive_client_factory(cli_ctx).aad_properties
# Resource Guard Proxy Client Factories
def resource_guard_proxy_cf(cli_ctx, *_):
return _backup_client_factory(cli_ctx).resource_guard_proxy
def resource_guard_proxies_cf(cli_ctx, *_):
return _backup_client_factory(cli_ctx).resource_guard_proxies
|
4,421 |
test main
|
import pipes
import os
import string
import unittest
from test.test_support import TESTFN, run_unittest, unlink, reap_children
if os.name != 'posix':
raise unittest.SkipTest('pipes module only works on posix')
TESTFN2 = TESTFN + "2"
# tr a-z A-Z is not portable, so make the ranges explicit
s_command = 'tr %s %s' % (string.ascii_lowercase, string.ascii_uppercase)
class SimplePipeTests(unittest.TestCase):
def tearDown(self):
for f in (TESTFN, TESTFN2):
unlink(f)
def testSimplePipe1(self):
t = pipes.Template()
t.append(s_command, pipes.STDIN_STDOUT)
f = t.open(TESTFN, 'w')
f.write('hello world #1')
f.close()
with open(TESTFN) as f:
self.assertEqual(f.read(), 'HELLO WORLD #1')
def testSimplePipe2(self):
with open(TESTFN, 'w') as f:
f.write('hello world #2')
t = pipes.Template()
t.append(s_command + ' < $IN > $OUT', pipes.FILEIN_FILEOUT)
t.copy(TESTFN, TESTFN2)
with open(TESTFN2) as f:
self.assertEqual(f.read(), 'HELLO WORLD #2')
def testSimplePipe3(self):
with open(TESTFN, 'w') as f:
f.write('hello world #2')
t = pipes.Template()
t.append(s_command + ' < $IN', pipes.FILEIN_STDOUT)
with t.open(TESTFN, 'r') as f:
self.assertEqual(f.read(), 'HELLO WORLD #2')
def testEmptyPipeline1(self):
# copy through empty pipe
d = 'empty pipeline test COPY'
with open(TESTFN, 'w') as f:
f.write(d)
with open(TESTFN2, 'w') as f:
f.write('')
t=pipes.Template()
t.copy(TESTFN, TESTFN2)
with open(TESTFN2) as f:
self.assertEqual(f.read(), d)
def testEmptyPipeline2(self):
# read through empty pipe
d = 'empty pipeline test READ'
with open(TESTFN, 'w') as f:
f.write(d)
t=pipes.Template()
with t.open(TESTFN, 'r') as f:
self.assertEqual(f.read(), d)
def testEmptyPipeline3(self):
# write through empty pipe
d = 'empty pipeline test WRITE'
t = pipes.Template()
with t.open(TESTFN, 'w') as f:
f.write(d)
with open(TESTFN) as f:
self.assertEqual(f.read(), d)
def testQuoting(self):
safeunquoted = string.ascii_letters + string.digits + '@%_-+=:,./'
unsafe = '"`$\\!'
self.assertEqual(pipes.quote(''), "''")
self.assertEqual(pipes.quote(safeunquoted), safeunquoted)
self.assertEqual(pipes.quote('test file name'), "'test file name'")
for u in unsafe:
self.assertEqual(pipes.quote('test%sname' % u),
"'test%sname'" % u)
for u in unsafe:
self.assertEqual(pipes.quote("test%s'name'" % u),
"'test%s'\"'\"'name'\"'\"''" % u)
def testRepr(self):
t = pipes.Template()
self.assertEqual(repr(t), "<Template instance, steps=[]>")
t.append('tr a-z A-Z', pipes.STDIN_STDOUT)
self.assertEqual(repr(t),
"<Template instance, steps=[('tr a-z A-Z', '--')]>")
def testSetDebug(self):
t = pipes.Template()
t.debug(False)
self.assertEqual(t.debugging, False)
t.debug(True)
self.assertEqual(t.debugging, True)
def testReadOpenSink(self):
# check calling open('r') on a pipe ending with
# a sink raises ValueError
t = pipes.Template()
t.append('boguscmd', pipes.SINK)
self.assertRaises(ValueError, t.open, 'bogusfile', 'r')
def testWriteOpenSource(self):
# check calling open('w') on a pipe ending with
# a source raises ValueError
t = pipes.Template()
t.prepend('boguscmd', pipes.SOURCE)
self.assertRaises(ValueError, t.open, 'bogusfile', 'w')
def testBadAppendOptions(self):
t = pipes.Template()
# try a non-string command
self.assertRaises(TypeError, t.append, 7, pipes.STDIN_STDOUT)
# try a type that isn't recognized
self.assertRaises(ValueError, t.append, 'boguscmd', 'xx')
# shouldn't be able to append a source
self.assertRaises(ValueError, t.append, 'boguscmd', pipes.SOURCE)
# check appending two sinks
t = pipes.Template()
t.append('boguscmd', pipes.SINK)
self.assertRaises(ValueError, t.append, 'boguscmd', pipes.SINK)
# command needing file input but with no $IN
t = pipes.Template()
self.assertRaises(ValueError, t.append, 'boguscmd $OUT',
pipes.FILEIN_FILEOUT)
t = pipes.Template()
self.assertRaises(ValueError, t.append, 'boguscmd',
pipes.FILEIN_STDOUT)
# command needing file output but with no $OUT
t = pipes.Template()
self.assertRaises(ValueError, t.append, 'boguscmd $IN',
pipes.FILEIN_FILEOUT)
t = pipes.Template()
self.assertRaises(ValueError, t.append, 'boguscmd',
pipes.STDIN_FILEOUT)
def testBadPrependOptions(self):
t = pipes.Template()
# try a non-string command
self.assertRaises(TypeError, t.prepend, 7, pipes.STDIN_STDOUT)
# try a type that isn't recognized
self.assertRaises(ValueError, t.prepend, 'tr a-z A-Z', 'xx')
# shouldn't be able to prepend a sink
self.assertRaises(ValueError, t.prepend, 'boguscmd', pipes.SINK)
# check prepending two sources
t = pipes.Template()
t.prepend('boguscmd', pipes.SOURCE)
self.assertRaises(ValueError, t.prepend, 'boguscmd', pipes.SOURCE)
# command needing file input but with no $IN
t = pipes.Template()
self.assertRaises(ValueError, t.prepend, 'boguscmd $OUT',
pipes.FILEIN_FILEOUT)
t = pipes.Template()
self.assertRaises(ValueError, t.prepend, 'boguscmd',
pipes.FILEIN_STDOUT)
# command needing file output but with no $OUT
t = pipes.Template()
self.assertRaises(ValueError, t.prepend, 'boguscmd $IN',
pipes.FILEIN_FILEOUT)
t = pipes.Template()
self.assertRaises(ValueError, t.prepend, 'boguscmd',
pipes.STDIN_FILEOUT)
def testBadOpenMode(self):
t = pipes.Template()
self.assertRaises(ValueError, t.open, 'bogusfile', 'x')
def testClone(self):
t = pipes.Template()
t.append('tr a-z A-Z', pipes.STDIN_STDOUT)
u = t.clone()
self.assertNotEqual(id(t), id(u))
self.assertEqual(t.steps, u.steps)
self.assertNotEqual(id(t.steps), id(u.steps))
self.assertEqual(t.debugging, u.debugging)
def METHOD_NAME():
run_unittest(SimplePipeTests)
reap_children()
if __name__ == "__main__":
METHOD_NAME()
|
4,422 |
uselist
|
import sqlalchemy as sa
from sqlalchemy.orm.attributes import InstrumentedAttribute
from sqlalchemy.util.langhelpers import symbol
from .utils import str_coercible
@str_coercible
class Path:
def __init__(self, path, separator='.'):
if isinstance(path, Path):
self.path = path.path
else:
self.path = path
self.separator = separator
@property
def parts(self):
return self.path.split(self.separator)
def __iter__(self):
yield from self.parts
def __len__(self):
return len(self.parts)
def __repr__(self):
return f"{self.__class__.__name__}('{self.path}')"
def index(self, element):
return self.parts.index(element)
def __getitem__(self, slice):
result = self.parts[slice]
if isinstance(result, list):
return self.__class__(
self.separator.join(result),
separator=self.separator
)
return result
def __eq__(self, other):
return self.path == other.path and self.separator == other.separator
def __ne__(self, other):
return not (self == other)
def __unicode__(self):
return self.path
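# Illustrative usage (sketch): Path wraps a dotted attribute path and supports
# len(), iteration, integer indexing and slicing back into Path objects.
#
#   p = Path('user.first_name')
#   assert p.parts == ['user', 'first_name']
#   assert len(p) == 2
#   assert p[1] == 'first_name'
#   assert str(p[0:1]) == 'user'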
def get_attr(mixed, attr):
if isinstance(mixed, InstrumentedAttribute):
return getattr(
mixed.property.mapper.class_,
attr
)
else:
return getattr(mixed, attr)
@str_coercible
class AttrPath:
def __init__(self, class_, path):
self.class_ = class_
self.path = Path(path)
self.parts = []
last_attr = class_
for value in self.path:
last_attr = get_attr(last_attr, value)
self.parts.append(last_attr)
def __iter__(self):
yield from self.parts
def __invert__(self):
def get_backref(part):
prop = part.property
backref = prop.backref or prop.back_populates
if backref is None:
raise Exception(
"Invert failed because property '%s' of class "
"%s has no backref." % (
prop.key,
prop.parent.class_.__name__
)
)
if isinstance(backref, tuple):
return backref[0]
else:
return backref
if isinstance(self.parts[-1].property, sa.orm.ColumnProperty):
class_ = self.parts[-1].class_
else:
class_ = self.parts[-1].mapper.class_
return self.__class__(
class_,
'.'.join(map(get_backref, reversed(self.parts)))
)
def index(self, element):
for index, el in enumerate(self.parts):
if el is element:
return index
@property
def direction(self):
symbols = [part.property.direction for part in self.parts]
if symbol('MANYTOMANY') in symbols:
return symbol('MANYTOMANY')
elif symbol('MANYTOONE') in symbols and symbol('ONETOMANY') in symbols:
return symbol('MANYTOMANY')
return symbols[0]
@property
def METHOD_NAME(self):
return any(part.property.METHOD_NAME for part in self.parts)
def __getitem__(self, slice):
result = self.parts[slice]
if isinstance(result, list) and result:
if result[0] is self.parts[0]:
class_ = self.class_
else:
class_ = result[0].parent.class_
return self.__class__(
class_,
self.path[slice]
)
else:
return result
def __len__(self):
return len(self.path)
def __repr__(self):
return "{}({}, {!r})".format(
self.__class__.__name__,
self.class_.__name__,
self.path.path
)
def __eq__(self, other):
return self.path == other.path and self.class_ == other.class_
def __ne__(self, other):
return not (self == other)
def __unicode__(self):
return str(self.path)
|
4,423 |
test import contributions
|
# This file is part of Indico.
# Copyright (C) 2002 - 2023 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from datetime import datetime, timedelta
from io import BytesIO
import pytest
from indico.core.errors import UserValueError
from indico.modules.events.contributions.util import import_contributions_from_csv
from indico.util.date_time import as_utc
def _check_importer_exception(event, csv):
with pytest.raises(UserValueError) as e:
import_contributions_from_csv(event, BytesIO(csv))
return e.value
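# Column order of the CSV fixtures used below (inferred from the test data,
# not from the importer's documentation): start_dt, duration in minutes,
# title, speaker first name, last name, affiliation, email.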
def METHOD_NAME(dummy_event, dummy_user):
dummy_event.start_dt = as_utc(datetime(2017, 11, 27, 8, 0, 0))
dummy_event.end_dt = as_utc(datetime(2017, 11, 27, 12, 0, 0))
csv = b'\n'.join([b'2017-11-27T08:00,10,First contribution,,,,',
b',,Second contribution,John,Doe,ACME Inc.,[email protected]',
b'2017-11-27T08:30,15,Third contribution,Guinea Albert,Pig,,[email protected]'])
contributions, changes = import_contributions_from_csv(dummy_event, BytesIO(csv))
assert len(contributions) == 3
assert contributions[0].start_dt == dummy_event.start_dt
assert contributions[0].duration == timedelta(minutes=10)
assert contributions[0].title == 'First contribution'
assert len(contributions[0].speakers) == 0
assert contributions[1].start_dt is None
assert contributions[1].duration == timedelta(minutes=20)
assert contributions[1].title == 'Second contribution'
speakers = contributions[1].speakers
assert len(speakers) == 1
assert speakers[0].full_name == 'John Doe'
assert speakers[0].affiliation == 'ACME Inc.'
assert speakers[0].email == '[email protected]'
assert contributions[2].start_dt == as_utc(datetime(2017, 11, 27, 8, 30, 0))
assert contributions[2].duration == timedelta(minutes=15)
assert contributions[2].title == 'Third contribution'
speakers = contributions[2].speakers
assert len(speakers) == 1
# name comes from PersonLink, not user
assert speakers[0].full_name == 'Guinea Albert Pig'
assert not speakers[0].affiliation
assert speakers[0].email == '[email protected]'
assert speakers[0].person.user == dummy_user
assert not changes
def test_import_contributions_changes(db, dummy_event, dummy_user):
original_start_dt = as_utc(datetime(2017, 11, 27, 8, 0, 0))
original_end_dt = as_utc(datetime(2017, 11, 27, 12, 0, 0))
dummy_event.start_dt = original_start_dt
dummy_event.end_dt = original_end_dt
# Change of end time
csv = b'\n'.join([b'2017-11-27T08:00,10,First contribution,,,,',
b'2017-11-27T08:10:00,10,Second contribution,John,Doe,ACME Inc.,[email protected]',
b'2017-11-27T11:30,60,Third contribution,Guinea Albert,Pig,,[email protected]'])
contributions, changes = import_contributions_from_csv(dummy_event, BytesIO(csv))
new_end_dt = as_utc(datetime(2017, 11, 27, 12, 30, 0))
assert dummy_event.end_dt == new_end_dt
assert changes == {
'duration': [(timedelta(hours=4), timedelta(hours=4, minutes=30))],
'end_dt': [(original_end_dt, new_end_dt)]
}
# reset date/time
dummy_event.start_dt = original_start_dt
dummy_event.end_dt = original_end_dt
# Change of start/end date
csv = b'\n'.join([b'2017-11-26T08:00,10,First contribution,,,,',
b'2017-11-27T08:10:00,10,Second contribution,John,Doe,ACME Inc.,[email protected]',
b'2017-11-28T11:30,60,Third contribution,Guinea Albert,Pig,,[email protected]'])
contributions, changes = import_contributions_from_csv(dummy_event, BytesIO(csv))
new_start_dt = as_utc(datetime(2017, 11, 26, 8, 0, 0))
new_end_dt = as_utc(datetime(2017, 11, 28, 12, 30, 0))
assert dummy_event.start_dt == new_start_dt
assert dummy_event.end_dt == new_end_dt
assert len(changes) == 3
def test_import_contributions_errors(db, dummy_event):
original_start_dt = as_utc(datetime(2017, 11, 27, 8, 0, 0))
original_end_dt = as_utc(datetime(2017, 11, 27, 12, 0, 0))
dummy_event.start_dt = original_start_dt
dummy_event.end_dt = original_end_dt
e = _check_importer_exception(dummy_event, b',,Test,,,,,')
assert 'malformed' in str(e)
assert 'Row 1' in str(e)
e = _check_importer_exception(dummy_event, b',,,,,,')
assert 'title' in str(e)
e = _check_importer_exception(dummy_event, b'2010-23-02T00:00:00,,Test,,,,')
assert 'parse date' in str(e)
e = _check_importer_exception(dummy_event, b'2010-02-23T00:00:00,15min,Test,,,,')
assert 'parse duration' in str(e)
e = _check_importer_exception(dummy_event, b'2010-02-23T00:00:00,15,Test,Test,Test,Test,foobar')
assert 'invalid email' in str(e)
|
4,424 |
run task
|
import functools
from datetime import datetime, timezone
from sqlalchemy.orm import relationship
from sqlalchemy.schema import Column, ForeignKey
from sqlalchemy.sql.sqltypes import Boolean, Integer, String, Text
from files.classes.cron.tasks import (RepeatableTask, ScheduledTaskType,
TaskRunContext)
from files.classes.submission import Submission
from files.classes.visstate import StateMod, StateReport, VisibilityState
from files.helpers.config.const import SUBMISSION_TITLE_LENGTH_MAXIMUM
from files.helpers.content import body_displayed
from files.helpers.lazy import lazy
from files.helpers.sanitize import filter_emojis_only
__all__ = ('ScheduledSubmissionTask',)
class ScheduledSubmissionTask(RepeatableTask):
__tablename__ = "tasks_repeatable_scheduled_submissions"
__mapper_args__ = {
"polymorphic_identity": int(ScheduledTaskType.SCHEDULED_SUBMISSION),
}
id = Column(Integer, ForeignKey(RepeatableTask.id), primary_key=True)
author_id_submission = Column(Integer, ForeignKey("users.id"), nullable=False)
ghost = Column(Boolean, default=False, nullable=False)
private = Column(Boolean, default=False, nullable=False)
over_18 = Column(Boolean, default=False, nullable=False)
is_bot = Column(Boolean, default=False, nullable=False)
title = Column(String(SUBMISSION_TITLE_LENGTH_MAXIMUM), nullable=False)
url = Column(String)
body = Column(Text)
body_html = Column(Text)
flair = Column(String)
embed_url = Column(String)
author = relationship("User", foreign_keys=author_id_submission)
task = relationship(RepeatableTask)
submissions = relationship(Submission,
back_populates="task", order_by="Submission.id.desc()")
def METHOD_NAME(self, ctx:TaskRunContext) -> None:
submission:Submission = self.make_submission(ctx)
with ctx.app_context():
# TODO: stop using app context (currently required for sanitize and
# username pings)
submission.submit(ctx.db) # TODO: thumbnails
submission.publish()
def make_submission(self, ctx:TaskRunContext) -> Submission:
title:str = self.make_title(ctx.trigger_time)
title_html:str = filter_emojis_only(title, graceful=True)
if len(title_html) > 1500: raise ValueError("Rendered title too large")
return Submission(
created_utc=int(ctx.trigger_time.timestamp()),
private=self.private,
author_id=self.author_id_submission,
over_18=self.over_18,
app_id=None,
is_bot=self.is_bot,
title=title,
title_html=title_html,
url=self.url,
body=self.body,
body_html=self.body_html,
flair=self.flair,
ghost=self.ghost,
state_mod=StateMod.VISIBLE,
embed_url=self.embed_url,
task_id=self.id,
)
def make_title(self, trigger_time:datetime) -> str:
return trigger_time.strftime(self.title)
# properties below here are mocked in order to reuse part of the submission
# HTML template for previewing a submitted task
@property
def state_user_deleted_utc(self) -> datetime | None:
return datetime.now(tz=timezone.utc) if not self.task.enabled else None
@functools.cached_property
def title_html(self) -> str:
'''
This is used as a mock property for display in submission listings that
contain scheduled posts.
.. warning::
This property should not be used for generating the HTML for an actual
submission as this will be missing the special formatting that may be
applied to titles. Instead call
`ScheduledSubmissionContext.make_title()` with the `datetime` that the
event was triggered at.
'''
return filter_emojis_only(self.title)
@property
def author_name(self) -> str:
return self.author.username
@property
def upvotes(self) -> int:
return 1
@property
def score(self) -> int:
return 1
@property
def downvotes(self) -> int:
return 0
@property
def realupvotes(self) -> int:
return 1
@property
def comment_count(self) -> int:
return 0
@property
def views(self) -> int:
return 0
@property
def state_mod(self) -> StateMod:
return StateMod.VISIBLE
def award_count(self, kind):
return 0
@lazy
def realurl(self, v):
return Submission.realurl(self, v)
def realbody(self, v):
return body_displayed(self, v, is_html=True)
def plainbody(self, v):
return body_displayed(self, v, is_html=False)
@lazy
def realtitle(self, v):
return self.title_html if self.title_html else self.title
@lazy
def plaintitle(self, v):
return self.title
@property
def permalink(self):
return f"/tasks/scheduled_posts/{self.id}"
@property
def shortlink(self):
return self.permalink
@property
def is_real_submission(self) -> bool:
return False
@property
def should_hide_score(self) -> bool:
return True
@property
def edit_url(self) -> str:
return f"/tasks/scheduled_posts/{self.id}/content"
@property
def visibility_state(self) -> VisibilityState:
return VisibilityState(
state_mod=StateMod.VISIBLE,
state_mod_set_by=None,
state_report=StateReport.UNREPORTED,
deleted=False, # we only want to show deleted UI color if disabled
op_shadowbanned=False,
op_id=self.author_id_submission,
op_name_safe=self.author_name
)
|
4,425 |
test property getter doc override
|
# Test case for property
# more tests are in test_descr
import sys
import unittest
from test.test_support import run_unittest
class PropertyBase(Exception):
pass
class PropertyGet(PropertyBase):
pass
class PropertySet(PropertyBase):
pass
class PropertyDel(PropertyBase):
pass
class BaseClass(object):
def __init__(self):
self._spam = 5
@property
def spam(self):
"""BaseClass.getter"""
return self._spam
@spam.setter
def spam(self, value):
self._spam = value
@spam.deleter
def spam(self):
del self._spam
class SubClass(BaseClass):
@BaseClass.spam.getter
def spam(self):
"""SubClass.getter"""
raise PropertyGet(self._spam)
@spam.setter
def spam(self, value):
raise PropertySet(self._spam)
@spam.deleter
def spam(self):
raise PropertyDel(self._spam)
class PropertyDocBase(object):
_spam = 1
def _get_spam(self):
return self._spam
spam = property(_get_spam, doc="spam spam spam")
class PropertyDocSub(PropertyDocBase):
@PropertyDocBase.spam.getter
def spam(self):
"""The decorator does not use this doc string"""
return self._spam
class PropertySubNewGetter(BaseClass):
@BaseClass.spam.getter
def spam(self):
"""new docstring"""
return 5
class PropertyNewGetter(object):
@property
def spam(self):
"""original docstring"""
return 1
@spam.getter
def spam(self):
"""new docstring"""
return 8
class PropertyTests(unittest.TestCase):
def test_property_decorator_baseclass(self):
# see #1620
base = BaseClass()
self.assertEqual(base.spam, 5)
self.assertEqual(base._spam, 5)
base.spam = 10
self.assertEqual(base.spam, 10)
self.assertEqual(base._spam, 10)
delattr(base, "spam")
self.assertTrue(not hasattr(base, "spam"))
self.assertTrue(not hasattr(base, "_spam"))
base.spam = 20
self.assertEqual(base.spam, 20)
self.assertEqual(base._spam, 20)
def test_property_decorator_subclass(self):
# see #1620
sub = SubClass()
self.assertRaises(PropertyGet, getattr, sub, "spam")
self.assertRaises(PropertySet, setattr, sub, "spam", None)
self.assertRaises(PropertyDel, delattr, sub, "spam")
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_property_decorator_subclass_doc(self):
sub = SubClass()
self.assertEqual(sub.__class__.spam.__doc__, "SubClass.getter")
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_property_decorator_baseclass_doc(self):
base = BaseClass()
self.assertEqual(base.__class__.spam.__doc__, "BaseClass.getter")
def test_property_decorator_doc(self):
base = PropertyDocBase()
sub = PropertyDocSub()
self.assertEqual(base.__class__.spam.__doc__, "spam spam spam")
self.assertEqual(sub.__class__.spam.__doc__, "spam spam spam")
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def METHOD_NAME(self):
newgettersub = PropertySubNewGetter()
self.assertEqual(newgettersub.spam, 5)
self.assertEqual(newgettersub.__class__.spam.__doc__, "new docstring")
newgetter = PropertyNewGetter()
self.assertEqual(newgetter.spam, 8)
self.assertEqual(newgetter.__class__.spam.__doc__, "new docstring")
# Issue 5890: subclasses of property do not preserve method __doc__ strings
class PropertySub(property):
"""This is a subclass of property"""
class PropertySubSlots(property):
"""This is a subclass of property that defines __slots__"""
__slots__ = ()
class PropertySubclassTests(unittest.TestCase):
def test_slots_docstring_copy_exception(self):
try:
class Foo(object):
@PropertySubSlots
def spam(self):
"""Trying to copy this docstring will raise an exception"""
return 1
#This raises a TypeError in Jython.
except (AttributeError, TypeError):
pass
else:
raise Exception("AttributeError not raised")
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_docstring_copy(self):
class Foo(object):
@PropertySub
def spam(self):
"""spam wrapped in property subclass"""
return 1
self.assertEqual(
Foo.spam.__doc__,
"spam wrapped in property subclass")
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_property_setter_copies_getter_docstring(self):
class Foo(object):
def __init__(self): self._spam = 1
@PropertySub
def spam(self):
"""spam wrapped in property subclass"""
return self._spam
@spam.setter
def spam(self, value):
"""this docstring is ignored"""
self._spam = value
foo = Foo()
self.assertEqual(foo.spam, 1)
foo.spam = 2
self.assertEqual(foo.spam, 2)
self.assertEqual(
Foo.spam.__doc__,
"spam wrapped in property subclass")
class FooSub(Foo):
@Foo.spam.setter
def spam(self, value):
"""another ignored docstring"""
self._spam = 'eggs'
foosub = FooSub()
self.assertEqual(foosub.spam, 1)
foosub.spam = 7
self.assertEqual(foosub.spam, 'eggs')
self.assertEqual(
FooSub.spam.__doc__,
"spam wrapped in property subclass")
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_property_new_getter_new_docstring(self):
class Foo(object):
@PropertySub
def spam(self):
"""a docstring"""
return 1
@spam.getter
def spam(self):
"""a new docstring"""
return 2
self.assertEqual(Foo.spam.__doc__, "a new docstring")
class FooBase(object):
@PropertySub
def spam(self):
"""a docstring"""
return 1
class Foo2(FooBase):
@FooBase.spam.getter
def spam(self):
"""a new docstring"""
return 2
self.assertEqual(Foo2.spam.__doc__, "a new docstring")
def test_main():
run_unittest(PropertyTests, PropertySubclassTests)
if __name__ == '__main__':
test_main()
|
4,426 |
test follow on comment
|
# Copyright (c) 2019, Frappe Technologies and Contributors
# License: MIT. See LICENSE
from dataclasses import dataclass
import frappe
import frappe.desk.form.document_follow as document_follow
from frappe.desk.form.assign_to import add
from frappe.desk.form.document_follow import get_document_followed_by_user
from frappe.desk.form.utils import add_comment
from frappe.desk.like import toggle_like
from frappe.query_builder import DocType
from frappe.query_builder.functions import Cast_
from frappe.share import add as share
from frappe.tests.utils import FrappeTestCase
class TestDocumentFollow(FrappeTestCase):
def test_document_follow_version(self):
user = get_user()
event_doc = get_event()
event_doc.description = "This is a test description for sending mail"
event_doc.save(ignore_version=False)
document_follow.unfollow_document("Event", event_doc.name, user.name)
doc = document_follow.follow_document("Event", event_doc.name, user.name)
self.assertEqual(doc.user, user.name)
document_follow.send_hourly_updates()
emails = get_emails(event_doc, "%This is a test description for sending mail%")
self.assertIsNotNone(emails)
def test_document_follow_comment(self):
user = get_user()
event_doc = get_event()
add_comment(
event_doc.doctype, event_doc.name, "This is a test comment", "[email protected]", "Bosh"
)
document_follow.unfollow_document("Event", event_doc.name, user.name)
doc = document_follow.follow_document("Event", event_doc.name, user.name)
self.assertEqual(doc.user, user.name)
document_follow.send_hourly_updates()
emails = get_emails(event_doc, "%This is a test comment%")
self.assertIsNotNone(emails)
def test_follow_limit(self):
user = get_user()
for _ in range(25):
event_doc = get_event()
document_follow.unfollow_document("Event", event_doc.name, user.name)
doc = document_follow.follow_document("Event", event_doc.name, user.name)
self.assertEqual(doc.user, user.name)
self.assertEqual(len(get_document_followed_by_user(user.name)), 20)
def test_follow_on_create(self):
user = get_user(DocumentFollowConditions(1))
frappe.set_user(user.name)
event = get_event()
event.description = "This is a test description for sending mail"
event.save(ignore_version=False)
documents_followed = get_events_followed_by_user(event.name, user.name)
self.assertTrue(documents_followed)
def test_do_not_follow_on_create(self):
user = get_user()
frappe.set_user(user.name)
event = get_event()
documents_followed = get_events_followed_by_user(event.name, user.name)
self.assertFalse(documents_followed)
def test_do_not_follow_on_update(self):
user = get_user()
frappe.set_user(user.name)
event = get_event()
event.description = "This is a test description for sending mail"
event.save(ignore_version=False)
documents_followed = get_events_followed_by_user(event.name, user.name)
self.assertFalse(documents_followed)
def METHOD_NAME(self):
user = get_user(DocumentFollowConditions(0, 1))
frappe.set_user(user.name)
event = get_event()
add_comment(
event.doctype, event.name, "This is a test comment", "[email protected]", "Bosh"
)
documents_followed = get_events_followed_by_user(event.name, user.name)
self.assertTrue(documents_followed)
def test_do_not_follow_on_comment(self):
user = get_user()
frappe.set_user(user.name)
event = get_event()
add_comment(
event.doctype, event.name, "This is a test comment", "[email protected]", "Bosh"
)
documents_followed = get_events_followed_by_user(event.name, user.name)
self.assertFalse(documents_followed)
def test_follow_on_like(self):
user = get_user(DocumentFollowConditions(0, 0, 1))
frappe.set_user(user.name)
event = get_event()
toggle_like(event.doctype, event.name, add="Yes")
documents_followed = get_events_followed_by_user(event.name, user.name)
self.assertTrue(documents_followed)
def test_do_not_follow_on_like(self):
user = get_user()
frappe.set_user(user.name)
event = get_event()
toggle_like(event.doctype, event.name)
documents_followed = get_events_followed_by_user(event.name, user.name)
self.assertFalse(documents_followed)
def test_follow_on_assign(self):
user = get_user(DocumentFollowConditions(0, 0, 0, 1))
event = get_event()
add({"assign_to": [user.name], "doctype": event.doctype, "name": event.name})
documents_followed = get_events_followed_by_user(event.name, user.name)
self.assertTrue(documents_followed)
def test_do_not_follow_on_assign(self):
user = get_user()
frappe.set_user(user.name)
event = get_event()
add({"assign_to": [user.name], "doctype": event.doctype, "name": event.name})
documents_followed = get_events_followed_by_user(event.name, user.name)
self.assertFalse(documents_followed)
def test_follow_on_share(self):
user = get_user(DocumentFollowConditions(0, 0, 0, 0, 1))
event = get_event()
share(user=user.name, doctype=event.doctype, name=event.name)
documents_followed = get_events_followed_by_user(event.name, user.name)
self.assertTrue(documents_followed)
def test_do_not_follow_on_share(self):
user = get_user()
event = get_event()
share(user=user.name, doctype=event.doctype, name=event.name)
documents_followed = get_events_followed_by_user(event.name, user.name)
self.assertFalse(documents_followed)
def tearDown(self):
frappe.db.rollback()
frappe.db.delete("Email Queue")
frappe.db.delete("Email Queue Recipient")
frappe.db.delete("Document Follow")
frappe.db.delete("Event")
def get_events_followed_by_user(event_name, user_name):
DocumentFollow = DocType("Document Follow")
return (
frappe.qb.from_(DocumentFollow)
.where(DocumentFollow.ref_doctype == "Event")
.where(DocumentFollow.ref_docname == event_name)
.where(DocumentFollow.user == user_name)
.select(DocumentFollow.name)
).run()
def get_event():
doc = frappe.get_doc(
{
"doctype": "Event",
"subject": "_Test_Doc_Follow",
"doc.starts_on": frappe.utils.now(),
"doc.ends_on": frappe.utils.add_days(frappe.utils.now(), 5),
"doc.description": "Hello",
}
)
doc.insert()
return doc
def get_user(document_follow=None):
frappe.set_user("Administrator")
if frappe.db.exists("User", "[email protected]"):
doc = frappe.delete_doc("User", "[email protected]")
doc = frappe.new_doc("User")
doc.email = "[email protected]"
doc.first_name = "Test"
doc.last_name = "User"
doc.send_welcome_email = 0
doc.document_follow_notify = 1
doc.document_follow_frequency = "Hourly"
doc.__dict__.update(document_follow.__dict__ if document_follow else {})
doc.insert()
doc.add_roles("System Manager")
return doc
def get_emails(event_doc, search_string):
EmailQueue = DocType("Email Queue")
EmailQueueRecipient = DocType("Email Queue Recipient")
return (
frappe.qb.from_(EmailQueue)
.join(EmailQueueRecipient)
.on(EmailQueueRecipient.parent == Cast_(EmailQueue.name, "varchar"))
.where(
EmailQueueRecipient.recipient == "[email protected]",
)
.where(EmailQueue.message.like(f"%{event_doc.doctype}%"))
.where(EmailQueue.message.like(f"%{event_doc.name}%"))
.where(EmailQueue.message.like(search_string))
.select(EmailQueue.message)
.limit(1)
).run()
@dataclass
class DocumentFollowConditions:
follow_created_documents: int = 0
follow_commented_documents: int = 0
follow_liked_documents: int = 0
follow_assigned_documents: int = 0
follow_shared_documents: int = 0
|
4,427 |
test include print
|
"""Copy of `test_sobol.py` modified to cover SciPy sobol interface."""
from pytest import raises, mark
from numpy.testing import assert_equal, assert_allclose
import numpy as np
from scipy.stats import norm
from SALib.analyze import sobol
from SALib.sample.sobol import sample as sobol_sample
from SALib.sample import sobol_sequence
from SALib.test_functions import Ishigami, Sobol_G
from SALib.util import read_param_file
def setup_samples(N=512, calc_second_order=True):
param_file = "src/SALib/test_functions/params/Ishigami.txt"
problem = read_param_file(param_file)
param_values = sobol_sample(problem, N=N, calc_second_order=calc_second_order)
return problem, param_values
def test_sobol_sequence():
# example from Joe & Kuo: http://web.maths.unsw.edu.au/~fkuo/sobol/
S = sobol_sequence.sample(10, 3)
expected = [
[0, 0, 0],
[0.5, 0.5, 0.5],
[0.75, 0.25, 0.25],
[0.25, 0.75, 0.75],
[0.375, 0.375, 0.625],
[0.875, 0.875, 0.125],
[0.625, 0.125, 0.875],
[0.125, 0.625, 0.375],
[0.1875, 0.3125, 0.9375],
[0.6875, 0.8125, 0.4375],
]
assert_allclose(S, expected, atol=5e-2, rtol=1e-1)
def test_sample_size_second_order():
N = 512
D = 3
problem, param_values = setup_samples(N=N)
assert_equal(param_values.shape, [N * (2 * D + 2), D])
def test_sample_size_first_order():
N = 512
D = 3
problem, param_values = setup_samples(N=N, calc_second_order=False)
assert_equal(param_values.shape, [N * (D + 2), D])
def test_incorrect_sample_size():
problem, param_values = setup_samples()
Y = Ishigami.evaluate(param_values)
with raises(RuntimeError):
sobol.analyze(problem, Y[:-10], calc_second_order=True)
def test_bad_conf_level():
problem, param_values = setup_samples()
Y = Ishigami.evaluate(param_values)
with raises(RuntimeError):
sobol.analyze(
problem, Y, calc_second_order=True, conf_level=1.01, print_to_console=False
)
with raises(RuntimeError):
sobol.analyze(
problem, Y, calc_second_order=True, conf_level=1.0, print_to_console=False
)
with raises(RuntimeError):
sobol.analyze(
problem, Y, calc_second_order=True, conf_level=0.0, print_to_console=False
)
@mark.filterwarnings("ignore::UserWarning")
def test_incorrect_second_order_setting():
# note: this will still be a problem if N*(2D+2) is also divisible by (D+2)
problem, param_values = setup_samples(N=511, calc_second_order=False)
Y = Ishigami.evaluate(param_values)
with raises(RuntimeError):
sobol.analyze(problem, Y, calc_second_order=True)
def METHOD_NAME():
problem, param_values = setup_samples()
Y = Ishigami.evaluate(param_values)
sobol.analyze(
problem, Y, calc_second_order=True, conf_level=0.95, print_to_console=True
)
def test_parallel_first_order():
c2o = False
N = 8192
problem, param_values = setup_samples(N=N, calc_second_order=c2o)
Y = Ishigami.evaluate(param_values)
A, B, AB, BA = sobol.separate_output_values(Y, D=3, N=N, calc_second_order=c2o)
r = np.random.randint(N, size=(N, 100))
Z = norm.ppf(0.5 + 0.95 / 2)
tasks, n_processors = sobol.create_task_list(
D=3, calc_second_order=c2o, n_processors=None
)
Si_list = []
for t in tasks:
Si_list.append(sobol.sobol_parallel(Z, A, AB, BA, B, r, t))
Si = sobol.Si_list_to_dict(
Si_list, D=3, num_resamples=100, keep_resamples=False, calc_second_order=c2o
)
assert_allclose(Si["S1"], [0.31, 0.44, 0.00], atol=5e-2, rtol=1e-1)
assert_allclose(Si["ST"], [0.55, 0.44, 0.24], atol=5e-2, rtol=1e-1)
def test_parallel_second_order():
c2o = True
N = 8192
problem, param_values = setup_samples(N=N, calc_second_order=c2o)
Y = Ishigami.evaluate(param_values)
A, B, AB, BA = sobol.separate_output_values(Y, D=3, N=N, calc_second_order=c2o)
r = np.random.randint(N, size=(N, 100))
Z = norm.ppf(0.5 + 0.95 / 2)
tasks, n_processors = sobol.create_task_list(
D=3, calc_second_order=c2o, n_processors=None
)
Si_list = []
for t in tasks:
Si_list.append(sobol.sobol_parallel(Z, A, AB, BA, B, r, t))
Si = sobol.Si_list_to_dict(
Si_list, D=3, num_resamples=100, keep_resamples=False, calc_second_order=c2o
)
assert_allclose(Si["S1"], [0.31, 0.44, 0.00], atol=5e-2, rtol=1e-1)
assert_allclose(Si["ST"], [0.55, 0.44, 0.24], atol=5e-2, rtol=1e-1)
assert_allclose(
[Si["S2"][0][1], Si["S2"][0][2], Si["S2"][1][2]],
[0.00, 0.25, 0.00],
atol=5e-2,
rtol=1e-1,
)
def test_Sobol_G_using_sobol():
"""
Tests the accuracy of the Sobol/Saltelli procedure using the Sobol_G
test function, comparing the results from the Sobol/Saltelli analysis
against the analytically computed sensitivity index from the Sobol_G
function.
"""
problem = {
"num_vars": 6,
"names": ["x1", "x2", "x3", "x4", "x5", "x6"],
"bounds": [[0, 1], [0, 1], [0, 1], [0, 1], [0, 1], [0, 1]],
}
N = 4096
a = np.array([78, 12, 0.5, 2, 97, 33])
param_values = sobol_sample(problem, N, calc_second_order=False)
model_results = Sobol_G.evaluate(param_values, a)
Si = sobol.analyze(problem, model_results, calc_second_order=False)
expected = Sobol_G.sensitivity_index(a)
assert_allclose(Si["S1"], expected, atol=1e-2, rtol=1e-6)
def test_constant_output():
"""
No variance in model outputs, which should produce 0 sensitivity.
"""
def mock_model(X):
r, _ = X.shape
return np.array([0.4] * r)
problem = {
"num_vars": 3,
"names": ["x1", "x2", "x3"],
"bounds": [[0, 1], [0, 1], [0, 1]],
}
N = 256
param_values = sobol_sample(problem, N, calc_second_order=False)
model_results = mock_model(param_values)
Si = sobol.analyze(problem, model_results, calc_second_order=False)
assert np.all(
Si["S1"] == 0.0
), "Constant outputs should produce 0 first order sensitivity"
assert np.all(
Si["ST"] == 0.0
), "Constant outputs should produce 0 total order sensitivity"
assert np.all(Si["S1_conf"] == 0.0), "Constant outputs should produce 0 CI"
assert np.all(Si["ST_conf"] == 0.0), "Constant outputs should produce 0 CI"
def test_grouped_constant_output():
"""Test case where there is no variance in model outputs for grouped factors."""
def mock_model(X):
r, _ = X.shape
return np.array([0.4] * r)
problem = {
"num_vars": 3,
"names": ["x1", "x2", "x3"],
"groups": ["A", "B", "A"],
"bounds": [[0, 1], [0, 1], [0, 1]],
}
N = 256
param_values = sobol_sample(problem, N, calc_second_order=False)
model_results = mock_model(param_values)
Si = sobol.analyze(problem, model_results, calc_second_order=False)
assert np.all(
Si["S1"] == 0.0
), "Constant outputs should produce 0 first order sensitivity"
assert np.all(
Si["ST"] == 0.0
), "Constant outputs should produce 0 total order sensitivity"
assert np.all(Si["S1_conf"] == 0.0), "Constant outputs should produce 0 CI"
assert np.all(Si["ST_conf"] == 0.0), "Constant outputs should produce 0 CI"
|
4,428 |
test estimate observables
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2022, 2023.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Tests evaluator of auxiliary operators for algorithms."""
from __future__ import annotations
import unittest
from typing import Tuple
from test.python.algorithms import QiskitAlgorithmsTestCase
import numpy as np
from ddt import ddt, data
from qiskit.algorithms.list_or_dict import ListOrDict
from qiskit.quantum_info.operators.base_operator import BaseOperator
from qiskit.algorithms import estimate_observables
from qiskit.primitives import Estimator
from qiskit.quantum_info import Statevector, SparsePauliOp
from qiskit import QuantumCircuit
from qiskit.circuit.library import EfficientSU2
from qiskit.opflow import PauliSumOp
from qiskit.utils import algorithm_globals
@ddt
class TestObservablesEvaluator(QiskitAlgorithmsTestCase):
"""Tests evaluator of auxiliary operators for algorithms."""
def setUp(self):
super().setUp()
self.seed = 50
algorithm_globals.random_seed = self.seed
self.threshold = 1e-8
def get_exact_expectation(
self, ansatz: QuantumCircuit, observables: ListOrDict[BaseOperator | PauliSumOp]
):
"""
Calculates the exact expectation to be used as an expected result for unit tests.
"""
if isinstance(observables, dict):
observables_list = list(observables.values())
else:
observables_list = observables
# the exact value is a list of (mean, (variance, shots)) where we expect 0 variance and
# 0 shots
exact = [
(Statevector(ansatz).expectation_value(observable), {})
for observable in observables_list
]
if isinstance(observables, dict):
return dict(zip(observables.keys(), exact))
return exact
def _run_test(
self,
expected_result: ListOrDict[Tuple[complex, complex]],
quantum_state: QuantumCircuit,
decimal: int,
observables: ListOrDict[BaseOperator | PauliSumOp],
estimator: Estimator,
):
result = estimate_observables(estimator, quantum_state, observables, None, self.threshold)
if isinstance(observables, dict):
np.testing.assert_equal(list(result.keys()), list(expected_result.keys()))
means = [element[0] for element in result.values()]
expected_means = [element[0] for element in expected_result.values()]
np.testing.assert_array_almost_equal(means, expected_means, decimal=decimal)
vars_and_shots = [element[1] for element in result.values()]
expected_vars_and_shots = [element[1] for element in expected_result.values()]
np.testing.assert_array_equal(vars_and_shots, expected_vars_and_shots)
else:
means = [element[0] for element in result]
expected_means = [element[0] for element in expected_result]
np.testing.assert_array_almost_equal(means, expected_means, decimal=decimal)
vars_and_shots = [element[1] for element in result]
expected_vars_and_shots = [element[1] for element in expected_result]
np.testing.assert_array_equal(vars_and_shots, expected_vars_and_shots)
@data(
[
PauliSumOp.from_list([("II", 0.5), ("ZZ", 0.5), ("YY", 0.5), ("XX", -0.5)]),
PauliSumOp.from_list([("II", 2.0)]),
],
[
PauliSumOp.from_list([("ZZ", 2.0)]),
],
{
"op1": PauliSumOp.from_list([("II", 2.0)]),
"op2": PauliSumOp.from_list([("II", 0.5), ("ZZ", 0.5), ("YY", 0.5), ("XX", -0.5)]),
},
{
"op1": PauliSumOp.from_list([("ZZ", 2.0)]),
},
[],
{},
)
def METHOD_NAME(self, observables: ListOrDict[BaseOperator | PauliSumOp]):
"""Tests evaluator of auxiliary operators for algorithms."""
ansatz = EfficientSU2(2)
parameters = np.array(
[1.2, 4.2, 1.4, 2.0, 1.2, 4.2, 1.4, 2.0, 1.2, 4.2, 1.4, 2.0, 1.2, 4.2, 1.4, 2.0],
dtype=float,
)
bound_ansatz = ansatz.bind_parameters(parameters)
states = bound_ansatz
expected_result = self.get_exact_expectation(bound_ansatz, observables)
estimator = Estimator()
decimal = 6
self._run_test(
expected_result,
states,
decimal,
observables,
estimator,
)
def test_estimate_observables_zero_op(self):
"""Tests if a zero operator is handled correctly."""
ansatz = EfficientSU2(2)
parameters = np.array(
[1.2, 4.2, 1.4, 2.0, 1.2, 4.2, 1.4, 2.0, 1.2, 4.2, 1.4, 2.0, 1.2, 4.2, 1.4, 2.0],
dtype=float,
)
bound_ansatz = ansatz.bind_parameters(parameters)
state = bound_ansatz
estimator = Estimator()
observables = [SparsePauliOp(["XX", "YY"]), 0]
result = estimate_observables(estimator, state, observables, None, self.threshold)
expected_result = [(0.015607318055509564, {}), (0.0, {})]
means = [element[0] for element in result]
expected_means = [element[0] for element in expected_result]
np.testing.assert_array_almost_equal(means, expected_means, decimal=0.01)
vars_and_shots = [element[1] for element in result]
expected_vars_and_shots = [element[1] for element in expected_result]
np.testing.assert_array_equal(vars_and_shots, expected_vars_and_shots)
def test_estimate_observables_shots(self):
"""Tests that variances and shots are returned properly."""
ansatz = EfficientSU2(2)
parameters = np.array(
[1.2, 4.2, 1.4, 2.0, 1.2, 4.2, 1.4, 2.0, 1.2, 4.2, 1.4, 2.0, 1.2, 4.2, 1.4, 2.0],
dtype=float,
)
bound_ansatz = ansatz.bind_parameters(parameters)
state = bound_ansatz
estimator = Estimator(options={"shots": 2048})
with self.assertWarns(DeprecationWarning):
observables = [PauliSumOp.from_list([("ZZ", 2.0)])]
result = estimate_observables(estimator, state, observables, None, self.threshold)
exact_result = self.get_exact_expectation(bound_ansatz, observables)
expected_result = [(exact_result[0][0], {"variance": 1.0898, "shots": 2048})]
means = [element[0] for element in result]
expected_means = [element[0] for element in expected_result]
np.testing.assert_array_almost_equal(means, expected_means, decimal=0.01)
vars_and_shots = [element[1] for element in result]
expected_vars_and_shots = [element[1] for element in expected_result]
for computed, expected in zip(vars_and_shots, expected_vars_and_shots):
self.assertAlmostEqual(computed.pop("variance"), expected.pop("variance"), 2)
self.assertEqual(computed.pop("shots"), expected.pop("shots"))
if __name__ == "__main__":
unittest.main()
|
4,429 |
create snapshot
|
import logging
from virttest import qemu_storage
from virttest import data_dir
from virttest import utils_disk
from virttest.qemu_capabilities import Flags
from provider import backup_utils
from provider.virt_storage.storage_admin import sp_admin
LOG_JOB = logging.getLogger('avocado.test')
class BlockDevSnapshotTest(object):
def __init__(self, test, params, env):
self.env = env
self.test = test
self.params = params
self.snapshot_tag = params["snapshot_tag"]
self.base_tag = params["base_tag"]
self.disks_info = {} # {tag, [dev, mnt]}
self.files_info = list()
self.main_vm = self.prepare_main_vm()
self.clone_vm = self.prepare_clone_vm()
self.snapshot_image = self.get_image_by_tag(self.snapshot_tag)
self.base_image = self.get_image_by_tag(self.base_tag)
def is_blockdev_mode(self):
return self.main_vm.check_capability(Flags.BLOCKDEV)
def prepare_main_vm(self):
return self.env.get_vm(self.params["main_vm"])
def prepare_clone_vm(self):
vm_params = self.main_vm.params.copy()
images = self.main_vm.params["images"].replace(
self.base_tag, self.snapshot_tag)
vm_params["images"] = images
return self.main_vm.clone(params=vm_params)
def get_image_by_tag(self, name):
image_dir = data_dir.get_data_dir()
image_params = self.params.object_params(name)
return qemu_storage.QemuImg(image_params, image_dir, name)
def prepare_snapshot_file(self):
if self.is_blockdev_mode():
params = self.params.copy()
params.setdefault("target_path", data_dir.get_data_dir())
image = sp_admin.volume_define_by_params(self.snapshot_tag, params)
image.hotplug(self.main_vm)
else:
if self.params.get("mode") == "existing":
self.snapshot_image.create()
def mount_data_disks(self):
if self.params["os_type"] == "windows":
return
session = self.clone_vm.wait_for_login()
try:
backup_utils.refresh_mounts(self.disks_info, self.params, session)
for info in self.disks_info.values():
disk_path = info[0]
mount_point = info[1]
utils_disk.mount(disk_path, mount_point, session=session)
finally:
session.close()
def verify_data_file(self):
for info in self.files_info:
mount_point, filename = info[0], info[1]
backup_utils.verify_file_md5(self.clone_vm, mount_point, filename)
def verify_snapshot(self):
if self.main_vm.is_alive():
self.main_vm.destroy()
if self.is_blockdev_mode():
self.snapshot_image.base_tag = self.base_tag
self.snapshot_image.base_format = self.base_image.get_format()
base_image_filename = self.base_image.image_filename
self.snapshot_image.base_image_filename = base_image_filename
self.snapshot_image.rebase(self.snapshot_image.params)
self.clone_vm.create()
self.clone_vm.verify_alive()
if self.base_tag != "image1":
self.mount_data_disks()
self.verify_data_file()
def METHOD_NAME(self):
if self.is_blockdev_mode():
options = ["node", "overlay"]
cmd = "blockdev-snapshot"
else:
options = ["device", "mode", "snapshot-file", "format"]
cmd = "blockdev-snapshot-sync"
arguments = self.params.copy_from_keys(options)
if not self.is_blockdev_mode():
arguments["snapshot-file"] = self.snapshot_image.image_filename
else:
arguments.setdefault("overlay", "drive_%s" % self.snapshot_tag)
return self.main_vm.monitor.cmd(cmd, dict(arguments))
@staticmethod
def get_linux_disk_path(session, disk_size):
disks = utils_disk.get_linux_disks(session, True)
for kname, attr in disks.items():
if attr[1] == disk_size and attr[2] == "disk":
return kname
return None
def configure_data_disk(self):
os_type = self.params["os_type"]
disk_params = self.params.object_params(self.base_tag)
disk_size = disk_params["image_size"]
session = self.main_vm.wait_for_login()
try:
if os_type != "windows":
disk_id = self.get_linux_disk_path(session, disk_size)
assert disk_id, "Disk not found in guest!"
mount_point = utils_disk.configure_empty_linux_disk(
session, disk_id, disk_size)[0]
self.disks_info[self.base_tag] = [r"/dev/%s1" % disk_id,
mount_point]
else:
disk_id = utils_disk.get_windows_disks_index(
session, disk_size)
driver_letter = utils_disk.configure_empty_windows_disk(
session, disk_id, disk_size)[0]
mount_point = r"%s:\\" % driver_letter
self.disks_info[self.base_tag] = [disk_id, mount_point]
finally:
session.close()
def generate_tempfile(self, root_dir, filename="data",
size="10M", timeout=360):
backup_utils.generate_tempfile(
self.main_vm, root_dir, filename, size, timeout)
self.files_info.append([root_dir, filename])
def snapshot_test(self):
self.METHOD_NAME()
for info in self.disks_info.values():
self.generate_tempfile(info[1])
self.verify_snapshot()
def pre_test(self):
if not self.main_vm.is_alive():
self.main_vm.create()
self.main_vm.verify_alive()
if self.base_tag != "image1":
self.configure_data_disk()
self.prepare_snapshot_file()
def post_test(self):
try:
self.clone_vm.destroy()
self.snapshot_image.remove()
except Exception as error:
LOG_JOB.error(str(error))
def run_test(self):
self.pre_test()
try:
self.snapshot_test()
finally:
self.post_test()
|
4,430 |
process
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Implements the Catmull-Clark Subdivision Node
# sverchok/nodes/modifier_change/opensubdivision.py
import bpy
from bpy.props import IntProperty
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import updateNode, match_long_repeat
enable_module = False
try:
import pyOpenSubdiv
from pyOpenSubdiv.pysubdivision import pysubdivide
enable_module = True
except ModuleNotFoundError:
enable_module = False
from itertools import chain
import traceback
class SvOpenSubdivisionNode(bpy.types.Node,SverchCustomTreeNode):
bl_idname = "SvOpenSubdivisionNode" # Use this for index.md reference
bl_label = "Catmull-Clark Subdivision"
bl_icon = 'OUTLINER_OB_EMPTY'
sv_icon = None
maxSubdivision = 6 # creates a self.maxSubdivision attribute
# Mute Node Implementation
@property
def sv_internal_links(self):
mapping = [
(self.inputs['Vertices'],self.outputs['Vertices']),
(self.inputs['Faces'],self.outputs['Faces'])
]
return mapping
def sv_init(self,context):
self.inputs.new('SvVerticesSocket', "Vertices")
self.inputs.new('SvStringsSocket', "Faces")
socket = self.inputs.new('SvStringsSocket', "Levels")
socket.use_prop=True
socket.default_property_type = 'int'
socket.default_int_property = 0
# socket.int_range = (0,self.maxSubdivision) # There's no way to visually limit the subdivision levels (it's handled internally), but something like this would be nice in the future.
self.outputs.new('SvVerticesSocket', "Vertices")
self.outputs.new('SvStringsSocket', "Edges")
self.outputs.new('SvStringsSocket', "Faces")
def METHOD_NAME(self):
if not enable_module:
raise Exception("The dependent library is not installed (pyOpenSubdiv).")
vert_sets = self.inputs['Vertices'].sv_get(default=[],deepcopy=False)
edges = []
face_sets = self.inputs['Faces'].sv_get(default=[],deepcopy=False)
new_meshes = {
'vertices':[],
'edges':[],
'faces':[]
}
if(vert_sets != [] and face_sets != []):
subdivision_levels = self.inputs["Levels"].sv_get()[0]
# This is definitely gonna crash.
# I think I'll take the "wait and see how" approach?
parameters = zip(*match_long_repeat([subdivision_levels,vert_sets,face_sets]))
for params in parameters:
subdivision_level = params[0] if params[0] <= self.maxSubdivision else self.maxSubdivision
vertices = params[1]
faces = params[2]
faceVerts = list(chain.from_iterable(faces))
vertsPerFace = [len(face) for face in faces]
new_mesh = pysubdivide(subdivision_level,
vertices,
faces,
faceVerts,
vertsPerFace,
verbose = False
)
new_meshes['vertices'].append(new_mesh['vertices'])
new_meshes['edges'].append(new_mesh['edges'])
new_meshes['faces'].append(new_mesh['faces'])
self.outputs['Vertices'].sv_set(new_meshes['vertices'])
self.outputs['Edges'].sv_set(new_meshes['edges'])
self.outputs['Faces'].sv_set(new_meshes['faces'])
def register():
bpy.utils.register_class(SvOpenSubdivisionNode)
def unregister():
bpy.utils.unregister_class(SvOpenSubdivisionNode)
|
4,431 |
test help
|
import datetime
import unittest
import stem
import stem.response
import stem.version
from unittest.mock import Mock, patch
from stem.interpreter.commands import ControlInterpreter, _get_fingerprint
from stem.response import ControlMessage
from test.unit.interpreter import CONTROLLER
EXPECTED_EVENTS_RESPONSE = """\
\x1b[34mBW 15 25\x1b[0m
\x1b[34mBW 758 570\x1b[0m
\x1b[34mDEBUG connection_edge_process_relay_cell(): Got an extended cell! Yay.\x1b[0m
"""
EXPECTED_INFO_RESPONSE = """\
moria1 (9695DFC35FFEB861329B9F1AB04C46397020CE31)
\x1b[34;1maddress: \x1b[0m128.31.0.34:9101 (moria.csail.mit.edu)
\x1b[34;1mtor version: \x1b[0m0.2.5.4-alpha-dev
\x1b[34;1mflags: \x1b[0mAuthority, Fast, Guard, HSDir, Named, Running, Stable, V2Dir, Valid
\x1b[34;1mexit policy: \x1b[0mreject *:*
\x1b[34;1mcontact: \x1b[0m1024D/28988BF5 arma mit edu
"""
EXPECTED_GETCONF_RESPONSE = """\
\x1b[34;1mlog\x1b[0m\x1b[34m => notice stdout\x1b[0m
\x1b[34;1maddress\x1b[0m\x1b[34m => \x1b[0m
"""
FINGERPRINT = '9695DFC35FFEB861329B9F1AB04C46397020CE31'
class TestInterpreterCommands(unittest.TestCase):
def test_get_fingerprint_for_ourselves(self):
controller = Mock()
controller.get_info.side_effect = lambda arg: {
'fingerprint': FINGERPRINT,
}[arg]
self.assertEqual(FINGERPRINT, _get_fingerprint('', controller))
controller.get_info.side_effect = stem.ControllerError
self.assertRaises(ValueError, _get_fingerprint, '', controller)
def test_get_fingerprint_for_fingerprint(self):
self.assertEqual(FINGERPRINT, _get_fingerprint(FINGERPRINT, Mock()))
def test_get_fingerprint_for_nickname(self):
controller, descriptor = Mock(), Mock()
descriptor.fingerprint = FINGERPRINT
controller.get_network_status.side_effect = lambda arg: {
'moria1': descriptor,
}[arg]
self.assertEqual(FINGERPRINT, _get_fingerprint('moria1', controller))
controller.get_network_status.side_effect = stem.ControllerError
self.assertRaises(ValueError, _get_fingerprint, 'moria1', controller)
def test_get_fingerprint_for_address(self):
controller = Mock()
self.assertRaises(ValueError, _get_fingerprint, '127.0.0.1:-1', controller)
self.assertRaises(ValueError, _get_fingerprint, '127.0.0.901:80', controller)
descriptor = Mock()
descriptor.address = '127.0.0.1'
descriptor.or_port = 80
descriptor.fingerprint = FINGERPRINT
controller.get_network_statuses.return_value = [descriptor]
self.assertEqual(FINGERPRINT, _get_fingerprint('127.0.0.1', controller))
self.assertEqual(FINGERPRINT, _get_fingerprint('127.0.0.1:80', controller))
self.assertRaises(ValueError, _get_fingerprint, '127.0.0.1:81', controller)
self.assertRaises(ValueError, _get_fingerprint, '127.0.0.2', controller)
def test_get_fingerprint_for_unrecognized_inputs(self):
self.assertRaises(ValueError, _get_fingerprint, 'blarg!', Mock())
def test_when_disconnected(self):
controller = Mock()
controller.msg.side_effect = stem.SocketClosed('kaboom!')
interpreter = ControlInterpreter(controller)
# we should be able to run non-tor commands
self.assertTrue('Interpreter commands include:' in interpreter.run_command('/help'))
# ... but tor commands should provide exceptions
self.assertRaises(stem.SocketClosed, interpreter.run_command, 'GETINFO version')
def test_quit(self):
interpreter = ControlInterpreter(CONTROLLER)
self.assertRaises(stem.SocketClosed, interpreter.run_command, '/quit')
self.assertRaises(stem.SocketClosed, interpreter.run_command, 'QUIT')
def METHOD_NAME(self):
interpreter = ControlInterpreter(CONTROLLER)
self.assertTrue('Interpreter commands include:' in interpreter.run_command('/help'))
self.assertTrue('Queries the tor process for information.' in interpreter.run_command('/help GETINFO'))
self.assertTrue('Queries the tor process for information.' in interpreter.run_command('/help GETINFO version'))
def test_events(self):
interpreter = ControlInterpreter(CONTROLLER)
# no received events
self.assertEqual('', interpreter.run_command('/events'))
# with enqueued events
event_contents = (
'650 BW 15 25',
'650 BW 758 570',
'650 DEBUG connection_edge_process_relay_cell(): Got an extended cell! Yay.',
)
for content in event_contents:
event = ControlMessage.from_str(content, 'EVENT', normalize = True)
interpreter._received_events.append(event)
self.assertEqual(EXPECTED_EVENTS_RESPONSE, interpreter.run_command('/events'))
@patch('stem.descriptor.remote.DescriptorDownloader')
@patch('socket.gethostbyaddr', Mock(return_value = ['moria.csail.mit.edu']))
def test_info(self, downloader_mock):
controller, server_desc, ns_desc = Mock(), Mock(), Mock()
controller.get_microdescriptor.return_value = None
controller.get_server_descriptor.return_value = server_desc
controller.get_network_status.return_value = ns_desc
downloader_mock().get_server_descriptors.return_value = [server_desc]
controller.get_info.side_effect = lambda arg, _: {
'ip-to-country/128.31.0.34': 'us',
}[arg]
ns_desc.address = '128.31.0.34'
ns_desc.or_port = 9101
ns_desc.published = datetime.datetime(2014, 5, 5, 5, 52, 5)
ns_desc.nickname = 'moria1'
ns_desc.flags = ['Authority', 'Fast', 'Guard', 'HSDir', 'Named', 'Running', 'Stable', 'V2Dir', 'Valid']
server_desc.exit_policy = 'reject *:*'
server_desc.platform = 'Linux'
server_desc.tor_version = stem.version.Version('0.2.5.4-alpha-dev')
server_desc.contact = '1024D/28988BF5 arma mit edu'
interpreter = ControlInterpreter(controller)
self.assertTrue(interpreter.run_command('/info ' + FINGERPRINT).startswith(EXPECTED_INFO_RESPONSE))
def test_unrecognized_interpreter_command(self):
interpreter = ControlInterpreter(CONTROLLER)
expected = "\x1b[1;31m'/unrecognized' isn't a recognized command\x1b[0m\n"
self.assertEqual(expected, interpreter.run_command('/unrecognized'))
def test_getinfo(self):
controller = Mock()
controller.msg.return_value = ControlMessage.from_str('250-version=0.2.5.1-alpha-dev (git-245ecfff36c0cecc)\r\n250 OK\r\n')
interpreter = ControlInterpreter(controller)
self.assertEqual('\x1b[34m250-version=0.2.5.1-alpha-dev (git-245ecfff36c0cecc)\r\x1b[0m\n\x1b[34m250 OK\x1b[0m\n', interpreter.run_command('GETINFO version'))
self.assertEqual('\x1b[34m250-version=0.2.5.1-alpha-dev (git-245ecfff36c0cecc)\r\x1b[0m\n\x1b[34m250 OK\x1b[0m\n', interpreter.run_command('GETINFO version'))
controller.msg.assert_called_with('GETINFO version')
controller.msg.side_effect = stem.ControllerError('kaboom!')
self.assertEqual('\x1b[1;31mkaboom!\x1b[0m\n', interpreter.run_command('getinfo process/user'))
def test_getconf(self):
controller = Mock()
controller.msg.return_value = ControlMessage.from_str('250-Log=notice stdout\r\n250 Address\r\n')
interpreter = ControlInterpreter(controller)
self.assertEqual('\x1b[34m250-Log=notice stdout\r\x1b[0m\n\x1b[34m250 Address\x1b[0m\n', interpreter.run_command('GETCONF log address'))
controller.msg.assert_called_with('GETCONF log address')
def test_setevents(self):
controller = Mock()
controller.msg.return_value = ControlMessage.from_str('250 OK\r\n')
interpreter = ControlInterpreter(controller)
self.assertEqual('\x1b[34m250 OK\x1b[0m\n', interpreter.run_command('SETEVENTS BW'))
|
4,432 |
exe comb
|
from ir import map, var, bound, expr
'''
I am trying to use this class to demonstrate what a spec is.
It should contain all the information needed to generate hardware,
the mapping constraints and the functional model.
'''
class Accessor:
def __init__(self):
self.map_dict = {}
self.config_cons_dict = {}
'''
This method should define all the generator parameters.
We need to add a parser/decorator to get all the information
for hardware generation; I think the generator parameters also
subsume all the constraints that the compiler needs to be aware of.
'''
def setConstraint(self, **kwargs):
for k, v in kwargs.items():
self.config_cons_dict[k] = bound(k + "_bound", 0, v)
def checkConstraint(self):
for k, bd in self.config_cons_dict.items():
if k == "st_size":
assert bd.inBound(len(self.map_dict)), "too many exprs, controller number exceeded!"
elif k == "var_dim":
for _, ctrl in self.map_dict.items():
assert bd.inBound(ctrl.in_dim), "iterator dimension exceeded!"
elif k == "expr_dim":
for _, ctrl in self.map_dict.items():
assert bd.inBound(ctrl.out_dim), "output dimension exceeded!"
elif k == "expr_piece_dim":
for _, ctrl in self.map_dict.items():
for e in ctrl.expr_list:
assert bd.inBound(len(e.bd_list)), "piecewise expression pieces exceeded!"
print("All constraints satisfied!")
'''
This method should define all the configuration registers.
We need to add a parser/decorator to get all the information
for hardware generation.
'''
def setConfig(self, **kwargs):
'''
example of config:
st_name_list: this is for prettyprint
st_size: 2
depth: 36
var_dim: [2, 2]
expr_dim: [1, 1]
var_range_list: [[5, 5], [3,3]]
expr_config:[different accessor
[multi dimension expr
[multi-piece
([5, 5], [1, 6, 0])]
],
[
[([3, 3], [1, 6, 14])]
]
]
'''
for k, v in kwargs.items():
if k == "st_size":
st_size = v
elif k == "depth":
depth = v
elif k == "st_name_list":
st_name = v
elif k == "var_dim":
var_dim = v
elif k == "var_range_list":
var_range_list = v
elif k == "expr_dim":
expr_dim = v
elif k == "expr_config":
expr_config = v
var_l = [var("cycle", 0, depth)]
expr_l = [expr(var_l, [([bound("bd", 0, depth)], [1, 0])])]
self.cycle_cnt = map(var_l, expr_l)
for i in range(st_size):
var_list = []
for in_dim in range(var_dim[i]):
tmp = var("i" + "_" + str(i) + "_" + str(in_dim), 0, var_range_list[i][in_dim])
var_list.append(tmp)
expr_list = []
for out_dim in range(expr_dim[i]):
pwa = expr_config[i][out_dim]
pw_list = []
for bd_info, w in pwa:
bd_list = [bound("bd" + "_" + str(i) + "_" + str(cnt), 0, bd) for cnt, bd in enumerate(bd_info)]
pw_list.append((bd_list, w))
tmp_expr = expr(var_list, pw_list)
expr_list.append(tmp_expr)
self.map_dict[st_name[i]] = map(var_list, expr_list)
'''
The methods below should define the functional model.
I feel that I am writing Verilog/a simulator;
I do not know if this is the best way, but it may
make it easy to generate hardware to some extent.
'''
def exeSeq(self):
self.cycle_cnt.update()
for k, is_update in self.is_update_dict.items():
if is_update:
self.map_dict[k].update()
def METHOD_NAME(self):
itr = self.cycle_cnt.eval()
cnt_dict = {k: m.eval() for k, m in self.map_dict.items()}
self.is_update_dict = {k: cnt == itr for k, cnt in cnt_dict.items()}
'''
Simulator prints out info; adding a data interface here would drive the memory port.
'''
def print_sim_info(self):
for k, is_update in self.is_update_dict.items():
if is_update:
print(k, ": ", self.map_dict[k].getDomain())
|
4,433 |
proc errors
|
# Jython Database Specification API 2.0
#
# Copyright (c) 2001 brian zimmer <[email protected]>
from zxtest import zxCoreTestCase
class OracleSPTest(zxCoreTestCase):
def setUp(self):
zxCoreTestCase.setUp(self)
c = self.cursor()
try:
try:
c.execute("drop table sptest")
except:
self.db.rollback()
try:
c.execute("create table sptest (x varchar2(20))")
c.execute("create or replace procedure procnone is begin insert into sptest values ('testing'); end;")
c.execute("create or replace procedure procin (y in varchar2) is begin insert into sptest values (y); end;")
c.execute("create or replace procedure procout (y out varchar2) is begin y := 'tested'; end;")
c.execute("create or replace procedure procinout (y out varchar2, z in varchar2) is begin insert into sptest values (z); y := 'tested'; end;")
c.execute("create or replace function funcnone return varchar2 is begin return 'tested'; end;")
c.execute("create or replace function funcin (y varchar2) return varchar2 is begin return y || y; end;")
c.execute("create or replace function funcout (y out varchar2) return varchar2 is begin y := 'tested'; return 'returned'; end;")
self.db.commit()
except:
self.db.rollback()
self.fail("procedure creation failed")
self.METHOD_NAME("PROC")
self.METHOD_NAME("FUNC")
finally:
c.close()
def tearDown(self):
zxCoreTestCase.tearDown(self)
def METHOD_NAME(self, name):
c = self.cursor()
try:
c.execute("select * from user_errors where name like '%s%%'" % (name.upper()))
errors = c.fetchall()
try:
assert not errors, "found errors"
except AssertionError, e:
print "printing errors:"
for a in errors:
print a
raise e
finally:
c.close()
def testCursor(self):
c = self.cursor()
try:
c.execute("insert into sptest values ('a')")
c.execute("insert into sptest values ('b')")
c.execute("insert into sptest values ('c')")
c.execute("insert into sptest values ('d')")
c.execute("insert into sptest values ('e')")
c.execute("""
CREATE OR REPLACE PACKAGE types
AS
TYPE ref_cursor IS REF CURSOR;
END;
""")
c.execute("""
CREATE OR REPLACE FUNCTION funccur(v_x IN VARCHAR)
RETURN types.ref_cursor
AS
funccur_cursor types.ref_cursor;
BEGIN
OPEN funccur_cursor FOR
SELECT x FROM sptest WHERE x < v_x;
RETURN funccur_cursor;
END;
""")
self.METHOD_NAME("funccur")
c.callproc("funccur", ("z",))
data = c.fetchall()
self.assertEquals(5, len(data))
c.callproc("funccur", ("c",))
data = c.fetchall()
self.assertEquals(2, len(data))
finally:
c.close()
def testProcin(self):
c = self.cursor()
try:
params = ["testProcin"]
c.callproc("procin", params)
self.assertEquals([], c.fetchall())
c.execute("select * from sptest")
self.assertEquals(1, len(c.fetchall()))
finally:
c.close()
def testProcinout(self):
c = self.cursor()
try:
params = [None, "testing"]
c.callproc("procinout", params)
data = c.fetchone()
assert data is None, "data was not None"
c.execute("select * from sptest")
data = c.fetchone()
self.assertEquals("testing", data[0])
self.assertEquals("tested", params[0])
finally:
c.close()
def testFuncnone(self):
c = self.cursor()
try:
c.callproc("funcnone")
data = c.fetchone()
assert data is not None, "data was None"
self.assertEquals(1, len(data))
self.assertEquals("tested", data[0])
finally:
c.close()
def testFuncin(self):
c = self.cursor()
try:
params = ["testing"]
c.callproc("funcin", params)
self.assertEquals(1, c.rowcount)
data = c.fetchone()
assert data is not None, "data was None"
self.assertEquals(1, len(data))
self.assertEquals("testingtesting", data[0])
finally:
c.close()
def testCallingWithKws(self):
c = self.cursor()
try:
params = ["testing"]
c.callproc("funcin", params=params)
self.assertEquals(1, c.rowcount)
data = c.fetchone()
assert data is not None, "data was None"
self.assertEquals(1, len(data))
self.assertEquals("testingtesting", data[0])
finally:
c.close()
def testFuncout(self):
c = self.cursor()
try:
params = [None]
c.callproc("funcout", params)
data = c.fetchone()
assert data is not None, "data was None"
self.assertEquals(1, len(data))
self.assertEquals("returned", data[0])
self.assertEquals("tested", params[0].strip())
finally:
c.close()
def testMultipleFetch(self):
"""testing the second fetch call to a callproc() is None"""
c = self.cursor()
try:
c.callproc("funcnone")
data = c.fetchone()
assert data is not None, "data was None"
data = c.fetchone()
assert data is None, "data was not None"
finally:
c.close()
class SQLServerSPTest(zxCoreTestCase):
def testProcWithResultSet(self):
c = self.cursor()
try:
for a in (("table", "sptest"), ("procedure", "sp_proctest")):
try:
c.execute("drop %s %s" % (a))
except:
pass
c.execute("create table sptest (a int, b varchar(32))")
c.execute("insert into sptest values (1, 'hello')")
c.execute("insert into sptest values (2, 'there')")
c.execute("insert into sptest values (3, 'goodbye')")
c.execute(""" create procedure sp_proctest (@A int) as select a, b from sptest where a <= @A """)
self.db.commit()
c.callproc("sp_proctest", (2,))
data = c.fetchall()
self.assertEquals(2, len(data))
self.assertEquals(2, len(c.description))
assert c.nextset() is not None, "expected an additional result set"
data = c.fetchall()
self.assertEquals(1, len(data))
self.assertEquals(1, len(c.description))
finally:
c.close()
# def testSalesByCategory(self):
# c = self.cursor()
# try:
# c.execute("use northwind")
# c.callproc(("northwind", "dbo", "SalesByCategory"), ["Seafood", "1998"])
# data = c.fetchall()
# assert data is not None, "no results from SalesByCategory"
# assert len(data) > 0, "expected numerous results"
# finally:
# c.close()
|
4,434 |
remove
|
# Access WeakSet through the weakref module.
# This code is separated-out because it is needed
# by abc.py to load everything else at startup.
from _weakref import ref
__all__ = ['WeakSet']
class _IterationGuard(object):
# This context manager registers itself in the current iterators of the
# weak container, such as to delay all removals until the context manager
# exits.
# This technique should be relatively thread-safe (since sets are).
def __init__(self, weakcontainer):
# Don't create cycles
self.weakcontainer = ref(weakcontainer)
def __enter__(self):
w = self.weakcontainer()
if w is not None:
w._iterating.add(self)
return self
def __exit__(self, e, t, b):
w = self.weakcontainer()
if w is not None:
s = w._iterating
s.METHOD_NAME(self)
if not s:
w._commit_removals()
class WeakSet(object):
def __init__(self, data=None):
self.data = set()
def _remove(item, selfref=ref(self)):
self = selfref()
if self is not None:
if self._iterating:
self._pending_removals.append(item)
else:
self.data.discard(item)
self._remove = _remove
# A list of keys to be removed
self._pending_removals = []
self._iterating = set()
if data is not None:
self.update(data)
def _commit_removals(self):
l = self._pending_removals
discard = self.data.discard
while l:
discard(l.pop())
def __iter__(self):
with _IterationGuard(self):
for itemref in self.data:
item = itemref()
if item is not None:
yield item
def __len__(self):
return len(self.data) - len(self._pending_removals)
def __contains__(self, item):
try:
wr = ref(item)
except TypeError:
return False
return wr in self.data
def __reduce__(self):
return (self.__class__, (list(self),),
getattr(self, '__dict__', None))
__hash__ = None
def add(self, item):
if self._pending_removals:
self._commit_removals()
self.data.add(ref(item, self._remove))
def clear(self):
if self._pending_removals:
self._commit_removals()
self.data.clear()
def copy(self):
return self.__class__(self)
def pop(self):
if self._pending_removals:
self._commit_removals()
while True:
try:
itemref = self.data.pop()
except KeyError:
raise KeyError('pop from empty WeakSet')
item = itemref()
if item is not None:
return item
def METHOD_NAME(self, item):
if self._pending_removals:
self._commit_removals()
self.data.METHOD_NAME(ref(item))
def discard(self, item):
if self._pending_removals:
self._commit_removals()
self.data.discard(ref(item))
def update(self, other):
if self._pending_removals:
self._commit_removals()
for element in other:
self.add(element)
def __ior__(self, other):
self.update(other)
return self
def difference(self, other):
newset = self.copy()
newset.difference_update(other)
return newset
__sub__ = difference
def difference_update(self, other):
self.__isub__(other)
def __isub__(self, other):
if self._pending_removals:
self._commit_removals()
if self is other:
self.data.clear()
else:
self.data.difference_update(ref(item) for item in other)
return self
def intersection(self, other):
return self.__class__(item for item in other if item in self)
__and__ = intersection
def intersection_update(self, other):
self.__iand__(other)
def __iand__(self, other):
if self._pending_removals:
self._commit_removals()
self.data.intersection_update(ref(item) for item in other)
return self
def issubset(self, other):
return self.data.issubset(ref(item) for item in other)
__le__ = issubset
def __lt__(self, other):
return self.data < set(ref(item) for item in other)
def issuperset(self, other):
return self.data.issuperset(ref(item) for item in other)
__ge__ = issuperset
def __gt__(self, other):
return self.data > set(ref(item) for item in other)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self.data == set(ref(item) for item in other)
def symmetric_difference(self, other):
newset = self.copy()
newset.symmetric_difference_update(other)
return newset
__xor__ = symmetric_difference
def symmetric_difference_update(self, other):
self.__ixor__(other)
def __ixor__(self, other):
if self._pending_removals:
self._commit_removals()
if self is other:
self.data.clear()
else:
self.data.symmetric_difference_update(ref(item, self._remove) for item in other)
return self
def union(self, other):
return self.__class__(e for s in (self, other) for e in s)
__or__ = union
def isdisjoint(self, other):
return len(self.intersection(other)) == 0
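# --- Editor's usage sketch (not part of the original module): it assumes the ---
# --- WeakSet defined above; entries vanish once their referent is collected. ---
if __name__ == "__main__":
    class Token(object):
        pass

    live = Token()
    doomed = Token()
    ws = WeakSet([live, doomed])
    assert len(ws) == 2
    del doomed           # drop the only strong reference
    import gc
    gc.collect()         # force collection on implementations without refcounting
    assert len(ws) == 1 and live in ws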
|
4,435 |
test phase active project private
|
import pytest
import rules
from adhocracy4.polls import phases
from adhocracy4.projects.enums import Access
from adhocracy4.test.helpers import freeze_phase
from adhocracy4.test.helpers import freeze_post_phase
from adhocracy4.test.helpers import freeze_pre_phase
from adhocracy4.test.helpers import setup_phase
from adhocracy4.test.helpers import setup_users
perm_name = "a4polls.comment_poll"
def test_perm_exists():
assert rules.perm_exists(perm_name)
@pytest.mark.django_db
def test_pre_phase(phase_factory, poll_factory, user, user_factory):
phase, _, project, item = setup_phase(
phase_factory, poll_factory, phases.VotingPhase
)
anonymous, moderator, _ = setup_users(project)
initiator = user_factory()
project.organisation.initiators.add(initiator)
creator = item.creator
assert project.is_public
with freeze_pre_phase(phase):
assert not rules.has_perm(perm_name, anonymous, item)
assert not rules.has_perm(perm_name, user, item)
assert not rules.has_perm(perm_name, creator, item)
assert rules.has_perm(perm_name, moderator, item)
assert rules.has_perm(perm_name, initiator, item)
@pytest.mark.django_db
def test_phase_active(phase_factory, poll_factory, user, user_factory):
phase, _, project, item = setup_phase(
phase_factory, poll_factory, phases.VotingPhase
)
anonymous, moderator, _ = setup_users(project)
initiator = user_factory()
project.organisation.initiators.add(initiator)
creator = item.creator
assert project.is_public
with freeze_phase(phase):
assert not rules.has_perm(perm_name, anonymous, item)
assert rules.has_perm(perm_name, user, item)
assert rules.has_perm(perm_name, creator, item)
assert rules.has_perm(perm_name, moderator, item)
assert rules.has_perm(perm_name, initiator, item)
@pytest.mark.django_db
def METHOD_NAME(
phase_factory, poll_factory, user, another_user, user_factory
):
phase, _, project, item = setup_phase(
phase_factory,
poll_factory,
phases.VotingPhase,
module__project__access=Access.PRIVATE,
)
anonymous, moderator, _ = setup_users(project)
initiator = user_factory()
project.organisation.initiators.add(initiator)
creator = item.creator
participant = another_user
project.participants.add(participant)
assert project.access == Access.PRIVATE
with freeze_phase(phase):
assert not rules.has_perm(perm_name, anonymous, item)
assert not rules.has_perm(perm_name, user, item)
assert not rules.has_perm(perm_name, creator, item)
assert rules.has_perm(perm_name, participant, item)
assert rules.has_perm(perm_name, moderator, item)
assert rules.has_perm(perm_name, initiator, item)
@pytest.mark.django_db
def test_phase_active_project_semipublic(
phase_factory, poll_factory, user, another_user, user_factory
):
phase, _, project, item = setup_phase(
phase_factory,
poll_factory,
phases.VotingPhase,
module__project__access=Access.SEMIPUBLIC,
)
anonymous, moderator, _ = setup_users(project)
initiator = user_factory()
project.organisation.initiators.add(initiator)
creator = item.creator
participant = another_user
project.participants.add(participant)
assert project.access == Access.SEMIPUBLIC
with freeze_phase(phase):
assert not rules.has_perm(perm_name, anonymous, item)
assert not rules.has_perm(perm_name, user, item)
assert not rules.has_perm(perm_name, creator, item)
assert rules.has_perm(perm_name, participant, item)
assert rules.has_perm(perm_name, moderator, item)
assert rules.has_perm(perm_name, initiator, item)
@pytest.mark.django_db
def test_phase_active_project_draft(phase_factory, poll_factory, user, user_factory):
phase, _, project, item = setup_phase(
phase_factory, poll_factory, phases.VotingPhase, module__project__is_draft=True
)
anonymous, moderator, _ = setup_users(project)
initiator = user_factory()
project.organisation.initiators.add(initiator)
creator = item.creator
assert project.is_draft
with freeze_phase(phase):
assert not rules.has_perm(perm_name, anonymous, item)
assert not rules.has_perm(perm_name, user, item)
assert not rules.has_perm(perm_name, creator, item)
assert rules.has_perm(perm_name, moderator, item)
assert rules.has_perm(perm_name, initiator, item)
@pytest.mark.django_db
def test_post_phase_project_archived(phase_factory, poll_factory, user, user_factory):
phase, _, project, item = setup_phase(
phase_factory,
poll_factory,
phases.VotingPhase,
module__project__is_archived=True,
)
anonymous, moderator, _ = setup_users(project)
initiator = user_factory()
project.organisation.initiators.add(initiator)
creator = item.creator
assert project.is_archived
with freeze_post_phase(phase):
assert not rules.has_perm(perm_name, anonymous, item)
assert not rules.has_perm(perm_name, user, item)
assert not rules.has_perm(perm_name, creator, item)
assert rules.has_perm(perm_name, moderator, item)
assert rules.has_perm(perm_name, initiator, item)
|
4,436 |
run and verify
|
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's --gtest_list_tests flag.
A user can ask Google Test to list all tests by specifying the
--gtest_list_tests flag. This script tests such functionality
by invoking gtest_list_tests_unittest_ (a program written with
Google Test) with the command line flags.
"""
__author__ = '[email protected] (Patrick Hanna)'
import gtest_test_utils
import re
# Constants.
# The command line flag for enabling/disabling listing all tests.
LIST_TESTS_FLAG = 'gtest_list_tests'
# Path to the gtest_list_tests_unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath('gtest_list_tests_unittest_')
# The expected output when running gtest_list_tests_unittest_ with
# --gtest_list_tests
EXPECTED_OUTPUT_NO_FILTER_RE = re.compile(r"""FooDeathTest\.
Test1
Foo\.
Bar1
Bar2
DISABLED_Bar3
Abc\.
Xyz
Def
FooBar\.
Baz
FooTest\.
Test1
DISABLED_Test2
Test3
TypedTest/0\. # TypeParam = (VeryLo{245}|class VeryLo{239})\.\.\.
TestA
TestB
TypedTest/1\. # TypeParam = int\s*\*( __ptr64)?
TestA
TestB
TypedTest/2\. # TypeParam = .*MyArray<bool,\s*42>
TestA
TestB
My/TypeParamTest/0\. # TypeParam = (VeryLo{245}|class VeryLo{239})\.\.\.
TestA
TestB
My/TypeParamTest/1\. # TypeParam = int\s*\*( __ptr64)?
TestA
TestB
My/TypeParamTest/2\. # TypeParam = .*MyArray<bool,\s*42>
TestA
TestB
MyInstantiation/ValueParamTest\.
TestA/0 # GetParam\(\) = one line
TestA/1 # GetParam\(\) = two\\nlines
TestA/2 # GetParam\(\) = a very\\nlo{241}\.\.\.
TestB/0 # GetParam\(\) = one line
TestB/1 # GetParam\(\) = two\\nlines
TestB/2 # GetParam\(\) = a very\\nlo{241}\.\.\.
""")
# The expected output when running gtest_list_tests_unittest_ with
# --gtest_list_tests and --gtest_filter=Foo*.
EXPECTED_OUTPUT_FILTER_FOO_RE = re.compile(r"""FooDeathTest\.
Test1
Foo\.
Bar1
Bar2
DISABLED_Bar3
FooBar\.
Baz
FooTest\.
Test1
DISABLED_Test2
Test3
""")
# Utilities.
def Run(args):
"""Runs gtest_list_tests_unittest_ and returns the list of tests printed."""
return gtest_test_utils.Subprocess([EXE_PATH] + args,
capture_stderr=False).output
# The unit test.
class GTestListTestsUnitTest(gtest_test_utils.TestCase):
"""Tests using the --gtest_list_tests flag to list all tests."""
def METHOD_NAME(self, flag_value, expected_output_re, other_flag):
"""Runs gtest_list_tests_unittest_ and verifies that it prints
the correct tests.
Args:
flag_value: value of the --gtest_list_tests flag;
None if the flag should not be present.
expected_output_re: regular expression that matches the expected
output after running command;
other_flag: a different flag to be passed to command
along with gtest_list_tests;
None if the flag should not be present.
"""
if flag_value is None:
flag = ''
flag_expression = 'not set'
elif flag_value == '0':
flag = '--%s=0' % LIST_TESTS_FLAG
flag_expression = '0'
else:
flag = '--%s' % LIST_TESTS_FLAG
flag_expression = '1'
args = [flag]
if other_flag is not None:
args += [other_flag]
output = Run(args)
if expected_output_re:
self.assert_(
expected_output_re.match(output),
('when %s is %s, the output of "%s" is "%s",\n'
'which does not match regex "%s"' %
(LIST_TESTS_FLAG, flag_expression, ' '.join(args), output,
expected_output_re.pattern)))
else:
self.assert_(
not EXPECTED_OUTPUT_NO_FILTER_RE.match(output),
('when %s is %s, the output of "%s" is "%s"'%
(LIST_TESTS_FLAG, flag_expression, ' '.join(args), output)))
def testDefaultBehavior(self):
"""Tests the behavior of the default mode."""
self.METHOD_NAME(flag_value=None,
expected_output_re=None,
other_flag=None)
def testFlag(self):
"""Tests using the --gtest_list_tests flag."""
self.METHOD_NAME(flag_value='0',
expected_output_re=None,
other_flag=None)
self.METHOD_NAME(flag_value='1',
expected_output_re=EXPECTED_OUTPUT_NO_FILTER_RE,
other_flag=None)
def testOverrideNonFilterFlags(self):
"""Tests that --gtest_list_tests overrides the non-filter flags."""
self.METHOD_NAME(flag_value='1',
expected_output_re=EXPECTED_OUTPUT_NO_FILTER_RE,
other_flag='--gtest_break_on_failure')
def testWithFilterFlags(self):
"""Tests that --gtest_list_tests takes into account the
--gtest_filter flag."""
self.METHOD_NAME(flag_value='1',
expected_output_re=EXPECTED_OUTPUT_FILTER_FOO_RE,
other_flag='--gtest_filter=Foo*')
if __name__ == '__main__':
gtest_test_utils.Main()
|
4,437 |
test clone
|
# (C) Copyright 2005-2023 Enthought, Inc., Austin, TX
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only under
# the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
import pickle
import unittest
from traits.api import Expression, HasTraits, Int, TraitError
from traits.constants import DefaultValue
class TestExpression(unittest.TestCase):
def test_set_value(self):
class Foo(HasTraits):
bar = Expression()
f = Foo()
f.bar = "1"
self.assertEqual(f.bar, "1")
self.assertEqual(eval(f.bar_), 1)
def test_default_static(self):
class Foo(HasTraits):
# The default value set in the class definition is "0"
bar = Expression(default_value="1")
f = Foo()
self.assertEqual(f.bar, "1")
self.assertEqual(eval(f.bar_), 1)
def test_default_method(self):
class Foo(HasTraits):
# The default value set in the class definition is "0"
bar = Expression()
default_calls = Int(0)
def _bar_default(self):
self.default_calls += 1
return "1"
f = Foo()
self.assertEqual(f.bar, "1")
self.assertEqual(eval(f.bar_), 1)
self.assertEqual(f.default_calls, 1)
# Check that the order doesn't matter
f2 = Foo()
self.assertEqual(eval(f2.bar_), 1)
self.assertEqual(f2.bar, "1")
self.assertEqual(f2.default_calls, 1)
def test_default_method_non_valid(self):
class Foo(HasTraits):
bar = Expression()
def _bar_default(self):
return "{x=y"
f = Foo()
msg = "The 'bar' trait of a Foo instance must be a valid"
with self.assertRaisesRegex(TraitError, msg):
f.bar
def test_default_static_override_static(self):
class BaseFoo(HasTraits):
# The default value set in the class definition is "0"
bar = Expression()
class Foo(BaseFoo):
bar = "3"
f = Foo()
self.assertEqual(f.bar, "3")
self.assertEqual(eval(f.bar_), 3)
def test_default_static_override_method(self):
class BaseFoo(HasTraits):
# The default value set in the class definition is "0"
bar = Expression()
class Foo(BaseFoo):
default_calls = Int(0)
def _bar_default(self):
self.default_calls += 1
return "3"
f = Foo()
self.assertEqual(f.bar, "3")
self.assertEqual(eval(f.bar_), 3)
self.assertEqual(f.default_calls, 1)
def test_default_method_override_static(self):
class BaseFoo(HasTraits):
# The default value set in the class definition is "0"
bar = Expression()
default_calls = Int(0)
def _bar_default(self):
self.default_calls += 1
return "1"
class Foo(BaseFoo):
bar = "3"
f = Foo()
self.assertEqual(f.bar, "3")
self.assertEqual(eval(f.bar_), 3)
self.assertEqual(f.default_calls, 0)
def test_default_method_override_method(self):
class BaseFoo(HasTraits):
# The default value set in the class definition is "0"
bar = Expression()
default_calls = Int(0)
def _bar_default(self):
self.default_calls += 1
return "1"
class Foo(BaseFoo):
def _bar_default(self):
self.default_calls += 1
return "3"
f = Foo()
self.assertEqual(f.bar, "3")
self.assertEqual(eval(f.bar_), 3)
self.assertEqual(f.default_calls, 1)
def test_pickle_shadow_trait(self):
class Foo(HasTraits):
# The default value set in the class definition is "0"
bar = Expression(default_value="1")
f = Foo()
married_shadow_trait = f.trait("bar_")
reconstituted = pickle.loads(pickle.dumps(married_shadow_trait))
default_value_callable = reconstituted.default_value()[1]
self.assertEqual(eval(default_value_callable(f)), 1)
def METHOD_NAME(self):
expr = Expression(default_value="1")
cloned_expr = expr.clone("2")
self.assertEqual(cloned_expr.default_value_type, DefaultValue.constant)
self.assertEqual(cloned_expr.default_value, "2")
self.assertEqual(
cloned_expr.as_ctrait().default_value_for(None, "expr"),
"2",
)
|
4,438 |
get config defaults
|
#!/usr/bin/env python3
#
# VM testing aarch64 library
#
# Copyright 2020 Linaro
#
# Authors:
# Robert Foley <[email protected]>
#
# This code is licensed under the GPL version 2 or later. See
# the COPYING file in the top-level directory.
#
import os
import sys
import subprocess
import basevm
from qemu.utils import kvm_available
# This is the config needed for current version of QEMU.
# This works for both kvm and tcg.
CURRENT_CONFIG = {
'cpu' : "max",
'machine' : "virt,gic-version=max",
}
# The minimum minor version of QEMU we will support with aarch64 VMs is 3.
# QEMU versions less than 3 have various issues running these VMs.
QEMU_AARCH64_MIN_VERSION = 3
# The DEFAULT_CONFIG will default to a version of
# parameters that works for backwards compatibility.
DEFAULT_CONFIG = {'kvm' : {'cpu' : "host",
'machine' : "virt,gic-version=host"},
'tcg' : {'cpu' : "cortex-a57",
'machine' : "virt"},
}
def METHOD_NAME(vmcls, default_config):
"""Fetch the configuration defaults for this VM,
taking into consideration the defaults for
aarch64 first, followed by the defaults for this VM."""
config = default_config
config.update(aarch_get_config_defaults(vmcls))
return config
def aarch_get_config_defaults(vmcls):
"""Set the defaults for current version of QEMU."""
config = CURRENT_CONFIG
args = basevm.parse_args(vmcls)
qemu_path = basevm.get_qemu_path(vmcls.arch, args.build_path)
qemu_version = basevm.get_qemu_version(qemu_path)
if qemu_version < QEMU_AARCH64_MIN_VERSION:
        error = "\nThis major version of QEMU {} is too old for aarch64 VMs.\n"\
"The major version must be at least {}.\n"\
"To continue with the current build of QEMU, "\
"please restart with QEMU_LOCAL=1 .\n"
print(error.format(qemu_version, QEMU_AARCH64_MIN_VERSION))
exit(1)
if qemu_version == QEMU_AARCH64_MIN_VERSION:
# We have an older version of QEMU,
# set the config values for backwards compatibility.
if kvm_available('aarch64'):
config.update(DEFAULT_CONFIG['kvm'])
else:
config.update(DEFAULT_CONFIG['tcg'])
return config
def create_flash_images(flash_dir="./", efi_img=""):
"""Creates the appropriate pflash files
for an aarch64 VM."""
flash0_path = get_flash_path(flash_dir, "flash0")
flash1_path = get_flash_path(flash_dir, "flash1")
fd_null = open(os.devnull, 'w')
subprocess.check_call(["dd", "if=/dev/zero", "of={}".format(flash0_path),
"bs=1M", "count=64"],
stdout=fd_null, stderr=subprocess.STDOUT)
# A reliable way to get the QEMU EFI image is via an installed package or
# via the bios included with qemu.
if not os.path.exists(efi_img):
sys.stderr.write("*** efi argument is invalid ({})\n".format(efi_img))
sys.stderr.write("*** please check --efi-aarch64 argument or "\
"install qemu-efi-aarch64 package\n")
exit(3)
subprocess.check_call(["dd", "if={}".format(efi_img),
"of={}".format(flash0_path),
"conv=notrunc"],
stdout=fd_null, stderr=subprocess.STDOUT)
subprocess.check_call(["dd", "if=/dev/zero",
"of={}".format(flash1_path),
"bs=1M", "count=64"],
stdout=fd_null, stderr=subprocess.STDOUT)
fd_null.close()
def get_pflash_args(flash_dir="./"):
"""Returns a string that can be used to
boot qemu using the appropriate pflash files
for aarch64."""
flash0_path = get_flash_path(flash_dir, "flash0")
flash1_path = get_flash_path(flash_dir, "flash1")
pflash_args_str = "-drive file={},format=raw,if=pflash "\
"-drive file={},format=raw,if=pflash"
pflash_args = pflash_args_str.format(flash0_path, flash1_path)
return pflash_args.split(" ")
def get_flash_path(flash_dir, name):
return os.path.join(flash_dir, "{}.img".format(name))
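# --- Editor's usage sketch (not part of the original file): the EFI image path ---
# --- below is a hypothetical distro location; adjust it to the local install. ---
def _example_pflash_boot_args(flash_dir="./",
                              efi_img="/usr/share/qemu-efi-aarch64/QEMU_EFI.fd"):
    """Create the pflash images and return the extra QEMU command line arguments."""
    create_flash_images(flash_dir=flash_dir, efi_img=efi_img)
    return get_pflash_args(flash_dir=flash_dir)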
|
4,439 |
run each partition
|
import functools
import asyncio
from typing import Callable, TYPE_CHECKING, Type, TypeVar
from contextlib import asynccontextmanager
from libertem.common.async_utils import (
adjust_event_loop_policy, sync_to_async, async_generator_eager
)
from libertem.common.executor import JobExecutor, AsyncJobExecutor
from libertem.common.tracing import TracedThreadPoolExecutor
if TYPE_CHECKING:
from libertem.udf.base import UDFRunner
T = TypeVar('T')
class ResourceError(RuntimeError):
"""
Thrown when there is a resource mismatch, for example if the task requests
resources that are not available in the worker pool.
"""
pass
class BaseJobExecutor(JobExecutor):
def get_udf_runner(self) -> Type['UDFRunner']:
from libertem.udf.base import UDFRunner
return UDFRunner
def ensure_async(self, pool=None):
"""
Returns an asynchronous executor; by default just wrap into `AsyncAdapter`.
"""
return AsyncAdapter(wrapped=self, pool=pool)
class AsyncAdapter(AsyncJobExecutor):
"""
    Wrap a synchronous JobExecutor so that it can be used as an AsyncJobExecutor. All methods are
converted to async and executed in a separate thread.
"""
def __init__(self, wrapped, pool=None):
self._wrapped = wrapped
if pool is None:
pool = AsyncAdapter.make_pool()
self._pool = pool
@classmethod
def make_pool(cls):
pool = TracedThreadPoolExecutor(1)
pool.submit(adjust_event_loop_policy).result()
pool.submit(lambda: asyncio.set_event_loop(asyncio.new_event_loop())).result()
return pool
def ensure_sync(self):
return self._wrapped
@asynccontextmanager
async def scatter(self, obj):
try:
res = await sync_to_async(self._wrapped.scatter.__enter__, self._pool)
yield res
finally:
exit_fn = functools.partial(
self._wrapped.scatter.__exit__,
None, None, None, # FIXME: exc_type, exc_value, traceback?
)
await sync_to_async(exit_fn, self._pool)
async def run_tasks(self, tasks, params_handle, cancel_id):
"""
run a number of Tasks
"""
gen = self._wrapped.run_tasks(tasks, params_handle, cancel_id)
agen = async_generator_eager(gen, self._pool)
async for i in agen:
yield i
async def run_function(self, fn: Callable[..., T], *args, **kwargs) -> T:
"""
run a callable :code:`fn` on an arbitrary worker node
"""
fn_with_args = functools.partial(self._wrapped.run_function, fn, *args, **kwargs)
return await sync_to_async(fn_with_args, self._pool)
async def METHOD_NAME(self, partitions, fn, all_nodes=False):
fn_with_args = functools.partial(
self._wrapped.METHOD_NAME, partitions, fn, all_nodes
)
return await sync_to_async(fn_with_args, self._pool)
async def map(self, fn, iterable):
"""
Run a callable :code:`fn` for each item in iterable, on arbitrary worker nodes
Parameters
----------
fn : callable
Function to call. Should accept exactly one parameter.
iterable : Iterable
Which elements to call the function on.
"""
fn_with_args = functools.partial(
self._wrapped.map, fn, iterable,
)
return await sync_to_async(fn_with_args, self._pool)
async def run_each_host(self, fn, *args, **kwargs):
fn_with_args = functools.partial(self._wrapped.run_each_host, fn, *args, **kwargs)
return await sync_to_async(fn_with_args, self._pool)
async def run_each_worker(self, fn, *args, **kwargs):
fn_with_args = functools.partial(self._wrapped.run_each_worker, fn, *args, **kwargs)
return await sync_to_async(fn_with_args, self._pool)
async def close(self):
"""
Cleanup resources used by this executor, if any, including the wrapped executor.
"""
res = await sync_to_async(self._wrapped.close, self._pool)
if self._pool:
self._pool.shutdown()
return res
async def cancel(self, cancel_id):
"""
cancel execution identified by cancel_id
"""
return await sync_to_async(
functools.partial(self._wrapped.cancel, cancel_id=cancel_id),
self._pool
)
async def get_available_workers(self):
return await sync_to_async(self._wrapped.get_available_workers)
async def get_resource_details(self):
return await sync_to_async(self._wrapped.get_resource_details)
def get_udf_runner(self) -> Type['UDFRunner']:
from libertem.udf.base import UDFRunner
return UDFRunner
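# --- Editor's usage sketch (not part of the original module): _InlineExecutor is ---
# --- a hypothetical minimal synchronous executor; it assumes JobExecutor does not ---
# --- require further overrides for this code path. ---
class _InlineExecutor(BaseJobExecutor):
    """Runs functions directly in the calling thread; illustration only."""

    def run_function(self, fn, *args, **kwargs):
        return fn(*args, **kwargs)

    def close(self):
        pass


async def _example_async_usage():
    executor = _InlineExecutor().ensure_async()
    try:
        # The wrapped call is executed in the adapter's worker thread.
        return await executor.run_function(sum, [1, 2, 3])
    finally:
        await executor.close()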
|
4,440 |
get target agent
|
import logging
from xml.etree import ElementTree
from indra.statements import *
from indra.databases.identifiers import ensure_chebi_prefix, \
ensure_chembl_prefix
from indra.statements.validate import assert_valid_db_refs
from indra.ontology.standardize import standardize_name_db_refs, \
get_standard_agent
logger = logging.getLogger(__name__)
drugbank_ns = {'db': 'http://www.drugbank.ca'}
class DrugbankProcessor:
"""Processor to extract INDRA Statements from DrugBank content.
The processor assumes that an ElementTree is available which it then
traverses to find drug-target information.
Parameters
----------
xml_tree : xml.etree.ElementTree.ElementTree
An XML ElementTree representing DrugBank XML content.
Attributes
----------
statements : list of indra.statements.Statement
A list of INDRA Statements that were extracted from DrugBank content.
"""
def __init__(self, xml_tree: ElementTree.ElementTree):
self.xml_tree = xml_tree
self.statements = []
def extract_statements(self):
root = self.xml_tree.getroot()
for drug in db_findall(root, 'db:drug'):
for stmt in self._extract_statements_for_drug(drug):
self.statements.append(stmt)
@staticmethod
def _extract_statements_for_drug(drug_element):
drug = DrugbankProcessor._get_drug_agent(drug_element)
for target_element in db_findall(drug_element, 'db:targets/db:target'):
actions = {a.text for a in db_findall(target_element,
'db:actions/db:action')}
if not actions:
actions = {'N/A'}
for action in actions:
stmt_type = DrugbankProcessor._get_statement_type(action)
if not stmt_type:
continue
annotations = {'drugbank_action': action}
evs = DrugbankProcessor._get_evidences(target_element)
for ev in evs:
ev.annotations = annotations
target = DrugbankProcessor.METHOD_NAME(target_element)
yield stmt_type(drug, target, evidence=evs)
@staticmethod
def _get_statement_type(action):
if action in neutral_actions:
return None
elif action in activation_actions:
return Activation
elif action in inhibition_actions:
return Inhibition
elif action in decrease_amount_actions:
return DecreaseAmount
elif action in increase_amount_actions:
return IncreaseAmount
elif action == 'N/A':
return Inhibition
else:
return None
@staticmethod
def METHOD_NAME(target_element):
name_tag = db_find(target_element, 'db:name')
name = name_tag.text
db_refs = {}
# Get Drugbank target ID
target_id = db_find(target_element, 'db:id').text
db_refs['DRUGBANKV4.TARGET'] = target_id
# Extract other xrefs
for xref_tag in db_findall(target_element, 'db:polypeptide/'
'db:external-identifiers/'
'db:external-identifier'):
resource = db_find(xref_tag, 'db:resource').text
identifier = db_find(xref_tag, 'db:identifier').text
if resource == 'HUGO Gene Nomenclature Committee (HGNC)':
db_refs['HGNC'] = identifier[5:]
elif resource == 'UniProtKB':
db_refs['UP'] = identifier
return get_standard_agent(name, db_refs=db_refs)
@staticmethod
def _get_drug_agent(drug_element):
name_tag = db_find(drug_element, 'db:name')
name = name_tag.text
db_refs = {}
# Extract the DrugBank ID
drugbank_id_tags = db_findall(drug_element, 'db:drugbank-id')
# We do a sort here because sometimes there's more than one
# DrugBank ID and we choose the "smaller" one here
drugbank_id = sorted([di.text for di in drugbank_id_tags
if di.text.startswith('DB')])[0]
db_refs['DRUGBANK'] = drugbank_id
# Extract CAS ID
cas_tag = db_find(drug_element, 'db:cas-number')
if cas_tag is not None and cas_tag.text is not None:
db_refs['CAS'] = cas_tag.text
# Extract other xrefs
for xref_tag in db_findall(drug_element, 'db:external-identifiers/'
'db:external-identifier'):
resource = db_find(xref_tag, 'db:resource').text
identifier = db_find(xref_tag, 'db:identifier').text
if resource == 'ChEMBL':
db_refs['CHEMBL'] = ensure_chembl_prefix(identifier)
elif resource == 'PubChem Compound':
db_refs['PUBCHEM'] = identifier
elif resource == 'ChEBI':
db_refs['CHEBI'] = ensure_chebi_prefix(identifier)
assert_valid_db_refs(db_refs)
return get_standard_agent(name, db_refs)
@staticmethod
def _get_evidences(target_element):
# TODO: is there a source ID we can use here?
# TODO: is there context we can extract?
# refs also has: textbooks, attachments
pmids = db_findall(target_element,
'db:references/db:articles/db:article/db:pubmed-id')
urls = db_findall(target_element,
'db:references/db:links/db:link/db:url')
if pmids:
evs = [Evidence(source_api='drugbank', pmid=pmid.text)
for pmid in pmids]
elif urls:
evs = [Evidence(source_api='drugbank',
text_refs={'URL': url.text})
for url in urls]
else:
evs = [Evidence(source_api='drugbank')]
return evs
def db_find(element, path):
return element.find(path, namespaces=drugbank_ns)
def db_findall(element, path):
return element.findall(path, namespaces=drugbank_ns)
activation_actions = {'substrate', 'agonist', 'inducer', 'potentiator',
'stimulator', 'cofactor', 'activator', 'ligand',
'chaperone', 'partial agonist', 'protector',
'positive allosteric modulator', 'positive modulator'}
inhibition_actions = {'antagonist', 'inhibitor', 'binder', 'antibody',
'inactivator', 'binding', 'blocker', 'negative modulator',
'inverse agonist', 'neutralizer', 'weak inhibitor',
'suppressor', 'disruptor',
'inhibitory allosteric modulator'}
decrease_amount_actions = {'downregulator', 'metabolizer', 'chelator',
'degradation',
'incorporation into and destabilization'}
increase_amount_actions = {'stabilization'}
neutral_actions = {'modulator', 'other/unknown', 'unknown', 'other',
                   'regulator'}
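# --- Editor's usage sketch (not part of the original module): the XML path is ---
# --- hypothetical and stands in for a local DrugBank database dump. ---
def _example_process_drugbank(xml_path='drugbank_full_database.xml'):
    tree = ElementTree.parse(xml_path)
    processor = DrugbankProcessor(tree)
    processor.extract_statements()
    return processor.statements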
|
4,441 |
iterative closes point
|
import numpy as np
from sklearn.neighbors import NearestNeighbors
def calculate_affine_matrix(pointcloud_A, pointcloud_B):
'''Calculates affine transform with the best least-squares fit transforming
keypoints A to keypoints B.
# Argument:
pointcloud_A: Array of shape (num_keypoints, 3).
pointcloud_B: Array of shape (num_keypoints, 3).
# Returns:
T: (m+1)x(m+1) homogeneous transformation matrix that maps A on to B
R: mxm rotation matrix
t: mx1 translation vector
'''
# assert pointcloud_A.shape == pointcloud_B.shape
# translate points to their centroids
centroid3D_A = np.mean(pointcloud_A, axis=0)
centroid3D_B = np.mean(pointcloud_B, axis=0)
centered_keypoints3D_A = pointcloud_A - centroid3D_A
centered_keypoints3D_B = pointcloud_B - centroid3D_B
covariance = np.dot(centered_keypoints3D_A.T, centered_keypoints3D_B)
U, S, Vt = np.linalg.svd(covariance)
# compute rotation matrix
rotation_matrix = np.dot(Vt.T, U.T)
# resolve special reflection case
if np.linalg.det(rotation_matrix) < 0:
Vt[3 - 1, :] *= -1
rotation_matrix = np.dot(Vt.T, U.T)
# compute translation
translation3D = centroid3D_B.T - np.dot(rotation_matrix, centroid3D_A.T)
print(translation3D.shape)
affine_matrix = to_affine_matrix(rotation_matrix, translation3D)
return affine_matrix
def to_affine_matrix(rotation_matrix, translation_vector):
translation_vector = translation_vector.reshape(3, 1)
affine = np.concatenate([rotation_matrix, translation_vector], axis=1)
affine = np.concatenate([affine, np.array([[0.0, 0.0, 0.0, 1.0]])], axis=0)
return affine
def nearest_neighbor(pointcloud_A, pointcloud_B):
    '''Find the nearest (Euclidean) neighbor in pointcloud_B for each point in pointcloud_A
    # Arguments:
        pointcloud_A: Nxm array of source points
        pointcloud_B: Nxm array of destination points
# Returns:
distances: Euclidean distances of the nearest neighbor
indices: dst indices of the nearest neighbor
'''
# assert pointcloud_A.shape == pointcloud_B.shape
model = NearestNeighbors(n_neighbors=1)
model.fit(pointcloud_B)
distances, indices = model.kneighbors(pointcloud_A, return_distance=True)
return distances.ravel(), indices.ravel()
def add_homogenous_coordinate(keypoints3D):
num_keypoints = len(keypoints3D)
# ones = np.ones_like(num_keypoints).reshape(-1, 1)
ones = np.ones(num_keypoints).reshape(-1, 1)
homogenous_keypoints3D = np.concatenate([keypoints3D, ones], axis=1)
return homogenous_keypoints3D
def METHOD_NAME(pointcloud_A, pointcloud_B, initial_pose=None,
max_iterations=20, tolerance=1e-3):
'''Find best least square fit that transforms pointcloud A to pointcloud B.
Input:
A: Nxm numpy array of source mD points
B: Nxm numpy array of destination mD point
initial_pose: (m+1)x(m+1) homogeneous transformation
max_iterations: exit algorithm after max_iterations
tolerance: convergence criteria
Output:
T: final homogeneous transformation that maps A on to B
distances: Euclidean distances (errors) of the nearest neighbor
i: number of iterations to converge
'''
# assert pointcloud_A.shape == pointcloud_B.shape
pointcloud_A = add_homogenous_coordinate(pointcloud_A)
pointcloud_B = add_homogenous_coordinate(pointcloud_B)
pointcloud_A_0 = np.copy(pointcloud_A)
if initial_pose is not None:
pointcloud_A = np.dot(initial_pose, pointcloud_A.T).T
previous_error = 0
for iteration_arg in range(max_iterations):
distances, indices = nearest_neighbor(pointcloud_A, pointcloud_B)
print(indices.shape, pointcloud_A.shape, pointcloud_B.shape)
pointcloud_B = pointcloud_B[indices]
print(pointcloud_B.shape)
print('***********************')
affine_matrix = calculate_affine_matrix(pointcloud_A[:, :3], pointcloud_B[:, :3])
pointcloud_A = np.dot(affine_matrix, pointcloud_A.T).T
mean_error = np.mean(distances)
print(mean_error)
if np.abs(previous_error - mean_error) < tolerance:
break
previous_error = mean_error
affine_transform = calculate_affine_matrix(pointcloud_A_0[:, :3], pointcloud_A[:, :3])
return affine_transform, distances, iteration_arg
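# --- Editor's usage sketch (not part of the original file): the rotation and ---
# --- translation below are arbitrary values chosen only for illustration. ---
def _example_icp():
    source = np.random.rand(100, 3)
    angle = np.deg2rad(5.0)
    rotation = np.array([[np.cos(angle), -np.sin(angle), 0.0],
                         [np.sin(angle), np.cos(angle), 0.0],
                         [0.0, 0.0, 1.0]])
    target = source.dot(rotation.T) + np.array([0.01, 0.02, 0.0])
    # The recovered affine transform should approximately map source onto target.
    return METHOD_NAME(source, target)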
|
4,442 |
collect vars
|
import os
import sys
import csv
import logging
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from ..common import get_logger
import paddle
import paddle.nn.functional as F
from paddle.static.quantization.utils import load_variable_data
_logger = get_logger(__name__, level=logging.INFO)
def METHOD_NAME(scope, var_names):
all_vars = {}
for var_name in var_names:
var_tensor = load_variable_data(scope, var_name)
all_vars[var_name] = var_tensor
return all_vars
def plot_box_distribution(box_data, save_dir, save_name):
all_values = sum(list(box_data.values()), [])
max_value = np.max(all_values)
min_value = np.min(all_values)
pdf_path = os.path.join(save_dir, save_name)
labels = sorted(box_data.keys())
with PdfPages(pdf_path) as pdf:
for i in range(0, len(labels), 20):
r = i + 20 if i + 20 < len(labels) else len(labels)
dist = [box_data[n] for n in labels[i:r]]
plt.boxplot(
dist, labels=labels[i:r], showbox=True, patch_artist=True)
plt.xticks(rotation=90)
plt.tick_params(axis='x')
plt.ylim([min_value, max_value])
if 'act' in save_name:
plt.xlabel('Activation Name')
else:
plt.xlabel('Weight Name')
plt.ylabel("Box Distribution")
plt.tight_layout()
plt.show()
pdf.savefig()
plt.close()
_logger.info('Box plots is saved in {}'.format(pdf_path))
def plot_hist_distribution(hist_data, save_dir, save_name):
pdf_path = os.path.join(save_dir, save_name)
with PdfPages(pdf_path) as pdf:
for name in hist_data:
plt.hist(hist_data[name][0], bins=hist_data[name][1])
plt.xlabel(name)
plt.ylabel("Probability")
locs, _ = plt.yticks()
plt.yticks(locs, np.round(locs / len(hist_data[name][0]), 3))
if 'act' in save_name:
plt.title("Hist of Activation {}".format(name))
else:
plt.title("Hist of Weight {}".format(name))
plt.show()
pdf.savefig()
plt.close()
_logger.info('Histogram plot is saved in {}'.format(pdf_path))
def save_csv(data, save_dir, save_name, csv_columns):
save_path = os.path.join(save_dir, save_name)
with open(save_path, 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=csv_columns)
writer.writeheader()
for d in data:
writer.writerow(d)
_logger.info('Activation Statistic is saved in {}'.format(save_path))
def fp_quant_cosine_similarity(executor, data_loader, float_program,
quant_program, float_scope, quant_scope,
float_fetch_list, quant_fetch_list):
cosine_similarity = []
for step, data in enumerate(data_loader()):
with paddle.static.scope_guard(float_scope):
float_preds = executor.run(
program=float_program,
feed=data,
fetch_list=float_fetch_list,
return_numpy=False)
float_preds = float_preds[0]
with paddle.static.scope_guard(quant_scope):
quant_preds = executor.run(
program=quant_program,
feed=data,
fetch_list=quant_fetch_list,
return_numpy=False)
quant_preds = quant_preds[0]
paddle.disable_static()
float_preds = paddle.to_tensor(float_preds)
quant_preds = paddle.to_tensor(quant_preds)
cos_sim = F.cosine_similarity(float_preds, quant_preds).mean()
cos_sim = cos_sim.numpy()
cosine_similarity.append(cos_sim)
if step != 0 and (step % 10 == 0):
_logger.info("[step]: %d, cosine similarity: %.9f" %
(step, np.array(cosine_similarity).mean()))
paddle.enable_static()
return np.array(cosine_similarity).mean()
def get_new_in_out_map(input_name, graph, float_scope, quant_scope, place):
input_rename_map = {}
output_rename_map = {}
removed_ops = []
for op_node in graph.all_op_nodes():
if op_node.id() in removed_ops:
continue
in_names = op_node.input_arg_names()
out_names = op_node.output_arg_names()
if out_names[0] == input_name:
in_var = graph._find_node_by_name(op_node.inputs,
op_node.input('X')[0])
out_var = graph._find_node_by_name(op_node.outputs,
op_node.output('Y')[0])
if not in_var.persistable():
# act
for op in graph.all_op_nodes():
o_ns = op.output_arg_names()
if len(o_ns) == 1 and o_ns[0] == in_var.name():
in_var_1 = graph._find_node_by_name(
op.inputs, op.input('X')[0])
graph.safe_remove_nodes(op)
removed_ops.append(op.id())
input_rename_map[out_var.node] = in_var_1
else:
# weight
with paddle.static.scope_guard(float_scope):
float_name = in_var.name().replace('.quantized', '')
float_weight = np.array(
float_scope.find_var(float_name).get_tensor())
with paddle.static.scope_guard(quant_scope):
quant_scope.find_var(in_var.name()).get_tensor().set(
float_weight, place)
input_rename_map[out_var.node] = in_var
graph.safe_remove_nodes(op_node)
removed_ops.append(op_node.id())
output_rename_map[in_var.node] = out_var
return input_rename_map, output_rename_map, removed_ops
def relink_graph(graph, input_rename_map, output_rename_map, removed_ops):
for op_node in graph.all_op_nodes():
if op_node.id() in removed_ops:
continue
for var in op_node.inputs:
if var.node in input_rename_map:
old_in = var
new_in = input_rename_map[var.node]
graph.update_input_link(old_in, new_in, op_node)
_logger.info(
f'relink {op_node.name()} \'s input node from {old_in.name()} to {new_in.name()}.'
)
for var in op_node.outputs:
if var.node in output_rename_map:
old_out = var
new_out = output_rename_map[var.node]
graph.update_input_link(old_out, new_out, op_node)
_logger.info(
f'relink {op_node.name()} \'s output node from {old_out.name()} to {new_out.name()}.'
)
return graph.to_program()
|
4,443 |
num tokens vec
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import numpy as np
import torch.utils.data
from fairseq.data import data_utils
logger = logging.getLogger(__name__)
class EpochListening:
"""Mixin for receiving updates whenever the epoch increments."""
@property
def can_reuse_epoch_itr_across_epochs(self):
"""
Whether we can reuse the :class:`fairseq.data.EpochBatchIterator` for
this dataset across epochs.
This needs to return ``False`` if the sample sizes can change across
epochs, in which case we may need to regenerate batches at each epoch.
If your dataset relies in ``set_epoch`` then you should consider setting
this to ``False``.
"""
return True
def set_epoch(self, epoch):
"""Will receive the updated epoch number at the beginning of the epoch."""
pass
class FairseqDataset(torch.utils.data.Dataset, EpochListening):
"""A dataset that provides helpers for batching."""
def __getitem__(self, index):
raise NotImplementedError
def __len__(self):
raise NotImplementedError
def collater(self, samples):
"""Merge a list of samples to form a mini-batch.
Args:
samples (List[dict]): samples to collate
Returns:
dict: a mini-batch suitable for forwarding with a Model
"""
raise NotImplementedError
def num_tokens(self, index):
"""Return the number of tokens in a sample. This value is used to
enforce ``--max-tokens`` during batching."""
raise NotImplementedError
def METHOD_NAME(self, indices):
"""Return the number of tokens for a set of positions defined by indices.
This value is used to enforce ``--max-tokens`` during batching."""
raise NotImplementedError
def size(self, index):
"""Return an example's size as a float or tuple. This value is used when
filtering a dataset with ``--max-positions``."""
raise NotImplementedError
def ordered_indices(self):
"""Return an ordered list of indices. Batches will be constructed based
on this order."""
return np.arange(len(self), dtype=np.int64)
@property
def supports_prefetch(self):
"""Whether this dataset supports prefetching."""
return False
def attr(self, attr: str, index: int):
return getattr(self, attr, None)
def prefetch(self, indices):
"""Prefetch the data required for this epoch."""
raise NotImplementedError
def get_batch_shapes(self):
"""
Return a list of valid batch shapes, for example::
[(8, 512), (16, 256), (32, 128)]
The first dimension of each tuple is the batch size and can be ``None``
to automatically infer the max batch size based on ``--max-tokens``.
The second dimension of each tuple is the max supported length as given
by :func:`fairseq.data.FairseqDataset.num_tokens`.
This will be used by :func:`fairseq.data.FairseqDataset.batch_by_size`
to restrict batch shapes. This is useful on TPUs to avoid too many
dynamic shapes (and recompilations).
"""
return None
def batch_by_size(
self,
indices,
max_tokens=None,
max_sentences=None,
required_batch_size_multiple=1,
):
"""
Given an ordered set of indices, return batches according to
*max_tokens*, *max_sentences* and *required_batch_size_multiple*.
"""
from fairseq.data import data_utils
fixed_shapes = self.get_batch_shapes()
if fixed_shapes is not None:
def adjust_bsz(bsz, num_tokens):
if bsz is None:
assert max_tokens is not None, "Must specify --max-tokens"
bsz = max_tokens // num_tokens
if max_sentences is not None:
bsz = min(bsz, max_sentences)
elif (
bsz >= required_batch_size_multiple
and bsz % required_batch_size_multiple != 0
):
bsz -= bsz % required_batch_size_multiple
return bsz
fixed_shapes = np.array(
[
[adjust_bsz(bsz, num_tokens), num_tokens]
for (bsz, num_tokens) in fixed_shapes
]
)
try:
METHOD_NAME = self.METHOD_NAME(indices).astype('int64')
except NotImplementedError:
METHOD_NAME = None
return data_utils.batch_by_size(
indices,
num_tokens_fn=self.num_tokens,
METHOD_NAME=METHOD_NAME,
max_tokens=max_tokens,
max_sentences=max_sentences,
required_batch_size_multiple=required_batch_size_multiple,
fixed_shapes=fixed_shapes,
)
def filter_indices_by_size(self, indices, max_sizes):
"""
Filter a list of sample indices. Remove those that are longer than
specified in *max_sizes*.
WARNING: don't update, override method in child classes
Args:
indices (np.array): original array of sample indices
max_sizes (int or list[int] or tuple[int]): max sample size,
can be defined separately for src and tgt (then list or tuple)
Returns:
np.array: filtered sample array
list: list of removed indices
"""
if isinstance(max_sizes, float) or isinstance(max_sizes, int):
if hasattr(self, "sizes") and isinstance(self.sizes, np.ndarray):
ignored = indices[self.sizes[indices] > max_sizes].tolist()
indices = indices[self.sizes[indices] <= max_sizes]
elif (
hasattr(self, "sizes")
and isinstance(self.sizes, list)
and len(self.sizes) == 1
):
ignored = indices[self.sizes[0][indices] > max_sizes].tolist()
indices = indices[self.sizes[0][indices] <= max_sizes]
else:
indices, ignored = data_utils._filter_by_size_dynamic(
indices, self.size, max_sizes
)
else:
indices, ignored = data_utils._filter_by_size_dynamic(
indices, self.size, max_sizes
)
return indices, ignored
@property
def supports_fetch_outside_dataloader(self):
"""Whether this dataset supports fetching outside the workers of the dataloader."""
return True
class FairseqIterableDataset(torch.utils.data.IterableDataset, EpochListening):
"""
For datasets that need to be read sequentially, usually because the data is
being streamed or otherwise can't be manipulated on a single machine.
"""
def __iter__(self):
raise NotImplementedError
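# --- Editor's subclass sketch (not part of the original module): it shows the ---
# --- overrides a concrete dataset typically provides so that batch_by_size() and ---
# --- filter_indices_by_size() work; the toy data and padding are assumptions. ---
class _ToySequenceDataset(FairseqDataset):
    def __init__(self, sequences):
        self.sequences = sequences
        self.sizes = np.array([len(s) for s in sequences], dtype=np.int64)

    def __getitem__(self, index):
        return torch.tensor(self.sequences[index], dtype=torch.long)

    def __len__(self):
        return len(self.sequences)

    def num_tokens(self, index):
        return int(self.sizes[index])

    def size(self, index):
        return int(self.sizes[index])

    def collater(self, samples):
        # Pad variable-length samples into a single batch tensor.
        return torch.nn.utils.rnn.pad_sequence(samples, batch_first=True)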
|
4,444 |
resp get pipeline
|
"""
GitLab API: https://docs.gitlab.com/ee/api/pipelines.html
"""
import pytest
import responses
from gitlab.v4.objects import (
ProjectPipeline,
ProjectPipelineTestReport,
ProjectPipelineTestReportSummary,
)
pipeline_content = {
"id": 46,
"project_id": 1,
"status": "pending",
"ref": "main",
"sha": "a91957a858320c0e17f3a0eca7cfacbff50ea29a",
"before_sha": "a91957a858320c0e17f3a0eca7cfacbff50ea29a",
"tag": False,
"yaml_errors": None,
"user": {
"name": "Administrator",
"username": "root",
"id": 1,
"state": "active",
"avatar_url": "http://www.gravatar.com/avatar/e64c7d89f26bd1972efa854d13d7dd61?s=80&d=identicon",
"web_url": "http://localhost:3000/root",
},
"created_at": "2016-08-11T11:28:34.085Z",
"updated_at": "2016-08-11T11:32:35.169Z",
"started_at": None,
"finished_at": "2016-08-11T11:32:35.145Z",
"committed_at": None,
"duration": None,
"queued_duration": 0.010,
"coverage": None,
"web_url": "https://example.com/foo/bar/pipelines/46",
}
test_report_content = {
"total_time": 5,
"total_count": 1,
"success_count": 1,
"failed_count": 0,
"skipped_count": 0,
"error_count": 0,
"test_suites": [
{
"name": "Secure",
"total_time": 5,
"total_count": 1,
"success_count": 1,
"failed_count": 0,
"skipped_count": 0,
"error_count": 0,
"test_cases": [
{
"status": "success",
"name": "Security Reports can create an auto-remediation MR",
"classname": "vulnerability_management_spec",
"execution_time": 5,
"system_output": None,
"stack_trace": None,
}
],
}
],
}
test_report_summary_content = {
"total": {
"time": 1904,
"count": 3363,
"success": 3351,
"failed": 0,
"skipped": 12,
"error": 0,
"suite_error": None,
},
"test_suites": [
{
"name": "test",
"total_time": 1904,
"total_count": 3363,
"success_count": 3351,
"failed_count": 0,
"skipped_count": 12,
"error_count": 0,
"build_ids": [66004],
"suite_error": None,
}
],
}
@pytest.fixture
def METHOD_NAME():
with responses.RequestsMock() as rsps:
rsps.add(
method=responses.GET,
url="http://localhost/api/v4/projects/1/pipelines/1",
json=pipeline_content,
content_type="application/json",
status=200,
)
yield rsps
@pytest.fixture
def resp_cancel_pipeline():
with responses.RequestsMock() as rsps:
rsps.add(
method=responses.POST,
url="http://localhost/api/v4/projects/1/pipelines/1/cancel",
json=pipeline_content,
content_type="application/json",
status=201,
)
yield rsps
@pytest.fixture
def resp_retry_pipeline():
with responses.RequestsMock() as rsps:
rsps.add(
method=responses.POST,
url="http://localhost/api/v4/projects/1/pipelines/1/retry",
json=pipeline_content,
content_type="application/json",
status=201,
)
yield rsps
@pytest.fixture
def resp_get_pipeline_test_report():
with responses.RequestsMock() as rsps:
rsps.add(
method=responses.GET,
url="http://localhost/api/v4/projects/1/pipelines/1/test_report",
json=test_report_content,
content_type="application/json",
status=200,
)
yield rsps
@pytest.fixture
def resp_get_pipeline_test_report_summary():
with responses.RequestsMock() as rsps:
rsps.add(
method=responses.GET,
url="http://localhost/api/v4/projects/1/pipelines/1/test_report_summary",
json=test_report_summary_content,
content_type="application/json",
status=200,
)
yield rsps
def test_get_project_pipeline(project, METHOD_NAME):
pipeline = project.pipelines.get(1)
assert isinstance(pipeline, ProjectPipeline)
assert pipeline.ref == "main"
def test_cancel_project_pipeline(project, resp_cancel_pipeline):
pipeline = project.pipelines.get(1, lazy=True)
output = pipeline.cancel()
assert output["ref"] == "main"
def test_retry_project_pipeline(project, resp_retry_pipeline):
pipeline = project.pipelines.get(1, lazy=True)
output = pipeline.retry()
assert output["ref"] == "main"
def test_get_project_pipeline_test_report(project, resp_get_pipeline_test_report):
pipeline = project.pipelines.get(1, lazy=True)
test_report = pipeline.test_report.get()
assert isinstance(test_report, ProjectPipelineTestReport)
assert test_report.total_time == 5
assert test_report.test_suites[0]["name"] == "Secure"
def test_get_project_pipeline_test_report_summary(
project, resp_get_pipeline_test_report_summary
):
pipeline = project.pipelines.get(1, lazy=True)
test_report_summary = pipeline.test_report_summary.get()
assert isinstance(test_report_summary, ProjectPipelineTestReportSummary)
assert test_report_summary.total["count"] == 3363
assert test_report_summary.test_suites[0]["name"] == "test"
|
4,445 |
location x changed
|
# -*- coding: utf-8 -*-
"""
Description:
Usage:
Author: YingzhiGou
Date: 20/06/2017
"""
from qtpy.QtWidgets import QGroupBox
from mtpy.gui.SmartMT.ui_asset.groupbox_text_box import Ui_GroupBox_text_box
class TextBox(QGroupBox):
def __init__(self, parent, point_size=True, key_size=False):
QGroupBox.__init__(self, parent)
self.ui = Ui_GroupBox_text_box()
self.ui.setupUi(self)
self._point_size = point_size
self._key_size = key_size
self.ui.comboBox_size.model().item(len(self._size_keys)).setEnabled(self._point_size)
# connect signal
self.ui.checkBox_size.stateChanged.connect(self._size_state_changed)
self.ui.checkBox_weight.stateChanged.connect(self._weight_state_changed)
self.ui.comboBox_size.currentIndexChanged.connect(self._size_index_changed)
self.ui.horizontalSlider_x.valueChanged.connect(self._x_slider_value_changed)
self.ui.horizontalSlider_y.valueChanged.connect(self._y_slider_value_changed)
self.ui.doubleSpinBox_x.editingFinished.connect(self._update_slider_x)
self.ui.doubleSpinBox_y.editingFinished.connect(self._update_slider_y)
self.ui.horizontalSlider_x_pad.valueChanged.connect(self._x_pad_slider_value_changed)
self.ui.horizontalSlider_y_pad.valueChanged.connect(self._y_pad_slider_value_changed)
self.ui.doubleSpinBox_x_pad.editingFinished.connect(self._update_slider_x_pad)
self.ui.doubleSpinBox_y_pad.editingFinished.connect(self._update_slider_y_pad)
_size_keys = [
'xx-small',
'x-small',
'small',
'medium',
'large',
'x-large',
'xx-large'
]
def METHOD_NAME(self, p_int):
self.ui.horizontalSlider_x.setEnabled(p_int != 0)
self.ui.doubleSpinBox_x.setEnabled(p_int != 0)
def _location_y_changed(self, p_int):
self.ui.horizontalSlider_y.setEnabled(p_int != 0)
self.ui.doubleSpinBox_y.setEnabled(p_int != 0)
def _size_index_changed(self, p_int):
self.ui.spinBox_size.setEnabled(p_int >= len(self._size_keys))
def _size_state_changed(self, p_int):
if self._key_size:
self.ui.comboBox_size.setEnabled(p_int != 0)
else:
self.ui.comboBox_size.setCurrentIndex(len(self._size_keys))
def _weight_state_changed(self, p_int):
self.ui.comboBox_weight.setEnabled(p_int != 0)
def _x_slider_value_changed(self, value):
self.ui.doubleSpinBox_x.setValue(value / 100.0)
def _y_slider_value_changed(self, value):
self.ui.doubleSpinBox_y.setValue(value / 100.0)
def _update_slider_x(self):
value = int(self.ui.doubleSpinBox_x.value() * 100)
self.ui.horizontalSlider_x.setValue(value)
def _update_slider_y(self):
value = int(self.ui.doubleSpinBox_y.value() * 100)
self.ui.horizontalSlider_y.setValue(value)
def _x_pad_slider_value_changed(self, value):
self.ui.doubleSpinBox_x_pad.setValue(value / 100.0)
def _y_pad_slider_value_changed(self, value):
self.ui.doubleSpinBox_y_pad.setValue(value / 100.0)
def _update_slider_x_pad(self):
value = int(self.ui.doubleSpinBox_x_pad.value() * 100)
self.ui.horizontalSlider_x_pad.setValue(value)
def _update_slider_y_pad(self):
value = int(self.ui.doubleSpinBox_y_pad.value() * 100)
self.ui.horizontalSlider_y_pad.setValue(value)
def get_size(self):
if self.ui.checkBox_size.isChecked():
return self._size_keys[self.ui.comboBox_size.currentIndex()] \
if self.ui.comboBox_size.currentIndex() < len(self._size_keys) \
else self.ui.spinBox_size.value()
else:
return None
def get_weight(self):
if self.ui.checkBox_weight.isChecked():
return str(self.ui.comboBox_weight.currentText())
else:
return None
def get_location(self):
if self.ui.groupBox_location.isChecked():
return self.ui.doubleSpinBox_x.value(), self.ui.doubleSpinBox_y.value()
else:
return None
def get_xpad(self):
if self.ui.groupBox_padding.isChecked():
return self.ui.doubleSpinBox_x_pad.value()
else:
return None
def get_ypad(self):
if self.ui.groupBox_padding.isChecked():
return self.ui.doubleSpinBox_y_pad.value()
else:
return None
|
4,446 |
find dir
|
"""This file and its contents are licensed under the Apache License 2.0. Please see the included NOTICE for copyright information and LICENSE for a copy of the license.
"""
import glob
import io
import ipaddress
import itertools
import os
import shutil
import socket
from contextlib import contextmanager
from tempfile import mkdtemp, mkstemp
import pkg_resources
import ujson as json
import yaml
from appdirs import user_cache_dir, user_config_dir, user_data_dir
from urllib3.util import parse_url
# full path import results in unit test failures
from .exceptions import InvalidUploadUrlError
_DIR_APP_NAME = 'label-studio'
def good_path(path):
return os.path.abspath(os.path.expanduser(path))
def find_node(package_name, node_path, node_type):
assert node_type in ('dir', 'file', 'any')
basedir = pkg_resources.resource_filename(package_name, '')
node_path = os.path.join(*node_path.split('/')) # linux to windows compatibility
search_by_path = '/' in node_path or '\\' in node_path
for path, dirs, filenames in os.walk(basedir):
if node_type == 'file':
nodes = filenames
elif node_type == 'dir':
nodes = dirs
else:
nodes = filenames + dirs
if search_by_path:
for found_node in nodes:
found_node = os.path.join(path, found_node)
if found_node.endswith(node_path):
return found_node
elif node_path in nodes:
return os.path.join(path, node_path)
else:
raise IOError(
'Could not find "%s" at package "%s"' % (node_path, basedir)
)
def find_file(file):
return find_node('label_studio', file, 'file')
def METHOD_NAME(directory):
return find_node('label_studio', directory, 'dir')
@contextmanager
def get_temp_file():
fd, path = mkstemp()
yield path
os.close(fd)
@contextmanager
def get_temp_dir():
dirpath = mkdtemp()
yield dirpath
shutil.rmtree(dirpath)
def get_config_dir():
config_dir = user_config_dir(appname=_DIR_APP_NAME)
try:
os.makedirs(config_dir, exist_ok=True)
except OSError:
pass
return config_dir
def get_data_dir():
data_dir = user_data_dir(appname=_DIR_APP_NAME)
os.makedirs(data_dir, exist_ok=True)
return data_dir
def get_cache_dir():
cache_dir = user_cache_dir(appname=_DIR_APP_NAME)
os.makedirs(cache_dir, exist_ok=True)
return cache_dir
def delete_dir_content(dirpath):
for f in glob.glob(dirpath + '/*'):
remove_file_or_dir(f)
def remove_file_or_dir(path):
if os.path.isfile(path):
os.remove(path)
elif os.path.isdir(path):
shutil.rmtree(path)
def get_all_files_from_dir(d):
out = []
for name in os.listdir(d):
filepath = os.path.join(d, name)
if os.path.isfile(filepath):
out.append(filepath)
return out
def iter_files(root_dir, ext):
for root, _, files in os.walk(root_dir):
for f in files:
if f.lower().endswith(ext):
yield os.path.join(root, f)
def json_load(file, int_keys=False):
with io.open(file, encoding='utf8') as f:
data = json.load(f)
if int_keys:
return {int(k): v for k, v in data.items()}
else:
return data
def read_yaml(filepath):
if not os.path.exists(filepath):
filepath = find_file(filepath)
with io.open(filepath, encoding='utf-8') as f:
data = yaml.load(f, Loader=yaml.FullLoader) # nosec
return data
def read_bytes_stream(filepath):
with open(filepath, mode='rb') as f:
return io.BytesIO(f.read())
def get_all_dirs_from_dir(d):
out = []
for name in os.listdir(d):
filepath = os.path.join(d, name)
if os.path.isdir(filepath):
out.append(filepath)
return out
class SerializableGenerator(list):
"""Generator that is serializable by JSON"""
def __init__(self, iterable):
tmp_body = iter(iterable)
try:
self._head = iter([next(tmp_body)])
self.append(tmp_body)
except StopIteration:
self._head = []
def __iter__(self):
return itertools.chain(self._head, *self[:1])
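# A minimal sketch of how SerializableGenerator behaves (hypothetical data; kept as
# comments so importing this module stays side-effect free):
#
#   def rows():
#       for i in range(3):
#           yield {'id': i}
#
#   sg = SerializableGenerator(rows())
#   list(sg)                            # -> [{'id': 0}, {'id': 1}, {'id': 2}], pulled lazily via __iter__
#   list(SerializableGenerator([]))     # -> [] for an empty iterable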
def validate_upload_url(url, block_local_urls=True):
"""Utility function for defending against SSRF attacks. Raises
- InvalidUploadUrlError if the url is not HTTP[S], or if block_local_urls is enabled
and the URL resolves to a local address.
- LabelStudioApiException if the hostname cannot be resolved
:param url: Url to be checked for validity/safety,
:param block_local_urls: Whether urls that resolve to local/private networks should be allowed.
"""
parsed_url = parse_url(url)
if parsed_url.scheme not in ('http', 'https'):
raise InvalidUploadUrlError
domain = parsed_url.host
try:
ip = socket.gethostbyname(domain)
except socket.error:
from core.utils.exceptions import LabelStudioAPIException
raise LabelStudioAPIException(f"Can't resolve hostname {domain}")
if not block_local_urls:
return
if ip == '0.0.0.0': # nosec
raise InvalidUploadUrlError
local_subnets = [
'127.0.0.0/8',
'10.0.0.0/8',
'172.16.0.0/12',
'192.168.0.0/16',
]
for subnet in local_subnets:
if ipaddress.ip_address(ip) in ipaddress.ip_network(subnet):
raise InvalidUploadUrlError
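# A short usage sketch (hypothetical URLs; kept as comments because the check performs a
# real DNS lookup):
#
#   validate_upload_url('https://example.com/tasks.json')                     # public host: passes
#   validate_upload_url('ftp://example.com/tasks.json')                       # non-HTTP scheme: InvalidUploadUrlError
#   validate_upload_url('http://127.0.0.1/internal', block_local_urls=True)   # loopback: InvalidUploadUrlError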
|
4,447 |
main
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2014, Vedit Firat Arig <[email protected]>
# Outline and parts are reused from Mark Theunissen's mysql_db module
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: mssql_db
short_description: Add or remove MSSQL databases from a remote host
description:
- Add or remove MSSQL databases from a remote host.
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: none
diff_mode:
support: none
options:
name:
description:
- name of the database to add or remove
required: true
aliases: [ db ]
type: str
login_user:
description:
- The username used to authenticate with
type: str
default: ''
login_password:
description:
- The password used to authenticate with
type: str
default: ''
login_host:
description:
- Host running the database
type: str
required: true
login_port:
description:
      - Port of the MSSQL server. Requires login_host to be defined as something other than localhost if login_port is used
default: '1433'
type: str
state:
description:
- The database state
default: present
choices: [ "present", "absent", "import" ]
type: str
target:
description:
- Location, on the remote host, of the dump file to read from or write to. Uncompressed SQL
        files (C(.sql)) are supported.
type: str
autocommit:
description:
      - Automatically commit the change only if the import succeeds. Sometimes it is necessary to use autocommit=true, since some content can't be changed
within a transaction.
type: bool
default: false
notes:
- Requires the pymssql Python package on the remote host. For Ubuntu, this
is as easy as pip install pymssql (See M(ansible.builtin.pip).)
requirements:
- python >= 2.7
- pymssql
author: Vedit Firat Arig (@vedit)
'''
EXAMPLES = '''
- name: Create a new database with name 'jackdata'
community.general.mssql_db:
name: jackdata
state: present
# Copy database dump file to remote host and restore it to database 'my_db'
- name: Copy database dump file to remote host
ansible.builtin.copy:
src: dump.sql
dest: /tmp
- name: Restore the dump file to database 'my_db'
community.general.mssql_db:
name: my_db
state: import
target: /tmp/dump.sql
'''
RETURN = '''
#
'''
import os
import traceback
PYMSSQL_IMP_ERR = None
try:
import pymssql
except ImportError:
PYMSSQL_IMP_ERR = traceback.format_exc()
mssql_found = False
else:
mssql_found = True
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
def db_exists(conn, cursor, db):
cursor.execute("SELECT name FROM master.sys.databases WHERE name = %s", db)
conn.commit()
return bool(cursor.rowcount)
def db_create(conn, cursor, db):
cursor.execute("CREATE DATABASE [%s]" % db)
return db_exists(conn, cursor, db)
def db_delete(conn, cursor, db):
try:
cursor.execute("ALTER DATABASE [%s] SET single_user WITH ROLLBACK IMMEDIATE" % db)
except Exception:
pass
cursor.execute("DROP DATABASE [%s]" % db)
return not db_exists(conn, cursor, db)
def db_import(conn, cursor, module, db, target):
if os.path.isfile(target):
with open(target, 'r') as backup:
sqlQuery = "USE [%s]\n" % db
for line in backup:
if line is None:
break
elif line.startswith('GO'):
cursor.execute(sqlQuery)
sqlQuery = "USE [%s]\n" % db
else:
sqlQuery += line
cursor.execute(sqlQuery)
conn.commit()
return 0, "import successful", ""
else:
return 1, "cannot find target file", "cannot find target file"
def METHOD_NAME():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True, aliases=['db']),
login_user=dict(default=''),
login_password=dict(default='', no_log=True),
login_host=dict(required=True),
login_port=dict(default='1433'),
target=dict(default=None),
autocommit=dict(type='bool', default=False),
state=dict(
default='present', choices=['present', 'absent', 'import'])
)
)
if not mssql_found:
module.fail_json(msg=missing_required_lib('pymssql'), exception=PYMSSQL_IMP_ERR)
db = module.params['name']
state = module.params['state']
autocommit = module.params['autocommit']
target = module.params["target"]
login_user = module.params['login_user']
login_password = module.params['login_password']
login_host = module.params['login_host']
login_port = module.params['login_port']
login_querystring = login_host
if login_port != "1433":
login_querystring = "%s:%s" % (login_host, login_port)
if login_user != "" and login_password == "":
module.fail_json(msg="when supplying login_user arguments login_password must be provided")
try:
conn = pymssql.connect(user=login_user, password=login_password, host=login_querystring, database='master')
cursor = conn.cursor()
except Exception as e:
if "Unknown database" in str(e):
errno, errstr = e.args
module.fail_json(msg="ERROR: %s %s" % (errno, errstr))
else:
module.fail_json(msg="unable to connect, check login_user and login_password are correct, or alternatively check your "
"@sysconfdir@/freetds.conf / ${HOME}/.freetds.conf")
conn.autocommit(True)
changed = False
if db_exists(conn, cursor, db):
if state == "absent":
try:
changed = db_delete(conn, cursor, db)
except Exception as e:
module.fail_json(msg="error deleting database: " + str(e))
elif state == "import":
conn.autocommit(autocommit)
rc, stdout, stderr = db_import(conn, cursor, module, db, target)
if rc != 0:
module.fail_json(msg="%s" % stderr)
else:
module.exit_json(changed=True, db=db, msg=stdout)
else:
if state == "present":
try:
changed = db_create(conn, cursor, db)
except Exception as e:
module.fail_json(msg="error creating database: " + str(e))
elif state == "import":
try:
changed = db_create(conn, cursor, db)
except Exception as e:
module.fail_json(msg="error creating database: " + str(e))
conn.autocommit(autocommit)
rc, stdout, stderr = db_import(conn, cursor, module, db, target)
if rc != 0:
module.fail_json(msg="%s" % stderr)
else:
module.exit_json(changed=True, db=db, msg=stdout)
module.exit_json(changed=changed, db=db)
if __name__ == '__main__':
METHOD_NAME()
|
4,448 |
validate fields
|
"""Credential format inner object."""
from collections import namedtuple
from enum import Enum
from typing import TYPE_CHECKING, Mapping, Sequence, Type, Union
from uuid import uuid4
from marshmallow import EXCLUDE, fields
from .....messaging.decorators.attach_decorator import AttachDecorator
from .....messaging.models.base import BaseModel, BaseModelSchema
from .....messaging.valid import UUID4_EXAMPLE
from .....utils.classloader import DeferLoad
if TYPE_CHECKING:
from ..formats.handler import V20PresFormatHandler
# aries prefix
FormatSpec = namedtuple("FormatSpec", "aries handler")
class V20PresFormat(BaseModel):
"""Present-proof protocol message attachment format."""
class Meta:
"""Present-proof protocol message attachment format metadata."""
schema_class = "V20PresFormatSchema"
class Format(Enum):
"""Attachment format."""
INDY = FormatSpec(
"hlindy/",
DeferLoad(
"aries_cloudagent.protocols.present_proof.v2_0"
".formats.indy.handler.IndyPresExchangeHandler"
),
)
DIF = FormatSpec(
"dif/",
DeferLoad(
"aries_cloudagent.protocols.present_proof.v2_0"
".formats.dif.handler.DIFPresFormatHandler"
),
)
@classmethod
def get(cls, label: Union[str, "V20PresFormat.Format"]):
"""Get format enum for label."""
if isinstance(label, str):
for fmt in V20PresFormat.Format:
if label.startswith(fmt.aries) or label == fmt.api:
return fmt
elif isinstance(label, V20PresFormat.Format):
return label
return None
@property
def api(self) -> str:
"""Admin API specifier."""
return self.name.lower()
@property
def aries(self) -> str:
"""Accessor for aries identifier."""
return self.value.aries
@property
def handler(self) -> Type["V20PresFormatHandler"]:
"""Accessor for presentation exchange format handler."""
return self.value.handler.resolved
def METHOD_NAME(self, message_type: str, attachment_data: Mapping):
"""Raise ValidationError for invalid attachment formats."""
self.handler.METHOD_NAME(message_type, attachment_data)
def get_attachment_data(
self,
formats: Sequence["V20PresFormat"],
attachments: Sequence[AttachDecorator],
):
"""Find attachment of current format, decode and return its content."""
for fmt in formats:
if V20PresFormat.Format.get(fmt.format) is self:
attach_id = fmt.attach_id
break
else:
return None
for atch in attachments:
if atch.ident == attach_id:
return atch.content
return None
def __init__(
self,
*,
attach_id: str = None,
format_: str = None,
):
"""Initialize present-proof protocol message attachment format."""
self.attach_id = attach_id or uuid4()
self.format_ = format_
@property
def format(self) -> str:
"""Return format."""
return self.format_
class V20PresFormatSchema(BaseModelSchema):
"""Present-proof protocol message attachment format schema."""
class Meta:
"""Present-proof protocol message attachment format schema metadata."""
model_class = V20PresFormat
unknown = EXCLUDE
attach_id = fields.Str(
required=True,
allow_none=False,
metadata={"description": "Attachment identifier", "example": UUID4_EXAMPLE},
)
format_ = fields.Str(
required=True,
allow_none=False,
data_key="format",
metadata={
"description": "Attachment format specifier",
"example": "dif/presentation-exchange/[email protected]",
},
)
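# A brief usage sketch (identifiers below are illustrative, not fixed values):
#
#   fmt = V20PresFormat(format_="dif/presentation-exchange/[email protected]")
#   V20PresFormat.Format.get(fmt.format)   # -> Format.DIF, matched by the "dif/" aries prefix
#   V20PresFormat.Format.get("indy")       # -> Format.INDY, matched by the lower-case api name
#   V20PresFormatSchema().dump(fmt)        # -> {"attach_id": "<uuid>", "format": "dif/presentation-exchange/[email protected]"}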
|
4,449 |
add to vat
|
import sys, json, gzip, re, time
from collections import defaultdict
from pprint import pprint
from find_activity import find_activity
# Track vat import/exports and record the counts after each delivery, to
# estimate the size of the heap over time. We'll compare this against runtime
# (especially GC time) to look for a correlation.
# we track current vat imports and exports by vref
vat_clist = {} # vref => { kref, reachable, data }
def METHOD_NAME(vrefs, data):
for vref in vrefs:
if vref not in vat_clist:
vat_clist[vref] = { "reachable": True, "data": data }
vat_clist[vref]["reachable"] = True
def drop_from_vat(vrefs):
for vref in vrefs:
assert vref in vat_clist, vref
assert vat_clist[vref]["reachable"], vref
vat_clist[vref]["reachable"] = False
def retire_from_vat(vrefs):
for vref in vrefs:
if vref not in vat_clist:
print("odd: retire(%s) not in clist" % vref)
continue
assert vref in vat_clist, vref
if vref[0] == "o":
assert not vat_clist[vref]["reachable"], vref
del vat_clist[vref]
fn = sys.argv[1]
vatID = sys.argv[2]
(last_use, active, activity, could_drop, drop_schedule) = find_activity(fn, vatID)
print("deliverynum,promises,imports,exports,exports_used")
def print_heapsize(deliverynum, could_drop):
imports = 0
exports = 0
promises = 0
exports_used = 0
for vref in vat_clist:
if vref.startswith("o+"):
exports += 1
if vref not in could_drop or deliverynum < could_drop[vref]:
exports_used += 1
elif vref.startswith("o-"):
imports += 1
elif vref.startswith("p"):
promises += 1
print("%d,%d,%d,%d,%d" % (deliverynum, promises, imports, exports, exports_used))
opener = gzip.open if fn.endswith(".gz") else open
with opener(sys.argv[1]) as f:
for line in f:
if isinstance(line, bytes):
line = line.decode("utf-8")
if not line.strip():
continue
data = json.loads(line.strip())
type = data["type"]
if type not in ["deliver", "syscall", "deliver-result"]:
continue
if data["vatID"] != vatID:
continue
#print()
#print(sorted(vat_clist.keys()))
#print("line:", line.strip())
deliverynum = data["deliveryNum"]
added = set()
dropped = set()
retired = set()
# For each non-dropped exported object, record the last delivery
# which cited it. Our report has a column for what the heap size
# would have been if we'd dropped both imports and exports as soon as
# possible.
if type == "deliver":
vd = data["vd"]
dtype = vd[0]
if dtype == "message":
_, target, msg = vd
for vref in msg["args"]["slots"]:
added.add(vref)
result_vpid = msg.get("result") # maybe null/None
if result_vpid:
added.add(result_vpid)
if dtype == "notify":
_, resolutions = vd
for (vpid, reject, resdata) in resolutions:
retired.add(vpid)
for vref in resdata["slots"]:
added.add(vref)
if dtype == "dropExports":
dropped.update(set(vd[1]))
if dtype == "retireExports":
retired.update(set(vd[1]))
if dtype == "retireImports":
retired.update(set(vd[1]))
if type == "syscall":
vsc = data["vsc"]
stype = vsc[0]
if stype == "send":
_, target, msg = vsc
for vref in msg["args"]["slots"]:
added.add(vref)
result_vpid = msg.get("result")
if result_vpid:
added.add(result_vpid)
if stype == "invoke":
                raise NotImplementedError("unable to handle syscall.invoke yet")
if stype == "resolve":
_, resolutions = vsc
for (vpid, reject, resdata) in resolutions:
retired.add(vpid)
for vref in resdata["slots"]:
added.add(vref)
if stype == "dropImports":
dropped.update(set(vsc[1]))
if stype == "retireImports":
retired.update(set(vsc[1]))
if stype == "retireExports":
retired.update(set(vsc[1]))
#print(deliverynum, "add/drop/retire", added, dropped, retired)
METHOD_NAME(added, data)
drop_from_vat(dropped)
retire_from_vat(retired)
if type == "deliver-result":
print_heapsize(deliverynum, could_drop)
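# Typical invocation (slogfile name and vat ID are illustrative):
#   python3 <this_script>.py run.slog.gz v11 > heap-size.csv
# which writes one CSV row per delivery with the promise/import/export counts printed above.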
|
4,450 |
handle
|
"""
Command to import modulestore content into Content Libraries.
"""
import argparse
import logging
from django.conf import settings
from django.core.management import BaseCommand, CommandError
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locator import LibraryLocatorV2
from openedx.core.djangoapps.content_libraries import api as contentlib_api
log = logging.getLogger(__name__)
class Command(BaseCommand):
"""
    Import modulestore content, referenced by a course, into a Content Libraries
library.
"""
def add_arguments(self, parser):
"""
Add arguments to the argument parser.
"""
parser.add_argument(
'library-key',
type=LibraryLocatorV2.from_string,
help=('Usage key of the Content Library to import content into.'),
)
parser.add_argument(
'course-key',
type=CourseKey.from_string,
help=('The Course Key string, used to identify the course to import '
'content from.'),
)
subparser = parser.add_subparsers(
title='Courseware location and methods',
dest='method',
description=('Select the method and location to locate the course and '
'its contents.')
)
api_parser = subparser.add_parser(
'api',
help=('Query and retrieve course blocks from a remote instance using '
'Open edX course and OLX export APIs. You need to enable API access '
'on the instance.')
)
api_parser.add_argument(
'--lms-url',
default=settings.LMS_ROOT_URL,
help=("The LMS URL, used to retrieve course content (default: "
"'%(default)s')."),
)
api_parser.add_argument(
'--studio-url',
default=f"https://{settings.CMS_BASE}",
help=("The Studio URL, used to retrieve block OLX content (default: "
"'%(default)s')"),
)
oauth_group = api_parser.add_mutually_exclusive_group(required=False)
oauth_group.add_argument(
'--oauth-creds-file',
type=argparse.FileType('r'),
help=('The edX OAuth credentials in a filename. The first line is '
'the OAuth key, second line is the OAuth secret. This is '
'preferred compared to passing the credentials in the command '
'line.'),
)
oauth_group.add_argument(
'--oauth-creds',
nargs=2,
help=('The edX OAuth credentials in the command line. The first '
'argument is the OAuth secret, the second argument is the '
'OAuth key. Notice that command line arguments are insecure, '
'see `--oauth-creds-file`.'),
)
subparser.add_parser(
'modulestore',
help=("Use a local modulestore instance to retrieve blocks database on "
"the instance where the command is being run. You don't need "
"to enable API access.")
)
def METHOD_NAME(self, *args, **options):
"""
        Collect all blocks from a course that are "importable" and write them to a
        blockstore library.
"""
# Search for the library.
try:
contentlib_api.get_library(options['library-key'])
except contentlib_api.ContentLibraryNotFound as exc:
raise CommandError("The library specified does not exist: "
f"{options['library-key']}") from exc
# Validate the method and its arguments, instantiate the openedx client.
if options['method'] == 'api':
if options['oauth_creds_file']:
with options['oauth_creds_file'] as creds_f:
oauth_key, oauth_secret = [v.strip() for v in creds_f.readlines()]
elif options['oauth_creds']:
oauth_key, oauth_secret = options['oauth_creds']
else:
raise CommandError("Method 'api' requires one of the "
"--oauth-* options, and none was specified.")
edx_client = contentlib_api.EdxApiImportClient(
options['lms_url'],
options['studio_url'],
oauth_key,
oauth_secret,
library_key=options['library-key'],
)
elif options['method'] == 'modulestore':
edx_client = contentlib_api.EdxModulestoreImportClient(
library_key=options['library-key'],
)
else:
raise CommandError(f"Method not supported: {options['method']}")
failed_blocks = []
def on_progress(block_key, block_num, block_count, exception=None):
self.stdout.write(f"{block_num}/{block_count}: {block_key}: ", ending='')
# In case stdout is a term and line buffered:
self.stdout.flush()
if exception:
self.stdout.write(self.style.ERROR('❌'))
log.error('Failed to import block: %s', block_key, exc_info=exception)
failed_blocks.append(block_key)
else:
self.stdout.write(self.style.SUCCESS('✓'))
edx_client.import_blocks_from_course(options['course-key'], on_progress)
if failed_blocks:
self.stdout.write(self.style.ERROR(f"❌ {len(failed_blocks)} failed:"))
for key in failed_blocks:
self.stdout.write(self.style.ERROR(str(key)))
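    # Hypothetical invocations (the management command name is this file's module name, and
    # the key formats below are illustrative; run with CMS settings):
    #   python manage.py <command> lib:MyOrg:my-lib course-v1:MyOrg+Course+Run modulestore
    #   python manage.py <command> lib:MyOrg:my-lib course-v1:MyOrg+Course+Run api \
    #       --lms-url https://lms.example.com --studio-url https://studio.example.com \
    #       --oauth-creds-file /path/to/creds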
|
4,451 |
get optimizer
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from tqdm import tqdm
from pytorch_transformers.modeling_bert import (
BertPreTrainedModel,
BertConfig,
BertModel,
)
from pytorch_transformers.tokenization_bert import BertTokenizer
from Marie.EntityLinking.blink.common.ranker_base import BertEncoder, get_model_obj
from Marie.EntityLinking.blink.common.optimizer import get_bert_optimizer
# File names used by save_model(); the values follow the pytorch_transformers conventions.
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.json"
def load_biencoder(params):
# Init model
biencoder = BiEncoderRanker(params)
return biencoder
class BiEncoderModule(torch.nn.Module):
def __init__(self, params):
super(BiEncoderModule, self).__init__()
ctxt_bert = BertModel.from_pretrained(params["bert_model"])
cand_bert = BertModel.from_pretrained(params['bert_model'])
self.context_encoder = BertEncoder(
ctxt_bert,
params["out_dim"],
layer_pulled=params["pull_from_layer"],
add_linear=params["add_linear"],
)
self.cand_encoder = BertEncoder(
cand_bert,
params["out_dim"],
layer_pulled=params["pull_from_layer"],
add_linear=params["add_linear"],
)
self.config = ctxt_bert.config
def forward(
self,
token_idx_ctxt,
segment_idx_ctxt,
mask_ctxt,
token_idx_cands,
segment_idx_cands,
mask_cands,
):
embedding_ctxt = None
if token_idx_ctxt is not None:
embedding_ctxt = self.context_encoder(
token_idx_ctxt, segment_idx_ctxt, mask_ctxt
)
embedding_cands = None
if token_idx_cands is not None:
embedding_cands = self.cand_encoder(
token_idx_cands, segment_idx_cands, mask_cands
)
return embedding_ctxt, embedding_cands
class BiEncoderRanker(torch.nn.Module):
def __init__(self, params, shared=None):
super(BiEncoderRanker, self).__init__()
self.params = params
self.device = torch.device(
"cuda" if torch.cuda.is_available() and not params["no_cuda"] else "cpu"
)
self.n_gpu = torch.cuda.device_count()
# init tokenizer
self.NULL_IDX = 0
self.START_TOKEN = "[CLS]"
self.END_TOKEN = "[SEP]"
self.tokenizer = BertTokenizer.from_pretrained(
params["bert_model"], do_lower_case=params["lowercase"]
)
# init model
self.build_model()
model_path = params.get("path_to_model", None)
if model_path is not None:
self.load_model(model_path)
self.model = self.model.to(self.device)
self.data_parallel = params.get("data_parallel")
if self.data_parallel:
self.model = torch.nn.DataParallel(self.model)
def load_model(self, fname, cpu=False):
if cpu:
state_dict = torch.load(fname, map_location=lambda storage, location: "cpu")
else:
state_dict = torch.load(fname)
self.model.load_state_dict(state_dict)
def build_model(self):
self.model = BiEncoderModule(self.params)
def save_model(self, output_dir):
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = get_model_obj(self.model)
output_model_file = os.path.join(output_dir, WEIGHTS_NAME)
output_config_file = os.path.join(output_dir, CONFIG_NAME)
torch.save(model_to_save.state_dict(), output_model_file)
model_to_save.config.to_json_file(output_config_file)
def METHOD_NAME(self, optim_states=None, saved_optim_type=None):
return get_bert_optimizer(
[self.model],
self.params["type_optimization"],
self.params["learning_rate"],
fp16=self.params.get("fp16"),
)
def encode_context(self, cands):
token_idx_cands, segment_idx_cands, mask_cands = to_bert_input(
cands, self.NULL_IDX
)
embedding_context, _ = self.model(
token_idx_cands, segment_idx_cands, mask_cands, None, None, None
)
return embedding_context.cpu().detach()
def encode_candidate(self, cands):
token_idx_cands, segment_idx_cands, mask_cands = to_bert_input(
cands, self.NULL_IDX
)
_, embedding_cands = self.model(
None, None, None, token_idx_cands, segment_idx_cands, mask_cands
)
return embedding_cands.cpu().detach()
# TODO: why do we need cpu here?
# return embedding_cands
# Score candidates given context input and label input
# If cand_encs is provided (pre-computed), cand_ves is ignored
def score_candidate(
self,
text_vecs,
cand_vecs,
random_negs=True,
cand_encs=None, # pre-computed candidate encoding.
):
# Encode contexts first
token_idx_ctxt, segment_idx_ctxt, mask_ctxt = to_bert_input(
text_vecs, self.NULL_IDX
)
embedding_ctxt, _ = self.model(
token_idx_ctxt, segment_idx_ctxt, mask_ctxt, None, None, None
)
# Candidate encoding is given, do not need to re-compute
# Directly return the score of context encoding and candidate encoding
if cand_encs is not None:
return embedding_ctxt.mm(cand_encs.t())
# Train time. We compare with all elements of the batch
token_idx_cands, segment_idx_cands, mask_cands = to_bert_input(
cand_vecs, self.NULL_IDX
)
_, embedding_cands = self.model(
None, None, None, token_idx_cands, segment_idx_cands, mask_cands
)
if random_negs:
# train on random negatives
return embedding_ctxt.mm(embedding_cands.t())
else:
# train on hard negatives
embedding_ctxt = embedding_ctxt.unsqueeze(1) # batchsize x 1 x embed_size
            embedding_cands = embedding_cands.unsqueeze(2)  # batchsize x embed_size x 1
scores = torch.bmm(embedding_ctxt, embedding_cands) # batchsize x 1 x 1
scores = torch.squeeze(scores)
return scores
# label_input -- negatives provided
# If label_input is None, train on in-batch negatives
def forward(self, context_input, cand_input, label_input=None):
flag = label_input is None
scores = self.score_candidate(context_input, cand_input, flag)
bs = scores.size(0)
if label_input is None:
target = torch.LongTensor(torch.arange(bs))
target = target.to(self.device)
loss = F.cross_entropy(scores, target, reduction="mean")
else:
loss_fct = nn.BCEWithLogitsLoss(reduction="mean")
# TODO: add parameters?
loss = loss_fct(scores, label_input)
return loss, scores
def to_bert_input(token_idx, null_idx):
""" token_idx is a 2D tensor int.
return token_idx, segment_idx and mask
"""
segment_idx = token_idx * 0
mask = token_idx != null_idx
# nullify elements in case self.NULL_IDX was not 0
token_idx = token_idx * mask.long()
return token_idx, segment_idx, mask
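# A minimal sketch of to_bert_input on a padded batch (token ids are illustrative; kept
# as comments so importing this module has no side effects):
#
#   token_idx = torch.tensor([[101, 2054, 102, 0, 0]])   # one sequence padded with NULL_IDX == 0
#   tokens, segments, mask = to_bert_input(token_idx, null_idx=0)
#   # segments -> tensor([[0, 0, 0, 0, 0]])
#   # mask     -> tensor([[True, True, True, False, False]])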
|
4,452 |
add arguments
|
import logging
import math
import threading
from django.core.management.base import BaseCommand
from django.db.models import OuterRef, Subquery, F, Q
from django.db import IntegrityError
from squad.core.models import SuiteMetadata, Test
from squad.core.utils import split_list
correct_metadata = SuiteMetadata.objects.filter(kind='test', suite=OuterRef('suite__slug'), name=OuterRef('metadata__name'))
first_test = Test.objects.order_by().filter(metadata=OuterRef('pk')).annotate(correct_metadata=Subquery(correct_metadata.values('id')[:1]))
correct_slug = Test.objects.order_by().filter(metadata=OuterRef('pk')).annotate(suite_slug=F('suite__slug'))
annotations = {
'correct_metadata_id': Subquery(first_test.values('correct_metadata')[:1]),
'correct_suite_slug': Subquery(correct_slug.values('suite_slug')[:1])
}
buggy_ones = Q(suite__startswith='armeabi') | Q(suite__startswith='arm64')
logger = logging.getLogger()
STEP = 1000
class SuiteMetadataFixThread(threading.Thread):
def __init__(self, thread_id, suitemetadata_ids, show_progress=False):
threading.Thread.__init__(self)
self.thread_id = thread_id
self.suitemetadata_ids = suitemetadata_ids
self.show_progress = show_progress
def run(self):
count = len(self.suitemetadata_ids)
logger.info('[thread-%s] processing %d suitemetadata' % (self.thread_id, count))
orphan_metadata = []
for offset in range(0, count, STEP):
ids = self.suitemetadata_ids[offset:offset + STEP]
for metadata in SuiteMetadata.objects.filter(id__in=ids).annotate(**annotations).all():
# It means there's no SuiteMetadata with fixed suite, so it's safe to change it in place
if metadata.correct_metadata_id is None:
if metadata.correct_suite_slug is None:
orphan_metadata.append(metadata.id)
else:
try:
metadata.suite = metadata.correct_suite_slug
metadata.save()
except IntegrityError:
                            logger.error('There appears to be a fixed suite metadata already')
logger.error('This was not supposed to happen though, check these cases carefuly')
logger.error('SuiteMetadata (id: %d, kind=test, suite="%s", name="%s")' % (metadata.id, metadata.suite, metadata.name))
return
# It means there's a correct one, so just update tests
else:
Test.objects.order_by().filter(metadata=metadata).update(metadata_id=metadata.correct_metadata_id)
# It's safe to delete buggy metadata now
orphan_metadata.append(metadata.id)
if self.show_progress:
print('.', end='', flush=True)
if len(orphan_metadata) > 0:
logger.info('Deleting %d orphan metadata objects' % len(orphan_metadata))
chunks = split_list(orphan_metadata, chunk_size=10000)
for chunk in chunks:
SuiteMetadata.objects.filter(id__in=chunk).delete()
logger.info('[thread-%s] done updating' % self.thread_id)
class Command(BaseCommand):
help = """helper that fixes buggy SuiteMetadata objects"""
def METHOD_NAME(self, parser):
parser.add_argument(
'--show-progress',
action='store_true',
help='Prints out one dot every 1000 (one thousand) metadata processed'
)
parser.add_argument(
'--num-threads',
type=int,
default=2,
help='Number of simultaneous parallel threads to work'
)
def handle(self, *args, **options):
show_progress = options['show_progress']
num_threads = options['num_threads']
logger.info('Discovering number of metadata that need work...')
count = int(SuiteMetadata.objects.filter(buggy_ones).count())
if count == 0:
logger.info('Nothing to do!')
return
logger.info('Working on %d metadatas' % count)
metadata_ids = SuiteMetadata.objects.filter(buggy_ones).order_by('-id').values_list('id', flat=True)
chunk_size = math.floor(len(metadata_ids) / num_threads) + 1
chunks = split_list(metadata_ids, chunk_size=chunk_size)
threads = []
for chunk in chunks:
thread_id = len(threads)
thread = SuiteMetadataFixThread(thread_id, chunk, show_progress=show_progress)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
logger.info('Done updating')
# Check that everything worked as expected
count = int(SuiteMetadata.objects.filter(buggy_ones).count())
if count > 0:
logger.error('Something went wrong! %d metadata are still buggy' % count)
return
logger.info('Done!')
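    # Hypothetical invocation (the command name is this file's module name); the buggy
    # SuiteMetadata ids are split across the requested number of worker threads:
    #   python manage.py <command> --num-threads 4 --show-progress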
|
4,453 |
build model
|
#SPDX-License-Identifier: MIT
## Added imports
import re
import unicodedata
import nltk
import string
from nltk.tokenize import word_tokenize
from nltk.stem.snowball import SnowballStemmer
from bs4 import BeautifulSoup
import matplotlib.pyplot as plt
from datetime import date
##
import logging
import multiprocessing
import os
import traceback
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
from keras.layers import Dense, Input
from keras.models import Model, load_model
from scipy.spatial.distance import cosine
from skimage.filters import threshold_otsu
from sklearn import utils as skl_utils
from augur.tasks.data_analysis.message_insights.preprocess_text import \
normalize_corpus as normalize_corpus
ROOT_AUGUR_DIRECTORY = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))
train_path = os.path.join(ROOT_AUGUR_DIRECTORY, "tasks", "data_analysis", "message_insights", "train_data")
# ''' Doc2Vec model training
def METHOD_NAME(max_epochs, vec_size, alpha, tag_data):
model = Doc2Vec(vector_size=vec_size, alpha=alpha,min_alpha=0.00025, min_count=2, dm=1)
model.build_vocab(tag_data)
for epoch in range(max_epochs):
model.train(skl_utils.shuffle(tag_data),
total_examples=model.corpus_count,
epochs=model.epochs)
model.alpha -= 0.0002
model.min_alpha = model.alpha
#Doc2Vec.load(os.path.join(train_path,"doc2vec.model"))
model.save(os.path.join(train_path,"doc2vec.model"))
#logger.info("Model Saved")
return model
# '''
def autoencoder(vec_input, train):
input_dim = Input(shape = (vec_input, ))
encoded1 = Dense(vec_input//2, activation='sigmoid')(input_dim)
encoded2 = Dense(1, activation='sigmoid')(encoded1)
# Decoder Layers
decoded1 = Dense(vec_input//2, activation='tanh')(encoded2)
decoded2 = Dense(vec_input, activation='tanh')(decoded1)
# Combine Encoder and Deocder layers
model = Model(inputs = input_dim, outputs = decoded2)
# Compile the Model
model.compile(optimizer="Adam", loss="mean_squared_error", metrics=["mean_squared_error"])
model.fit(train, train, epochs = 60)
return model
def reconstruction(pred, val):
rec_error = []
for i in range(len(pred)):
rec_error.append(np.linalg.norm(pred[i] - val[i]))
rec_error = np.array(rec_error)
return rec_error
def get_normal_data(rec_error, val):
    # Otsu thresholding: choose the cut-off that maximizes the between-class variance
threshold = threshold_otsu(rec_error)
normals = []
for i in range(len(rec_error)):
if rec_error[i] < threshold:
normals.append(val[i])
normals = np.array(normals)
return threshold, normals
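# A small illustration of the two helpers above (synthetic numbers; kept as comments so
# the module import has no side effects):
#
#   vectors   = np.array([[0.0, 0.0], [0.1, 0.1], [5.0, 5.0]])
#   predicted = np.array([[0.0, 0.1], [0.1, 0.2], [0.0, 0.0]])
#   rec_error = reconstruction(predicted, vectors)            # per-row L2 reconstruction error
#   threshold, normals = get_normal_data(rec_error, vectors)
#   # rows whose error falls below the Otsu threshold (the first two here) are kept as "normal"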
''' Cosine similarity based novelty detection
def key_cosine_similarity(tupple):
return tupple[1]
def get_computed_similarities(df_present, vectors, predicted_vectors, reverse=False):
data_size = len(df_present)
cosine_similarities = []
cosine_sim_values = []
for i in range(data_size):
cosine_sim_val = (1 - cosine(vectors[i], predicted_vectors[i]))
cosine_similarities.append((df_present['msg_id'].iloc[i], cosine_sim_val))
cosine_sim_values.append(cosine_sim_val)
df_present['uniqueness_score'] = cosine_sim_values
return df_present, sorted(cosine_similarities, key=key_cosine_similarity, reverse=reverse)
def display_unique(sorted_cosine_similarities):
i=0
unique_message_list=[]
cos_val = []
index, cosine_sim_val = sorted_cosine_similarities[0]
while cosine_sim_val<=-0.1:
if cosine_sim_val not in cos_val:
unique_message_list.append(index)
cos_val.append(cosine_sim_val)
print('Message id: ', index)
print('Cosine Sim Val :', cosine_sim_val)
i+=1
index, cosine_sim_val = sorted_cosine_similarities[i]
return unique_message_list
'''
def novelty_analysis(df_message, r_id, models_dir, full_train=True):
    # Normalize text corpus
df_message['cleaned_msg_text'] = df_message['msg_text'].map(lambda x: normalize_corpus(x))
#logger.info('Normalized text corpus')
# Load pretrained Doc2Vec model
#logger.info(f'train path is: {train_path}')
#################
# building model ... need tag data
df_x = pd.DataFrame(df_message['cleaned_msg_text'])
tag_data = [TaggedDocument(str(row['cleaned_msg_text']).split(), [index]) for index, row in df_x.iterrows()]
# print(tag_data)
model = METHOD_NAME(max_epochs=100, vec_size=300, alpha=0.01, tag_data=tag_data)
today=datetime.today()
timer = today - timedelta(days=45)
timerstr = timer.strftime('%Y-%m-%d')
df_past = df_message[df_message['msg_timestamp'].astype(str)< timerstr]
df_present = df_message[df_message['msg_timestamp'].astype(str)>= timerstr]
doc2vec_vectors = np.array([model.infer_vector(str(row['cleaned_msg_text']).split())for index, row in df_past.iterrows()])
#####################
#####################
dvmodel = METHOD_NAME(max_epochs=100, vec_size=300, alpha=0.01, tag_data=tag_data)
dvmodel.save(f'{models_dir}/doc2vec.model')
d2v_model = Doc2Vec.load(os.path.join(train_path,"doc2vec.model"))
doc2vec_vectors = np.array([d2v_model.infer_vector(str(row['cleaned_msg_text']).split())for index, row in df_message.iterrows()])
#logger.info('Doc2Vec vectorization done')
encoder_length=len(doc2vec_vectors)
####################
# Trains the AE model when worker runs first time
if full_train:
# First autoencoder to identify normal data records
ae1 = autoencoder(300, doc2vec_vectors)
#logger.info('AE 1 training done')
pred_train = ae1.predict(doc2vec_vectors)
_rec_error1 = reconstruction(pred_train, doc2vec_vectors)
_, normal_data = get_normal_data(_rec_error1, doc2vec_vectors)
# Second autoencoder to decide threshold using otsu
ae = autoencoder(300, normal_data)
#logger.info('AE 2 training done')
predicted_vectors = ae.predict(doc2vec_vectors)
rec_error = reconstruction(predicted_vectors, doc2vec_vectors)
threshold, _ = get_normal_data(rec_error, doc2vec_vectors)
# Save final model for future
ae.save(f'{models_dir}/{r_id}_uniq.h5')
# Pretrained AE model already exists, directly predict
else:
threshold = 0
ae = load_model(f'{models_dir}/{r_id}_uniq.h5')
#logger.info('Loaded pretrained AE model for repo')
# Fitting on present data
predicted_vectors_test = ae.predict(doc2vec_vectors)
rec_error = reconstruction(predicted_vectors_test, doc2vec_vectors)
return (threshold, np.array(rec_error))
|
4,454 |
test grid combo savewasm
|
# LICENSE
#
# _This file is Copyright 2018 by the Image Processing and Analysis Group (BioImage Suite Team). Dept. of Radiology & Biomedical Imaging, Yale School of Medicine._
#
# BioImage Suite Web is licensed under the Apache License, Version 2.0 (the "License");
#
# - you may not use this software except in compliance with the License.
# - You may obtain a copy of the License at [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0)
#
# __Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.__
#
# ENDLICENSE
import os
import sys
import numpy as np
import unittest
my_path=os.path.dirname(os.path.realpath(__file__));
sys.path.insert(0,os.path.abspath(my_path+'/../'));
import biswebpython.core.bis_objects as bis
import biswebpython.core.bis_baseutils as bis_baseutils;
libbiswasm=bis_baseutils.getDynamicLibraryWrapper();
class TestTensorGrid(unittest.TestCase):
def setUp(self):
print(' --------------------------------------------------')
self.imgnames = [ 'MNI_2mm_orig.nii.gz',
'MNI_2mm_scaled.nii.gz',
'MNI_6mm.nii.gz',
'MNI_6mm_scaleddispfield.nii.gz'
];
self.images = [0,0,0,0];
for i in range(0,4):
name=my_path+'/../test/testdata/'+self.imgnames[i];
self.images[i]=bis.bisImage().load(name)
print('__ loaded image',i, 'from ', name,'dims=',self.images[i].dimensions,self.images[i].spacing,self.images[i].dimensions,self.images[i].get_data().dtype);
print('----------------------------------------------------------')
def test_grid_combo(self):
print(' --------------------------------------------------')
print('test_grid/combo');
filename=os.path.abspath(my_path+"/../test/testdata/MNI_2mm_scaled.grd");
combo=bis.bisComboTransformation();
combo.load(filename);
bsplinegrid=combo.grids[0];
pt370=[ -1.5743, -0.0616, -1.1677 ];
g=bsplinegrid.get_data();
n=bsplinegrid.getNumberOfControlPoints();
disp = [ g[370],g[370+n],g[370+2*n]];
error=0.0;
print('pt370=',pt370,' disp=',disp);
for k in range(0,3):
error+=abs(disp[k]-pt370[k]);
print("++++ checking bspline grid loading error0=",error);
obj = { "dimensions" : self.images[2].dimensions,
"spacing" : self.images[2].spacing,
};
wasm_out_g=libbiswasm.computeDisplacementFieldWASM(combo.grids[0],obj,1);
wasm_out_c=libbiswasm.computeDisplacementFieldWASM(combo,obj,1);
print('wasm_out_g=',wasm_out_g.dimensions);
print('wasm_out_c=',wasm_out_c.dimensions);
print('gold=',self.images[3].dimensions);
x=wasm_out_g.get_data()-self.images[3].get_data();
err_g=max(-x.min(), x.max());
x=wasm_out_c.get_data()-self.images[3].get_data();
err_c=max(-x.min(), x.max());
x=wasm_out_c.get_data()-wasm_out_g.get_data();
err_cg=max(-x.min(), x.max());
print('error: grid=',err_g, 'combo=', err_c,' combo-grid=',err_cg);
success=False;
if err_g<0.01 and err_c<0.01 and err_cg<0.01 and error<0.001:
success=True;
self.assertEqual(success,True);
def test_grid_combo_loadwasm(self):
print(' --------------------------------------------------')
print('test_grid/combo load wasm');
filename=os.path.abspath(my_path+"/../test/testdata/complex.grd");
with open(filename, 'r') as file:
text=file.read()
print('read file length=',len(text));
data = [ [ 20, 0.5250, 1.5128, 0.2732 ],
[ 47, -0.6805, 1.3356, 0.6628 ],
[ 9850, -0.3057, -1.4673, -0.1346 ]];
linear = np.array([
[ 0.999, -0.044, -0.021, 2.691 ],
[ 0.045, 0.998, 0.035, -0.860 ],
[ 0.020, -0.036, 0.999, 0.552 ],
[ 0.000, 0.000, 0.000, 1.000 ]]);
combo=libbiswasm.parseComboTransformTextFileWASM(text,0);
dl=combo.linear.data_array.flatten()-linear.flatten();
error=max(np.amax(dl),-np.amin(dl));
print('abs max error of linear=',error);
bsplinegrid=combo.grids[0];
g=bsplinegrid.get_data();
n=bsplinegrid.getNumberOfControlPoints();
print('lendata=',len(data));
error=0.0;
for i in range(0,len(data)):
cp=int(data[i][0]);
i_data = [ data[i][1],data[i][2],data[i][3] ];
o_data = [ g[cp],g[cp+n],g[cp+2*n]];
error=0.0;
print('pt',cp,' gold=',i_data,' grd=',o_data);
for k in range(0,3):
error+=abs(i_data[k]-o_data[k]);
print("++++ checking bspline grid loading error0=",error);
success=False;
if (error<0.001):
success=True;
self.assertEqual(success,True);
def METHOD_NAME(self):
filename=os.path.abspath(my_path+"/../test/testdata/complex.grd");
with open(filename, 'r') as file:
text=file.read()
print('=====================================================');
print('read file length=',len(text));
combo=bis.bisComboTransformation();
combo.load(filename);
s=libbiswasm.createComboTransformationTextFileWASM(combo);
combo2=libbiswasm.parseComboTransformTextFileWASM(s);
bsplinegrid=combo.grids[0];
bsplinegrid2=combo2.grids[0];
n=bsplinegrid.getNumberOfControlPoints();
grid_data1=bsplinegrid.get_data();
grid_data2=bsplinegrid2.get_data();
error=0.0;
data = [ 5,77,104,2100 , 5747 ];
for i in range(0,len(data)):
cp=int(data[i]);
i_data = [ grid_data2[cp],grid_data2[cp+n],grid_data2[cp+2*n]];
o_data = [ grid_data1[cp],grid_data1[cp+n],grid_data1[cp+2*n]];
error=0.0;
print('pt',cp,' gold=',i_data,' grd=',o_data);
for k in range(0,3):
error+=abs(i_data[k]-o_data[k]);
print("++++ checking bspline grid loading serializing and deserializing error0=",error);
success=False;
if (error<0.001):
success=True;
self.assertEqual(success,True);
if __name__ == '__main__':
TestTensorGrid().main()
|
4,455 |
send2trash
|
# Copyright 2017 Virgil Dupras
# This software is licensed under the "BSD" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.hardcoded.net/licenses/bsd_license
# This is a reimplementation of plat_other.py with reference to the
# freedesktop.org trash specification:
# [1] http://www.freedesktop.org/wiki/Specifications/trash-spec
# [2] http://www.ramendik.ru/docs/trashspec.html
# See also:
# [3] http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
#
# For external volumes this implementation will raise an exception if it can't
# find or create the user's trash directory.
from __future__ import unicode_literals
import errno
import sys
import os
import os.path as op
from datetime import datetime
import stat
try:
from urllib.parse import quote
except ImportError:
# Python 2
from urllib import quote
from .compat import text_type, environb
from .exceptions import TrashPermissionError
try:
fsencode = os.fsencode # Python 3
fsdecode = os.fsdecode
except AttributeError:
def fsencode(u): # Python 2
return u.encode(sys.getfilesystemencoding())
def fsdecode(b):
return b.decode(sys.getfilesystemencoding())
# The Python 3 versions are a bit smarter, handling surrogate escapes,
# but these should work in most cases.
FILES_DIR = b'files'
INFO_DIR = b'info'
INFO_SUFFIX = b'.trashinfo'
# Default of ~/.local/share [3]
XDG_DATA_HOME = op.expanduser(environb.get(b'XDG_DATA_HOME', b'~/.local/share'))
HOMETRASH_B = op.join(XDG_DATA_HOME, b'Trash')
HOMETRASH = fsdecode(HOMETRASH_B)
uid = os.getuid()
TOPDIR_TRASH = b'.Trash'
TOPDIR_FALLBACK = b'.Trash-' + text_type(uid).encode('ascii')
def is_parent(parent, path):
path = op.realpath(path) # In case it's a symlink
if isinstance(path, text_type):
path = fsencode(path)
parent = op.realpath(parent)
if isinstance(parent, text_type):
parent = fsencode(parent)
return path.startswith(parent)
def format_date(date):
return date.strftime("%Y-%m-%dT%H:%M:%S")
def info_for(src, topdir):
# ...it MUST not include a ".." directory, and for files not "under" that
# directory, absolute pathnames must be used. [2]
if topdir is None or not is_parent(topdir, src):
src = op.abspath(src)
else:
src = op.relpath(src, topdir)
info = "[Trash Info]\n"
info += "Path=" + quote(src) + "\n"
info += "DeletionDate=" + format_date(datetime.now()) + "\n"
return info
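# For a file trashed from the volume holding the trash directory, info_for() yields a
# .trashinfo payload along these lines (path and timestamp are illustrative):
#   [Trash Info]
#   Path=Documents/report.txt
#   DeletionDate=2017-06-01T12:34:56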
def check_create(dir):
# use 0700 for paths [3]
if not op.exists(dir):
os.makedirs(dir, 0o700)
def trash_move(src, dst, topdir=None):
filename = op.basename(src)
filespath = op.join(dst, FILES_DIR)
infopath = op.join(dst, INFO_DIR)
base_name, ext = op.splitext(filename)
counter = 0
destname = filename
while op.exists(op.join(filespath, destname)) or op.exists(op.join(infopath, destname + INFO_SUFFIX)):
counter += 1
destname = base_name + b' ' + text_type(counter).encode('ascii') + ext
check_create(filespath)
check_create(infopath)
os.rename(src, op.join(filespath, destname))
f = open(op.join(infopath, destname + INFO_SUFFIX), 'w')
f.write(info_for(src, topdir))
f.close()
def find_mount_point(path):
# Even if something's wrong, "/" is a mount point, so the loop will exit.
# Use realpath in case it's a symlink
path = op.realpath(path) # Required to avoid infinite loop
while not op.ismount(path):
path = op.split(path)[0]
return path
def find_ext_volume_global_trash(volume_root):
# from [2] Trash directories (1) check for a .Trash dir with the right
# permissions set.
trash_dir = op.join(volume_root, TOPDIR_TRASH)
if not op.exists(trash_dir):
return None
mode = os.lstat(trash_dir).st_mode
# vol/.Trash must be a directory, cannot be a symlink, and must have the
# sticky bit set.
if not op.isdir(trash_dir) or op.islink(trash_dir) or not (mode & stat.S_ISVTX):
return None
trash_dir = op.join(trash_dir, text_type(uid).encode('ascii'))
try:
check_create(trash_dir)
except OSError:
return None
return trash_dir
def find_ext_volume_fallback_trash(volume_root):
# from [2] Trash directories (1) create a .Trash-$uid dir.
trash_dir = op.join(volume_root, TOPDIR_FALLBACK)
# Try to make the directory, if we lack permission, raise TrashPermissionError
try:
check_create(trash_dir)
except OSError as e:
if e.errno == errno.EACCES:
raise TrashPermissionError(e.filename)
raise
return trash_dir
def find_ext_volume_trash(volume_root):
trash_dir = find_ext_volume_global_trash(volume_root)
if trash_dir is None:
trash_dir = find_ext_volume_fallback_trash(volume_root)
return trash_dir
# Pull this out so it's easy to stub (to avoid stubbing lstat itself)
def get_dev(path):
return os.lstat(path).st_dev
def METHOD_NAME(path):
if isinstance(path, text_type):
path_b = fsencode(path)
elif isinstance(path, bytes):
path_b = path
elif hasattr(path, '__fspath__'):
# Python 3.6 PathLike protocol
return METHOD_NAME(path.__fspath__())
else:
raise TypeError('str, bytes or PathLike expected, not %r' % type(path))
if not op.exists(path_b):
raise OSError("File not found: %s" % path)
# ...should check whether the user has the necessary permissions to delete
# it, before starting the trashing operation itself. [2]
if not os.access(path_b, os.W_OK):
raise OSError("Permission denied: %s" % path)
# if the file to be trashed is on the same device as HOMETRASH we
# want to move it there.
path_dev = get_dev(path_b)
# If XDG_DATA_HOME or HOMETRASH do not yet exist we need to stat the
# home directory, and these paths will be created further on if needed.
trash_dev = get_dev(op.expanduser(b'~'))
if path_dev == trash_dev:
topdir = XDG_DATA_HOME
dest_trash = HOMETRASH_B
else:
topdir = find_mount_point(path_b)
trash_dev = get_dev(topdir)
if trash_dev != path_dev:
raise OSError("Couldn't find mount point for %s" % path)
dest_trash = find_ext_volume_trash(topdir)
trash_move(path_b, dest_trash, topdir)
|
4,456 |
test nonnull lang
|
import unittest
from email import _encoded_words as _ew
from email import errors
from test.test_email import TestEmailBase
class TestDecodeQ(TestEmailBase):
def _test(self, source, ex_result, ex_defects=[]):
result, defects = _ew.decode_q(source)
self.assertEqual(result, ex_result)
self.assertDefectsEqual(defects, ex_defects)
def test_no_encoded(self):
self._test(b'foobar', b'foobar')
def test_spaces(self):
self._test(b'foo=20bar=20', b'foo bar ')
self._test(b'foo_bar_', b'foo bar ')
def test_run_of_encoded(self):
self._test(b'foo=20=20=21=2Cbar', b'foo !,bar')
class TestDecodeB(TestEmailBase):
def _test(self, source, ex_result, ex_defects=[]):
result, defects = _ew.decode_b(source)
self.assertEqual(result, ex_result)
self.assertDefectsEqual(defects, ex_defects)
def test_simple(self):
self._test(b'Zm9v', b'foo')
def test_missing_padding(self):
self._test(b'dmk', b'vi', [errors.InvalidBase64PaddingDefect])
def test_invalid_character(self):
self._test(b'dm\x01k===', b'vi', [errors.InvalidBase64CharactersDefect])
def test_invalid_character_and_bad_padding(self):
self._test(b'dm\x01k', b'vi', [errors.InvalidBase64CharactersDefect,
errors.InvalidBase64PaddingDefect])
class TestDecode(TestEmailBase):
def test_wrong_format_input_raises(self):
with self.assertRaises(ValueError):
_ew.decode('=?badone?=')
with self.assertRaises(ValueError):
_ew.decode('=?')
with self.assertRaises(ValueError):
_ew.decode('')
def _test(self, source, result, charset='us-ascii', lang='', defects=[]):
res, char, l, d = _ew.decode(source)
self.assertEqual(res, result)
self.assertEqual(char, charset)
self.assertEqual(l, lang)
self.assertDefectsEqual(d, defects)
def test_simple_q(self):
self._test('=?us-ascii?q?foo?=', 'foo')
def test_simple_b(self):
self._test('=?us-ascii?b?dmk=?=', 'vi')
def test_q_case_ignored(self):
self._test('=?us-ascii?Q?foo?=', 'foo')
def test_b_case_ignored(self):
self._test('=?us-ascii?B?dmk=?=', 'vi')
def test_non_trivial_q(self):
self._test('=?latin-1?q?=20F=fcr=20Elise=20?=', ' Für Elise ', 'latin-1')
def test_q_escaped_bytes_preserved(self):
self._test(b'=?us-ascii?q?=20\xACfoo?='.decode('us-ascii',
'surrogateescape'),
' \uDCACfoo',
defects = [errors.UndecodableBytesDefect])
def test_b_undecodable_bytes_ignored_with_defect(self):
self._test(b'=?us-ascii?b?dm\xACk?='.decode('us-ascii',
'surrogateescape'),
'vi',
defects = [
errors.InvalidBase64CharactersDefect,
errors.InvalidBase64PaddingDefect])
def test_b_invalid_bytes_ignored_with_defect(self):
self._test('=?us-ascii?b?dm\x01k===?=',
'vi',
defects = [errors.InvalidBase64CharactersDefect])
def test_b_invalid_bytes_incorrect_padding(self):
self._test('=?us-ascii?b?dm\x01k?=',
'vi',
defects = [
errors.InvalidBase64CharactersDefect,
errors.InvalidBase64PaddingDefect])
def test_b_padding_defect(self):
self._test('=?us-ascii?b?dmk?=',
'vi',
defects = [errors.InvalidBase64PaddingDefect])
def METHOD_NAME(self):
self._test('=?us-ascii*jive?q?test?=', 'test', lang='jive')
def test_unknown_8bit_charset(self):
self._test('=?unknown-8bit?q?foo=ACbar?=',
b'foo\xacbar'.decode('ascii', 'surrogateescape'),
charset = 'unknown-8bit',
defects = [])
def test_unknown_charset(self):
self._test('=?foobar?q?foo=ACbar?=',
b'foo\xacbar'.decode('ascii', 'surrogateescape'),
charset = 'foobar',
# XXX Should this be a new Defect instead?
defects = [errors.CharsetError])
def test_q_nonascii(self):
self._test('=?utf-8?q?=C3=89ric?=',
'Éric',
charset='utf-8')
class TestEncodeQ(TestEmailBase):
def _test(self, src, expected):
self.assertEqual(_ew.encode_q(src), expected)
def test_all_safe(self):
self._test(b'foobar', 'foobar')
def test_spaces(self):
self._test(b'foo bar ', 'foo_bar_')
def test_run_of_encodables(self):
self._test(b'foo ,,bar', 'foo__=2C=2Cbar')
class TestEncodeB(TestEmailBase):
def test_simple(self):
self.assertEqual(_ew.encode_b(b'foo'), 'Zm9v')
def test_padding(self):
self.assertEqual(_ew.encode_b(b'vi'), 'dmk=')
class TestEncode(TestEmailBase):
def test_q(self):
self.assertEqual(_ew.encode('foo', 'utf-8', 'q'), '=?utf-8?q?foo?=')
def test_b(self):
self.assertEqual(_ew.encode('foo', 'utf-8', 'b'), '=?utf-8?b?Zm9v?=')
def test_auto_q(self):
self.assertEqual(_ew.encode('foo', 'utf-8'), '=?utf-8?q?foo?=')
def test_auto_q_if_short_mostly_safe(self):
self.assertEqual(_ew.encode('vi.', 'utf-8'), '=?utf-8?q?vi=2E?=')
def test_auto_b_if_enough_unsafe(self):
self.assertEqual(_ew.encode('.....', 'utf-8'), '=?utf-8?b?Li4uLi4=?=')
def test_auto_b_if_long_unsafe(self):
self.assertEqual(_ew.encode('vi.vi.vi.vi.vi.', 'utf-8'),
'=?utf-8?b?dmkudmkudmkudmkudmku?=')
def test_auto_q_if_long_mostly_safe(self):
self.assertEqual(_ew.encode('vi vi vi.vi ', 'utf-8'),
'=?utf-8?q?vi_vi_vi=2Evi_?=')
def test_utf8_default(self):
self.assertEqual(_ew.encode('foo'), '=?utf-8?q?foo?=')
def test_lang(self):
self.assertEqual(_ew.encode('foo', lang='jive'), '=?utf-8*jive?q?foo?=')
def test_unknown_8bit(self):
self.assertEqual(_ew.encode('foo\uDCACbar', charset='unknown-8bit'),
'=?unknown-8bit?q?foo=ACbar?=')
if __name__ == '__main__':
unittest.main()
|
4,457 |
get default device assignment
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""XLA LocalClient interface for interacting with TPUs via the TPU driver."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
from tensorflow.compiler.xla.python import xla_client
from tensorflow.compiler.xla.python import xla_extension as _xla
from tensorflow.compiler.xla.python.tpu_driver.client import tpu_client_extension as _tpu_client
class TpuBackend(xla_client.Backend):
"""XLA backend implemented using the Tpu driver API."""
# Cache the backends to prevent double driver initializations.
_local_backend = None
def __init__(self, client):
"""Creates a new TpuBackend.
Args:
client: A _tpu_client.TpuClient object.
"""
super(TpuBackend, self).__init__('tpu')
self.client = client
@staticmethod
def create(worker=None, force=False):
# `force` == True will skip caching any backends (if applicable) and will
# always try to create a new client.
if worker is None:
raise ValueError(
'Failed to create TpuBackend. The `worker` parameter must not be '
'`None`. Use `local` to connect to a local TPU or '
'`grpc://host:port` to connect to a remote TPU.')
if worker == 'local' or 'local://' in worker:
# We usually want to cache for local backends to prevent double
# initialization, except where `force` == True.
if worker == 'local':
worker = 'local://'
if force:
return TpuBackend(_tpu_client.TpuClient.Get(worker))
if TpuBackend._local_backend is None:
logging.info('Starting the local TPU driver.')
TpuBackend._local_backend = TpuBackend(
_tpu_client.TpuClient.Get(worker))
return TpuBackend._local_backend
else:
# We do not cache for non-local backends.
return TpuBackend(_tpu_client.TpuClient.Get(worker))
def device_count(self):
return self.client.device_count()
def local_device_count(self):
return self.client.local_device_count()
def local_devices(self):
return self.client.local_devices()
def devices(self):
return self.client.devices()
def host_id(self):
return self.client.host_id()
def buffer_from_pyval(self, pyval, device=None, force_copy=False):
if device is None:
device = self.client.local_devices()[0]
return _tpu_client.PyTpuBuffer.from_python(pyval, self.client, device)
def make_tuple(self, c_buffers, device):
return _tpu_client.PyTpuBuffer.make_tuple(c_buffers, self.client, device)
def compile(self, c_computation, compile_options):
options = _xla.ExecutableBuildOptions()
options.num_replicas = compile_options.num_replicas
options.num_partitions = compile_options.num_partitions
if compile_options.result_layout:
options.result_layout = compile_options.result_layout
options.debug_options.xla_cpu_fast_math_honor_infs = True
options.debug_options.xla_cpu_fast_math_honor_nans = True
options.debug_options.xla_cpu_fast_math_honor_division = True
options.debug_options.xla_cpu_fast_math_honor_functions = True
options.debug_options.xla_gpu_enable_fast_min_max = False
return _tpu_client.TpuExecutable.Compile(c_computation,
compile_options.argument_layouts,
options, self.client,
compile_options.device_assignment)
def METHOD_NAME(self, num_replicas, num_partitions=None):
if num_partitions is not None:
return self.client.GetDefaultDeviceAssignment(num_replicas,
num_partitions)
else:
# TODO(henrytan): delete this case after all callers can handle 2D output
return self.client.GetDefaultDeviceAssignment(num_replicas)
def serialize(self, executable):
return self.client.SerializeExecutable(executable)
def deserialize(self, serialized_executable):
return self.client.DeserializeExecutable(serialized_executable, self.client)
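# --- Hedged usage sketch (not part of the original module) ---
# A minimal illustration of creating this backend and querying device counts.
# It assumes a TPU is actually attached; 'local' is the worker string the
# create() docs above describe for a TPU on this host.
if __name__ == '__main__':
  backend = TpuBackend.create(worker='local')
  logging.info('local TPU devices visible: %d', backend.local_device_count())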
|
4,458 |
parse
|
import functools
from collections.abc import Mapping
from typing import Any, Callable, Optional
from flask.views import MethodView
from marshmallow import Schema, exceptions as ma_exceptions
from webargs.core import (
_UNKNOWN_DEFAULT_PARAM,
ArgMap,
Request,
ValidateArg,
_ensure_list_of_callables,
)
from webargs.flaskparser import FlaskParser
class ResourceReadingParser(FlaskParser):
"""A version of FlaskParser that can access the resource object it decorates"""
def use_args(
self,
argmap: ArgMap,
req: Optional[Request] = None,
*,
location: Optional[str] = None,
unknown: Optional[str] = _UNKNOWN_DEFAULT_PARAM, # pylint: disable=unused-argument
as_kwargs: bool = False,
validate: Optional[ValidateArg] = None,
error_status_code: Optional[int] = None,
error_headers: Optional[Mapping[str, str]] = None,
) -> Callable:
"""Decorator that injects parsed arguments into a view function or method.
Edited from core parser to include the resource object
"""
location = location or self.location
request_obj = req
# Optimization: If argmap is passed as a dictionary, we only need
# to generate a Schema once
if isinstance(argmap, Mapping):
argmap = Schema.from_dict(argmap)()
def decorator(func: Callable) -> Callable:
req_ = request_obj
@functools.wraps(func)
def wrapper(*args: Any, **kwargs: Any) -> Callable:
req_obj = req_
if not req_obj:
req_obj = self.get_request_from_view_args(func, args, kwargs) # pylint: disable=assignment-from-none # noqa: E501
# NOTE: At this point, argmap may be a Schema, or a callable
parsed_args = self.METHOD_NAME(
args[0], # This should be the self of the resource object
argmap,
req=req_obj,
location=location,
validate=validate,
error_status_code=error_status_code,
error_headers=error_headers,
)
args, kwargs = self._update_args_kwargs( # type: ignore
args, kwargs, parsed_args, as_kwargs, # type: ignore
)
return func(*args, **kwargs)
wrapper.__wrapped__ = func # type: ignore
return wrapper
return decorator
def METHOD_NAME( # type: ignore # we have added the resource_object on top of parse
self,
resource_object: MethodView,
argmap: ArgMap,
req: Optional[Request] = None,
*,
location: Optional[str] = None,
unknown: Optional[str] = _UNKNOWN_DEFAULT_PARAM, # pylint: disable=unused-argument
validate: Optional[ValidateArg] = None,
error_status_code: Optional[int] = None,
error_headers: Optional[Mapping[str, str]] = None,
) -> dict[str, Any]:
"""Main request parsing method.
Different from core parser is that we also get the resource object and
pass it to the schema
"""
req = req if req is not None else self.get_default_request() # type: ignore
location = location or self.location
if req is None:
raise ValueError('Must pass req object')
data = None
validators = _ensure_list_of_callables(validate)
schema = self._get_schema(argmap, resource_object)
try:
location_data = self._load_location_data(
schema=schema, req=req, location=location,
)
data = schema.load(location_data)
self._validate_arguments(data, validators)
except ma_exceptions.ValidationError as error:
self._on_validation_error(
error,
req,
schema,
location,
error_status_code=error_status_code,
error_headers=error_headers,
)
return data
def _get_schema(
self,
argmap: ArgMap,
resource_object: Request,
) -> Schema:
"""Override the behaviour of the standard parser.
Initialize Schema with a callable that gets the resource object as argument"""
assert callable(argmap), 'Should only use this parser with a callable'
schema = argmap(resource_object)
return schema
class IgnoreKwargAfterPostLoadParser(FlaskParser):
"""A version of FlaskParser that does not augment with kwarg arguments after post_load"""
@staticmethod
def _update_args_kwargs( # type: ignore
args: tuple,
kwargs: dict[str, Any],
parsed_args: Mapping,
as_kwargs: bool,
) -> tuple[tuple, Mapping]:
return args, parsed_args
resource_parser = ResourceReadingParser()
ignore_kwarg_parser = IgnoreKwargAfterPostLoadParser()
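# --- Hedged usage sketch (not part of the original module) ---
# The argmap handed to ResourceReadingParser must be a callable that receives
# the resource (the MethodView instance) and returns a Schema; _ExampleSchema
# and _ExampleView below are hypothetical names used only for illustration.
from marshmallow import fields as _example_fields
class _ExampleSchema(Schema):
    name = _example_fields.String(required=True)
class _ExampleView(MethodView):
    @resource_parser.use_args(lambda resource: _ExampleSchema(), location="json")
    def post(self, args: dict) -> dict:
        # `args` has already been loaded and validated by _ExampleSchema
        return {"created": args["name"]}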
|
4,459 |
test arm composite vld
|
import platform
import unittest
import gtirb
import snippets
class DataAccessTests(unittest.TestCase):
@unittest.skipUnless(
platform.system() == "Linux", "This test is linux only."
)
def test_x86_simple(self):
module = snippets.asm_to_gtirb(
"""
.access:
movl .data0(%rip), %eax
jmp .end
.data0:
.long 0
.end:
"""
)
accesses = snippets.parse_souffle_output(
module, "arch.simple_data_load"
)
self.assertIn(
(
next(module.symbols_named(".access")).referent.address,
next(module.symbols_named(".data0")).referent.address,
4,
),
accesses,
)
@unittest.skipUnless(
platform.system() == "Linux", "This test is linux only."
)
def test_x86_composite(self):
module = snippets.asm_to_gtirb(
"""
.ref:
leaq .data0(%rip), %rax
.load:
mov (.data1 - .data0)(%rax), %eax
jmp .end
.data0:
.long 0
.data1:
.long 0
.end:
"""
)
accesses = snippets.parse_souffle_output(
module, "composite_data_access"
)
self.assertIn(
(
next(module.symbols_named(".ref")).referent.address,
next(module.symbols_named(".load")).referent.address,
next(module.symbols_named(".data1")).referent.address,
4,
),
accesses,
)
@unittest.skipUnless(
platform.system() == "Linux", "This test is linux only."
)
def test_arm_simple(self):
module = snippets.asm_to_gtirb(
"""
.access:
ldr r0, .data0
.data0:
.long 0
.end:
""",
arch=gtirb.Module.ISA.ARM,
)
accesses = snippets.parse_souffle_output(
module, "arch.simple_data_load"
)
self.assertIn(
(
next(module.symbols_named(".access")).referent.address,
next(module.symbols_named(".data0")).referent.address,
4,
),
accesses,
)
@unittest.skipUnless(
platform.system() == "Linux", "This test is linux only."
)
def test_arm_composite_ldr(self):
module = snippets.asm_to_gtirb(
"""
.ref:
adr r0, .data0
.load:
ldr r0, [r0, #.data1-.data0]
.data0:
.long 0
.data1:
.long 0
.end:
""",
arch=gtirb.Module.ISA.ARM,
)
accesses = snippets.parse_souffle_output(
module, "composite_data_access"
)
self.assertIn(
(
next(module.symbols_named(".ref")).referent.address,
next(module.symbols_named(".load")).referent.address,
next(module.symbols_named(".data1")).referent.address,
4,
),
accesses,
)
@unittest.skipUnless(
platform.system() == "Linux", "This test is linux only."
)
def test_arm_composite_ldm(self):
module = snippets.asm_to_gtirb(
"""
.ref:
adr r0, .data0
.load:
ldm r0, {r0, r1, r2}
.data0:
.long 0
.long 1
.long 2
.end:
""",
arch=gtirb.Module.ISA.ARM,
)
accesses = snippets.parse_souffle_output(
module, "composite_data_access"
)
self.assertIn(
(
next(module.symbols_named(".ref")).referent.address,
next(module.symbols_named(".load")).referent.address,
next(module.symbols_named(".data0")).referent.address,
12,
),
accesses,
)
@unittest.skipUnless(
platform.system() == "Linux", "This test is linux only."
)
def METHOD_NAME(self):
module = snippets.asm_to_gtirb(
"""
.ref:
adr r0, .data0
.load:
vld1.8 {d0}, [r0]
b .end
.data0:
.byte 0
.align 2
.end:
""",
arch=gtirb.Module.ISA.ARM,
)
accesses = snippets.parse_souffle_output(
module, "composite_data_access"
)
self.assertIn(
(
next(module.symbols_named(".ref")).referent.address,
next(module.symbols_named(".load")).referent.address,
next(module.symbols_named(".data0")).referent.address,
8,
),
accesses,
)
|
4,460 |
refresh token grant
|
from datetime import timedelta
from typing import Union
from uuid import UUID, uuid4
import sqlalchemy as sa
import sqlalchemy.orm as so
from flask import Response, jsonify, request
from werkzeug.security import check_password_hash
import db.models as dbm
from common import getnow
from db import dbsession
from errors.oauth2 import InvalidGrant, InvalidRequest, UnsupportedGrantType
from utils.check import raise_if, raise_if_none
from utils.token import LoadedAccessToken
class OAuth2:
def __call__(self):
"""Handles OAuth2 authentication"""
# get grant_type
if request.is_json:
grant_type = request.json.get("grant_type")
elif request.mimetype == "application/x-www-form-urlencoded":
grant_type = request.form.get("grant_type")
else:
grant_type = request.headers.get("grant_type")
if grant_type == "password":
# password grant
if request.is_json:
username = request.json.get("username")
password = request.json.get("password")
elif request.mimetype == "application/x-www-form-urlencoded":
username = request.form.get("username")
password = request.form.get("password")
else:
username = request.headers.get("username")
password = request.headers.get("password")
raise_if_none(
username,
InvalidRequest,
'Request was missing the "username" parameter.',
)
raise_if_none(
password,
InvalidRequest,
'Request was missing the "password" parameter.',
)
return self.password_grant(username, password)
if grant_type == "refresh_token":
# refresh token grant
if request.is_json:
refresh_token = request.json.get("refresh_token")
elif request.mimetype == "application/x-www-form-urlencoded":
refresh_token = request.form.get("refresh_token")
else:
refresh_token = request.headers.get("refresh_token")
raise_if_none(
refresh_token,
InvalidRequest,
'Request was missing the "refresh_token" parameter.',
)
try:
refresh_token = UUID(refresh_token)
except ValueError:
raise InvalidGrant("Refresh token is invalid.")
return self.METHOD_NAME(refresh_token)
# unknown grant
raise UnsupportedGrantType(
"{} is not a supported grant type.".format(grant_type)
)
@staticmethod
@dbsession
def password_grant(username: str, password: str, session: so.Session):
"""Implements logic for password grant."""
orm_user = dbm.User.get(
session, username, InvalidGrant, "Username or password is invalid."
)
# check password is valid
is_valid = check_password_hash(orm_user.password_hash, password)
raise_if(not is_valid, InvalidGrant, "Username or password is invalid.")
# generate token
access_token = LoadedAccessToken(
orm_user.id, orm_user.username, orm_user.scope or {}
).encode()
refresh_token = OAuth2.generate_refresh_token(orm_user.id, session)
return OAuth2.success_response(access_token, refresh_token)
@staticmethod
@dbsession
def METHOD_NAME(old_refresh_token: UUID, session: so.Session):
"""Implements logic for refresh token grant."""
# check token exists in database and get expire time and user id
old_token_document = session.execute(
sa.select(dbm.Refreshtoken).where(
dbm.Refreshtoken.token == old_refresh_token
)
).scalar_one_or_none()
raise_if_none(old_token_document, InvalidGrant, "Refresh token is invalid.")
# check token is not expired
expire_time = old_token_document.expire_time
raise_if(expire_time < getnow(), InvalidGrant, "Refresh token is expired.")
# check user exists
orm_user = old_token_document.user
dbm.User.check(orm_user, InvalidGrant, "Refresh token is invalid.")
# generate token
access_token = LoadedAccessToken(
orm_user.id, orm_user.username, orm_user.scope or {}
).encode()
refresh_token = OAuth2.generate_refresh_token(orm_user.id, session)
# delete old refresh token from database
session.delete(old_token_document)
session.execute(
sa.delete(dbm.Refreshtoken).where(dbm.Refreshtoken.expire_time < getnow())
)
return OAuth2.success_response(access_token, refresh_token)
@staticmethod
def generate_refresh_token(user_id: UUID, session: so.Session) -> UUID:
"""Generate and store refresh token in database.
:param user_id: id of user to associate the refresh token with
:return: a UUID4 refresh token
"""
refresh_token = uuid4()
# TODO: fetch "now" from database
refresh_token_db = dbm.Refreshtoken(
token=refresh_token,
expire_time=getnow() + timedelta(days=30),
)
        # We set the user_id explicitly (instead of appending the refresh token to
        # the user's list of refresh tokens) so we avoid fetching and keeping in
        # memory every refresh token of the current user (we don't need them).
refresh_token_db.user_id = user_id
session.add(refresh_token_db)
return refresh_token
@staticmethod
def success_response(
access_token: str, refresh_token: Union[str, dbm.Refreshtoken]
) -> Response:
"""Create a response when grant success."""
if isinstance(refresh_token, dbm.Refreshtoken):
refresh_token = refresh_token.token
response = jsonify(
{
"access_token": access_token,
"token_type": "bearer",
"expires_in": int(LoadedAccessToken.expire_time_delta.total_seconds()),
"refresh_token": refresh_token,
}
)
response.headers["Cache-Control"] = "no-store"
response.headers["Pragma"] = "no-cache"
return response
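# --- Hedged wiring sketch (not part of the original module) ---
# A callable OAuth2 instance is meant to back the token endpoint: it reads
# grant_type plus credentials from the incoming request and dispatches to the
# password or refresh-token grant. The Flask route below is hypothetical:
#     app.add_url_rule("/v1/auth/oauth2", "oauth2_token", OAuth2(), methods=["POST"])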
|
4,461 |
usage
|
#! /usr/bin/env python
from __future__ import print_function
import os,sys, DLFCN,getopt
sys.setdlopenflags(DLFCN.RTLD_GLOBAL+DLFCN.RTLD_LAZY)
from CondCore.Utilities import iovInspector as inspect
from pluginCondDBPyInterface import *
import pluginCondDBPyInterface as CondDB
from ROOT import TCanvas,TH1F, TH2F,TFile
def unhashEBDetId(i):
    pseudo_eta = i // 360 - 85
ieta=0
if pseudo_eta <0 :
ieta = pseudo_eta
else :
ieta = pseudo_eta +1
iphi = i%360 +1
return ieta,iphi
def setWhat(w,ret) :
for key in ret.keys():
_val = ret[key]
if (isinstance(_val, type([]))) :
_vi = CondDB.VInt()
for i in _val :
_vi.append(i)
exec ('w.set_'+key+'(_vi)')
else :
exec ('w.set_'+key+'(w.'+key+'().'+ret[key]+')')
return w
def METHOD_NAME():
print("inspectEcal -c [connectstring] -P [authpath] -t [tag] -f [outfile] -l -h")
print(" dump records in xml")
print(" -l: list tags and exit")
print(" -f [file] : dump to file")
print(" -p plot distribution ")
print(" -q compare [tag] ")
print(" -r reference [tag] ")
print(" -m draw map")
print(" -h : help")
try:
opts, args = getopt.getopt(sys.argv[1:], "c:P:t:f:lhpq:r:m", ["connect=","authpath=","tag","file","listtags","help","plot","compare","reference","map"])
if not len(opts):
METHOD_NAME()
sys.exit(0)
except getopt.GetoptError:
#* print help information and exit:*
METHOD_NAME()
sys.exit(2)
dbName = "oracle://cms_orcoff_prod/CMS_COND_31X_ECAL"
authpath= "/afs/cern.ch/cms/DB/conddb"
tag='EcalIntercalibConstants_mc'
do_list_tags= 0
dump_to_file =0
outfile=""
do_plot=0
do_compare=0
compare_tag=""
reference_tag=""
drawmap=0
for opt,arg in opts:
if opt in ("-c","--connect"):
try:
            dbName=arg
except Exception as er :
print(er)
if opt in ("-P","--authpath"):
try:
            authpath=arg
except Exception as er :
print(er)
if opt in ("-t","--tag"):
tag=arg
if opt in ("-l","--listtags"):
do_list_tags= 1
if opt in ("-f","--file"):
dump_to_file= 1
outfile=arg
if opt in ("-p","--plot"):
do_plot= 1
if opt in ("-q","--compare"):
do_compare=1
compare_tag=arg
if opt in ("-r","--reference"):
reference_tag=arg
if opt in ("-m","--map"):
drawmap=1
if opt in ("-h","--help"):
METHOD_NAME()
sys.exit(0)
a = FWIncantation()
rdbms = RDBMS(authpath)
db = rdbms.getDB(dbName)
if do_list_tags :
tags=db.allTags()
for tag in tags.split():
print(tag)
sys.exit(0)
try :
iov = inspect.Iov(db,tag)
print("===iov list ===")
iovlist=iov.list()
print(iovlist)
print("===iov summaries ===")
print(iov.summaries())
print("===payload dump ===")
for p in iovlist:
payload=inspect.PayLoad(db,p[0])
#print payload.summary()
if dump_to_file:
print("Dumping to file:", outfile)
out = open(outfile,"w")
print(payload, file=out)
else:
#print payload
if drawmap:
payload.plot("plot","",[],[])
if do_plot:
exec('import '+db.moduleName(tag)+' as Plug')
#what = {'how':'singleChannel','which': [0,1,2]}
what = {'how':'barrel'}
w = setWhat(Plug.What(),what)
ex = Plug.Extractor(w)
for elem in db.iov(tag).elements :
p = Plug.Object(elem)
p.extract(ex)
v = [i for i in ex.values()]
# print v
histo=TH1F("h","h",100,-2,2)
for c in v :
histo.Fill(c)
f=TFile("f.root","recreate")
histo.Write()
if do_compare:
exec('import '+db.moduleName(tag)+' as Plug')
what = {'how':'barrel'}
w = setWhat(Plug.What(),what)
ex = Plug.Extractor(w)
for elem in db.iov(reference_tag).elements :
p = Plug.Object(elem)
p.extract(ex)
coeff_1 = [i for i in ex.values()]
for elem in db.iov(compare_tag).elements :
p = Plug.Object(elem)
p.extract(ex)
coeff_2 = [i for i in ex.values()]
can=TCanvas("c","c")
histo = TH1F("h","h",100,-2,2)
for i,c in enumerate(coeff_1):
histo.Fill(c-coeff_2[i])
histo.Draw()
can.SaveAs("h.svg")
can2=TCanvas("cc","cc")
histo2=TH2F("hh","hh",171,-85,86,360,1,361)
for i,c in enumerate(coeff_1):
factor = c/coeff_2[i]
ieta,iphi= unhashEBDetId(i)
histo2.Fill(ieta,iphi,factor)
histo2.SetStats(0)
histo2.Draw("colz")
can2.SaveAs("h2.svg")
except Exception as er :
print(er)
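# --- Hedged invocation sketch (not part of the original script) ---
# Typical command lines, mirroring the options documented in usage():
#     inspectEcal -l                       # list the available tags
#     inspectEcal -t <tag> -f dump.xml     # dump the payloads of <tag> to a file
#     inspectEcal -r <tag1> -q <tag2>      # compare two tags (writes h.svg and h2.svg)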
|
4,462 |
test unit missing reader
|
import sys
sys.path.insert(0, '../common_python')
import tools
import os
import shutil
def get_default_parameters(dir_name, two_models=True):
data_reader_path = '{d}/model_zoo/data_readers/data_reader_mnist.prototext'.format(
d=dir_name)
model_path = '{d}/model_zoo/tests/model_lenet_mnist_ckpt.prototext'.format(
d=dir_name)
if two_models:
model_path = '{{{mp},{mp}}}'.format(mp=model_path)
optimizer_path = '{d}/model_zoo/optimizers/opt_sgd.prototext'.format(
d=dir_name)
return data_reader_path, model_path, optimizer_path
def get_file_names(dir_name, test_name):
output_file_name = '{d}/ci_test/unit_tests/output/lbann_invocation_{t}_output.txt'.format(
d=dir_name, t=test_name)
error_file_name = '{d}/ci_test/unit_tests/error/lbann_invocation_{t}_error.txt'.format(
d=dir_name, t=test_name)
return output_file_name, error_file_name
# Run with python3 -m pytest -s test_unit_lbann_invocation.py -k 'test_unit_no_params_bad'
def test_unit_no_params_bad(cluster, dirname, weekly):
print('TESTING: run lbann with no params; lbann should throw exception\n')
(output_file_name, error_file_name) = get_file_names(dirname, 'no_params_bad')
command = tools.get_command(
cluster=cluster,
exit_after_setup=True,
num_processes=1,
output_file_name=output_file_name,
error_file_name=error_file_name
)
return_code = os.system(command)
tools.assert_failure(return_code,
'Failed to load any prototext files',
error_file_name)
# Run with python3 -m pytest -s test_unit_lbann_invocation.py -k 'test_unit_one_model_bad'
def test_unit_one_model_bad(cluster, dirname, weekly):
print('TESTING: run lbann with no optimizer or reader; lbann should throw exception\n')
(_, model_path, _) = get_default_parameters(dirname, two_models=False)
(output_file_name, error_file_name) = get_file_names(dirname, 'one_model_bad')
command = tools.get_command(
cluster=cluster,
exit_after_setup=True,
model_path=model_path,
num_processes=1,
output_file_name=output_file_name,
error_file_name=error_file_name
)
return_code = os.system(command)
tools.assert_failure(return_code,
'you specified 1 model filenames, and 0 optimizer filenames; you must specify 1 optimizer filenames',
error_file_name)
# Run with python3 -m pytest -s test_unit_lbann_invocation.py -k 'test_unit_two_models'
def test_unit_two_models(cluster, dirname, weekly):
print('TESTING: run lbann with two models; lbann should throw exception\n')
(data_reader_path, model_path, optimizer_path) = get_default_parameters(dirname)
(output_file_name, error_file_name) = get_file_names(dirname, 'two_models')
command = tools.get_command(
cluster=cluster, data_reader_path=data_reader_path,
data_filedir_default='/p/lscratchh/brainusr/datasets/MNIST',
exit_after_setup=True,
model_path=model_path,
optimizer_path=optimizer_path,
num_processes=1,
output_file_name=output_file_name,
error_file_name=error_file_name
)
return_code = os.system(command)
tools.assert_failure(return_code,
'Arguments could not be parsed.',
error_file_name)
# Run with python3 -m pytest -s test_unit_lbann_invocation.py -k 'test_unit_missing_optimizer'
def test_unit_missing_optimizer(cluster, dirname, weekly):
print('TESTING: run lbann with model, reader, but no optimizer; lbann should throw exception\n')
(data_reader_path, model_path, _) = get_default_parameters(dirname, two_models=False)
(output_file_name, error_file_name) = get_file_names(dirname, 'missing_optimizer')
command = tools.get_command(
cluster=cluster,
data_reader_path=data_reader_path,
data_filedir_default='/p/lscratchh/brainusr/datasets/MNIST',
exit_after_setup=True, model_path=model_path,
num_processes=1,
output_file_name=output_file_name,
error_file_name=error_file_name
)
return_code = os.system(command)
tools.assert_failure(return_code,
'you specified 1 model filenames, and 0 optimizer filenames; you must specify 1 optimizer filenames',
error_file_name)
# Run with python3 -m pytest -s test_unit_lbann_invocation.py -k 'test_unit_missing_reader'
def METHOD_NAME(cluster, dirname, weekly):
print('TESTING: run lbann with model, optimizer, but no reader; lbann should throw exception\n')
(_, model_path, optimizer_path) = get_default_parameters(dirname, two_models=False)
(output_file_name, error_file_name) = get_file_names(dirname, 'missing_reader')
command = tools.get_command(
cluster=cluster,
exit_after_setup=True,
model_path=model_path, optimizer_path=optimizer_path,
num_processes=1,
output_file_name=output_file_name,
error_file_name=error_file_name
)
return_code = os.system(command)
tools.assert_failure(return_code,
'you specified 1 model filenames, and 0 reader filenames; you must specify 1 reader filenames',
error_file_name)
# Run with python3 -m pytest -s test_unit_lbann_invocation.py -k 'test_unit_bad_params'
def test_unit_bad_params(cluster, dirname, weekly):
exe = shutil.which('lbann')
    print('TESTING: run lbann with an ill-formed param (exit_after_setup should have `--` not `-`); lbann should throw exception\n')
(data_reader_path, model_path, optimizer_path) = get_default_parameters(
dirname, two_models=False)
(command_allocate, command_run, _, _) = tools.get_command(
cluster=cluster,
num_processes=1,
return_tuple=True)
(output_file_name, error_file_name) = get_file_names(dirname, 'bad_params')
command_string = '{ca}{cr} {e} -exit_after_setup --reader={d} --model={m} --optimizer={o} > {ofn} 2> {efn}'.format(
ca=command_allocate, cr=command_run, e=exe,
d=data_reader_path, m=model_path, o=optimizer_path,
ofn=output_file_name, efn=error_file_name
)
return_code = os.system(command_string)
tools.assert_failure(return_code,
"Arguments could not be parsed.",
error_file_name)
# Run with python3 -m pytest -s test_unit_lbann_invocation.py -k 'test_unit_should_work'
def test_unit_should_work(cluster, dirname, weekly):
print('TESTING: run lbann with model, reader, and optimizer; lbann should NOT throw exception\n')
(data_reader_path, model_path, optimizer_path) = get_default_parameters(
dirname, two_models=False)
(output_file_name, error_file_name) = get_file_names(dirname, 'should_work')
command = tools.get_command(
cluster=cluster, data_reader_path=data_reader_path,
data_filedir_default='/p/lscratchh/brainusr/datasets/MNIST',
exit_after_setup=True, model_path=model_path,
optimizer_path=optimizer_path,
num_processes=1,
output_file_name=output_file_name,
error_file_name=error_file_name)
return_code = os.system(command)
tools.assert_success(return_code, error_file_name)
|
4,463 |
is available
|
#
# (c) 2008 Mandriva, http://www.mandriva.com/
#
# $Id: package_api.py 713 2009-02-27 14:06:11Z oroussy $
#
# This file is part of Pulse 2, http://pulse2.mandriva.org
#
# Pulse 2 is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Pulse 2 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pulse 2; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""
This module define the package_api_get API
It provides methods to acces to package informations.
"""
from pulse2.apis.clients import Pulse2Api
import twisted.internet.defer
class PackageGetA(Pulse2Api):
def __init__(self, *attr):
self.name = "PackageGetApi"
Pulse2Api.__init__(self, *attr)
def getAllPackages(self, mirror = None):
try:
d = self.callRemote("getAllPackages", mirror)
d.addErrback(self.onError, "getAllPackages", mirror, [{'label':'A', 'version':'0', 'ERR':'PULSE2ERROR_GETALLPACKAGE', 'mirror':self.server_addr.replace(self.credentials, '')}])
return d
        except Exception as e:
self.logger.error("getAllPackages %s"%(str(e)))
return [{'label':'A', 'version':'0', 'ERR':'PULSE2ERROR_GETALLPACKAGE', 'mirror':self.server_addr.replace(self.credentials, '')}]
def getAllPendingPackages(self, mirror = None):
try:
d = self.callRemote("getAllPendingPackages", mirror)
d.addErrback(self.onError, "getAllPendingPackages", mirror)
return d
        except Exception as e:
self.logger.error("getAllPendingPackages %s"%(str(e)))
return []
# FIXME ! __convertDoReboot* shouldn't be needed
def __convertDoRebootList(self, pkgs):
ret = []
for pkg in pkgs:
ret.append(self.__convertDoReboot(pkg))
return ret
def __convertDoReboot(self, pkg):
if pkg:
try:
do_reboot = pkg['reboot']
if do_reboot == '' or do_reboot == '0' or do_reboot == 0 or do_reboot == u'0' or do_reboot == 'false' or do_reboot == u'false' or do_reboot == False or do_reboot == 'disable' or do_reboot == u'disable' or do_reboot == 'off' or do_reboot == u'off':
pkg['do_reboot'] = 'disable'
elif do_reboot == '1' or do_reboot == 1 or do_reboot == u'1' or do_reboot == 'true' or do_reboot == u'true' or do_reboot == True or do_reboot == 'enable' or do_reboot == u'enable' or do_reboot == 'on' or do_reboot == u'on':
pkg['do_reboot'] = 'enable'
else:
self.logger.warning("Dont know option '%s' for do_reboot, will use 'disable'"%(do_reboot))
del pkg['reboot']
except KeyError:
pkg['do_reboot'] = 'disable'
return pkg
def getPackageDetail(self, pid):
d = self.callRemote("getPackageDetail", pid)
d.addCallback(self.__convertDoReboot)
d.addErrback(self.onError, "getPackageDetail", pid, False)
return d
def getPackagesDetail(self, pids):
d = self.callRemote("getPackagesDetail", pids)
d.addCallback(self.__convertDoRebootList)
d.addErrback(self.onErrorGetPackageDetailCall, pids, False)
return d
def treatMultipleGetPackageDetailCall(self, results):
ret = []
for i in results:
ret.append(i[1])
return ret
def onErrorGetPackageDetailCall(self, error, pids, value = []):
        # When the package server is old, this single-call function does not exist,
        # so we fall back to calling the per-package function several times.
        self.logger.warn("one of your package servers does not support getPackagesDetail, you should update it.")
ds = []
for pid in pids:
d = self.callRemote("getPackageDetail", pid)
d.addCallback(self.__convertDoReboot)
d.addErrback(self.onError, "getPackageDetail", pid, False)
ds.append(d)
dl = twisted.internet.defer.DeferredList(ds)
dl.addCallback(self.treatMultipleGetPackageDetailCall)
return dl
def getPackageLabel(self, pid):
d = self.callRemote("getPackageLabel", pid)
d.addErrback(self.onError, "getPackageLabel", pid, False)
return d
def _erGetLocalPackagePath(self):
return self.config.repopath
def getLocalPackagePath(self, pid):
d = self.callRemote("getLocalPackagePath", pid)
d.addErrback(self._erGetLocalPackagePath)
return d
def getLocalPackagesPath(self, pids):
d = self.callRemote("getLocalPackagesPath", pids)
d.addErrback(self.onError, "getLocalPackagesPath", pids, False)
return d
def getPackageVersion(self, pid):
d = self.callRemote("getPackageVersion", pid)
d.addErrback(self.onError, "getPackageVersion", pid, False)
return d
def getPackageSize(self, pid):
d = self.callRemote("getPackageSize", pid)
d.addErrback(self.onError, "getPackageSize", pid, 0)
return d
def getPackageInstallInit(self, pid):
d = self.callRemote("getPackageInstallInit", pid)
d.addErrback(self.onError, "getPackageInstallInit", pid, False)
return d
def getPackagePreCommand(self, pid):
d = self.callRemote("getPackagePreCommand", pid)
d.addErrback(self.onError, "getPackagePreCommand", pid, False)
return d
def getPackageCommand(self, pid):
d = self.callRemote("getPackageCommand", pid)
d.addErrback(self.onError, "getPackageCommand", pid, False)
return d
def getPackagePostCommandSuccess(self, pid):
d = self.callRemote("getPackagePostCommandSuccess", pid)
d.addErrback(self.onError, "getPackagePostCommandSuccess", pid, False)
return d
def getPackagePostCommandFailure(self, pid):
d = self.callRemote("getPackagePostCommandFailure", pid)
d.addErrback(self.onError, "getPackagePostCommandFailure", pid, False)
return d
def getPackageHasToReboot(self, pid):
d = self.callRemote("getPackageHasToReboot", pid)
d.addErrback(self.onError, "getPackageHasToReboot", pid, False)
return d
def getPackageFiles(self, pid):
d = self.callRemote("getPackageFiles", pid)
d.addErrback(self.onError, "getPackageFiles", pid)
return d
def getFileChecksum(self, file):
d = self.callRemote("getFileChecksum", file)
d.addErrback(self.onError, "getFileChecksum", file, False)
return d
def getPackagesIds(self, label):
d = self.callRemote("getPackagesIds", label)
d.addErrback(self.onError, "getPackagesIds", label)
return d
def getPackageId(self, label, version):
d = self.callRemote("getPackageId", label, version)
d.addErrback(self.onError, "getPackageId", (label, version), False)
return d
def METHOD_NAME(self, pid, mirror):
d = self.callRemote("isAvailable", pid, mirror)
d.addErrback(self.onError, "getPackageId", (pid, mirror), False)
return d
|
4,464 |
test add remove group
|
#coding: UTF-8
"""
Test groups api.
"""
import unittest
from tests.api.apitestbase import ApiTestBase
from tests.api.urls import GROUPS_URL
from tests.common.utils import apiurl, urljoin, randstring
class GroupsApiTest(ApiTestBase):
def test_add_remove_group_member(self):
with self.get_tmp_user() as user:
with self.get_tmp_group() as group:
test_group_members_url = urljoin(group.group_url, '/members/')
data = {'user_name': user.user_name}
res = self.put(test_group_members_url, data=data).json()
self.assertTrue(res['success'])
res = self.delete(test_group_members_url, data=data).json()
self.assertTrue(res['success'])
def test_list_groups(self):
with self.get_tmp_group() as group:
groups = self.get(GROUPS_URL).json()
self.assertGreaterEqual(groups['replynum'], 0)
self.assertNotEmpty(groups['groups'])
for group in groups['groups']:
self.assertIsNotNone(group['ctime'])
self.assertIsNotNone(group['creator'])
self.assertIsNotNone(group['msgnum'])
self.assertIsNotNone(group['id'])
self.assertIsNotNone(group['name'])
def METHOD_NAME(self):
data = {'group_name': randstring(16)}
info = self.put(GROUPS_URL, data=data).json()
self.assertTrue(info['success'])
group_id = info['group_id']
self.assertGreater(group_id, 0)
url = urljoin(GROUPS_URL, str(group_id))
self.delete(url)
# check group is really removed
groups = self.get(GROUPS_URL).json()['groups']
for group in groups:
self.assertNotEqual(group['id'], group_id)
def test_add_remove_group_with_blank(self):
data = {'group_name': randstring(4) + ' ' + randstring(4)}
info = self.put(GROUPS_URL, data=data).json()
self.assertTrue(info['success'])
group_id = info['group_id']
self.assertGreater(group_id, 0)
url = urljoin(GROUPS_URL, str(group_id))
self.delete(url)
# check group is really removed
groups = self.get(GROUPS_URL).json()['groups']
for group in groups:
self.assertNotEqual(group['id'], group_id)
def test_add_remove_group_with_hyphen(self):
data = {'group_name': randstring(4) + '-' + randstring(4)}
info = self.put(GROUPS_URL, data=data).json()
self.assertTrue(info['success'])
group_id = info['group_id']
self.assertGreater(group_id, 0)
url = urljoin(GROUPS_URL, str(group_id))
self.delete(url)
# check group is really removed
groups = self.get(GROUPS_URL).json()['groups']
for group in groups:
self.assertNotEqual(group['id'], group_id)
def test_add_remove_group_with_single_quote(self):
data = {'group_name': randstring(4) + "'" + randstring(4)}
info = self.put(GROUPS_URL, data=data).json()
self.assertTrue(info['success'])
group_id = info['group_id']
self.assertGreater(group_id, 0)
url = urljoin(GROUPS_URL, str(group_id))
self.delete(url)
# check group is really removed
groups = self.get(GROUPS_URL).json()['groups']
for group in groups:
self.assertNotEqual(group['id'], group_id)
def test_add_remove_group_with_blank_and_hyphen_and_single_quote(self):
data = {'group_name': randstring(2) + '-' + randstring(2) + ' ' + randstring(2) + "'" + randstring(2)}
info = self.put(GROUPS_URL, data=data).json()
self.assertTrue(info['success'])
group_id = info['group_id']
self.assertGreater(group_id, 0)
url = urljoin(GROUPS_URL, str(group_id))
self.delete(url)
# check group is really removed
groups = self.get(GROUPS_URL).json()['groups']
for group in groups:
self.assertNotEqual(group['id'], group_id)
|
4,465 |
replace ingame text
|
"""Update wrinkly hints compressed file."""
import random
from io import BytesIO
import js
from randomizer.Enums.Kongs import Kongs
from randomizer.Lists.WrinklyHints import HintLocation, hints
from randomizer.Patching.Lib import grabText, writeText
from randomizer.Patching.Patcher import ROM, LocalROM
def writeWrinklyHints(file_start_offset, text):
"""Write the text to ROM."""
ROM_COPY = LocalROM()
ROM_COPY.seek(file_start_offset)
ROM_COPY.writeMultipleBytes(len(text), 1)
position = 0
offset = 1
for textbox in text:
ROM_COPY.seek(file_start_offset + offset)
ROM_COPY.writeMultipleBytes(1, 1)
ROM_COPY.seek(file_start_offset + offset + 1)
ROM_COPY.writeMultipleBytes(1, 1)
ROM_COPY.seek(file_start_offset + offset + 2)
ROM_COPY.writeMultipleBytes(len(textbox), 1)
offset += 3
for string in textbox:
ROM_COPY.seek(file_start_offset + offset)
ROM_COPY.writeMultipleBytes(position, 4)
ROM_COPY.seek(file_start_offset + offset + 4)
ROM_COPY.writeMultipleBytes(len(string), 2)
ROM_COPY.seek(file_start_offset + offset + 6)
ROM_COPY.writeMultipleBytes(0, 2)
offset += 8
position += len(string)
ROM_COPY.seek(file_start_offset + offset)
ROM_COPY.writeMultipleBytes(0, 4)
offset += 4
ROM_COPY.seek(file_start_offset + offset)
ROM_COPY.writeMultipleBytes(position, 2)
offset += 2
for textbox in text:
for string in textbox:
for x in range(len(string)):
ROM_COPY.seek(file_start_offset + offset + x)
ROM_COPY.writeMultipleBytes(int.from_bytes(string[x].encode("ascii"), "big"), 1)
offset += len(string)
def UpdateHint(WrinklyHint: HintLocation, message: str):
"""Update the wrinkly hint with the new string.
Args:
WrinklyHint (Hint): Wrinkly hint object.
message (str): Hint message to write.
"""
# Seek to the wrinkly data
    if len(message) <= 914:
        # We're safely below the character limit
        WrinklyHint.hint = message
        return True
    # Refuse anything over the character limit
    raise Exception("Hint message is longer than allowed.")
def updateRandomHint(message: str, kongs_req=[], keywords=[], levels=[]):
"""Update a random hint with the string specifed.
Args:
message (str): Hint message to write.
"""
hint_pool = []
for x in range(len(hints)):
if hints[x].hint == "" and hints[x].kong in kongs_req and hints[x].level in levels:
is_banned = False
for banned in hints[x].banned_keywords:
if banned in keywords:
is_banned = True
if not is_banned:
hint_pool.append(x)
if len(hint_pool) > 0:
selected = random.choice(hint_pool)
return UpdateHint(hints[selected], message)
return False
def PushHints(spoiler):
"""Update the ROM with all hints."""
hint_arr = []
for replacement_hint in spoiler.hint_list.values():
if replacement_hint == "":
replacement_hint = "PLACEHOLDER HINT"
hint_arr.append([replacement_hint.upper()])
writeWrinklyHints(js.pointer_addresses[12]["entries"][41]["pointing_to"], hint_arr)
spoiler.hint_list.pop("First Time Talk") # The FTT needs to be written to the ROM but should not be found in the spoiler log
def wipeHints():
"""Wipe the hint block."""
for x in range(len(hints)):
if hints[x].kong != Kongs.any:
hints[x].hint = ""
def METHOD_NAME(spoiler):
"""Replace text in-game with defined modifications."""
for file_index in spoiler.text_changes:
old_text = grabText(file_index)
modification_data = spoiler.text_changes[file_index]
for mod in modification_data:
if mod["mode"] == "replace":
old_textbox = old_text[mod["textbox_index"]]
new_textbox = []
for seg in old_textbox:
text = []
for line in seg["text"]:
new_line = line.replace(mod["search"], mod["target"])
text.append(new_line)
new_textbox.append({"text": text.copy()})
old_text[mod["textbox_index"]] = new_textbox.copy()
elif mod["mode"] == "replace_whole":
# print(mod["target"])
old_text[mod["textbox_index"]] = ({"text": [mod["target"]]},)
writeText(file_index, old_text)
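# --- Hedged data-shape sketch (not part of the original module) ---
# spoiler.text_changes is expected to map a text file index to a list of
# modification dicts; the indices and strings below are illustrative only:
#     spoiler.text_changes = {
#         12: [
#             {"mode": "replace", "textbox_index": 3, "search": "BANANA", "target": "COIN"},
#             {"mode": "replace_whole", "textbox_index": 5, "target": "NEW TEXTBOX TEXT"},
#         ],
#     }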
|
4,466 |
assert normal equality implementation
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
I{Private} test utilities for use throughout Twisted's test suite. Unlike
C{proto_helpers}, this is no exception to the
don't-use-it-outside-Twisted-we-won't-maintain-compatibility rule!
@note: Maintainers be aware: things in this module should be gradually promoted
to more full-featured test helpers and exposed as public API as your
maintenance time permits. In order to be public API though, they need
their own test cases.
"""
from io import BytesIO
from xml.dom import minidom as dom
from twisted.internet.protocol import FileWrapper
class IOPump:
"""Utility to pump data between clients and servers for protocol testing.
Perhaps this is a utility worthy of being in protocol.py?
"""
def __init__(self, client, server, clientIO, serverIO):
self.client = client
self.server = server
self.clientIO = clientIO
self.serverIO = serverIO
def flush(self):
"Pump until there is no more input or output."
while self.pump():
pass
def pump(self):
"""Move data back and forth.
Returns whether any data was moved.
"""
self.clientIO.seek(0)
self.serverIO.seek(0)
cData = self.clientIO.read()
sData = self.serverIO.read()
self.clientIO.seek(0)
self.serverIO.seek(0)
self.clientIO.truncate()
self.serverIO.truncate()
for byte in cData:
self.server.dataReceived(byte)
for byte in sData:
self.client.dataReceived(byte)
if cData or sData:
return 1
else:
return 0
def returnConnected(server, client):
"""Take two Protocol instances and connect them."""
cio = BytesIO()
sio = BytesIO()
client.makeConnection(FileWrapper(cio))
server.makeConnection(FileWrapper(sio))
pump = IOPump(client, server, cio, sio)
# Challenge-response authentication:
pump.flush()
# Uh...
pump.flush()
return pump
class XMLAssertionMixin:
"""
Test mixin defining a method for comparing serialized XML documents.
Must be mixed in to a L{test case<unittest.TestCase>}.
"""
def assertXMLEqual(self, first, second):
"""
Verify that two strings represent the same XML document.
@param first: An XML string.
@type first: L{bytes}
@param second: An XML string that should match C{first}.
@type second: L{bytes}
"""
self.assertEqual(
dom.parseString(first).toxml(), dom.parseString(second).toxml()
)
class _Equal:
"""
A class the instances of which are equal to anything and everything.
"""
def __eq__(self, other: object) -> bool:
return True
class _NotEqual:
"""
A class the instances of which are equal to nothing.
"""
def __eq__(self, other: object) -> bool:
return False
class ComparisonTestsMixin:
"""
A mixin which defines a method for making assertions about the correctness
of an implementation of C{==} and C{!=}.
Use this to unit test objects which follow the common convention for C{==}
and C{!=}:
- The object compares equal to itself
- The object cooperates with unrecognized types to allow them to
implement the comparison
- The object implements not-equal as the opposite of equal
"""
def METHOD_NAME(
self, firstValueOne, secondValueOne, valueTwo
):
"""
Assert that C{firstValueOne} is equal to C{secondValueOne} but not
        equal to C{valueTwo} and that it defines equality cooperatively with
other types it doesn't know about.
@param firstValueOne: An object which is expected to compare as equal
to C{secondValueOne} and not equal to C{valueTwo}.
@param secondValueOne: A different object than C{firstValueOne} but
which is expected to compare equal to that object.
@param valueTwo: An object which is expected to compare as not equal to
C{firstValueOne}.
"""
# This doesn't use assertEqual and assertNotEqual because the exact
# operator those functions use is not very well defined. The point
# of these assertions is to check the results of the use of specific
# operators (precisely to ensure that using different permutations
# (eg "x == y" or "not (x != y)") which should yield the same results
# actually does yield the same result). -exarkun
self.assertTrue(firstValueOne == firstValueOne)
self.assertTrue(firstValueOne == secondValueOne)
self.assertFalse(firstValueOne == valueTwo)
self.assertFalse(firstValueOne != firstValueOne)
self.assertFalse(firstValueOne != secondValueOne)
self.assertTrue(firstValueOne != valueTwo)
self.assertTrue(firstValueOne == _Equal())
self.assertFalse(firstValueOne != _Equal())
self.assertFalse(firstValueOne == _NotEqual())
self.assertTrue(firstValueOne != _NotEqual())
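# --- Hedged usage sketch (not part of the original module) ---
# A trial test case would mix this in and hand the assertion two equal values
# plus one differing value; `Record` and the test class below are hypothetical
# (SynchronousTestCase would come from twisted.trial.unittest):
#     class RecordComparisonTests(ComparisonTestsMixin, SynchronousTestCase):
#         def test_equality(self):
#             self.METHOD_NAME(Record("a"), Record("a"), Record("b"))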
|
4,467 |
send request
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, Awaitable, TYPE_CHECKING
from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from .. import models as _models
from .._serialization import Deserializer, Serializer
from ._configuration import StorageCacheManagementClientConfiguration
from .operations import (
AmlFilesystemsOperations,
AscOperationsOperations,
AscUsagesOperations,
CachesOperations,
Operations,
SkusOperations,
StorageCacheManagementClientOperationsMixin,
StorageTargetOperations,
StorageTargetsOperations,
UsageModelsOperations,
)
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class StorageCacheManagementClient(
StorageCacheManagementClientOperationsMixin
): # pylint: disable=client-accepts-api-version-keyword,too-many-instance-attributes
"""Azure Managed Lustre provides a fully managed Lustre® file system, integrated with Blob
storage, for use on demand. These operations create and manage Azure Managed Lustre file
systems.
:ivar aml_filesystems: AmlFilesystemsOperations operations
:vartype aml_filesystems: azure.mgmt.storagecache.aio.operations.AmlFilesystemsOperations
:ivar operations: Operations operations
:vartype operations: azure.mgmt.storagecache.aio.operations.Operations
:ivar skus: SkusOperations operations
:vartype skus: azure.mgmt.storagecache.aio.operations.SkusOperations
:ivar usage_models: UsageModelsOperations operations
:vartype usage_models: azure.mgmt.storagecache.aio.operations.UsageModelsOperations
:ivar asc_operations: AscOperationsOperations operations
:vartype asc_operations: azure.mgmt.storagecache.aio.operations.AscOperationsOperations
:ivar asc_usages: AscUsagesOperations operations
:vartype asc_usages: azure.mgmt.storagecache.aio.operations.AscUsagesOperations
:ivar caches: CachesOperations operations
:vartype caches: azure.mgmt.storagecache.aio.operations.CachesOperations
:ivar storage_targets: StorageTargetsOperations operations
:vartype storage_targets: azure.mgmt.storagecache.aio.operations.StorageTargetsOperations
:ivar storage_target: StorageTargetOperations operations
:vartype storage_target: azure.mgmt.storagecache.aio.operations.StorageTargetOperations
:param credential: Credential needed for the client to connect to Azure. Required.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The ID of the target subscription. Required.
:type subscription_id: str
:param base_url: Service URL. Default value is "https://management.azure.com".
:type base_url: str
:keyword api_version: Api Version. Default value is "2023-05-01". Note that overriding this
default value may result in unsupported behavior.
:paramtype api_version: str
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: str = "https://management.azure.com",
**kwargs: Any
) -> None:
self._config = StorageCacheManagementClientConfiguration(
credential=credential, subscription_id=subscription_id, **kwargs
)
self._client: AsyncARMPipelineClient = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.aml_filesystems = AmlFilesystemsOperations(self._client, self._config, self._serialize, self._deserialize)
self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)
self.skus = SkusOperations(self._client, self._config, self._serialize, self._deserialize)
self.usage_models = UsageModelsOperations(self._client, self._config, self._serialize, self._deserialize)
self.asc_operations = AscOperationsOperations(self._client, self._config, self._serialize, self._deserialize)
self.asc_usages = AscUsagesOperations(self._client, self._config, self._serialize, self._deserialize)
self.caches = CachesOperations(self._client, self._config, self._serialize, self._deserialize)
self.storage_targets = StorageTargetsOperations(self._client, self._config, self._serialize, self._deserialize)
self.storage_target = StorageTargetOperations(self._client, self._config, self._serialize, self._deserialize)
def METHOD_NAME(self, request: HttpRequest, **kwargs: Any) -> Awaitable[AsyncHttpResponse]:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = await client._send_request(request)
<AsyncHttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.AsyncHttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "StorageCacheManagementClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details: Any) -> None:
await self._client.__aexit__(*exc_details)
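# --- Hedged construction sketch (not part of the generated client) ---
# A typical pairing with a credential from azure-identity, used inside an
# async function; the subscription id below is a placeholder value:
#     from azure.identity.aio import DefaultAzureCredential
#     async with StorageCacheManagementClient(
#         credential=DefaultAzureCredential(),
#         subscription_id="00000000-0000-0000-0000-000000000000",
#     ) as client:
#         ...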
|
4,468 |
read
|
#
# The Python Imaging Library
# $Id$
#
# Adobe PSD 2.5/3.0 file handling
#
# History:
# 1995-09-01 fl Created
# 1997-01-03 fl Read most PSD images
# 1997-01-18 fl Fixed P and CMYK support
# 2001-10-21 fl Added seek/tell support (for layers)
#
# Copyright (c) 1997-2001 by Secret Labs AB.
# Copyright (c) 1995-2001 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
import io
from . import Image, ImageFile, ImagePalette
from ._binary import i8
from ._binary import i16be as i16
from ._binary import i32be as i32
from ._binary import si16be as si16
MODES = {
# (photoshop mode, bits) -> (pil mode, required channels)
(0, 1): ("1", 1),
(0, 8): ("L", 1),
(1, 8): ("L", 1),
(2, 8): ("P", 1),
(3, 8): ("RGB", 3),
(4, 8): ("CMYK", 4),
(7, 8): ("L", 1), # FIXME: multilayer
(8, 8): ("L", 1), # duotone
(9, 8): ("LAB", 3),
}
# --------------------------------------------------------------------.
# read PSD images
def _accept(prefix):
return prefix[:4] == b"8BPS"
##
# Image plugin for Photoshop images.
class PsdImageFile(ImageFile.ImageFile):
format = "PSD"
format_description = "Adobe Photoshop"
_close_exclusive_fp_after_loading = False
def _open(self):
METHOD_NAME = self.fp.METHOD_NAME
#
# header
s = METHOD_NAME(26)
if not _accept(s) or i16(s, 4) != 1:
msg = "not a PSD file"
raise SyntaxError(msg)
psd_bits = i16(s, 22)
psd_channels = i16(s, 12)
psd_mode = i16(s, 24)
mode, channels = MODES[(psd_mode, psd_bits)]
if channels > psd_channels:
msg = "not enough channels"
raise OSError(msg)
if mode == "RGB" and psd_channels == 4:
mode = "RGBA"
channels = 4
self.mode = mode
self._size = i32(s, 18), i32(s, 14)
#
# color mode data
size = i32(METHOD_NAME(4))
if size:
data = METHOD_NAME(size)
if mode == "P" and size == 768:
self.palette = ImagePalette.raw("RGB;L", data)
#
# image resources
self.resources = []
size = i32(METHOD_NAME(4))
if size:
# load resources
end = self.fp.tell() + size
while self.fp.tell() < end:
METHOD_NAME(4) # signature
id = i16(METHOD_NAME(2))
name = METHOD_NAME(i8(METHOD_NAME(1)))
if not (len(name) & 1):
METHOD_NAME(1) # padding
data = METHOD_NAME(i32(METHOD_NAME(4)))
if len(data) & 1:
METHOD_NAME(1) # padding
self.resources.append((id, name, data))
if id == 1039: # ICC profile
self.info["icc_profile"] = data
#
# layer and mask information
self.layers = []
size = i32(METHOD_NAME(4))
if size:
end = self.fp.tell() + size
size = i32(METHOD_NAME(4))
if size:
_layer_data = io.BytesIO(ImageFile._safe_read(self.fp, size))
self.layers = _layerinfo(_layer_data, size)
self.fp.seek(end)
self.n_frames = len(self.layers)
self.is_animated = self.n_frames > 1
#
# image descriptor
self.tile = _maketile(self.fp, mode, (0, 0) + self.size, channels)
# keep the file open
self._fp = self.fp
self.frame = 1
self._min_frame = 1
def seek(self, layer):
if not self._seek_check(layer):
return
# seek to given layer (1..max)
try:
name, mode, bbox, tile = self.layers[layer - 1]
self.mode = mode
self.tile = tile
self.frame = layer
self.fp = self._fp
return name, bbox
except IndexError as e:
msg = "no such layer"
raise EOFError(msg) from e
def tell(self):
# return layer number (0=image, 1..max=layers)
return self.frame
def _layerinfo(fp, ct_bytes):
# read layerinfo block
layers = []
def METHOD_NAME(size):
return ImageFile._safe_read(fp, size)
ct = si16(METHOD_NAME(2))
# sanity check
if ct_bytes < (abs(ct) * 20):
msg = "Layer block too short for number of layers requested"
raise SyntaxError(msg)
for _ in range(abs(ct)):
# bounding box
y0 = i32(METHOD_NAME(4))
x0 = i32(METHOD_NAME(4))
y1 = i32(METHOD_NAME(4))
x1 = i32(METHOD_NAME(4))
# image info
mode = []
ct_types = i16(METHOD_NAME(2))
types = list(range(ct_types))
if len(types) > 4:
continue
for _ in types:
type = i16(METHOD_NAME(2))
if type == 65535:
m = "A"
else:
m = "RGBA"[type]
mode.append(m)
METHOD_NAME(4) # size
# figure out the image mode
mode.sort()
if mode == ["R"]:
mode = "L"
elif mode == ["B", "G", "R"]:
mode = "RGB"
elif mode == ["A", "B", "G", "R"]:
mode = "RGBA"
else:
mode = None # unknown
# skip over blend flags and extra information
METHOD_NAME(12) # filler
name = ""
size = i32(METHOD_NAME(4)) # length of the extra data field
if size:
data_end = fp.tell() + size
length = i32(METHOD_NAME(4))
if length:
fp.seek(length - 16, io.SEEK_CUR)
length = i32(METHOD_NAME(4))
if length:
fp.seek(length, io.SEEK_CUR)
length = i8(METHOD_NAME(1))
if length:
# Don't know the proper encoding,
# Latin-1 should be a good guess
name = METHOD_NAME(length).decode("latin-1", "replace")
fp.seek(data_end)
layers.append((name, mode, (x0, y0, x1, y1)))
# get tiles
for i, (name, mode, bbox) in enumerate(layers):
tile = []
for m in mode:
t = _maketile(fp, m, bbox, 1)
if t:
tile.extend(t)
layers[i] = name, mode, bbox, tile
return layers
def _maketile(file, mode, bbox, channels):
tile = None
METHOD_NAME = file.METHOD_NAME
compression = i16(METHOD_NAME(2))
xsize = bbox[2] - bbox[0]
ysize = bbox[3] - bbox[1]
offset = file.tell()
if compression == 0:
#
# raw compression
tile = []
for channel in range(channels):
layer = mode[channel]
if mode == "CMYK":
layer += ";I"
tile.append(("raw", bbox, offset, layer))
offset = offset + xsize * ysize
elif compression == 1:
#
# packbits compression
i = 0
tile = []
bytecount = METHOD_NAME(channels * ysize * 2)
offset = file.tell()
for channel in range(channels):
layer = mode[channel]
if mode == "CMYK":
layer += ";I"
tile.append(("packbits", bbox, offset, layer))
for y in range(ysize):
offset = offset + i16(bytecount, i)
i += 2
file.seek(offset)
if offset & 1:
METHOD_NAME(1) # padding
return tile
# --------------------------------------------------------------------
# registry
Image.register_open(PsdImageFile.format, PsdImageFile, _accept)
Image.register_extension(PsdImageFile.format, ".psd")
Image.register_mime(PsdImageFile.format, "image/vnd.adobe.photoshop")
|
4,469 |
get user details
|
"""
Khan Academy OAuth backend, docs at:
https://github.com/Khan/khan-api/wiki/Khan-Academy-API-Authentication
"""
from urllib.parse import urlencode
from oauthlib.oauth1 import SIGNATURE_HMAC, SIGNATURE_TYPE_QUERY
from requests_oauthlib import OAuth1
from .oauth import BaseOAuth1
class BrowserBasedOAuth1(BaseOAuth1):
"""Browser based mechanism OAuth authentication, fill the needed
parameters to communicate properly with authentication service.
REQUEST_TOKEN_URL Request token URL (opened in web browser)
ACCESS_TOKEN_URL Access token URL
"""
REQUEST_TOKEN_URL = ""
OAUTH_TOKEN_PARAMETER_NAME = "oauth_token"
REDIRECT_URI_PARAMETER_NAME = "redirect_uri"
ACCESS_TOKEN_URL = ""
def auth_url(self):
"""Return redirect url"""
return self.unauthorized_token_request()
def get_unauthorized_token(self):
return self.strategy.request_data()
def unauthorized_token_request(self):
"""Return request for unauthorized token (first stage)"""
params = self.request_token_extra_arguments()
params.update(self.get_scope_argument())
key, secret = self.get_key_and_secret()
state = self.get_or_create_state()
auth = OAuth1(
key,
secret,
callback_uri=self.get_redirect_uri(state),
signature_method=SIGNATURE_HMAC,
signature_type=SIGNATURE_TYPE_QUERY,
decoding=None,
)
url = self.REQUEST_TOKEN_URL + "?" + urlencode(params)
url, _, _ = auth.client.sign(url)
return url
def oauth_auth(self, token=None, oauth_verifier=None):
key, secret = self.get_key_and_secret()
oauth_verifier = oauth_verifier or self.data.get("oauth_verifier")
token = token or {}
state = self.get_or_create_state()
return OAuth1(
key,
secret,
resource_owner_key=token.get("oauth_token"),
resource_owner_secret=token.get("oauth_token_secret"),
callback_uri=self.get_redirect_uri(state),
verifier=oauth_verifier,
signature_method=SIGNATURE_HMAC,
signature_type=SIGNATURE_TYPE_QUERY,
decoding=None,
)
class KhanAcademyOAuth1(BrowserBasedOAuth1):
"""
    Class used for authorizing with Khan Academy.
    The Khan Academy flow is a bit different from most OAuth 1.0 flows and
    consists of the following steps:
1. Create signed params to attach to the REQUEST_TOKEN_URL
2. Redirect user to the REQUEST_TOKEN_URL that will respond with
oauth_secret, oauth_token, oauth_verifier that should be used with
ACCESS_TOKEN_URL
3. Go to ACCESS_TOKEN_URL and grab oauth_token_secret.
Note that we don't use the AUTHORIZATION_URL.
REQUEST_TOKEN_URL requires the following arguments:
oauth_consumer_key - Your app's consumer key
oauth_nonce - Random 64-bit, unsigned number encoded as an ASCII string
in decimal format. The nonce/timestamp pair should always be unique.
oauth_version - OAuth version used by your app. Must be "1.0" for now.
oauth_signature - String generated using the referenced signature method.
oauth_signature_method - Signature algorithm (currently only support
"HMAC-SHA1")
oauth_timestamp - Integer representing the time the request is sent.
The timestamp should be expressed in number of seconds
after January 1, 1970 00:00:00 GMT.
oauth_callback (optional) - URL to redirect to after request token is
received and authorized by the user's chosen identity provider.
"""
name = "khanacademy-oauth1"
ID_KEY = "user_id"
REQUEST_TOKEN_URL = "http://www.khanacademy.org/api/auth/request_token"
ACCESS_TOKEN_URL = "https://www.khanacademy.org/api/auth/access_token"
REDIRECT_URI_PARAMETER_NAME = "oauth_callback"
USER_DATA_URL = "https://www.khanacademy.org/api/v1/user"
EXTRA_DATA = [("user_id", "user_id")]
def METHOD_NAME(self, response):
"""Return user details from Khan Academy account"""
return {
"username": response.get("email"),
"email": response.get("email"),
"fullname": response.get("nickname"),
"first_name": "",
"last_name": "",
"user_id": response.get("user_id"),
}
def user_data(self, access_token, *args, **kwargs):
"""Loads user data from service"""
auth = self.oauth_auth(access_token)
url, _, _ = auth.client.sign(self.USER_DATA_URL)
return self.get_json(url)
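# --- Hedged configuration sketch (not part of the backend) ---
# With python-social-auth the backend is normally enabled via settings; the
# dotted path and setting names below follow the usual conventions but are
# assumptions, and the key/secret values are placeholders:
#     AUTHENTICATION_BACKENDS = (
#         "social_core.backends.khanacademy.KhanAcademyOAuth1",
#         ...
#     )
#     SOCIAL_AUTH_KHANACADEMY_OAUTH1_KEY = "<consumer key>"
#     SOCIAL_AUTH_KHANACADEMY_OAUTH1_SECRET = "<consumer secret>"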
|
4,470 |
iter errors
|
from _typeshed import Incomplete, SupportsKeysAndGetItem
from collections.abc import Callable, Generator, Iterable, Iterator, Mapping
from contextlib import contextmanager
from typing import Any, ClassVar
from typing_extensions import TypeAlias
from ._format import FormatChecker
from ._types import TypeChecker
from ._utils import Unset, URIDict
from .exceptions import ValidationError
# these type aliases do not exist at runtime, they're only defined here in the stub
_JsonObject: TypeAlias = Mapping[str, Any]
_JsonValue: TypeAlias = _JsonObject | list[Any] | str | int | float | bool | None
_ValidatorCallback: TypeAlias = Callable[[Any, Any, _JsonValue, _JsonObject], Iterator[ValidationError]]
_Schema: TypeAlias = Mapping[str, Any]
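# For illustration only (hypothetical, mirroring _ValidatorCallback): a validator
# callback receives (validator, value, instance, schema) and yields errors, e.g.
#   def maximum(validator, maximum, instance, schema) -> Iterator[ValidationError]: ...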
# This class does not exist at runtime. Compatible classes are created at
# runtime by create().
class _Validator:
VALIDATORS: ClassVar[dict[Incomplete, Incomplete]]
META_SCHEMA: ClassVar[dict[Incomplete, Incomplete]]
TYPE_CHECKER: ClassVar[Incomplete]
FORMAT_CHECKER: ClassVar[Incomplete]
@staticmethod
def ID_OF(schema: _Schema) -> str: ...
schema: _Schema
resolver: Incomplete
format_checker: Incomplete
evolve: Incomplete
def __init__(self, schema: _Schema, resolver: Incomplete | None = ..., format_checker: Incomplete | None = ...) -> None: ...
@classmethod
def check_schema(cls, schema: _Schema, format_checker: FormatChecker | Unset = ...) -> None: ...
def METHOD_NAME(self, instance, _schema: _Schema | None = ...) -> Generator[Incomplete, None, None]: ...
def descend(
self, instance, schema: _Schema, path: Incomplete | None = ..., schema_path: Incomplete | None = ...
) -> Generator[Incomplete, None, None]: ...
def validate(self, *args, **kwargs) -> None: ...
def is_type(self, instance, type): ...
def is_valid(self, instance, _schema: _Schema | None = ...) -> bool: ...
def validates(version: str) -> Callable[..., Incomplete]: ...
def create(
meta_schema: _Schema,
validators: Mapping[str, _ValidatorCallback] | tuple[()] = (),
version: Incomplete | None = None,
type_checker: TypeChecker = ...,
format_checker: FormatChecker = ...,
id_of: Callable[[_Schema], str] = ...,
applicable_validators: Callable[[_Schema], Iterable[tuple[str, _ValidatorCallback]]] = ...,
) -> type[_Validator]: ...
def extend(
validator,
validators=(),
version: Incomplete | None = None,
type_checker: Incomplete | None = None,
format_checker: Incomplete | None = None,
): ...
# At runtime these are fields that are assigned the return values of create() calls.
class Draft3Validator(_Validator): ...
class Draft4Validator(_Validator): ...
class Draft6Validator(_Validator): ...
class Draft7Validator(_Validator): ...
class Draft201909Validator(_Validator): ...
class Draft202012Validator(_Validator): ...
_Handler: TypeAlias = Callable[[str], Incomplete]
class RefResolver:
referrer: dict[str, Incomplete]
cache_remote: Incomplete
handlers: dict[str, _Handler]
store: URIDict
def __init__(
self,
base_uri: str,
referrer: dict[str, Incomplete],
store: SupportsKeysAndGetItem[str, str] | Iterable[tuple[str, str]] = ...,
cache_remote: bool = True,
handlers: SupportsKeysAndGetItem[str, _Handler] | Iterable[tuple[str, _Handler]] = (),
urljoin_cache: Incomplete | None = None,
remote_cache: Incomplete | None = None,
) -> None: ...
@classmethod
def from_schema(cls, schema: _Schema, id_of=..., *args, **kwargs): ...
def push_scope(self, scope) -> None: ...
def pop_scope(self) -> None: ...
@property
def resolution_scope(self): ...
@property
def base_uri(self): ...
@contextmanager
def in_scope(self, scope) -> Generator[None, None, None]: ...
@contextmanager
def resolving(self, ref) -> Generator[Incomplete, None, None]: ...
def resolve(self, ref): ...
def resolve_from_url(self, url): ...
def resolve_fragment(self, document, fragment): ...
def resolve_remote(self, uri): ...
def validate(instance: object, schema: _Schema, cls: type[_Validator] | None = None, *args: Any, **kwargs: Any) -> None: ...
def validator_for(schema: _Schema | bool, default=...): ...
|
4,471 |
connect
|
import logging
import os
import time
from pathlib import Path
from logging.handlers import BaseRotatingHandler
import zmq
from common.logging_extra import SwagLogger, SwagFormatter, SwagLogFileFormatter
from system.hardware import PC
if PC:
SWAGLOG_DIR = os.path.join(str(Path.home()), ".comma", "log")
else:
SWAGLOG_DIR = "/data/log/"
def get_file_handler():
Path(SWAGLOG_DIR).mkdir(parents=True, exist_ok=True)
base_filename = os.path.join(SWAGLOG_DIR, "swaglog")
handler = SwaglogRotatingFileHandler(base_filename)
return handler
class SwaglogRotatingFileHandler(BaseRotatingHandler):
def __init__(self, base_filename, interval=60, max_bytes=1024*256, backup_count=2500, encoding=None):
super().__init__(base_filename, mode="a", encoding=encoding, delay=True)
self.base_filename = base_filename
self.interval = interval # seconds
self.max_bytes = max_bytes
self.backup_count = backup_count
self.log_files = self.get_existing_logfiles()
log_indexes = [f.split(".")[-1] for f in self.log_files]
self.last_file_idx = max([int(i) for i in log_indexes if i.isdigit()] or [-1])
self.last_rollover = None
self.doRollover()
def _open(self):
self.last_rollover = time.monotonic()
self.last_file_idx += 1
next_filename = f"{self.base_filename}.{self.last_file_idx:010}"
stream = open(next_filename, self.mode, encoding=self.encoding)
self.log_files.insert(0, next_filename)
return stream
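# Illustrative example (not part of the original code): with base_filename
# "/data/log/swaglog", bumping last_file_idx to 3 opens
# "/data/log/swaglog.0000000003" (index zero-padded to 10 digits).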
def get_existing_logfiles(self):
log_files = list()
base_dir = os.path.dirname(self.base_filename)
for fn in os.listdir(base_dir):
fp = os.path.join(base_dir, fn)
if fp.startswith(self.base_filename) and os.path.isfile(fp):
log_files.append(fp)
return sorted(log_files)
def shouldRollover(self, record):
size_exceeded = self.max_bytes > 0 and self.stream.tell() >= self.max_bytes
time_exceeded = self.interval > 0 and self.last_rollover + self.interval <= time.monotonic()
return size_exceeded or time_exceeded
def doRollover(self):
if self.stream:
self.stream.close()
self.stream = self._open()
if self.backup_count > 0:
while len(self.log_files) > self.backup_count:
to_delete = self.log_files.pop()
if os.path.exists(to_delete): # just being safe, should always exist
os.remove(to_delete)
class UnixDomainSocketHandler(logging.Handler):
def __init__(self, formatter):
logging.Handler.__init__(self)
self.setFormatter(formatter)
self.pid = None
self.zctx = None
self.sock = None
def __del__(self):
if self.sock is not None:
self.sock.close()
if self.zctx is not None:
self.zctx.term()
def METHOD_NAME(self):
self.zctx = zmq.Context()
self.sock = self.zctx.socket(zmq.PUSH)
self.sock.setsockopt(zmq.LINGER, 10)
self.sock.METHOD_NAME("ipc:///tmp/logmessage")
self.pid = os.getpid()
def emit(self, record):
if os.getpid() != self.pid:
self.METHOD_NAME()
msg = self.format(record).rstrip('\n')
# print("SEND".format(repr(msg)))
try:
s = chr(record.levelno)+msg
self.sock.send(s.encode('utf8'), zmq.NOBLOCK)
except zmq.error.Again:
# drop :/
pass
def add_file_handler(log):
"""
Function to add the file log handler to swaglog.
This can be used to store logs when logmessaged is not running.
"""
handler = get_file_handler()
handler.setFormatter(SwagLogFileFormatter(log))
log.addHandler(handler)
cloudlog = log = SwagLogger()
log.setLevel(logging.DEBUG)
outhandler = logging.StreamHandler()
print_level = os.environ.get('LOGPRINT', 'warning')
if print_level == 'debug':
outhandler.setLevel(logging.DEBUG)
elif print_level == 'info':
outhandler.setLevel(logging.INFO)
elif print_level == 'warning':
outhandler.setLevel(logging.WARNING)
log.addHandler(outhandler)
# logs are sent through IPC before writing to disk to prevent disk I/O blocking
log.addHandler(UnixDomainSocketHandler(SwagFormatter(log)))
|
4,472 |
id
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetWorkspaceManagerMemberResult',
'AwaitableGetWorkspaceManagerMemberResult',
'get_workspace_manager_member',
'get_workspace_manager_member_output',
]
@pulumi.output_type
class GetWorkspaceManagerMemberResult:
"""
The workspace manager member
"""
def __init__(__self__, etag=None, METHOD_NAME=None, name=None, system_data=None, target_workspace_id=None, target_workspace_tenant_id=None, type=None):
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", METHOD_NAME)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if target_workspace_id and not isinstance(target_workspace_id, str):
raise TypeError("Expected argument 'target_workspace_id' to be a str")
pulumi.set(__self__, "target_workspace_id", target_workspace_id)
if target_workspace_tenant_id and not isinstance(target_workspace_tenant_id, str):
raise TypeError("Expected argument 'target_workspace_tenant_id' to be a str")
pulumi.set(__self__, "target_workspace_tenant_id", target_workspace_tenant_id)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def etag(self) -> str:
"""
Resource Etag.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Azure Resource Manager metadata containing createdBy and modifiedBy information.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter(name="targetWorkspaceId")
def target_workspace_id(self) -> str:
"""
Fully qualified resource ID of the target Sentinel workspace joining the given Sentinel workspace manager
"""
return pulumi.get(self, "target_workspace_id")
@property
@pulumi.getter(name="targetWorkspaceTenantId")
def target_workspace_tenant_id(self) -> str:
"""
Tenant id of the target Sentinel workspace joining the given Sentinel workspace manager
"""
return pulumi.get(self, "target_workspace_tenant_id")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetWorkspaceManagerMemberResult(GetWorkspaceManagerMemberResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetWorkspaceManagerMemberResult(
etag=self.etag,
METHOD_NAME=self.METHOD_NAME,
name=self.name,
system_data=self.system_data,
target_workspace_id=self.target_workspace_id,
target_workspace_tenant_id=self.target_workspace_tenant_id,
type=self.type)
def get_workspace_manager_member(resource_group_name: Optional[str] = None,
workspace_manager_member_name: Optional[str] = None,
workspace_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWorkspaceManagerMemberResult:
"""
Gets a workspace manager member
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str workspace_manager_member_name: The name of the workspace manager member
:param str workspace_name: The name of the workspace.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['workspaceManagerMemberName'] = workspace_manager_member_name
__args__['workspaceName'] = workspace_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:securityinsights/v20230601preview:getWorkspaceManagerMember', __args__, opts=opts, typ=GetWorkspaceManagerMemberResult).value
return AwaitableGetWorkspaceManagerMemberResult(
etag=pulumi.get(__ret__, 'etag'),
METHOD_NAME=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
system_data=pulumi.get(__ret__, 'system_data'),
target_workspace_id=pulumi.get(__ret__, 'target_workspace_id'),
target_workspace_tenant_id=pulumi.get(__ret__, 'target_workspace_tenant_id'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_workspace_manager_member)
def get_workspace_manager_member_output(resource_group_name: Optional[pulumi.Input[str]] = None,
workspace_manager_member_name: Optional[pulumi.Input[str]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetWorkspaceManagerMemberResult]:
"""
Gets a workspace manager member
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str workspace_manager_member_name: The name of the workspace manager member
:param str workspace_name: The name of the workspace.
"""
...
|
4,473 |
test plimit factorizations
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# CREATED:2022-08-09 06:34:21 by Brian McFee <[email protected]>
"""Unit tests for just intonation and friends"""
import os
import sys
try:
os.environ.pop("LIBROSA_CACHE_DIR")
except KeyError:
pass
import warnings
import numpy as np
import pytest
import librosa
def test_pythagorean():
ivals = librosa.pythagorean_intervals(bins_per_octave=6, sort=False)
assert np.allclose(ivals, [1, 3 / 2, 9 / 8, 27 / 16, 81 / 64, 243 / 128])
ivals2 = librosa.pythagorean_intervals(bins_per_octave=6, sort=True)
assert np.allclose(sorted(ivals), ivals2)
def test_plimit3():
intervals = librosa.plimit_intervals(primes=[3], bins_per_octave=24, sort=False)
intervals_s = librosa.plimit_intervals(primes=[3], bins_per_octave=24, sort=True)
assert np.allclose(sorted(intervals), intervals_s)
assert np.allclose(
intervals,
[
1, # These are just alternating powers of 3/2
3 / 2,
4 / 3,
9 / 8,
16 / 9,
32 / 27,
27 / 16,
128 / 81,
81 / 64,
256 / 243,
243 / 128,
729 / 512,
2187 / 2048,
6561 / 4096,
1024 / 729,
4096 / 2187,
8192 / 6561,
7625 / 6347,
10037 / 6029,
13654 / 7577,
7577 / 6827,
12457 / 9217,
2006 / 1979,
12497 / 8445,
],
)
def test_plimit5():
intervals = librosa.plimit_intervals(primes=[3, 5], bins_per_octave=24, sort=False)
intervals_s = librosa.plimit_intervals(primes=[3, 5], bins_per_octave=24, sort=True)
assert np.allclose(sorted(intervals), intervals_s)
assert np.allclose(
intervals,
[
1, # Unison
3 / 2, # P5
4 / 3, # P4
9 / 8, # major wholetone
5 / 4, # Pt M3
15 / 8, # Pt M7
5 / 3, # Pt M6
45 / 32, # Pt TT
8 / 5, # Pt m6
6 / 5, # Pt m3
16 / 15, # major diatonic semitone
9 / 5, # Pt m7
16 / 9, # Py m7
27 / 16, # Py M6
10 / 9, # minor wholetone
64 / 45, # Pt dim5
32 / 27, # Py m3
25 / 16, # Pt aug5
75 / 64, # Pt aug2
25 / 24, # minor chromatic semitone
135 / 128, # major limma
225 / 128, # Pt aug6
40 / 27, # Pt narrow 5
25 / 18, # Rameau's tritone
],
)
def test_plimit7():
intervals = librosa.plimit_intervals(
primes=[3, 5, 7], bins_per_octave=24, sort=False
)
intervals_s = librosa.plimit_intervals(
primes=[3, 5, 7], bins_per_octave=24, sort=True
)
assert np.allclose(sorted(intervals), intervals_s)
assert np.allclose(
intervals,
[
1, # Unison
3 / 2, # P5
4 / 3, # P4
9 / 8, # major whole tone
5 / 4, # Pt M3
15 / 8, # Pt M7
5 / 3, # Pt M6
45 / 32, # Pt TT
8 / 5, # Pt m6
6 / 5, # Pt m3
16 / 15, # major diatonic semitone
9 / 5, # Pt m7
16 / 9, # Py m7
27 / 16, # Py M6
7 / 4, # N7
21 / 16, # septimal narrow 4
8 / 7, # septimal whole tone
12 / 7, # septimal M7
9 / 7, # septimal M3
32 / 21, # septimal wide fifth
7 / 6, # septimal m3
63 / 32, # septimal narrow octave
35 / 32, # septimal N2
105 / 64, # septimal N6
],
)
@pytest.mark.parametrize("n_bins", [6, 12, 24, 30])
@pytest.mark.parametrize(
"intervals", ["equal", "pythagorean", "ji3", "ji5", "ji7", [1, 4 / 3, 3 / 2, 5 / 4]]
)
@pytest.mark.parametrize("bins_per_octave", [6, 12, 15])
def test_interval_frequencies(n_bins, intervals, bins_per_octave):
freqs = librosa.interval_frequencies(
n_bins, fmin=10, intervals=intervals, bins_per_octave=bins_per_octave
)
assert len(freqs) == n_bins
assert min(freqs) == 10
@pytest.mark.parametrize("intervals", ["pythagorean", "ji3", "ji5", "ji7", [1, 3/2, 4/3, 5/4]])
def test_intervals_sorted(intervals):
freqs = librosa.interval_frequencies(12, fmin=1, intervals=intervals, sort=False)
freqs_s = librosa.interval_frequencies(12, fmin=1, intervals=intervals, sort=True)
assert not np.allclose(freqs, freqs_s)
assert np.allclose(sorted(freqs), freqs_s)
@pytest.mark.parametrize("sort", [False, True])
def test_pythagorean_factorizations(sort):
intervals = librosa.pythagorean_intervals(bins_per_octave=20, sort=sort, return_factors=False)
factors = librosa.pythagorean_intervals(bins_per_octave=20, sort=sort, return_factors=True)
assert len(intervals) == len(factors)
for ival, facts in zip(intervals, factors):
value = 0.
for prime in facts:
value += facts[prime] * np.log2(prime)
assert np.isclose(ival, np.power(2, value))
@pytest.mark.parametrize("sort", [False, True])
@pytest.mark.parametrize("primes", [[3], [3, 5], [3, 5, 7]])
def METHOD_NAME(sort, primes):
intervals = librosa.plimit_intervals(primes=primes, bins_per_octave=20, sort=sort, return_factors=False)
factors = librosa.plimit_intervals(primes=primes, bins_per_octave=20, sort=sort, return_factors=True)
assert len(intervals) == len(factors)
for ival, facts in zip(intervals, factors):
value = 0.
for prime in facts:
value += facts[prime] * np.log2(prime)
assert np.isclose(ival, np.power(2, value))
|
4,474 |
test timezone property
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the knowledge base."""
import unittest
from plaso.containers import artifacts
from plaso.engine import knowledge_base
from tests import test_lib as shared_test_lib
class KnowledgeBaseTest(shared_test_lib.BaseTestCase):
"""Tests for the knowledge base."""
# pylint: disable=protected-access
def testCodepageProperty(self):
"""Tests the codepage property."""
knowledge_base_object = knowledge_base.KnowledgeBase()
self.assertEqual(knowledge_base_object.codepage, 'cp1252')
def testOperatingSystemProperty(self):
"""Tests the operating_system property."""
knowledge_base_object = knowledge_base.KnowledgeBase()
operating_system = knowledge_base_object.GetValue('operating_system')
self.assertIsNone(operating_system)
knowledge_base_object.SetValue('operating_system', 'Windows')
operating_system = knowledge_base_object.GetValue('operating_system')
self.assertEqual(operating_system, 'Windows')
def METHOD_NAME(self):
"""Tests the timezone property."""
knowledge_base_object = knowledge_base.KnowledgeBase()
self.assertEqual(knowledge_base_object.timezone.zone, 'UTC')
def testAddEnvironmentVariable(self):
"""Tests the AddEnvironmentVariable function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
environment_variable = artifacts.EnvironmentVariableArtifact(
case_sensitive=False, name='SystemRoot', value='C:\\Windows')
knowledge_base_object.AddEnvironmentVariable(environment_variable)
with self.assertRaises(KeyError):
knowledge_base_object.AddEnvironmentVariable(environment_variable)
def testGetEnvironmentVariable(self):
"""Tests the GetEnvironmentVariable functions."""
knowledge_base_object = knowledge_base.KnowledgeBase()
environment_variable = artifacts.EnvironmentVariableArtifact(
case_sensitive=False, name='SystemRoot', value='C:\\Windows')
knowledge_base_object.AddEnvironmentVariable(environment_variable)
test_environment_variable = knowledge_base_object.GetEnvironmentVariable(
'SystemRoot')
self.assertIsNotNone(test_environment_variable)
test_environment_variable = knowledge_base_object.GetEnvironmentVariable(
'sYsTeMrOoT')
self.assertIsNotNone(test_environment_variable)
test_environment_variable = knowledge_base_object.GetEnvironmentVariable(
'Bogus')
self.assertIsNone(test_environment_variable)
def testGetEnvironmentVariables(self):
"""Tests the GetEnvironmentVariables function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
environment_variable = artifacts.EnvironmentVariableArtifact(
case_sensitive=False, name='SystemRoot', value='C:\\Windows')
knowledge_base_object.AddEnvironmentVariable(environment_variable)
environment_variable = artifacts.EnvironmentVariableArtifact(
case_sensitive=False, name='WinDir', value='C:\\Windows')
knowledge_base_object.AddEnvironmentVariable(environment_variable)
environment_variables = knowledge_base_object.GetEnvironmentVariables()
self.assertEqual(len(environment_variables), 2)
def testGetHostname(self):
"""Tests the GetHostname function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
hostname = knowledge_base_object.GetHostname()
self.assertEqual(hostname, '')
def testGetSetValue(self):
"""Tests the Get and SetValue functions."""
knowledge_base_object = knowledge_base.KnowledgeBase()
expected_value = 'test value'
knowledge_base_object.SetValue('Test', expected_value)
value = knowledge_base_object.GetValue('Test')
self.assertEqual(value, expected_value)
value = knowledge_base_object.GetValue('tEsT')
self.assertEqual(value, expected_value)
value = knowledge_base_object.GetValue('Bogus')
self.assertIsNone(value)
def testReadSystemConfigurationArtifact(self):
"""Tests the ReadSystemConfigurationArtifact function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
system_configuration = artifacts.SystemConfigurationArtifact()
system_configuration.hostname = artifacts.HostnameArtifact(
name='myhost.mydomain')
knowledge_base_object.ReadSystemConfigurationArtifact(system_configuration)
hostname = knowledge_base_object.GetHostname()
self.assertEqual(hostname, 'myhost.mydomain')
def testSetActiveSession(self):
"""Tests the SetActiveSession function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
knowledge_base_object.SetActiveSession('ddda05bedf324cbd99fa8c24b8a0037a')
self.assertEqual(
knowledge_base_object._active_session,
'ddda05bedf324cbd99fa8c24b8a0037a')
knowledge_base_object.SetActiveSession(
knowledge_base_object._DEFAULT_ACTIVE_SESSION)
self.assertEqual(
knowledge_base_object._active_session,
knowledge_base_object._DEFAULT_ACTIVE_SESSION)
def testSetCodepage(self):
"""Tests the SetCodepage function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
knowledge_base_object.SetCodepage('cp1252')
with self.assertRaises(ValueError):
knowledge_base_object.SetCodepage('bogus')
def testSetHostname(self):
"""Tests the SetHostname function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
hostname_artifact = artifacts.HostnameArtifact(name='myhost.mydomain')
knowledge_base_object.SetHostname(hostname_artifact)
def testSetTimeZone(self):
"""Tests the SetTimeZone function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
knowledge_base_object.SetTimeZone('Europe/Zurich')
self.assertEqual(knowledge_base_object._time_zone.zone, 'Europe/Zurich')
with self.assertRaises(ValueError):
knowledge_base_object.SetTimeZone('Bogus')
if __name__ == '__main__':
unittest.main()
|
4,475 |
der encode tlv
|
import base64
import struct
from typing import Any, Dict, List, Optional
# This file is adapted from samples/shellinabox/ssh-krb-wrapper in
# https://github.com/davidben/webathena, which has the following
# license:
#
# Copyright (c) 2013 David Benjamin and Alan Huang
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Some DER encoding stuff. Bleh. This is because the ccache contains a
# DER-encoded krb5 Ticket structure, whereas Webathena deserializes
# into the various fields. Re-encoding in the client would be easy as
# there is already an ASN.1 implementation, but in the interest of
# limiting MIT Kerberos's exposure to malformed ccaches, encode it
# ourselves. To that end, here's the laziest DER encoder ever.
def der_encode_length(length: int) -> bytes:
if length <= 127:
return struct.pack("!B", length)
out = b""
while length > 0:
out = struct.pack("!B", length & 0xFF) + out
length >>= 8
out = struct.pack("!B", len(out) | 0x80) + out
return out
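# Illustrative examples (not part of the original module): lengths up to 127
# use the short form, larger values get a length-of-length prefix, e.g.
#   der_encode_length(5)   -> b"\x05"
#   der_encode_length(200) -> b"\x81\xc8"   (0x81: one length octet follows)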
def METHOD_NAME(tag: int, value: bytes) -> bytes:
return struct.pack("!B", tag) + der_encode_length(len(value)) + value
def der_encode_integer_value(val: int) -> bytes:
if not isinstance(val, int):
raise TypeError("int")
# base 256, MSB first, two's complement, minimum number of octets
# necessary. This has a number of annoying edge cases:
# * 0 and -1 are 0x00 and 0xFF, not the empty string.
# * 255 is 0x00 0xFF, not 0xFF
# * -256 is 0xFF 0x00, not 0x00
# Special-case to avoid an empty encoding.
if val == 0:
return b"\x00"
sign = 0 # What you would get if you sign-extended the current high bit.
out = b""
# We can stop once sign-extension matches the remaining value.
while val != sign:
byte = val & 0xFF
out = struct.pack("!B", byte) + out
sign = -1 if byte & 0x80 == 0x80 else 0
val >>= 8
return out
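# Worked examples for the edge cases above (illustrative only):
#   der_encode_integer_value(0)    -> b"\x00"
#   der_encode_integer_value(255)  -> b"\x00\xff"
#   der_encode_integer_value(-256) -> b"\xff\x00"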
def der_encode_integer(val: int) -> bytes:
return METHOD_NAME(0x02, der_encode_integer_value(val))
def der_encode_int32(val: int) -> bytes:
if val < -2147483648 or val > 2147483647:
raise ValueError("Bad value")
return der_encode_integer(val)
def der_encode_uint32(val: int) -> bytes:
if val < 0 or val > 4294967295:
raise ValueError("Bad value")
return der_encode_integer(val)
def der_encode_string(val: str) -> bytes:
if not isinstance(val, str):
raise TypeError("unicode")
return METHOD_NAME(0x1B, val.encode())
def der_encode_octet_string(val: bytes) -> bytes:
if not isinstance(val, bytes):
raise TypeError("bytes")
return METHOD_NAME(0x04, val)
def der_encode_sequence(tlvs: List[Optional[bytes]], tagged: bool = True) -> bytes:
body = []
for i, tlv in enumerate(tlvs):
# Missing optional elements represented as None.
if tlv is None:
continue
if tagged:
# Assume kerberos-style explicit tagging of components.
tlv = METHOD_NAME(0xA0 | i, tlv)
body.append(tlv)
return METHOD_NAME(0x30, b"".join(body))
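# Example (illustrative): with tagged=True each present element is wrapped in a
# context-specific tag 0xA0 | index, so
#   der_encode_sequence([der_encode_integer(5), None, der_encode_string("x")])
# emits tags 0xA0 and 0xA2 (index 1 is skipped) inside the outer SEQUENCE (0x30).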
def der_encode_ticket(tkt: Dict[str, Any]) -> bytes:
return METHOD_NAME(
0x61, # Ticket
der_encode_sequence(
[
der_encode_integer(5), # tktVno
der_encode_string(tkt["realm"]),
der_encode_sequence( # PrincipalName
[
der_encode_int32(tkt["sname"]["nameType"]),
der_encode_sequence(
[der_encode_string(c) for c in tkt["sname"]["nameString"]], tagged=False
),
]
),
der_encode_sequence( # EncryptedData
[
der_encode_int32(tkt["encPart"]["etype"]),
(
der_encode_uint32(tkt["encPart"]["kvno"])
if "kvno" in tkt["encPart"]
else None
),
der_encode_octet_string(base64.b64decode(tkt["encPart"]["cipher"])),
]
),
]
),
)
# Kerberos ccache writing code. Using format documentation from here:
# https://www.gnu.org/software/shishi/manual/html_node/The-Credential-Cache-Binary-File-Format.html
def ccache_counted_octet_string(data: bytes) -> bytes:
if not isinstance(data, bytes):
raise TypeError("bytes")
return struct.pack("!I", len(data)) + data
def ccache_principal(name: Dict[str, str], realm: str) -> bytes:
header = struct.pack("!II", name["nameType"], len(name["nameString"]))
return (
header
+ ccache_counted_octet_string(realm.encode())
+ b"".join(ccache_counted_octet_string(c.encode()) for c in name["nameString"])
)
def ccache_key(key: Dict[str, str]) -> bytes:
return struct.pack("!H", key["keytype"]) + ccache_counted_octet_string(
base64.b64decode(key["keyvalue"])
)
def flags_to_uint32(flags: List[str]) -> int:
ret = 0
for i, v in enumerate(flags):
if v:
ret |= 1 << (31 - i)
return ret
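# Example (illustrative): flags are packed MSB-first, so
#   flags_to_uint32([True, False, True]) -> 0xA0000000
# (bit 31 set for index 0, bit 29 set for index 2).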
def ccache_credential(cred: Dict[str, Any]) -> bytes:
out = ccache_principal(cred["cname"], cred["crealm"])
out += ccache_principal(cred["sname"], cred["srealm"])
out += ccache_key(cred["key"])
out += struct.pack(
"!IIII",
cred["authtime"] // 1000,
cred.get("starttime", cred["authtime"]) // 1000,
cred["endtime"] // 1000,
cred.get("renewTill", 0) // 1000,
)
out += struct.pack("!B", 0)
out += struct.pack("!I", flags_to_uint32(cred["flags"]))
# TODO: Care about addrs or authdata? Former is "caddr" key.
out += struct.pack("!II", 0, 0)
out += ccache_counted_octet_string(der_encode_ticket(cred["ticket"]))
# No second_ticket.
out += ccache_counted_octet_string(b"")
return out
def make_ccache(cred: Dict[str, Any]) -> bytes:
# Do we need a DeltaTime header? The ccache I get just puts zero
# in there, so do the same.
out = struct.pack(
"!HHHHII",
0x0504, # file_format_version
12, # headerlen
1, # tag (DeltaTime)
8, # taglen (two uint32_ts)
0,
0, # time_offset / usec_offset
)
out += ccache_principal(cred["cname"], cred["crealm"])
out += ccache_credential(cred)
return out
|
4,476 |
test invalid casting dtype
|
# Copyright 2022 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pytest
from legate.core import LEGATE_MAX_DIM
from utils.contractions import (
check_default,
check_permutations,
check_shapes,
check_types,
)
import cunumeric as num
from cunumeric.utils import matmul_modes
@pytest.mark.parametrize("a_ndim", range(1, LEGATE_MAX_DIM + 1))
@pytest.mark.parametrize("b_ndim", range(1, LEGATE_MAX_DIM + 1))
def test(a_ndim, b_ndim):
name = f"matmul({a_ndim} x {b_ndim})"
modes = matmul_modes(a_ndim, b_ndim)
def operation(lib, *args, **kwargs):
return lib.matmul(*args, **kwargs)
check_default(name, modes, operation)
check_permutations(name, modes, operation)
check_shapes(name, modes, operation)
if a_ndim <= 2 and b_ndim <= 2:
check_types(name, modes, operation)
class TestMatmulErrors:
@pytest.mark.parametrize(
"shapesAB",
(
((2, 4), (2, 3)),
((3, 2, 4), (2, 4, 3)),
((3, 2, 4), (3, 2, 3)),
),
ids=lambda shapesAB: f"(shapesAB={shapesAB})",
)
def test_invalid_shape_dim_greater_than_one(self, shapesAB):
expected_exc = ValueError
shapeA, shapeB = shapesAB
A_np = np.ones(shapeA)
B_np = np.ones(shapeB)
A_num = num.ones(shapeA)
B_num = num.ones(shapeB)
with pytest.raises(expected_exc):
np.matmul(A_np, B_np)
with pytest.raises(expected_exc):
num.matmul(A_num, B_num)
@pytest.mark.parametrize(
"shapesAB",
(
((3, 2), (3,)),
pytest.param(((4, 1), (3,)), marks=pytest.mark.xfail),
((1, 4), (3,)),
((3,), (2, 3)),
((3,), (4, 1)),
pytest.param(((3,), (1, 4)), marks=pytest.mark.xfail),
((3,), (2,)),
pytest.param(((3,), (1,)), marks=pytest.mark.xfail),
),
ids=lambda shapesAB: f"(shapesAB={shapesAB})",
)
def test_invalid_shape_with_vector(self, shapesAB):
# For ((4, 1), (3,)), ((3,), (1, 4)), ((3,), (1,)),
# In Numpy, raise ValueError
# In cuNumeric, broadcast 1 to 3 and pass
expected_exc = ValueError
shapeA, shapeB = shapesAB
A_np = np.ones(shapeA)
B_np = np.ones(shapeB)
A_num = num.ones(shapeA)
B_num = num.ones(shapeB)
with pytest.raises(expected_exc):
np.matmul(A_np, B_np)
with pytest.raises(expected_exc):
num.matmul(A_num, B_num)
def test_invalid_shape_with_scalar(self):
expected_exc = ValueError
with pytest.raises(expected_exc):
np.matmul(3, 3)
with pytest.raises(expected_exc):
num.matmul(3, 3)
with pytest.raises(expected_exc):
np.matmul(3, np.ones((1,)))
with pytest.raises(expected_exc):
num.matmul(3, num.ones((1,)))
with pytest.raises(expected_exc):
np.matmul(np.ones((1,)), 3)
with pytest.raises(expected_exc):
num.matmul(num.ones((1,)), 3)
with pytest.raises(expected_exc):
np.matmul(3, np.ones((1, 1)))
with pytest.raises(expected_exc):
num.matmul(3, num.ones((1, 1)))
with pytest.raises(expected_exc):
np.matmul(np.ones((1, 1)), 3)
with pytest.raises(expected_exc):
num.matmul(num.ones((1, 1)), 3)
@pytest.mark.parametrize(
"shape", ((2, 3), (3, 4, 3)), ids=lambda shape: f"(shape={shape})"
)
def test_out_invalid_shape(self, shape):
expected_exc = ValueError
A_np = np.ones((3, 2, 4))
B_np = np.ones((3, 4, 3))
out_np = np.zeros(shape)
A_num = num.ones((3, 2, 4))
B_num = num.ones((3, 4, 3))
out_num = num.zeros(shape)
with pytest.raises(expected_exc):
np.matmul(A_np, B_np, out=out_np)
with pytest.raises(expected_exc):
num.matmul(A_num, B_num, out=out_num)
@pytest.mark.xfail
def test_out_invalid_shape_DIVERGENCE(self):
# In Numpy, PASS
# In cuNumeric, raise ValueError
A = num.ones((3, 2, 4))
B = num.ones((3, 4, 3))
shape = (3, 3, 2, 3)
out = num.zeros(shape)
num.matmul(A, B, out=out)
@pytest.mark.parametrize(
("dtype", "out_dtype", "casting"),
((None, np.int64, "same_kind"), (float, str, "safe")),
ids=("direct", "intermediate"),
)
def test_out_invalid_dtype(self, dtype, out_dtype, casting):
expected_exc = TypeError
A_np = np.ones((3, 2, 4))
B_np = np.ones((3, 4, 3))
A_num = num.ones((3, 2, 4))
B_num = num.ones((3, 4, 3))
out_np = np.zeros((3, 2, 3), dtype=out_dtype)
out_num = num.zeros((3, 2, 3), dtype=out_dtype)
with pytest.raises(expected_exc):
np.matmul(A_np, B_np, dtype=dtype, out=out_np, casting=casting)
with pytest.raises(expected_exc):
num.matmul(A_num, B_num, dtype=dtype, out=out_num, casting=casting)
@pytest.mark.parametrize(
"casting_dtype",
(
("no", np.float32),
("equiv", np.float32),
("safe", np.float32),
("same_kind", np.int64),
),
ids=lambda casting_dtype: f"(casting_dtype={casting_dtype})",
)
def METHOD_NAME(self, casting_dtype):
expected_exc = TypeError
casting, dtype = casting_dtype
A_np = np.ones((2, 4))
B_np = np.ones((4, 3))
A_num = num.ones((2, 4))
B_num = num.ones((4, 3))
with pytest.raises(expected_exc):
np.matmul(A_np, B_np, casting=casting, dtype=dtype)
with pytest.raises(expected_exc):
num.matmul(A_num, B_num, casting=casting, dtype=dtype)
@pytest.mark.parametrize(
"dtype", (str, pytest.param(float, marks=pytest.mark.xfail)), ids=str
)
def test_invalid_casting(self, dtype):
expected_exc = ValueError
casting = "unknown"
A_np = np.ones((2, 4))
B_np = np.ones((4, 3), dtype=dtype)
A_num = num.ones((2, 4))
B_num = num.ones((4, 3), dtype=dtype)
# In Numpy, raise ValueError
with pytest.raises(expected_exc):
np.matmul(A_np, B_np, casting=casting)
# cuNumeric does not check casting when A and B are of the same dtype
with pytest.raises(expected_exc):
num.matmul(A_num, B_num, casting=casting)
if __name__ == "__main__":
import sys
sys.exit(pytest.main(sys.argv))
|
4,477 |
test user preference over browser
|
"""
Unit tests for janeway core middleware
"""
from django.contrib.auth.models import AnonymousUser
from django.shortcuts import redirect
from django.test import TestCase, override_settings
from django.test.client import RequestFactory
from django.urls import reverse
from core.middleware import (
SiteSettingsMiddleware,
TimezoneMiddleware,
BaseMiddleware
)
from core.models import Account
from journal.tests.utils import make_test_journal
from press.models import Press
from utils.testing import helpers
class TestSiteMiddleware(TestCase):
def setUp(self):
journal_kwargs = dict(
code="test",
domain="journal.org"
)
press_kwargs = dict(
domain="press.org",
)
self.middleware = SiteSettingsMiddleware(BaseMiddleware)
self.request_factory = RequestFactory()
self.journal = make_test_journal(**journal_kwargs)
self.press = Press(**press_kwargs)
self.press.save()
@override_settings(URL_CONFIG="path")
def test_journal_site_in_path_mode(self):
#expect
expected_path_info = ("/")
expected_journal = self.journal
expected_press = self.press
expected_site_type = expected_journal
#do
request = self.request_factory.get("http://press.org/test/")
_response = self.middleware.process_request(request)
#assert
self.assertEqual(expected_journal, request.journal)
self.assertEqual(expected_press, request.press)
self.assertEqual(expected_site_type, request.site_type)
@override_settings(URL_CONFIG="domain", DEBUG=False)
def test_journal_site_with_path_in_domain_mode(self):
# expect
expected_journal = self.journal
# do
request = self.request_factory.get("http://press.org/test/")
_ = self.middleware.process_request(request)
# assert
self.assertEqual(expected_journal, request.journal)
@override_settings(URL_CONFIG="path")
def test_press_site_in_path_mode(self):
#expect
expected_path_info = ("/")
expected_journal = None
expected_press = self.press
expected_site_type = expected_press
#do
request = self.request_factory.get("/", SERVER_NAME="press.org")
_response = self.middleware.process_request(request)
#assert
self.assertEqual(expected_journal, request.journal)
self.assertEqual(expected_press, request.press)
self.assertEqual(expected_site_type, request.site_type)
@override_settings(URL_CONFIG="domain")
def test_journal_site_in_domain_mode(self):
#expect
expected_path_info = ("/")
expected_journal = self.journal
expected_press = self.press
expected_site_type = expected_journal
#do
request = self.request_factory.get("/", SERVER_NAME="journal.org")
_response = self.middleware.process_request(request)
#assert
self.assertEqual(expected_journal, request.journal)
self.assertEqual(expected_press, request.press)
self.assertEqual(expected_site_type, request.site_type)
@override_settings(URL_CONFIG="domain")
def test_press_site_in_domain_mode(self):
#expect
expected_path_info = ("/")
expected_journal = None
expected_press = self.press
expected_site_type = expected_press
#do
request = self.request_factory.get("/", SERVER_NAME="press.org")
_response = self.middleware.process_request(request)
#assert
self.assertEqual(expected_journal, request.journal)
self.assertEqual(expected_press, request.press)
self.assertEqual(expected_site_type, request.site_type)
@override_settings(URL_CONFIG="path")
def test_reverse_in_path_mode(self):
#expect
expected_index_path = "/test/"
#do
request = self.request_factory.get("/test/", SERVER_NAME="press.org")
_response = self.middleware.process_request(request)
response = redirect(reverse("website_index"))
self.assertRedirects(
response,
expected_index_path,
status_code=302,
fetch_redirect_response=False,
)
class TestTimezoneMiddleware(TestCase):
def setUp(self):
journal_kwargs = dict(
code="test",
domain="journal.org"
)
press_kwargs = dict(
domain="press.org",
)
self.middleware = TimezoneMiddleware(BaseMiddleware)
self.request_factory = RequestFactory()
self.journal = make_test_journal(**journal_kwargs)
self.press = Press(**press_kwargs)
self.press.save()
self.regular_user = helpers.create_user("[email protected]")
self.regular_user.is_active = True
self.regular_user.save()
@override_settings(URL_CONFIG="path")
def test_default_case(self):
user = AnonymousUser()
request = self.request_factory.get("/test/", SERVER_NAME="press.org")
request.session = {}
request.user = user
response = self.middleware.process_request(request)
self.assertEqual(request.timezone, None)
def test_user_preference_case(self):
request = self.request_factory.get("/test/", SERVER_NAME="press.org")
request.session = {}
user = Account.objects.get(email='[email protected]')
user.preferred_timezone = "Europe/London"
user.save()
request.user = user
response = self.middleware.process_request(request)
self.assertEqual(request.timezone.zone, user.preferred_timezone)
def test_browser_timezone_case(self):
user = AnonymousUser()
tzname = "Atlantic/Canary"
request = self.request_factory.get("/test/", SERVER_NAME="press.org")
request.session = {}
request.session["janeway_timezone"] = tzname
request.user = user
response = self.middleware.process_request(request)
self.assertEqual(request.timezone.zone, tzname)
def METHOD_NAME(self):
user_timezone = "Europe/Madrid"
browser_timezone = "Atlantic/Canary"
user = Account.objects.get(email='[email protected]')
user.preferred_timezone = user_timezone
user.save()
request = self.request_factory.get("/test/", SERVER_NAME="press.org")
request.session = {}
request.session["janeway_timezone"] = browser_timezone
request.user = user
response = self.middleware.process_request(request)
self.assertEqual(request.timezone.zone, user_timezone)
|
4,478 |
compile string
|
# -*- coding: utf-8 -*-
# Copyright © 2012-2023 Roberto Alsina and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Page compiler plugin for pandoc.
You will need, of course, to install pandoc
"""
import io
import os
import subprocess
from typing import List
from pathlib import Path
from nikola.plugin_categories import PageCompiler
from nikola.utils import req_missing, makedirs, write_metadata
class CompilePandoc(PageCompiler):
"""Compile markups into HTML using pandoc."""
name = "pandoc"
friendly_name = "pandoc"
def set_site(self, site):
"""Set Nikola site."""
self.config_dependencies = [str(site.config['PANDOC_OPTIONS'])]
super().set_site(site)
def _get_pandoc_options(self, source: str) -> List[str]:
"""Obtain pandoc args from config depending on type and file extensions."""
# Union[List[str], Dict[str, List[str]]]
config_options = self.site.config['PANDOC_OPTIONS']
if isinstance(config_options, (list, tuple)):
pandoc_options = list(config_options)
elif isinstance(config_options, dict):
ext = Path(source).suffix
try:
pandoc_options = list(config_options[ext])
except KeyError:
self.logger.warn('Setting PANDOC_OPTIONS to [], because extension {} is not defined in PANDOC_OPTIONS: {}.'.format(ext, config_options))
pandoc_options = []
else:
self.logger.warn('Setting PANDOC_OPTIONS to [], because PANDOC_OPTIONS is expected to be of type Union[List[str], Dict[str, List[str]]] but this is not: {}'.format(config_options))
pandoc_options = []
return pandoc_options
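# Illustrative (hypothetical) config values accepted by this helper: a flat list
# applied to every input, or a dict keyed by file extension, e.g.
#   PANDOC_OPTIONS = ['--standalone']
#   PANDOC_OPTIONS = {'.md': ['--standalone'], '.rst': []}
# Unknown extensions (or an unexpected config type) fall back to [] with a warning.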
def compile(self, source, dest, is_two_file=True, post=None, lang=None):
"""Compile the source file into HTML and save as dest."""
makedirs(os.path.dirname(dest))
try:
subprocess.check_call(['pandoc', '-o', dest, source] + self._get_pandoc_options(source))
with open(dest, 'r', encoding='utf-8-sig') as inf:
output, shortcode_deps = self.site.apply_shortcodes(inf.read())
with open(dest, 'w', encoding='utf-8') as outf:
outf.write(output)
if post is None:
if shortcode_deps:
self.logger.error(
"Cannot save dependencies for post {0} (post unknown)",
source)
else:
post._depfile[dest] += shortcode_deps
except OSError as e:
if e.strerror == 'No such file or directory':
req_missing(['pandoc'], 'build this site (compile with pandoc)', python=False)
def METHOD_NAME(self, data, source_path=None, is_two_file=True, post=None, lang=None):
"""Compile into HTML strings."""
raise ValueError("Pandoc compiler does not support compile_string due to multiple output formats")
def create_post(self, path, **kw):
"""Create a new post."""
content = kw.pop('content', None)
onefile = kw.pop('onefile', False)
# is_page is not used by create_post as of now.
kw.pop('is_page', False)
metadata = {}
metadata.update(self.default_metadata)
metadata.update(kw)
makedirs(os.path.dirname(path))
if not content.endswith('\n'):
content += '\n'
with io.open(path, "w+", encoding="utf8") as fd:
if onefile:
fd.write(write_metadata(metadata, comment_wrap=True, site=self.site, compiler=self))
fd.write(content)
|
4,479 |
test format date in string month year
|
import datetime
import unittest.mock
import dateutil
import pytest
from nikola.nikola import LEGAL_VALUES
from nikola.utils import (
LocaleBorg,
LocaleBorgUninitializedException,
TranslatableSetting,
)
TESLA_BIRTHDAY = datetime.date(1856, 7, 10)
TESLA_BIRTHDAY_DT = datetime.datetime(1856, 7, 10, 12, 34, 56)
DT_EN_US = "July 10, 1856, 12:34:56\u202fPM UTC"
DT_PL = "10 lipca 1856 12:34:56 UTC"
@pytest.mark.parametrize("initial_lang", [None, ""])
def test_initilalize_failure(initial_lang):
with pytest.raises(ValueError):
LocaleBorg.initialize({}, initial_lang)
assert not LocaleBorg.initialized
@pytest.mark.parametrize("initial_lang", ["en", "pl"])
def test_initialize(initial_lang):
LocaleBorg.initialize({}, initial_lang)
assert LocaleBorg.initialized
assert LocaleBorg().current_lang == initial_lang
def test_uninitialized_error():
with pytest.raises(LocaleBorgUninitializedException):
LocaleBorg()
@pytest.mark.parametrize(
"locale, expected_current_lang",
[
("pl", "pl"),
pytest.param(
"xx", "xx", id="fake language"
), # used to ensure any locale can be supported
],
)
def test_set_locale(base_config, locale, expected_current_lang):
LocaleBorg().set_locale(locale)
assert LocaleBorg.initialized
assert LocaleBorg().current_lang == expected_current_lang
def test_set_locale_for_template():
LocaleBorg.initialize({}, "en")
assert LocaleBorg().set_locale("xz") == "" # empty string for template ease of use
def test_format_date_webiso_basic(base_config):
with unittest.mock.patch("babel.dates.format_datetime") as m:
formatted_date = LocaleBorg().formatted_date("webiso", TESLA_BIRTHDAY_DT)
assert formatted_date == "1856-07-10T12:34:56"
m.assert_not_called()
@pytest.mark.parametrize("lang", ["en", "pl"])
def test_format_date_basic(base_config, lang):
LocaleBorg.initialize({}, lang)
formatted_date = LocaleBorg().formatted_date(
"yyyy-MM-dd HH:mm:ss", TESLA_BIRTHDAY_DT
)
assert formatted_date == "1856-07-10 12:34:56"
def test_format_date_long(base_config):
assert LocaleBorg().formatted_date("long", TESLA_BIRTHDAY_DT) == DT_EN_US
assert LocaleBorg().formatted_date("long", TESLA_BIRTHDAY_DT, "en") == DT_EN_US
assert LocaleBorg().formatted_date("long", TESLA_BIRTHDAY_DT, "pl") == DT_PL
LocaleBorg().set_locale("pl")
assert LocaleBorg().formatted_date("long", TESLA_BIRTHDAY_DT) == DT_PL
assert LocaleBorg().formatted_date("long", TESLA_BIRTHDAY_DT, "en") == DT_EN_US
def test_format_date_timezone(base_config):
tesla_150_birthday_dtz = datetime.datetime(
2006, 7, 10, 12, 34, 56, tzinfo=dateutil.tz.gettz("America/New_York")
)
formatted_date = LocaleBorg().formatted_date("long", tesla_150_birthday_dtz)
assert formatted_date == "July 10, 2006, 12:34:56\u202fPM -0400"
nodst = datetime.datetime(
2006, 1, 10, 12, 34, 56, tzinfo=dateutil.tz.gettz("America/New_York")
)
formatted_date = LocaleBorg().formatted_date("long", nodst)
assert formatted_date == "January 10, 2006, 12:34:56\u202fPM -0500"
@pytest.mark.parametrize(
"english_variant, expected_date",
[
pytest.param("en_US", DT_EN_US, id="US"),
pytest.param("en_GB", "10 July 1856, 12:34:56 UTC", id="GB"),
],
)
def test_format_date_locale_variants(english_variant, expected_date):
LocaleBorg.initialize({"en": english_variant}, "en")
assert LocaleBorg().formatted_date("long", TESLA_BIRTHDAY_DT, "en") == expected_date
@pytest.mark.parametrize(
"lang, expected_string", [("en", "en July"), ("pl", "lipca pl")]
)
def test_format_date_translatablesetting(base_config, lang, expected_string):
df = TranslatableSetting(
"DATE_FORMAT", {"en": "'en' MMMM", "pl": "MMMM 'pl'"}, {"en": "", "pl": ""}
)
assert LocaleBorg().formatted_date(df, TESLA_BIRTHDAY_DT, lang) == expected_string
@pytest.mark.parametrize(
"lang, expected_string",
[
pytest.param(None, "Foo July Bar", id="default"),
pytest.param("pl", "Foo lipiec Bar", id="pl"),
],
)
def test_format_date_in_string_month(base_config, lang, expected_string):
formatted_date = LocaleBorg().format_date_in_string(
"Foo {month} Bar", TESLA_BIRTHDAY, lang
)
assert formatted_date == expected_string
@pytest.mark.parametrize(
"lang, expected_string",
[
pytest.param(None, "Foo July 1856 Bar", id="default"),
pytest.param("pl", "Foo lipiec 1856 Bar", id="pl"),
],
)
def METHOD_NAME(base_config, lang, expected_string):
formatted_date = LocaleBorg().format_date_in_string(
"Foo {month_year} Bar", TESLA_BIRTHDAY, lang
)
assert formatted_date == expected_string
@pytest.mark.parametrize(
"lang, expected_string",
[
pytest.param(None, "Foo July 10, 1856 Bar", id="default"),
pytest.param("pl", "Foo 10 lipca 1856 Bar", id="pl"),
],
)
def test_format_date_in_string_month_day_year(base_config, lang, expected_string):
formatted_date = LocaleBorg().format_date_in_string(
"Foo {month_day_year} Bar", TESLA_BIRTHDAY, lang
)
assert formatted_date == expected_string
@pytest.mark.parametrize(
"lang, expected_string",
[
pytest.param(None, "Foo 10 July 1856 Bar", id="default"),
pytest.param("pl", "Foo 10 lipca 1856 Bar", id="pl"),
],
)
def test_format_date_in_string_month_day_year_gb(lang, expected_string):
LocaleBorg.initialize({"en": "en_GB"}, "en")
formatted_date = LocaleBorg().format_date_in_string(
"Foo {month_day_year} Bar", TESLA_BIRTHDAY, lang
)
assert formatted_date == expected_string
@pytest.mark.parametrize(
"message, expected_string",
[
("Foo {month:'miesiąca' MMMM} Bar", "Foo miesiąca lipca Bar"),
("Foo {month_year:MMMM yyyy} Bar", "Foo lipca 1856 Bar"),
],
)
def test_format_date_in_string_customization(base_config, message, expected_string):
formatted_date = LocaleBorg().format_date_in_string(message, TESLA_BIRTHDAY, "pl")
assert formatted_date == expected_string
@pytest.mark.parametrize(
"lang, expected_format",
[("sr", "10. јул 1856. 12:34:56 UTC"), ("sr_latin", "10. jul 1856. 12:34:56 UTC")],
)
def test_locale_base(lang, expected_format):
LocaleBorg.initialize(LEGAL_VALUES["LOCALES_BASE"], "en")
formatted_date = LocaleBorg().formatted_date("long", TESLA_BIRTHDAY_DT, lang)
assert formatted_date == expected_format
@pytest.fixture(autouse=True)
def localeborg_reset():
"""
Reset the LocaleBorg before and after every test.
"""
LocaleBorg.reset()
assert not LocaleBorg.initialized
try:
yield
finally:
LocaleBorg.reset()
assert not LocaleBorg.initialized
@pytest.fixture
def base_config():
"""A base config of LocaleBorg."""
LocaleBorg.initialize({}, "en")
|
4,480 |
what
|
import math
import re
import textwrap
import operator
import numpy as np
import unittest
from numba.core.compiler import compile_isolated
from numba import jit
from numba.core import types
from numba.core.errors import TypingError
from numba.core.types.functions import _header_lead
from numba.tests.support import TestCase
def METHOD_NAME():
pass
def foo():
return METHOD_NAME()
def bar(x):
return x.a
def issue_868(a):
return a.shape * 2
def impossible_return_type(x):
if x > 0:
return ()
else:
return 1j
def bad_hypot_usage():
return math.hypot(1)
def imprecise_list():
l = []
return len(l)
def using_imprecise_list():
a = np.array([])
return a.astype(np.int32)
def unknown_module():
return numpyz.int32(0)
def nop(x, y, z):
pass
def array_setitem_invalid_cast():
arr = np.empty(1, dtype=np.float64)
arr[0] = 1j # invalid cast from complex to float
return arr
class Foo(object):
def __repr__(self):
return "<Foo instance>"
class TestTypingError(unittest.TestCase):
def test_unknown_function(self):
try:
compile_isolated(foo, ())
except TypingError as e:
self.assertIn("Untyped global name 'what'", str(e))
else:
self.fail("Should raise error")
def test_unknown_attrs(self):
try:
compile_isolated(bar, (types.int32,))
except TypingError as e:
self.assertIn("Unknown attribute 'a' of type int32", str(e))
else:
self.fail("Should raise error")
def test_unknown_module(self):
# This used to print "'object' object has no attribute 'int32'"
with self.assertRaises(TypingError) as raises:
compile_isolated(unknown_module, ())
self.assertIn("name 'numpyz' is not defined", str(raises.exception))
def test_issue_868(self):
'''
Summary: multiplying a scalar by a non-scalar would cause a crash in
type inference because TimeDeltaMixOp always assumed at least one of
its operands was an NPTimeDelta in its generic() method.
'''
with self.assertRaises(TypingError) as raises:
compile_isolated(issue_868, (types.Array(types.int32, 1, 'C'),))
expected = ((_header_lead + " Function(<built-in function mul>) found "
"for signature:\n \n >>> mul(UniTuple({} x 1), {})")
.format(str(types.intp), types.IntegerLiteral(2)))
self.assertIn(expected, str(raises.exception))
self.assertIn("During: typing of", str(raises.exception))
def test_return_type_unification(self):
with self.assertRaises(TypingError) as raises:
compile_isolated(impossible_return_type, (types.int32,))
msg = ("Can't unify return type from the following types: Tuple(), "
"complex128")
self.assertIn(msg, str(raises.exception))
def test_bad_hypot_usage(self):
with self.assertRaises(TypingError) as raises:
compile_isolated(bad_hypot_usage, ())
errmsg = str(raises.exception)
# Make sure it listed the known signatures.
# This is sensitive to the formatting of the error message.
self.assertIn(" * (float64, float64) -> float64", errmsg)
# find the context lines
ctx_lines = [x for x in errmsg.splitlines() if "During:" in x ]
# Check contextual msg
self.assertTrue(re.search(r'.*During: resolving callee type: Function.*hypot', ctx_lines[0]))
self.assertTrue(re.search(r'.*During: typing of call .*test_typingerror.py', ctx_lines[1]))
def test_imprecise_list(self):
"""
Type inference should catch that a list type's remain imprecise,
instead of letting lowering fail.
"""
with self.assertRaises(TypingError) as raises:
compile_isolated(imprecise_list, ())
errmsg = str(raises.exception)
msg = ("Cannot infer the type of variable 'l', have imprecise type: "
"list(undefined)")
self.assertIn(msg, errmsg)
# check help message has gone in
self.assertIn("For Numba to be able to compile a list", errmsg)
def test_using_imprecise_list(self):
"""
Type inference should report informative error about untyped list.
TODO: #2931
"""
with self.assertRaises(TypingError) as raises:
compile_isolated(using_imprecise_list, ())
errmsg = str(raises.exception)
self.assertIn("Undecided type", errmsg)
def test_array_setitem_invalid_cast(self):
with self.assertRaises(TypingError) as raises:
compile_isolated(array_setitem_invalid_cast, ())
errmsg = str(raises.exception)
self.assertIn(
_header_lead + " Function({})".format(operator.setitem),
errmsg,
)
self.assertIn(
"(array(float64, 1d, C), Literal[int](0), complex128)",
errmsg,
)
def test_template_rejection_error_message_cascade(self):
from numba import njit
@njit
def foo():
z = 1
for a, b in enumerate(z):
pass
return z
with self.assertRaises(TypingError) as raises:
foo()
errmsg = str(raises.exception)
expected = "No match."
self.assertIn(expected, errmsg)
ctx_lines = [x for x in errmsg.splitlines() if "During:" in x ]
search = [r'.*During: resolving callee type: Function.*enumerate',
r'.*During: typing of call .*test_typingerror.py']
for i, x in enumerate(search):
self.assertTrue(re.search(x, ctx_lines[i]))
class TestArgumentTypingError(unittest.TestCase):
"""
Test diagnostics of typing errors caused by argument inference failure.
"""
def test_unsupported_array_dtype(self):
# See issue #1943
cfunc = jit(nopython=True)(nop)
a = np.ones(3)
a = a.astype(a.dtype.newbyteorder())
with self.assertRaises(TypingError) as raises:
cfunc(1, a, a)
expected = f"Unsupported array dtype: {a.dtype}"
self.assertIn(expected, str(raises.exception))
def test_unsupported_type(self):
cfunc = jit(nopython=True)(nop)
foo = Foo()
with self.assertRaises(TypingError) as raises:
cfunc(1, foo, 1)
expected=re.compile(("This error may have been caused by the following "
"argument\(s\):\\n- argument 1:.*Cannot determine "
"Numba type of "
"<class \'numba.tests.test_typingerror.Foo\'>"))
self.assertTrue(expected.search(str(raises.exception)) is not None)
class TestCallError(unittest.TestCase):
def test_readonly_array(self):
@jit("(f8[:],)", nopython=True)
def inner(x):
return x
@jit(nopython=True)
def outer():
return inner(gvalues)
gvalues = np.ones(10, dtype=np.float64)
with self.assertRaises(TypingError) as raises:
outer()
got = str(raises.exception)
pat = r"Invalid use of.*readonly array\(float64, 1d, C\)"
self.assertIsNotNone(re.search(pat, got))
if __name__ == '__main__':
unittest.main()
|
4,481 |
join
|
#------------------------------------------------------------------------------------------#
# This file is part of Pyccel which is released under MIT License. See the LICENSE file or #
# go to https://github.com/pyccel/pyccel/blob/master/LICENSE for full license details. #
#------------------------------------------------------------------------------------------#
"""
Module representing object address.
"""
from .basic import PyccelAstNode, Basic
from .literals import LiteralString
__all__ = ('CMacro',
'CStringExpression',
'ObjectAddress')
class ObjectAddress(PyccelAstNode):
"""Represents the address of an object.
ObjectAddress(Variable('int','a')) is &a
ObjectAddress(Variable('int','a', memory_handling='alias')) is a
"""
__slots__ = ('_obj', '_rank', '_precision', '_dtype', '_shape', '_order')
_attribute_nodes = ('_obj',)
def __init__(self, obj):
if not isinstance(obj, PyccelAstNode):
raise TypeError("object must be an instance of PyccelAstNode")
self._obj = obj
self._rank = obj.rank
self._shape = obj.shape
self._precision = obj.precision
self._dtype = obj.dtype
self._order = obj.order
super().__init__()
@property
def obj(self):
"""The object whose address is of interest
"""
return self._obj
#------------------------------------------------------------------------------
class CStringExpression(Basic):
"""
Internal class used to hold a C string that has LiteralStrings and C macros.
Parameters
----------
*args : str / LiteralString / CMacro / CStringExpression
any number of arguments to be added to the expression
note: they will get added in the order provided
Example
------
>>> expr = CStringExpression(
... CMacro("m"),
... CStringExpression(
... LiteralString("the macro is: "),
... CMacro("mc")
... ),
... LiteralString("."),
... )
"""
__slots__ = ('_expression',)
_attribute_nodes = ('_expression',)
def __init__(self, *args):
self._expression = []
super().__init__()
for arg in args:
self.append(arg)
def __repr__(self):
return ''.METHOD_NAME(repr(e) for e in self._expression)
def __str__(self):
return ''.METHOD_NAME(str(e) for e in self._expression)
def __add__(self, o):
"""
return new CStringExpression that has `o` at the end
Parameter
----------
o : str / LiteralString / CMacro / CStringExpression
the expression to add
"""
if isinstance(o, str):
o = LiteralString(o)
if not isinstance(o, (LiteralString, CMacro, CStringExpression)):
raise TypeError(f"unsupported operand type(s) for +: '{self.__class__}' and '{type(o)}'")
return CStringExpression(*self._expression, o)
def __radd__(self, o):
if isinstance(o, LiteralString):
return CStringExpression(o, self)
return NotImplemented
def __iadd__(self, o):
self.append(o)
return self
def append(self, o):
"""
append the argument `o` to the end of the list _expression
Parameter
---------
o : str / LiteralString / CMacro / CStringExpression
the expression to append
"""
if isinstance(o, str):
o = LiteralString(o)
if not isinstance(o, (LiteralString, CMacro, CStringExpression)):
raise TypeError(f"unsupported operand type(s) for append: '{self.__class__}' and '{type(o)}'")
self._expression += (o,)
o.set_current_user_node(self)
def METHOD_NAME(self, lst):
"""
insert self between each element of the list `lst`
Parameter
---------
lst : list
the list to insert self between its elements
Example
-------
>>> a = [
... CMacro("m"),
... CStringExpression(LiteralString("the macro is: ")),
... LiteralString("."),
... ]
>>> b = CStringExpression("?").join(a)
...
... # is the same as:
...
>>> b = CStringExpression(
... CMacro("m"),
... CStringExpression("?"),
... CStringExpression(LiteralString("the macro is: ")),
CStringExpression("?"),
... LiteralString("."),
... )
"""
result = CStringExpression()
if not lst:
return result
result += lst[0]
for elm in lst[1:]:
result += self
result += elm
return result
def get_flat_expression_list(self):
"""
returns a list of LiteralStrings and CMacros after merging every
consecutive LiteralString
"""
tmp_res = []
for e in self.expression:
if isinstance(e, CStringExpression):
tmp_res.extend(e.get_flat_expression_list())
else:
tmp_res.append(e)
if not tmp_res:
return []
result = [tmp_res[0]]
for e in tmp_res[1:]:
if isinstance(e, LiteralString) and isinstance(result[-1], LiteralString):
result[-1] += e
else:
result.append(e)
return result
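# Illustrative sketch (not part of the original module): if _expression holds
# [LiteralString("foo"), LiteralString("bar"), CMacro("M")], the flattened list
# becomes [LiteralString("foobar"), CMacro("M")] -- consecutive LiteralStrings
# are merged via `+`, which this method assumes LiteralString supports.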
@property
def expression(self):
""" The list containing the literal strings and c macros
"""
return self._expression
#------------------------------------------------------------------------------
class CMacro(Basic):
"""Represents a c macro"""
__slots__ = ('_macro',)
_attribute_nodes = ()
def __init__(self, arg):
super().__init__()
if not isinstance(arg, str):
raise TypeError('arg must be of type str')
self._macro = arg
def __repr__(self):
return str(self._macro)
def __add__(self, o):
if isinstance(o, (LiteralString, CStringExpression)):
return CStringExpression(self, o)
return NotImplemented
def __radd__(self, o):
if isinstance(o, LiteralString):
return CStringExpression(o, self)
return NotImplemented
@property
def macro(self):
""" The string containing macro name
"""
return self._macro
|
4,482 |
make dirs if necessary
|
#%module
#% description: This script detects changes by comparing two different sets of DEMs.
#%end
#%option
#% key: reference_pc
#% type: string
#% required: yes
#% multiple: no
#% description: The path for the reference point cloud file
#%end
#%option
#% key: reference_dsm
#% type: string
#% required: yes
#% multiple: no
#% description: The path for the reference dsm file
#%end
#%option
#% key: reference_dtm
#% type: string
#% required: yes
#% multiple: no
#% description: The path for the reference dtm file
#%end
#%option
#% key: compare_pc
#% type: string
#% required: yes
#% multiple: no
#% description: The path for the compare point cloud file
#%end
#%option
#% key: compare_dsm
#% type: string
#% required: yes
#% multiple: no
#% description: The path for the compare dsm file
#%end
#%option
#% key: compare_dtm
#% type: string
#% required: yes
#% multiple: no
#% description: The path for the compare dtm file
#%end
#%option
#% key: aligned_compare_dsm
#% type: string
#% required: yes
#% multiple: no
#% description: The path for the compare dsm file that should be aligned to the reference cloud
#%end
#%option
#% key: aligned_compare_dtm
#% type: string
#% required: yes
#% multiple: no
#% description: The path for the compare dtm file that should be aligned to the reference cloud
#%end
#%option
#% key: format
#% type: string
#% required: yes
#% multiple: no
#% description: OGR output format
#%end
#%option
#% key: epsg
#% type: string
#% required: yes
#% multiple: no
#% description: The epsg code that will be used for output
#%end
#%option
#% key: display_type
#% type: string
#% required: yes
#% multiple: no
#% description: Whether to display a heatmap or contours
#%end
#%option
#% key: resolution
#% type: double
#% required: yes
#% multiple: no
#% description: Target resolution in meters
#%end
#%option
#% key: min_height
#% type: double
#% required: yes
#% multiple: no
#% description: Min height in meters for a difference to be considered change
#%end
#%option
#% key: min_area
#% type: double
#% required: yes
#% multiple: no
#% description: Min area in meters for a difference to be considered change
#%end
#%option
#% key: can_align_and_rasterize
#% type: string
#% required: yes
#% multiple: no
#% description: Whether the comparison should be done after aligning the reference and compare clouds
#%end
from os import path, makedirs, getcwd
from compare import compare
import glob
import shutil
import sys
import subprocess
import grass.script as grass
def main():
# Read params
reference_pc = opts['reference_pc']
compare_pc = opts['compare_pc']
reference_dsm = opts['reference_dsm']
reference_dtm = opts['reference_dtm']
compare_dsm = opts['compare_dsm']
compare_dtm = opts['compare_dtm']
aligned_compare_dsm = opts['aligned_compare_dsm']
aligned_compare_dtm = opts['aligned_compare_dtm']
epsg = opts['epsg']
resolution = float(opts['resolution'])
min_height = float(opts['min_height'])
min_area = float(opts['min_area'])
display_type = opts['display_type']
format = opts['format']
can_align_and_rasterize = opts['can_align_and_rasterize'] == 'true'
if can_align_and_rasterize:
handle_if_should_align_align_and_rasterize(reference_pc, compare_pc, reference_dsm, reference_dtm, aligned_compare_dsm, aligned_compare_dtm)
result_dump = compare(reference_dsm, reference_dtm, aligned_compare_dsm, aligned_compare_dtm, epsg, resolution, display_type, min_height, min_area)
else:
handle_if_shouldnt_align_and_rasterize(reference_dsm, reference_dtm, compare_dsm, compare_dtm)
result_dump = compare(reference_dsm, reference_dtm, compare_dsm, compare_dtm, epsg, resolution, display_type, min_height, min_area)
# Write the geojson as the expected format file
write_to_file(result_dump, format)
def handle_if_shouldnt_align_and_rasterize(reference_dsm, reference_dtm, compare_dsm, compare_dtm):
if not path.exists(reference_dsm) or not path.exists(reference_dtm) or not path.exists(compare_dsm) or not path.exists(compare_dtm):
raise Exception('Failed to find all four required DEMs to detect changes.')
def handle_if_should_align_align_and_rasterize(reference_pc, compare_pc, reference_dsm, reference_dtm, aligned_compare_dsm, aligned_compare_dtm):
from align.align_and_rasterize import align, rasterize
if not path.exists(reference_pc) or not path.exists(compare_pc):
raise Exception('Failed to find both the reference and compare point clouds')
# Create reference DSM if it does not exist
if not path.exists(reference_dsm):
METHOD_NAME(reference_dsm)
rasterize(reference_pc, 'dsm', reference_dsm)
# Create reference DTM if it does not exist
if not path.exists(reference_dtm):
METHOD_NAME(reference_dtm)
rasterize(reference_pc, 'dtm', reference_dtm)
if not path.exists(aligned_compare_dsm) or not path.exists(aligned_compare_dtm):
aligned_compare_pc = 'aligned.laz'
# Run ICP and align the compare point cloud
align(reference_pc, compare_pc, aligned_compare_pc)
# Create compare DSM if it does not exist
if not path.exists(aligned_compare_dsm):
METHOD_NAME(aligned_compare_dsm)
rasterize(aligned_compare_pc, 'dsm', aligned_compare_dsm)
# Create compare DTM if it does not exist
if not path.exists(aligned_compare_dtm):
METHOD_NAME(aligned_compare_dtm)
rasterize(aligned_compare_pc, 'dtm', aligned_compare_dtm)
def METHOD_NAME(file_path):
dirname = path.dirname(file_path)
makedirs(dirname, exist_ok = True)
def write_to_file(result_dump, format):
ext = ""
if format == "GeoJSON":
ext = "json"
elif format == "GPKG":
ext = "gpkg"
elif format == "DXF":
ext = "dxf"
elif format == "ESRI Shapefile":
ext = "shp"
with open("output.json", 'w+') as output:
output.write(result_dump)
if ext != "json":
subprocess.check_call(["ogr2ogr", "-f", format, "output.%s" % ext, "output.json"], stdout=subprocess.DEVNULL)
if path.isfile("output.%s" % ext):
if format == "ESRI Shapefile":
ext="zip"
makedirs("changes")
contour_files = glob.glob("output.*")
for cf in contour_files:
shutil.move(cf, path.join("changes", path.basename(cf)))
shutil.make_archive('output', 'zip', 'changes/')
print(path.join(getcwd(), "output.%s" % ext))
else:
print("error")
if __name__ == "__main__":
opts, _ = grass.parser()
try:
sys.exit(main())
except Exception as e:
print(e)
|
4,483 |
describe targets
|
# This file is part of Hypothesis, which may be found at
# https://github.com/HypothesisWorks/hypothesis/
#
# Copyright the Hypothesis Authors.
# Individual contributors are listed in AUTHORS.rst and the git log.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
import math
from collections import Counter
from hypothesis.utils.dynamicvariables import DynamicVariable
collector = DynamicVariable(None)
def note_statistics(stats_dict):
callback = collector.value
if callback is not None:
callback(stats_dict)
def METHOD_NAME(best_targets):
"""Return a list of lines describing the results of `target`, if any."""
# These lines are included in the general statistics description below,
# but also printed immediately below failing examples to alleviate the
# "threshold problem" where shrinking can make severe bug look trivial.
# See https://github.com/HypothesisWorks/hypothesis/issues/2180
if not best_targets:
return []
elif len(best_targets) == 1:
label, score = next(iter(best_targets.items()))
return [f"Highest target score: {score:g} (label={label!r})"]
else:
lines = ["Highest target scores:"]
for label, score in sorted(best_targets.items(), key=lambda x: x[::-1]):
lines.append(f"{score:>16g} (label={label!r})")
return lines
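# Illustrative output sketch (hypothetical labels/scores): for {"loss": 0.875}
# the single line is "Highest target score: 0.875 (label='loss')"; with several
# labels the first line is "Highest target scores:" followed by one line per
# label, scores right-aligned to 16 characters and sorted ascending by score.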
def format_ms(times):
"""Format `times` into a string representing approximate milliseconds.
`times` is a collection of durations in seconds.
"""
ordered = sorted(times)
n = len(ordered) - 1
assert n >= 0
lower = int(ordered[int(math.floor(n * 0.05))] * 1000)
upper = int(ordered[int(math.ceil(n * 0.95))] * 1000)
if upper == 0:
return "< 1ms"
elif lower == upper:
return f"~ {lower}ms"
else:
return f"~ {lower}-{upper} ms"
def describe_statistics(stats_dict):
"""Return a multi-line string describing the passed run statistics.
`stats_dict` must be a dictionary of data in the format collected by
`hypothesis.internal.conjecture.engine.ConjectureRunner.statistics`.
We DO NOT promise that this format will be stable or supported over
time, but do aim to make it reasonably useful for downstream users.
It's also meant to support benchmarking for research purposes.
This function is responsible for the report which is printed in the
terminal for our pytest --hypothesis-show-statistics option.
"""
lines = [stats_dict["nodeid"] + ":\n"] if "nodeid" in stats_dict else []
prev_failures = 0
for phase in ["reuse", "generate", "shrink"]:
d = stats_dict.get(phase + "-phase", {})
# Basic information we report for every phase
cases = d.get("test-cases", [])
if not cases:
continue
statuses = Counter(t["status"] for t in cases)
runtime_ms = format_ms(t["runtime"] for t in cases)
drawtime_ms = format_ms(t["drawtime"] for t in cases)
lines.append(
f" - during {phase} phase ({d['duration-seconds']:.2f} seconds):\n"
f" - Typical runtimes: {runtime_ms}, of which {drawtime_ms} in data generation\n"
f" - {statuses['valid']} passing examples, {statuses['interesting']} "
f"failing examples, {statuses['invalid'] + statuses['overrun']} invalid examples"
)
# If we've found new distinct failures in this phase, report them
distinct_failures = d["distinct-failures"] - prev_failures
if distinct_failures:
plural = distinct_failures > 1
lines.append(
" - Found {}{} distinct error{} in this phase".format(
distinct_failures, " more" * bool(prev_failures), "s" * plural
)
)
prev_failures = d["distinct-failures"]
# Report events during the generate phase, if there were any
if phase == "generate":
events = Counter(sum((t["events"] for t in cases), []))
if events:
lines.append(" - Events:")
lines += [
f" * {100 * v / len(cases):.2f}%, {k}"
for k, v in sorted(events.items(), key=lambda x: (-x[1], x[0]))
]
# Some additional details on the shrinking phase
if phase == "shrink":
lines.append(
" - Tried {} shrinks of which {} were successful".format(
len(cases), d["shrinks-successful"]
)
)
lines.append("")
target_lines = METHOD_NAME(stats_dict.get("targets", {}))
if target_lines:
lines.append(" - " + target_lines[0])
lines.extend(" " + l for l in target_lines[1:])
lines.append(" - Stopped because " + stats_dict["stopped-because"])
return "\n".join(lines)
|
4,484 |
stop
|
"""ZAP Authenticator in a Python Thread.
.. versionadded:: 14.1
"""
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
import time
import logging
from threading import Thread, Event
import zmq
from zmq.utils import jsonapi
from zmq.utils.strtypes import bytes, unicode, b, u
import sys
from .base import Authenticator
class AuthenticationThread(Thread):
"""A Thread for running a zmq Authenticator
This is run in the background by ThreadedAuthenticator
"""
def __init__(self, context, endpoint, encoding='utf-8', log=None, authenticator=None):
super(AuthenticationThread, self).__init__()
self.context = context or zmq.Context.instance()
self.encoding = encoding
self.log = log = log or logging.getLogger('zmq.auth')
self.started = Event()
self.authenticator = authenticator or Authenticator(context, encoding=encoding, log=log)
# create a socket to communicate back to main thread.
self.pipe = context.socket(zmq.PAIR)
self.pipe.linger = 1
self.pipe.connect(endpoint)
def run(self):
"""Start the Authentication Agent thread task"""
self.authenticator.start()
self.started.set()
zap = self.authenticator.zap_socket
poller = zmq.Poller()
poller.register(self.pipe, zmq.POLLIN)
poller.register(zap, zmq.POLLIN)
while True:
try:
socks = dict(poller.poll())
except zmq.ZMQError:
break # interrupted
if self.pipe in socks and socks[self.pipe] == zmq.POLLIN:
# Make sure all API requests are processed before
# looking at the ZAP socket.
while True:
try:
msg = self.pipe.recv_multipart(flags=zmq.NOBLOCK)
except zmq.Again:
break
terminate = self._handle_pipe(msg)
if terminate:
break
if terminate:
break
if zap in socks and socks[zap] == zmq.POLLIN:
self._handle_zap()
self.pipe.close()
self.authenticator.METHOD_NAME()
def _handle_zap(self):
"""
Handle a message from the ZAP socket.
"""
msg = self.authenticator.zap_socket.recv_multipart()
if not msg: return
self.authenticator.handle_zap_message(msg)
def _handle_pipe(self, msg):
"""
Handle a message from front-end API.
"""
terminate = False
if msg is None:
terminate = True
return terminate
command = msg[0]
self.log.debug("auth received API command %r", command)
if command == b'ALLOW':
addresses = [u(m, self.encoding) for m in msg[1:]]
try:
self.authenticator.allow(*addresses)
except Exception as e:
self.log.exception("Failed to allow %s", addresses)
elif command == b'DENY':
addresses = [u(m, self.encoding) for m in msg[1:]]
try:
self.authenticator.deny(*addresses)
except Exception as e:
self.log.exception("Failed to deny %s", addresses)
elif command == b'PLAIN':
domain = u(msg[1], self.encoding)
json_passwords = msg[2]
self.authenticator.configure_plain(domain, jsonapi.loads(json_passwords))
elif command == b'CURVE':
# For now we don't do anything with domains
domain = u(msg[1], self.encoding)
# If location is CURVE_ALLOW_ANY, allow all clients. Otherwise
# treat location as a directory that holds the certificates.
location = u(msg[2], self.encoding)
self.authenticator.configure_curve(domain, location)
elif command == b'TERMINATE':
terminate = True
else:
self.log.error("Invalid auth command from API: %r", command)
return terminate
def _inherit_docstrings(cls):
"""inherit docstrings from Authenticator, so we don't duplicate them"""
for name, method in cls.__dict__.items():
if name.startswith('_') or not callable(method):
continue
upstream_method = getattr(Authenticator, name, None)
if not method.__doc__:
method.__doc__ = upstream_method.__doc__
return cls
@_inherit_docstrings
class ThreadAuthenticator(object):
"""Run ZAP authentication in a background thread"""
context = None
log = None
encoding = None
pipe = None
pipe_endpoint = ''
thread = None
auth = None
def __init__(self, context=None, encoding='utf-8', log=None):
self.context = context or zmq.Context.instance()
self.log = log
self.encoding = encoding
self.pipe = None
self.pipe_endpoint = "inproc://{0}.inproc".format(id(self))
self.thread = None
# proxy base Authenticator attributes
def __setattr__(self, key, value):
for obj in [self] + self.__class__.mro():
if key in obj.__dict__:
object.__setattr__(self, key, value)
return
setattr(self.thread.authenticator, key, value)
def __getattr__(self, key):
try:
object.__getattr__(self, key)
except AttributeError:
return getattr(self.thread.authenticator, key)
def allow(self, *addresses):
self.pipe.send_multipart([b'ALLOW'] + [b(a, self.encoding) for a in addresses])
def deny(self, *addresses):
self.pipe.send_multipart([b'DENY'] + [b(a, self.encoding) for a in addresses])
def configure_plain(self, domain='*', passwords=None):
self.pipe.send_multipart([b'PLAIN', b(domain, self.encoding), jsonapi.dumps(passwords or {})])
def configure_curve(self, domain='*', location=''):
domain = b(domain, self.encoding)
location = b(location, self.encoding)
self.pipe.send_multipart([b'CURVE', domain, location])
def configure_curve_callback(self, domain='*', credentials_provider=None):
self.thread.authenticator.configure_curve_callback(domain, credentials_provider=credentials_provider)
def start(self):
"""Start the authentication thread"""
# create a socket to communicate with auth thread.
self.pipe = self.context.socket(zmq.PAIR)
self.pipe.linger = 1
self.pipe.bind(self.pipe_endpoint)
self.thread = AuthenticationThread(self.context, self.pipe_endpoint, encoding=self.encoding, log=self.log)
self.thread.start()
# Event.wait:Changed in version 2.7: Previously, the method always returned None.
if sys.version_info < (2,7):
self.thread.started.wait(timeout=10)
else:
if not self.thread.started.wait(timeout=10):
raise RuntimeError("Authenticator thread failed to start")
def METHOD_NAME(self):
"""Stop the authentication thread"""
if self.pipe:
self.pipe.send(b'TERMINATE')
if self.is_alive():
self.thread.join()
self.thread = None
self.pipe.close()
self.pipe = None
def is_alive(self):
"""Is the ZAP thread currently running?"""
if self.thread and self.thread.is_alive():
return True
return False
def __del__(self):
self.METHOD_NAME()
__all__ = ['ThreadAuthenticator']
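# Illustrative usage sketch (assumed, not part of the original module):
#   auth = ThreadAuthenticator(zmq.Context.instance())
#   auth.start()                    # binds the inproc pipe and spawns AuthenticationThread
#   auth.allow('127.0.0.1')         # forwarded to the thread as b'ALLOW'
#   auth.configure_plain(domain='*', passwords={'admin': 'secret'})
#   ...                             # create PLAIN/CURVE sockets while the thread handles ZAP
#   auth.METHOD_NAME()              # the 'stop' method: sends b'TERMINATE' and joins the thread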
|
4,485 |
convert max length
|
# Copyright (c) 2023 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
from __future__ import annotations
from ansible_collections.arista.avd.plugins.plugin_utils.schema.avdschema import AvdSchema
from ansible_collections.arista.avd.plugins.plugin_utils.schema.avdtodocumentationschemaconverter import get_deprecation
from ansible_collections.arista.avd.plugins.plugin_utils.schema.key_to_display_name import key_to_display_name
class AvdToJsonSchemaConverter:
"""
This class will convert the proprietary AVD Schema to JSON Schema
A few keywords are converted as part of another keyword, simply because
of the way JSON Schema is organized compared to AVD Schema.
- "required"
- "primary_key"
- "allow_other_keys"
Some features of AVD Schema are not supported by JSON Schema:
- Enforcing uniqueness of "primary_key" in list of dicts
- "dynamic_keys" based on values of data
- "dynamic_valid_values" based on values of data
- Most options under str "format"
"""
def __init__(self, avdschema: AvdSchema):
self.avdschema = avdschema
self.converters = {
"display_name": self.convert_display_name,
"description": self.convert_description,
# Keeping ref and def out until vscode yaml plugin works well with refs and unevaluatedProperties
# "$ref": self.convert_ref,
# "$defs": self.convert_defs,
"type": self.convert_type,
"max": self.convert_max,
"min": self.convert_min,
"valid_values": self.convert_valid_values,
"format": self.convert_format,
"max_length": self.METHOD_NAME,
"min_length": self.convert_min_length,
"pattern": self.convert_pattern,
"default": self.convert_default,
"items": self.convert_items,
"keys": self.convert_keys,
# "dynamic_keys": self.convert_dynamic_keys,
}
def convert_schema(self, schema: dict = None) -> dict:
output = {}
if schema is None:
# We are at the root level, so fetch the full schema
# Since vscode language server is not working well with "unevaluatedProperties",
# we have to stick with "additionalProperties" which does not work in combination with $ref.
# This means we have to fully expand the schema and not use $ref in jsonschema.
schema = self.avdschema.resolved_schema
for word in schema:
if word not in self.converters:
# Ignore unsupported keys
continue
output.update(self.converters[word](schema[word], schema))
return output
def convert_type(self, type: str, parent_schema: dict) -> dict:
TYPE_MAP = {
"str": "string",
"int": "integer",
"bool": "boolean",
"list": "array",
"dict": "object",
}
return {"type": TYPE_MAP[type]}
def convert_keys(self, keys: dict, parent_schema: dict) -> dict:
return self.__convert_keys(keys, parent_schema, "properties")
def __convert_keys(self, keys: dict, parent_schema: dict, output_key: str, ignore_required: str = False) -> dict:
"""
Reusable function to convert keys, pattern_keys, $defs
output_key is set to either "properties", "patternProperties" or "$defs"
"""
output = {output_key: {}}
required = []
for key, subschema in keys.items():
if "deprecation" in subschema and get_deprecation(subschema)[0] == "removed":
# Skip key if marked as removed in the AVD schema
continue
output[output_key][key] = self.convert_schema(subschema)
# Add an auto-generated title in case one is not set
if "title" not in output[output_key][key]:
output[output_key][key]["title"] = key_to_display_name(str(key))
if not ignore_required and subschema.get("required") is True:
required.append(key)
if required:
output["required"] = required
# output["unevaluatedProperties"] = parent_schema.get("allow_other_keys", False)
output["additionalProperties"] = parent_schema.get("allow_other_keys", False)
# Always permit keys starting with underscore
if not parent_schema.get("allow_other_keys", False):
output.setdefault("patternProperties", {})["^_.+$"] = {}
return output
def convert_max(self, max: int, parent_schema: dict) -> dict:
return {"maximum": max}
def convert_min(self, min: int, parent_schema: dict) -> dict:
return {"minimum": min}
def convert_valid_values(self, valid_values: list, parent_schema: dict) -> dict:
return {"enum": valid_values}
def convert_format(self, format: str, parent_schema: dict) -> dict:
FORMAT_MAP = {
"ipv4": "ipv4",
"ipv4_cidr": None,
"ipv6": "ipv6",
"ipv6_cidr": None,
"ip": None,
"cidr": None,
"mac": None,
}
if (newformat := FORMAT_MAP[format]) is None:
return {}
return {"format": newformat}
def METHOD_NAME(self, max: int, parent_schema: dict) -> dict:
vartype = parent_schema["type"]
if vartype == "str":
return {"maxLength": max}
if vartype == "list":
return {"maxItems": max}
return {}
def convert_min_length(self, min: int, parent_schema: dict) -> dict:
vartype = parent_schema["type"]
if vartype == "str":
return {"minLength": min}
if vartype == "list":
return {"minItems": min}
return {}
def convert_pattern(self, pattern: str, parent_schema: dict) -> dict:
return {"pattern": pattern}
def convert_default(self, default, parent_schema: dict) -> dict:
return {"default": default}
def convert_items(self, items: dict, parent_schema: dict) -> dict:
output = {
"items": self.convert_schema(items),
}
if (primary_key := parent_schema.get("primary_key")) and items.get("type") == "dict":
output["items"].setdefault("required", [])
if primary_key not in output["items"]["required"]:
output["items"]["required"].append(primary_key)
return output
def convert_display_name(self, display_name: str, parent_schema: dict) -> dict:
return {"title": display_name}
def convert_description(self, description: str, parent_schema: dict) -> dict:
if "deprecation" in parent_schema:
label, deprecation_text = get_deprecation(parent_schema)
if deprecation_text is not None:
return {
"description": f"{description}\n{deprecation_text}",
"deprecated": True,
}
return {"description": description}
def convert_ref(self, ref: str, parent_schema: dict) -> dict:
jsonschema_ref = ref.replace("keys", "properties")
# TODO: Translate using paths set in avd schema store
jsonschema_ref = jsonschema_ref.replace("eos_cli_config_gen#", "../../eos_cli_config_gen/schemas/eos_cli_config_gen.jsonschema.json#")
return {"$ref": jsonschema_ref}
def convert_defs(self, dollardef: dict, parent_schema: dict) -> dict:
return self.__convert_keys(dollardef, parent_schema, "$defs", ignore_required=True)
|
4,486 |
test invert
|
# Copyright (c) 2017 The University of Manchester
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pyNN.spiNNaker as sim
from spinnaker_testbase import BaseTestCase
WEIGHT = 5
DELAY = 2
class TestFromListConnector(BaseTestCase):
# NO unittest_setup() as sim.setup is called
def check_weights(
self, projection, aslist, w_index, d_index, sources, destinations):
from_pro = projection.get(["weight", "delay"], "list")
aslist.sort()
as_index = 0
for (source, dest, weight, delay) in from_pro:
from_as = aslist[as_index]
while from_as[0] >= sources:
as_index += 1
from_as = aslist[as_index]
while from_as[1] >= destinations:
as_index += 1
from_as = aslist[as_index]
self.assertEqual(from_as[0], source)
self.assertEqual(from_as[1], dest)
if w_index:
self.assertAlmostEqual(from_as[w_index], weight, 4)
else:
self.assertEqual(WEIGHT, weight)
if d_index:
self.assertAlmostEqual(from_as[d_index], delay, 4)
else:
self.assertEqual(DELAY, delay)
as_index += 1
while as_index < len(aslist):
from_as = aslist[as_index]
assert from_as[0] >= sources or from_as[1] >= destinations
as_index += 1
def check_other_connect(
self, aslist, column_names=None, w_index=2, d_index=3, sources=6,
destinations=8):
sim.setup(1.0)
pop1 = sim.Population(sources, sim.IF_curr_exp(), label="pop1")
pop2 = sim.Population(destinations, sim.IF_curr_exp(), label="pop2")
synapse_type = sim.StaticSynapse(weight=WEIGHT, delay=DELAY)
projection = sim.Projection(
pop1, pop2, sim.FromListConnector(
aslist, column_names=column_names),
synapse_type=synapse_type)
sim.run(0)
self.check_weights(
projection, aslist, w_index, d_index, sources, destinations)
sim.end()
def test_simple(self):
as_list = [
(0, 0, 0.1, 10),
(3, 0, 0.2, 11),
(2, 3, 0.3, 12),
(5, 1, 0.4, 13),
(0, 1, 0.5, 14),
]
self.check_other_connect(as_list)
def test_list_too_big(self):
as_list = [
(0, 0, 0.1, 10),
(13, 0, 0.2, 11),
(2, 13, 0.3, 12),
(5, 1, 0.4, 13),
(0, 1, 0.5, 14),
]
self.check_other_connect(as_list)
def test_no_delays(self):
as_list = [
(0, 0, 0.1),
(3, 0, 0.2),
(2, 3, 0.3),
(5, 1, 0.4),
(0, 1, 0.5),
]
self.check_other_connect(
as_list, column_names=["weight"], d_index=None)
def test_no_weight(self):
as_list = [
(0, 0, 10),
(3, 0, 11),
(2, 3, 12),
(5, 1, 13),
(0, 1, 14),
]
self.check_other_connect(
as_list, column_names=["delay"], d_index=2, w_index=None)
def METHOD_NAME(self):
as_list = [
(0, 0, 10, 0.1),
(3, 0, 11, 0.2),
(2, 3, 12, 0.3),
(5, 1, 13, 0.4),
(0, 1, 14, 0.5),
]
self.check_other_connect(
as_list, column_names=["delay", "weight"], w_index=3, d_index=2)
def test_big(self):
sources = 200
destinations = 300
aslist = []
for s in range(sources):
for d in range(destinations):
aslist.append((s, d, 5, 2))
self.check_other_connect(
aslist, column_names=None, w_index=2, d_index=3, sources=sources,
destinations=destinations)
def test_get_before_run(self):
sim.setup(1.0)
pop1 = sim.Population(3, sim.IF_curr_exp(), label="pop1")
pop2 = sim.Population(3, sim.IF_curr_exp(), label="pop2")
synapse_type = sim.StaticSynapse(weight=5, delay=1)
projection = sim.Projection(
pop1, pop2, sim.FromListConnector([[0, 0]]),
synapse_type=synapse_type)
weights = projection.get(["weight"], "list")
sim.run(0)
self.assertEqual(1, len(weights))
sim.end()
def test_using_static_synapse_singles(self):
sim.setup(timestep=1.0)
input = sim.Population(2, sim.SpikeSourceArray([0]), label="input")
pop = sim.Population(2, sim.IF_curr_exp(), label="pop")
as_list = [(0, 0), (1, 1)]
conn = sim.Projection(input, pop, sim.FromListConnector(as_list),
sim.StaticSynapse(weight=0.7, delay=3))
sim.run(1)
weights = conn.get(['weight', 'delay'], 'list')
sim.end()
target = [(0, 0, 0.7, 3), (1, 1, 0.7, 3)]
for i in range(2):
for j in range(2):
self.assertAlmostEqual(weights[i][j], target[i][j], places=3)
def test_using_half_static_synapse_singles(self):
sim.setup(timestep=1.0)
input = sim.Population(2, sim.SpikeSourceArray([0]), label="input")
pop = sim.Population(2, sim.IF_curr_exp(), label="pop")
as_list = [(0, 0, 0.7), (1, 1, 0.3)]
conn = sim.Projection(input, pop, sim.FromListConnector(
as_list, column_names=["weight"]),
sim.StaticSynapse(weight=0.6, delay=3))
sim.run(1)
weights = conn.get(['weight', 'delay'], 'list')
sim.end()
target = [(0, 0, 0.7, 3), (1, 1, 0.3, 3)]
for i in range(2):
for j in range(2):
self.assertAlmostEqual(weights[i][j], target[i][j], places=3)
def test_using_static_synapse_doubles(self):
sim.setup(timestep=1.0)
input = sim.Population(2, sim.SpikeSourceArray([0]), label="input")
pop = sim.Population(2, sim.IF_curr_exp(), label="pop")
as_list = [(0, 0), (1, 1)]
conn = sim.Projection(input, pop, sim.FromListConnector(as_list),
sim.StaticSynapse(weight=[0.7, 0.3],
delay=[3, 33]))
sim.run(1)
weights = conn.get(['weight', 'delay'], 'list')
target = [(0, 0, 0.7, 3), (1, 1, 0.3, 33)]
for i in range(2):
for j in range(2):
self.assertAlmostEqual(weights[i][j], target[i][j], places=3)
sim.end()
|
4,487 |
split args
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from collections import ChainMap
from typing import List, Optional, Tuple
import numpy as np
from mxnet import gluon
from gluonts.core.component import validated
from gluonts.mx import Tensor
from . import Distribution
from .bijection import AffineTransformation
from .bijection_output import BijectionOutput
from .distribution_output import ArgProj, DistributionOutput
from .transformed_distribution import TransformedDistribution
class TransformedDistributionOutput(DistributionOutput):
r"""
Class to connect a network to a distribution that is transformed
by a sequence of learnable bijections.
"""
@validated()
def __init__(
self,
base_distr_output: DistributionOutput,
transforms_output: List[BijectionOutput],
) -> None:
super().__init__()
self.base_distr_output = base_distr_output
self.transforms_output = transforms_output
self.base_distr_args_dim = base_distr_output.args_dim
self.transforms_args_dim = [
transform.args_dim for transform in transforms_output
]
def _fuse(t1: Tuple, t2: Tuple) -> Tuple:
if len(t1) > len(t2):
t1, t2 = t2, t1
# from here on len(t2) >= len(t1)
assert t2[-len(t1) :] == t1
return t2
self._event_shape: Tuple[int, ...] = ()
for to in self.transforms_output:
self._event_shape = _fuse(self._event_shape, to.event_shape)
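# Illustrative note (sketch): _fuse keeps the longer event shape and asserts the
# shorter one is its suffix, e.g. _fuse((2,), (3, 2)) -> (3, 2); mismatching
# shapes such as (2,) and (3, 4) would trip the assertion.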
def get_args_proj(self, prefix: Optional[str] = None) -> ArgProj:
return ArgProj(
args_dim=dict(
self.base_distr_args_dim,
**dict(ChainMap(*self.transforms_args_dim)),
),
domain_map=gluon.nn.HybridLambda(self.domain_map),
prefix=prefix,
)
def METHOD_NAME(self, args):
# Since hybrid_forward does not support dictionary, we have to separate
# the raw outputs of the network based on the indices and map them to
# the learnable parameters
num_distr_args = len(self.base_distr_args_dim)
distr_args = args[0:num_distr_args]
num_transforms_args = [
len(transform_dim_args)
for transform_dim_args in self.transforms_args_dim
]
# starting indices of arguments for each transformation
num_args_cumsum = np.cumsum([num_distr_args] + num_transforms_args)
# get the arguments for each of the transformations
transforms_args = list(
map(
lambda ixs: args[ixs[0] : ixs[1]],
zip(num_args_cumsum, num_args_cumsum[1:]),
)
)
return distr_args, transforms_args
def domain_map(self, F, *args: Tensor):
distr_args, transforms_args = self.METHOD_NAME(args)
distr_params = self.base_distr_output.domain_map(F, *distr_args)
transforms_params = [
transform_output.domain_map(F, *transform_args)
for transform_output, transform_args in zip(
self.transforms_output, transforms_args
)
]
# flatten the nested tuple
return sum(tuple([distr_params] + transforms_params), ())
def distribution(
self,
distr_args,
loc: Optional[Tensor] = None,
scale: Optional[Tensor] = None,
) -> Distribution:
distr_args, transforms_args = self.METHOD_NAME(distr_args)
distr = self.base_distr_output.distr_cls(*distr_args)
transforms = [
transform_output.bij_cls(*bij_args)
for transform_output, bij_args in zip(
self.transforms_output, transforms_args
)
]
trans_distr = TransformedDistribution(distr, transforms)
# Apply scaling as well at the end if scale is not None!
if loc is None and scale is None:
return trans_distr
else:
return TransformedDistribution(
trans_distr, [AffineTransformation(loc=loc, scale=scale)]
)
@property
def event_shape(self) -> Tuple:
return self._event_shape
|
4,488 |
downgrade
|
"""Deprecate Session.speaker_bio.
Revision ID: 284c10efdbce
Revises: 2cc791c09075
Create Date: 2021-02-09 10:01:25.069803
"""
from textwrap import dedent
from typing import Optional, Tuple, Union
import progressbar.widgets
import sqlalchemy as sa
from alembic import op
from progressbar import ProgressBar
from sqlalchemy.sql import column, table
from coaster.utils import markdown
# revision identifiers, used by Alembic.
revision = '284c10efdbce'
down_revision = '2cc791c09075'
branch_labels: Optional[Union[str, Tuple[str, ...]]] = None
depends_on: Optional[Union[str, Tuple[str, ...]]] = None
session = table(
'session',
column('id', sa.Integer()),
column('description_text', sa.UnicodeText()),
column('description_html', sa.UnicodeText()),
column('speaker_bio_text', sa.UnicodeText()),
)
def get_progressbar(label, maxval):
return ProgressBar(
maxval=maxval,
widgets=[
label,
': ',
progressbar.widgets.Percentage(),
' ',
progressbar.widgets.Bar(),
' ',
progressbar.widgets.ETA(),
' ',
],
)
def session_description(row):
description = f"{row.description_text.strip()}\r\n"
if row.speaker_bio_text:
description += f"\r\n### Speaker bio\r\n\r\n{row.speaker_bio_text.strip()}\r\n"
return description
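# Illustrative output sketch (hypothetical values): for a row with
# description_text "Talk abstract" and speaker_bio_text "Jane Doe, researcher",
# session_description returns
# "Talk abstract\r\n\r\n### Speaker bio\r\n\r\nJane Doe, researcher\r\n".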
def upgrade() -> None:
conn = op.get_bind()
count = conn.scalar(sa.select(sa.func.count('*')).select_from(session))
progress = get_progressbar("Sessions", count)
progress.start()
items = conn.execute(session.select())
for counter, item in enumerate(items):
description_text = session_description(item)
description_html = markdown(description_text)
conn.execute(
sa.update(session)
.where(session.c.id == item.id)
.values(
description_text=description_text,
description_html=description_html,
)
)
progress.update(counter)
progress.finish()
op.execute(
sa.DDL(
dedent(
'''
UPDATE session SET search_vector = setweight(to_tsvector('english', COALESCE(title, '')), 'A') || setweight(to_tsvector('english', COALESCE(description_text, '')), 'B') || setweight(to_tsvector('english', COALESCE(speaker, '')), 'A');
DROP TRIGGER session_search_vector_trigger ON session;
DROP FUNCTION session_search_vector_update();
CREATE FUNCTION session_search_vector_update() RETURNS trigger AS $$
BEGIN
NEW.search_vector := setweight(to_tsvector('english', COALESCE(NEW.title, '')), 'A') || setweight(to_tsvector('english', COALESCE(NEW.description_text, '')), 'B') || setweight(to_tsvector('english', COALESCE(NEW.speaker, '')), 'A');
RETURN NEW;
END
$$ LANGUAGE plpgsql;
CREATE TRIGGER session_search_vector_trigger BEFORE INSERT OR UPDATE ON session
FOR EACH ROW EXECUTE PROCEDURE session_search_vector_update();
'''
)
)
)
op.drop_column('session', 'speaker_bio_text')
op.drop_column('session', 'speaker_bio_html')
def METHOD_NAME() -> None:
op.add_column(
'session',
sa.Column(
'speaker_bio_html',
sa.TEXT(),
autoincrement=False,
nullable=False,
server_default='',
),
)
op.alter_column('session', 'speaker_bio_html', server_default=None)
op.add_column(
'session',
sa.Column(
'speaker_bio_text',
sa.TEXT(),
autoincrement=False,
nullable=False,
server_default='',
),
)
op.alter_column('session', 'speaker_bio_text', server_default=None)
op.execute(
sa.DDL(
dedent(
'''
UPDATE session SET search_vector = setweight(to_tsvector('english', COALESCE(title, '')), 'A') || setweight(to_tsvector('english', COALESCE(description_text, '')), 'B') || setweight(to_tsvector('english', COALESCE(speaker_bio_text, '')), 'B') || setweight(to_tsvector('english', COALESCE(speaker, '')), 'A');
DROP TRIGGER session_search_vector_trigger ON session;
DROP FUNCTION session_search_vector_update();
CREATE FUNCTION session_search_vector_update() RETURNS trigger AS $$
BEGIN
NEW.search_vector := setweight(to_tsvector('english', COALESCE(NEW.title, '')), 'A') || setweight(to_tsvector('english', COALESCE(NEW.description_text, '')), 'B') || setweight(to_tsvector('english', COALESCE(NEW.speaker_bio_text, '')), 'B') || setweight(to_tsvector('english', COALESCE(NEW.speaker, '')), 'A');
RETURN NEW;
END
$$ LANGUAGE plpgsql;
CREATE TRIGGER session_search_vector_trigger BEFORE INSERT OR UPDATE ON session
FOR EACH ROW EXECUTE PROCEDURE session_search_vector_update();
'''
)
)
)
|
4,489 |
record stats
|
"""Utils function"""
"""
Copyright (c) 2016-2022, EPFL/Blue Brain Project
This file is part of BluePyOpt <https://github.com/BlueBrain/BluePyOpt>
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License version 3.0 as published
by the Free Software Foundation.
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
import numpy
import random
import deap.base
# pylint: disable=R0914, R0912
class WeightedReducedFitness(deap.base.Fitness):
"""Fitness that compares by weighted objective values"""
def __init__(self, values=(), obj_size=None, reduce_fcn=numpy.sum):
self.weights = [-1.0] * obj_size if obj_size is not None else [-1]
self.reduce_fcn = reduce_fcn
super(WeightedReducedFitness, self).__init__(values)
@property
def reduce(self):
"""Reduce of values"""
return self.reduce_fcn(self.values)
@property
def weighted_reduce(self):
"""Reduce of weighted values"""
return self.reduce_fcn(self.wvalues)
def __le__(self, other):
return self.weighted_reduce <= other.weighted_reduce
def __lt__(self, other):
return self.weighted_reduce < other.weighted_reduce
def __deepcopy__(self, _):
"""Override deepcopy"""
cls = self.__class__
result = cls.__new__(cls)
result.__dict__.update(self.__dict__)
return result
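# Illustrative comparison (sketch): with per-objective weights of -1 the
# weighted reduce is the negated objective sum, so the individual with the
# smaller raw sum compares as the "greater" (better) fitness.
#   f1 = WeightedReducedFitness((1.0, 2.0), obj_size=2)  # reduce = 3.0
#   f2 = WeightedReducedFitness((2.0, 4.0), obj_size=2)  # reduce = 6.0
#   f2 < f1  # True: f2 is worse under minimisation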
class WSListIndividual(list):
"""Individual consisting of a list with weighted fitness"""
def __init__(self, *args, **kwargs):
"""Constructor"""
reduce_fcn = kwargs.get("reduce_fcn", numpy.sum)
self.fitness = WeightedReducedFitness(
obj_size=kwargs["obj_size"], reduce_fcn=reduce_fcn
)
# Index of the parent, used by MO-CMA
self._ps = "p", 0
del kwargs["obj_size"]
if "reduce_fcn" in kwargs:
del kwargs["reduce_fcn"]
super(WSListIndividual, self).__init__(*args, **kwargs)
def update_history_and_hof(halloffame, history, population):
"""Update the hall of fame with the generated individuals
Note: History and Hall-of-Fame behave like dictionaries
"""
if halloffame is not None:
halloffame.update(population)
history.update(population)
def METHOD_NAME(stats, logbook, gen, population, invalid_count):
"""Update the statistics with the new population"""
record = stats.compile(population) if stats is not None else {}
logbook.record(gen=gen, nevals=invalid_count, **record)
def closest_feasible(individual, lbounds, ubounds):
"""Returns the closest individual in the parameter bounds"""
# TODO: Fix 1e-9 hack
for i, (u, l, el) in enumerate(zip(ubounds, lbounds, individual)):
if el >= u:
individual[i] = u - 1e-9
elif el <= l:
individual[i] = l + 1e-9
return individual
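# Worked example (sketch): closest_feasible([1.5, -0.2], [0.0, 0.0], [1.0, 1.0])
# clips the first gene to 1.0 - 1e-9 and the second to 0.0 + 1e-9, keeping the
# individual strictly inside the parameter bounds.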
def bound(population, lbounds, ubounds):
"""Bounds the population based on lower and upper parameter bounds."""
n_out = 0
for i, ind in enumerate(population):
if numpy.any(numpy.less(ind, lbounds)) or numpy.any(
numpy.greater(ind, ubounds)
):
population[i] = closest_feasible(ind, lbounds, ubounds)
n_out += 1
return n_out
def uniform(lower_list, upper_list, dimensions):
"""Uniformly pick an individual"""
if hasattr(lower_list, "__iter__"):
return [
random.uniform(lower, upper) for lower, upper in
zip(lower_list, upper_list)
]
else:
return [random.uniform(lower_list, upper_list) for _ in
range(dimensions)]
def reduce_method(meth):
"""Overwrite reduce"""
return (getattr, (meth.__self__, meth.__func__.__name__))
def run_next_gen(criteria, terminator):
"""Condition to stay inside the loop."""
if terminator is None:
return criteria
return criteria and not terminator.is_set()
|
4,490 |
size
|
"""tensorflow.compat.v1 backend implementation"""
from packaging.version import Version
import tensorflow.compat.v1 as tf
if Version(tf.__version__) < Version("2.7.0"):
raise RuntimeError("DeepXDE requires TensorFlow>=2.7.0.")
# The major changes from TensorFlow 1.x to TensorFlow 2.x are:
# 1. Eager execution: enable_eager_execution(), disable_eager_execution()
# 2. Resource variables: enable_resource_variables(), disable_resource_variables()
# 3. Tensor shapes: enable_v2_tensorshape(), disable_v2_tensorshape()
# 4. Control flow: enable_control_flow_v2(), disable_control_flow_v2()
# 5. Tensors comparison: enable_tensor_equality(), disable_tensor_equality()
# 6. Some internal uses of tf.data symbols
# For more details, see
# - https://www.tensorflow.org/guide/migrate
# - the source code of disable_v2_behavior()
# We can simply disable all TensorFlow 2.x behaviors by disable_v2_behavior(), but some
# features in TensorFlow 2.x are useful such as `Tensor shapes`. Actually we use `Tensor
# shapes` in DeepXDE.
tf.disable_v2_behavior()
tf.enable_v2_tensorshape()
# In terms of functionality, we only need to disable eager mode.
# tf.disable_eager_execution()
# It hurts performance a lot (only in some cases?) if enabling tensor equality.
# tf.disable_tensor_equality()
# It hurts performance a little (only in some cases?) if enabling resource variables.
# tf.disable_resource_variables()
# It hurts performance a little (only in some cases?) if enabling control flow.
# tf.disable_control_flow_v2()
lib = tf
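# Illustrative check of the resulting configuration (an assumption, not part of
# the original module): eager execution stays off while v2 tensor shapes are on.
#   tf.executing_eagerly()     # -> False, graph mode after disable_v2_behavior()
#   tf.zeros([2, 3]).shape[0]  # -> 2 (a plain int, v2-style), not Dimension(2)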
def data_type_dict():
return {
"float16": tf.float16,
"float32": tf.float32,
"float64": tf.float64,
"uint8": tf.uint8,
"int8": tf.int8,
"int16": tf.int16,
"int32": tf.int32,
"int64": tf.int64,
"bool": tf.bool,
}
def is_gpu_available():
return bool(tf.config.list_physical_devices("GPU"))
def is_tensor(obj):
return tf.is_tensor(obj)
def shape(input_tensor):
return input_tensor.shape.as_list()
def METHOD_NAME(tensor):
return tf.get_static_value(tf.METHOD_NAME(tensor)).item()
def ndim(input_tensor):
return len(input_tensor.shape)
def transpose(tensor, axes=None):
return tf.transpose(tensor, perm=axes)
def reshape(tensor, shape):
return tf.reshape(tensor, shape)
def Variable(initial_value, dtype=None):
return tf.Variable(initial_value=initial_value, trainable=True, dtype=dtype)
def as_tensor(data, dtype=None):
if tf.is_tensor(data):
if dtype is None or data.dtype == dtype:
return data
return tf.cast(data, dtype)
return tf.convert_to_tensor(data, dtype=dtype)
def sparse_tensor(indices, values, shape):
return tf.sparse.SparseTensor(indices, values, shape)
def from_numpy(np_array):
# Do memory copy:
# https://stackoverflow.com/questions/47519802/does-tensorflow-convert-to-tensor-do-memory-copy
# To avoid memory copy, use implicit conversion, but memory copy is still possible.
# https://www.tensorflow.org/tutorials/customization/basics#numpy_compatibility
return tf.convert_to_tensor(np_array)
def concat(values, axis):
return tf.concat(values, axis)
def stack(values, axis):
return tf.stack(values, axis)
def expand_dims(tensor, axis):
return tf.expand_dims(tensor, axis)
def reverse(tensor, axis):
return tf.reverse(tensor, axis)
def roll(tensor, shift, axis):
return tf.roll(tensor, shift, axis)
def lgamma(x):
return tf.math.lgamma(x)
def elu(x):
return tf.nn.elu(x)
def relu(x):
return tf.nn.relu(x)
def selu(x):
return tf.nn.selu(x)
def sigmoid(x):
return tf.math.sigmoid(x)
def silu(x):
return tf.keras.activations.swish(x)
def sin(x):
return tf.math.sin(x)
def cos(x):
return tf.math.cos(x)
def exp(x):
return tf.math.exp(x)
def square(x):
return tf.math.square(x)
# pylint: disable=redefined-builtin
def abs(x):
return tf.math.abs(x)
def minimum(x, y):
return tf.math.minimum(x, y)
def tanh(x):
return tf.math.tanh(x)
def pow(x, y):
return tf.math.pow(x, y)
def mean(input_tensor, dim, keepdims=False):
return tf.math.reduce_mean(input_tensor, axis=dim, keepdims=keepdims)
def reduce_mean(input_tensor):
return tf.math.reduce_mean(input_tensor)
def sum(input_tensor, dim, keepdims=False):
return tf.math.reduce_sum(input_tensor, axis=dim, keepdims=keepdims)
def reduce_sum(input_tensor):
return tf.math.reduce_sum(input_tensor)
def prod(input_tensor, dim, keepdims=False):
return tf.math.reduce_prod(input_tensor, axis=dim, keepdims=keepdims)
def reduce_prod(input_tensor):
return tf.math.reduce_prod(input_tensor)
# pylint: disable=redefined-builtin
def min(input_tensor, dim, keepdims=False):
return tf.math.reduce_min(input_tensor, axis=dim, keepdims=keepdims)
def reduce_min(input_tensor):
return tf.math.reduce_min(input_tensor)
# pylint: disable=redefined-builtin
def max(input_tensor, dim, keepdims=False):
return tf.math.reduce_max(input_tensor, axis=dim, keepdims=keepdims)
def reduce_max(input_tensor):
return tf.math.reduce_max(input_tensor)
def norm(tensor, ord=None, axis=None, keepdims=False):
if ord is None:
ord = "euclidean"
return tf.norm(tensor, ord=ord, axis=axis, keepdims=keepdims)
def zeros(shape, dtype):
return tf.zeros(shape, dtype=dtype)
def zeros_like(input_tensor):
return tf.zeros_like(input_tensor)
def matmul(x, y):
return tf.linalg.matmul(x, y)
def sparse_dense_matmul(x, y):
return tf.sparse.sparse_dense_matmul(x, y)
|
4,491 |
fin
|
from unittest import mock
import pytest
import requests
from settings import TEST_DATA
from suite.utils.resources_utils import (
create_example_app,
create_items_from_yaml,
create_namespace_with_name_from_yaml,
delete_namespace,
ensure_connection_to_public_endpoint,
ensure_response_from_backend,
wait_before_test,
wait_until_all_pods_are_ready,
)
from suite.utils.yaml_utils import get_first_ingress_host_from_yaml
class BackendSetup:
"""
Encapsulate the example details.
Attributes:
req_url (str):
ingress_hosts (dict):
"""
def __init__(self, req_url, ingress_hosts):
self.req_url = req_url
self.ingress_hosts = ingress_hosts
@pytest.fixture(scope="class")
def backend_setup(request, kube_apis, ingress_controller_endpoint) -> BackendSetup:
"""
Create 2 namespaces and deploy simple applications in them.
:param request: pytest fixture
:param kube_apis: client apis
:param ingress_controller_endpoint: public endpoint
:return: BackendSetup
"""
watched_namespace = create_namespace_with_name_from_yaml(kube_apis.v1, f"watched-ns", f"{TEST_DATA}/common/ns.yaml")
foreign_namespace = create_namespace_with_name_from_yaml(kube_apis.v1, f"foreign-ns", f"{TEST_DATA}/common/ns.yaml")
watched_namespace2 = create_namespace_with_name_from_yaml(
kube_apis.v1, f"watched-ns2", f"{TEST_DATA}/common/ns.yaml"
)
ingress_hosts = {}
for ns in [watched_namespace, foreign_namespace, watched_namespace2]:
print(f"------------------------- Deploy the backend in {ns} -----------------------------------")
create_example_app(kube_apis, "simple", ns)
src_ing_yaml = f"{TEST_DATA}/watch-namespace/{ns}-ingress.yaml"
create_items_from_yaml(kube_apis, src_ing_yaml, ns)
ingress_host = get_first_ingress_host_from_yaml(src_ing_yaml)
ingress_hosts[f"{ns}-ingress"] = ingress_host
req_url = f"http://{ingress_controller_endpoint.public_ip}:{ingress_controller_endpoint.port}/backend1"
wait_until_all_pods_are_ready(kube_apis.v1, ns)
ensure_connection_to_public_endpoint(
ingress_controller_endpoint.public_ip,
ingress_controller_endpoint.port,
ingress_controller_endpoint.port_ssl,
)
def METHOD_NAME():
if request.config.getoption("--skip-fixture-teardown") == "no":
print("Clean up:")
delete_namespace(kube_apis.v1, watched_namespace)
delete_namespace(kube_apis.v1, foreign_namespace)
delete_namespace(kube_apis.v1, watched_namespace2)
request.addfinalizer(METHOD_NAME)
return BackendSetup(req_url, ingress_hosts)
@pytest.mark.ingresses
@pytest.mark.parametrize(
"ingress_controller, expected_responses",
[
pytest.param(
{"extra_args": ["-watch-namespace=watched-ns"]}, {"watched-ns-ingress": 200, "foreign-ns-ingress": 404}
)
],
indirect=["ingress_controller"],
)
class TestWatchNamespace:
def test_response_codes(self, ingress_controller, backend_setup, expected_responses):
for ing in ["watched-ns-ingress", "foreign-ns-ingress"]:
ensure_response_from_backend(backend_setup.req_url, backend_setup.ingress_hosts[ing])
resp = requests.get(backend_setup.req_url, headers={"host": backend_setup.ingress_hosts[ing]})
assert (
resp.status_code == expected_responses[ing]
), f"Expected: {expected_responses[ing]} response code for {backend_setup.ingress_hosts[ing]}"
@pytest.mark.ingresses
@pytest.mark.parametrize(
"ingress_controller, expected_responses",
[
pytest.param(
{"extra_args": ["-watch-namespace=watched-ns,watched-ns2"]},
{"watched-ns-ingress": 200, "watched-ns2-ingress": 200, "foreign-ns-ingress": 404},
)
],
indirect=["ingress_controller"],
)
class TestWatchMultipleNamespaces:
def test_response_codes(self, ingress_controller, backend_setup, expected_responses):
for ing in ["watched-ns-ingress", "watched-ns2-ingress", "foreign-ns-ingress"]:
ensure_response_from_backend(backend_setup.req_url, backend_setup.ingress_hosts[ing])
resp = mock.Mock()
resp.status_code = "None"
retry = 0
while resp.status_code != expected_responses[ing] and retry < 3:
resp = requests.get(backend_setup.req_url, headers={"host": backend_setup.ingress_hosts[ing]})
retry += 1
wait_before_test()
assert (
resp.status_code == expected_responses[ing]
), f"Expected: {expected_responses[ing]} response code for {backend_setup.ingress_hosts[ing]}"
|
4,492 |
unpack
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import struct
from typing import List, Union
import attr
from pyspark import SparkContext
from shapely.wkb import loads
DOUBLE_SIZE = 8
INT_SIZE = 4
BYTE_SIZE = 1
CHAR_SIZE = 1
BOOLEAN_SIZE = 1
size_dict = {
"d": DOUBLE_SIZE,
"i": INT_SIZE,
"b": BYTE_SIZE,
"s": CHAR_SIZE,
"?": BOOLEAN_SIZE
}
@attr.s
class BinaryParser:
bytes = attr.ib(type=Union[bytearray, List[int]])
current_index = attr.ib(default=0)
def __attrs_post_init__(self):
no_negatives = self.remove_negatives(self.bytes)
self.bytes = self._convert_to_binary_array(no_negatives)
def read_geometry(self, length: int):
geom_bytes = b"".join([struct.pack("b", el) if el < 128 else struct.pack("b", el - 256) for el in
self.bytes[self.current_index: self.current_index + length]])
geom = loads(geom_bytes)
self.current_index += length
return geom
def read_double(self):
data = self.METHOD_NAME("d", self.bytes)
self.current_index = self.current_index + DOUBLE_SIZE
return data
def read_double_reverse(self):
data = self.unpack_reverse("d", self.bytes)
self.current_index = self.current_index + DOUBLE_SIZE
return data
def read_int(self):
data = self.METHOD_NAME("i", self.bytes)
self.current_index = self.current_index + INT_SIZE
return data
def read_byte(self):
data = self.METHOD_NAME("b", self.bytes)
self.current_index = self.current_index + BYTE_SIZE
return data
def read_char(self):
data = self.METHOD_NAME("c", self.bytes)
self.current_index = self.current_index + CHAR_SIZE
return data
def read_boolean(self):
data = self.METHOD_NAME("?", self.bytes)
self.current_index = self.current_index + BOOLEAN_SIZE
return data
def read_string(self, length: int, encoding: str = "utf8"):
string = self.bytes[self.current_index: self.current_index + length]
self.current_index += length
try:
encoded_string = string.decode(encoding, "ignore")
        except UnicodeEncodeError:
            raise
return encoded_string
def read_kryo_string(self, length: int, sc: SparkContext) -> str:
array_length = length - self.current_index
byte_array = sc._gateway.new_array(sc._jvm.Byte, array_length)
for index, bt in enumerate(self.bytes[self.current_index: length]):
byte_array[index] = self.bytes[self.current_index + index]
decoded_string = sc._jvm.org.imbruced.geo_pyspark.serializers.GeoSerializerData.deserializeUserData(
byte_array
)
self.current_index = length
return decoded_string
def METHOD_NAME(self, tp: str, bytes: bytearray):
max_index = self.current_index + size_dict[tp]
bytes = self._convert_to_binary_array(bytes[self.current_index: max_index])
return struct.METHOD_NAME(tp, bytes)[0]
def unpack_reverse(self, tp: str, bytes: bytearray):
max_index = self.current_index + size_dict[tp]
bytes = bytearray(reversed(self._convert_to_binary_array(bytes[self.current_index: max_index])))
return struct.METHOD_NAME(tp, bytes)[0]
@classmethod
def remove_negatives(cls, bytes):
return [cls.remove_negative(bt) for bt in bytes]
@classmethod
def remove_negative(cls, byte):
bt_pos = byte if byte >= 0 else byte + 256
return bt_pos
@staticmethod
def _convert_to_binary_array(bytes):
if type(bytes) == list:
bytes = bytearray(bytes)
return bytes
class BinaryBuffer:
def __init__(self):
self.array = []
def put_double(self, value):
bytes = self.__pack("d", value)
self.__extend_buffer(bytes)
def put_int(self, value):
bytes = self.__pack("i", value)
self.__extend_buffer(bytes)
def put_byte(self, value):
bytes = self.__pack("b", value)
self.__extend_buffer(bytes)
def put(self, value):
self.__extend_buffer(value)
def __pack(self, type, value):
return struct.pack(type, value)
def __extend_buffer(self, bytes):
self.array.extend(list(bytes))
def __translate_values(self, values):
return [el if el < 128 else el - 256 for el in values]
def add_empty_bytes(self, tp: str, number_of_empty):
if tp == "double":
for _ in range(number_of_empty):
self.put_double(0.0)
elif tp == "int":
for _ in range(number_of_empty):
self.put_int(0)
elif tp == "double":
for _ in range(number_of_empty):
self.put_byte(0)
else:
raise TypeError(f"Passed {tp} is not available")
@property
def byte_array(self):
return self.__translate_values(self.array)
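# Hypothetical usage sketch (illustrative only, not part of the original module):
# BinaryBuffer accumulates struct-packed values and byte_array exposes them as
# signed integers in [-128, 127], the representation JVM byte arrays expect.
if __name__ == "__main__":
    demo_buffer = BinaryBuffer()
    demo_buffer.put_int(7)
    demo_buffer.put_double(3.14)
    print(demo_buffer.byte_array)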
|
4,493 |
project
|
import logging
import os
import re
import json
from typing import List, Any
from datetime import datetime
from github.Issue import Issue
from github.Repository import Repository
import requests
from azure.devops.v6_0.pipelines.pipelines_client import PipelinesClient
from azure.devops.v6_0.pipelines import models
from msrest.authentication import BasicAuthentication
from packaging.version import parse as Version
from urllib3 import Retry, PoolManager
REQUEST_REPO = 'Azure/sdk-release-request'
REST_REPO = 'Azure/azure-rest-api-specs'
AUTO_ASSIGN_LABEL = 'assigned'
AUTO_PARSE_LABEL = 'auto-link'
AUTO_CLOSE_LABEL = 'auto-close'
MULTI_LINK_LABEL = 'MultiLink'
INCONSISTENT_TAG = 'Inconsistent tag'
_LOG = logging.getLogger(__name__)
def get_origin_link_and_tag(issue_body_list: List[str]) -> (str, str):
link, readme_tag = '', ''
for row in issue_body_list:
if 'link' in row.lower() and 'release request' not in row.lower() and link == '':
link = row.split(":", 1)[-1].strip()
if 'readme tag' in row.lower() and readme_tag == '':
readme_tag = row.split(":", 1)[-1].strip()
if link and readme_tag:
break
if link.count('https') > 1:
link = link.split(']')[0]
link = link.replace('[', "").replace(']', "").replace('(', "").replace(')', "")
return link, readme_tag
def to_datetime(data: str) -> datetime:
return datetime.strptime(data, '%Y-%m-%dT%H:%M:%S')
def get_last_released_date(package_name: str) -> (str, datetime):
try:
pypi = PyPIClient()
latest_release, latest_stable = pypi.get_relevant_versions(package_name)
latest_release_date = pypi.project_release(package_name, latest_release)["urls"][0]["upload_time"]
latest_stable_date = pypi.project_release(package_name, latest_stable)["urls"][0]["upload_time"]
if latest_release_date > latest_stable_date:
return str(latest_release), to_datetime(latest_release_date)
return str(latest_stable), to_datetime(latest_stable_date)
except:
return '', to_datetime('1970-01-01T00:00:00')
# get python release pipeline link from web
def get_python_release_pipeline(output_folder):
pipeline_client = PipelinesClient(base_url='https://dev.azure.com/azure-sdk',
creds=BasicAuthentication(os.getenv('PIPELINE_TOKEN'), ''))
pipelines = pipeline_client.list_pipelines(METHOD_NAME='internal')
for pipeline in pipelines:
        if re.findall(r'^python - \w*$', pipeline.name):
key = pipeline.name.replace('python - ', '')
if key == output_folder:
pipeline_url = 'https://dev.azure.com/azure-sdk/internal/_build?definitionId={}'.format(pipeline.id)
return pipeline_url
else:
_LOG.info('Cannot find definitionId, Do not display pipeline_url')
return ''
# Run sdk-auto-release(main) to generate SDK
def run_pipeline(issue_link, pipeline_url, spec_readme, python_tag="", rest_repo_hash="", target_date=""):
    parameters = {
"stages_to_skip": [],
"resources": {
"repositories": {
"self": {
"refName": "refs/heads/main"
}
}
},
"variables": {
"BASE_BRANCH": {
"value": "",
"isSecret": False
},
"ISSUE_LINK": {
"value": issue_link,
"isSecret": False
},
"PIPELINE_LINK": {
"value": pipeline_url,
"isSecret": False
},
"SPEC_README": {
"value": spec_readme,
"isSecret": False
},
"PYTHON_TAG": {
"value": python_tag,
"isSecret": False
},
"REST_REPO_HASH": {
"value": rest_repo_hash,
"isSecret": False
},
"TARGET_DATE": {
"value": target_date,
"isSecret": False
}
}
}
# Fill in with your personal access token and org URL
personal_access_token = os.getenv('PIPELINE_TOKEN')
organization_url = 'https://dev.azure.com/azure-sdk'
# Create a connection to the org
credentials = BasicAuthentication('', personal_access_token)
    run_parameters = models.RunPipelineParameters(**parameters)
client = PipelinesClient(base_url=organization_url, creds=credentials)
result = client.run_pipeline(METHOD_NAME='internal', pipeline_id=2500, run_parameters=run_parameters)
return result.state == 'inProgress'
def record_release(package_name: str, issue_info: Any, file: str, version: str) -> None:
created_at = issue_info.created_at.strftime('%Y-%m-%d')
closed_at = issue_info.closed_at.strftime('%Y-%m-%d')
assignee = issue_info.assignee.login
author = issue_info.user.login
link = issue_info.html_url
is_stable = True if 'b' not in version else ''
closed_issue_info = f'{package_name},{author},{assignee},{created_at},{closed_at},{link},{version},{is_stable}\n'
with open(file, 'r') as file_read:
lines = file_read.readlines()
with open(file, 'w') as file_write:
lines.insert(1, closed_issue_info)
file_write.writelines(lines)
class IssuePackage:
issue = None # origin issue instance
rest_repo = None # repo instance: Azure/azure-rest-api-specs
labels_name = {} # name set of issue labels
def __init__(self, issue: Issue, rest_repo: Repository):
self.issue = issue
self.rest_repo = rest_repo
self.labels_name = {label.name for label in issue.labels}
class PyPIClient:
def __init__(self, host="https://pypi.org"):
self._host = host
self._http = PoolManager(
retries=Retry(total=3, raise_on_status=True), ca_certs=os.getenv("REQUESTS_CA_BUNDLE", None)
)
def METHOD_NAME(self, package_name):
response = self._http.request(
"get", "{host}/pypi/{project_name}/json".format(host=self._host, project_name=package_name)
)
return json.loads(response.data.decode("utf-8"))
def project_release(self, package_name, version):
response = self._http.request(
"get",
"{host}/pypi/{project_name}/{version}/json".format(
host=self._host, project_name=package_name, version=version
),
)
return json.loads(response.data.decode("utf-8"))
def get_ordered_versions(self, package_name) -> List[Version]:
METHOD_NAME = self.METHOD_NAME(package_name)
versions = [Version(package_version) for package_version in METHOD_NAME["releases"].keys()]
versions.sort()
return versions
def get_relevant_versions(self, package_name):
"""Return a tuple: (latest release, latest stable)
If there are different, it means the latest is not a stable
"""
versions = self.get_ordered_versions(package_name)
        stable_releases = [version for version in versions if not version.is_prerelease]
        if stable_releases:
            return versions[-1], stable_releases[-1]
return versions[-1], versions[-1]
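# Hypothetical usage sketch (assumes the masked METHOD_NAME above, the JSON project
# metadata fetch, is restored); the package name is purely illustrative:
# client = PyPIClient()
# latest, stable = client.get_relevant_versions("azure-mgmt-compute")
# if latest != stable:
#     print(f"{latest} is a pre-release; the latest stable release is {stable}")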
|
4,494 |
check format
|
from __future__ import absolute_import, division, print_function
from wxtbx import phil_controls
import wxtbx
from libtbx.utils import Abort, to_unicode, to_str
from libtbx import Auto
import wx
import sys
class ValidatedTextCtrl(wx.TextCtrl, phil_controls.PhilCtrl):
def __init__(self, *args, **kwds):
saved_value = None
if (kwds.get('value', "") != ""):
saved_value = kwds['value']
kwds['value'] = ""
super(ValidatedTextCtrl, self).__init__(*args, **kwds)
font = wx.Font(wxtbx.default_font_size, wx.MODERN, wx.NORMAL, wx.NORMAL)
self.SetFont(font)
style = self.GetWindowStyle()
if (not style & wx.TE_PROCESS_ENTER):
style |= wx.TE_PROCESS_ENTER
self.SetWindowStyle(style)
self.SetValidator(self.CreateValidator())
self.Bind(wx.EVT_TEXT_ENTER, self.OnEnter, self)
self.Bind(wx.EVT_KILL_FOCUS, self.OnFocusLost, self)
if saved_value is not None:
if (type(saved_value) == str):
        saved_value = to_unicode(saved_value)
self.SetValue(saved_value)
def GetValue(self):
try:
val = wx.TextCtrl.GetValue(self)
except Exception as e:
val = "" # Probably C++ object deleted
if wxtbx.is_unicode_build():
return to_str(val)
else :
assert isinstance(val, str)
return val
def OnEnter(self, evt=None):
#self.Validate()
self.DoSendEvent()
def OnFocusLost(self, event):
self.DoSendEvent()
event.Skip()
def CreateValidator(self):
raise NotImplementedError()
def Validate(self):
# XXX why doesn't self.Validate() work?
if self.GetValidator().Validate(self.GetParent()):
return True
else :
raise Abort()
def FormatValue(self, value):
raise NotImplementedError()
def GetPhilValue(self):
raise NotImplementedError()
def GetStringValue(self):
value = self.GetPhilValue()
if (value is not None) and (value is not Auto):
return self.FormatValue(value)
elif (self.UseAuto()) or (value is Auto):
return Auto
return None
def Enable(self, enable=True):
wx.TextCtrl.Enable(self, enable)
if enable :
self.SetBackgroundColour((255,255,255))
else :
self.SetBackgroundColour((200,200,200))
Validator = wx.Validator
if wx.VERSION < (4, 0):
Validator = wx.PyValidator
class TextCtrlValidator(Validator):
def __init__(self):
Validator.__init__(self)
self.Bind(wx.EVT_TEXT_ENTER, self.OnEnter)
def Clone(self):
return self.__class__()
def TransferToWindow(self):
return True
def TransferFromWindow(self):
return True
def METHOD_NAME(self, value):
raise NotImplementedError()
def Validate(self, win):
ctrl = self.GetWindow()
try :
value = to_unicode(ctrl.GetValue())
# if isinstance(value, str):
# value = value.decode("utf-8")
if (value == ""):
ctrl.SetBackgroundColour(
wx.SystemSettings.GetColour(wx.SYS_COLOUR_WINDOW))
return True
reformatted = self.METHOD_NAME(value)
if isinstance(reformatted, str):
reformatted = to_unicode(reformatted)
ctrl.SetValue(reformatted)
ctrl.SetBackgroundColour(
wx.SystemSettings.GetColour(wx.SYS_COLOUR_WINDOW))
#ctrl.SetFocus()
ctrl.Refresh()
return True
except NotImplementedError :
raise
except Exception as e :
ctrl_name = str(ctrl.GetName())
msg = "Inappropriate value given for \"%s\": %s" %(ctrl_name,str(e))
if (type(e).__name__ == "UnicodeEncodeError"):
msg = ("You have entered characters which cannot be converted to "+
"Latin characters in the control '%s'; due to limitations of the "+
"underlying code, only the standard UTF-8 character set is "+
"allowed.") % ctrl_name
wx.MessageBox(caption="Format error", message=msg)
ctrl.SetBackgroundColour("red")
# Don't set focus on Windows since messagebox is modal and thus
# would automatically recapture focus leading to an endless UI loop
if (sys.platform != 'win32'):
ctrl.SetFocus()
ctrl.Refresh()
return False
def OnEnter(self, event):
#self.Validate(None)
ctrl = self.GetWindow()
ctrl.DoSendEvent()
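# Hypothetical subclass sketch (names are illustrative, not part of wxtbx): a concrete
# control pairs a ValidatedTextCtrl with a TextCtrlValidator whose masked METHOD_NAME
# hook reformats the raw text before it is written back into the control.
#class IntValidator(TextCtrlValidator):
#  def check_format(self, value):
#    return str(int(value))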
|
4,495 |
compile
|
"""
Soup Sieve.
A CSS selector filter for BeautifulSoup4.
MIT License
Copyright (c) 2018 Isaac Muse
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from .__meta__ import __version__, __version_info__ # noqa: F401
from . import css_parser as cp
from . import css_match as cm
from . import css_types as ct
from .util import DEBUG, SelectorSyntaxError # noqa: F401
import bs4 # type: ignore[import]
from typing import Dict, Optional, Any, List, Iterator, Iterable
__all__ = (
'DEBUG', 'SelectorSyntaxError', 'SoupSieve',
'closest', 'compile', 'filter', 'iselect',
'match', 'select', 'select_one'
)
SoupSieve = cm.SoupSieve
def METHOD_NAME( # noqa: A001
pattern: str,
namespaces: Optional[Dict[str, str]] = None,
flags: int = 0,
*,
custom: Optional[Dict[str, str]] = None,
**kwargs: Any
) -> cm.SoupSieve:
"""Compile CSS pattern."""
ns = ct.Namespaces(namespaces) if namespaces is not None else namespaces # type: Optional[ct.Namespaces]
cs = ct.CustomSelectors(custom) if custom is not None else custom # type: Optional[ct.CustomSelectors]
if isinstance(pattern, SoupSieve):
if flags:
raise ValueError("Cannot process 'flags' argument on a compiled selector list")
elif namespaces is not None:
raise ValueError("Cannot process 'namespaces' argument on a compiled selector list")
elif custom is not None:
raise ValueError("Cannot process 'custom' argument on a compiled selector list")
return pattern
return cp._cached_css_compile(pattern, ns, cs, flags)
def purge() -> None:
"""Purge cached patterns."""
cp._purge_cache()
def closest(
select: str,
tag: 'bs4.Tag',
namespaces: Optional[Dict[str, str]] = None,
flags: int = 0,
*,
custom: Optional[Dict[str, str]] = None,
**kwargs: Any
) -> 'bs4.Tag':
"""Match closest ancestor."""
return METHOD_NAME(select, namespaces, flags, **kwargs).closest(tag)
def match(
select: str,
tag: 'bs4.Tag',
namespaces: Optional[Dict[str, str]] = None,
flags: int = 0,
*,
custom: Optional[Dict[str, str]] = None,
**kwargs: Any
) -> bool:
"""Match node."""
return METHOD_NAME(select, namespaces, flags, **kwargs).match(tag)
def filter( # noqa: A001
select: str,
iterable: Iterable['bs4.Tag'],
namespaces: Optional[Dict[str, str]] = None,
flags: int = 0,
*,
custom: Optional[Dict[str, str]] = None,
**kwargs: Any
) -> List['bs4.Tag']:
"""Filter list of nodes."""
return METHOD_NAME(select, namespaces, flags, **kwargs).filter(iterable)
def select_one(
select: str,
tag: 'bs4.Tag',
namespaces: Optional[Dict[str, str]] = None,
flags: int = 0,
*,
custom: Optional[Dict[str, str]] = None,
**kwargs: Any
) -> 'bs4.Tag':
"""Select a single tag."""
return METHOD_NAME(select, namespaces, flags, **kwargs).select_one(tag)
def select(
select: str,
tag: 'bs4.Tag',
namespaces: Optional[Dict[str, str]] = None,
limit: int = 0,
flags: int = 0,
*,
custom: Optional[Dict[str, str]] = None,
**kwargs: Any
) -> List['bs4.Tag']:
"""Select the specified tags."""
return METHOD_NAME(select, namespaces, flags, **kwargs).select(tag, limit)
def iselect(
select: str,
tag: 'bs4.Tag',
namespaces: Optional[Dict[str, str]] = None,
limit: int = 0,
flags: int = 0,
*,
custom: Optional[Dict[str, str]] = None,
**kwargs: Any
) -> Iterator['bs4.Tag']:
"""Iterate the specified tags."""
for el in METHOD_NAME(select, namespaces, flags, **kwargs).iselect(tag, limit):
yield el
def escape(ident: str) -> str:
"""Escape identifier."""
return cp.escape(ident)
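# Hypothetical usage sketch (assumes the masked METHOD_NAME is the module-level
# compile() wrapper above); the markup is purely illustrative:
# soup = bs4.BeautifulSoup("<div><p class='a'>hi</p></div>", "html.parser")
# selector = compile("p.a")          # pre-compiled patterns can be reused
# first_match = selector.select_one(soup)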
|
4,496 |
send finding to webex
|
import tempfile
from enum import Enum
from webexteamssdk import WebexTeamsAPI
from robusta.core.reporting.base import Finding, FindingSeverity
from robusta.core.reporting.blocks import BaseBlock, FileBlock, List, MarkdownBlock, TableBlock
from robusta.core.reporting.utils import SVG_SUFFIX, add_pngs_for_all_svgs
from robusta.core.sinks.transformer import Transformer
from robusta.core.sinks.webex.webex_sink_params import WebexSinkParams
INVESTIGATE_ICON = "🔍"
SILENCE_ICON = "🔕"
MAX_BLOCK_CHARS = 7439
ADAPTIVE_CARD_VERSION = "1.2"
ADAPTIVE_CARD_SCHEMA = "http://adaptivecards.io/schemas/adaptive-card.json"
ATTACHMENT_CONTENT_TYPE = "application/vnd.microsoft.card.adaptive"
class CardTypes(Enum):
ADAPTIVE_CARD = "AdaptiveCard"
class WebexSender:
"""
Send findings to webex.
Parse different findings to show on Webex UI
"""
def __init__(
self, bot_access_token: str, room_id: str, cluster_name: str, account_id: str, webex_params: WebexSinkParams
):
self.cluster_name = cluster_name
self.webex_params = webex_params
self.room_id = room_id
self.account_id = account_id
self.client = WebexTeamsAPI(access_token=bot_access_token) # Create a client using webexteamssdk
def METHOD_NAME(self, finding: Finding, platform_enabled: bool):
message, table_blocks, file_blocks, description = self._separate_blocks(finding, platform_enabled)
adaptive_card_body = self._createAdaptiveCardBody(message, table_blocks, description)
adaptive_card = self._createAdaptiveCard(adaptive_card_body)
attachment = [
{
"contentType": ATTACHMENT_CONTENT_TYPE,
"content": adaptive_card,
}
]
        # text="." is required because the Webex API raises an error unless text, a file, or markdown is provided
self.client.messages.create(roomId=self.room_id, text=".", attachments=attachment)
if file_blocks:
self._send_files(file_blocks)
def _createAdaptiveCardBody(self, message_content, table_blocks: List[TableBlock], description):
body = []
message_content_json = self._createMessageContentJSON(message_content, description)
body.append(message_content_json)
if table_blocks:
self._createTableBlockJSON(table_blocks, body)
return body
def _createTableBlockJSON(self, table_blocks: List[TableBlock], body: list):
for block in table_blocks:
container = {
"type": "Container",
"items": [{"type": "ColumnSet", "columns": []}],
}
for header in block.headers:
container["items"][0]["columns"].append(
{
"type": "Column",
"width": "stretch",
"items": [{"type": "TextBlock", "text": header, "wrap": "true"}],
}
)
            # separate each row so it is added below the column headers
rows = block.render_rows()
for row in rows:
row_json = {"type": "ColumnSet", "columns": []}
for text in row:
row_json["columns"].append(
{
"type": "Column",
"width": "stretch",
"items": [{"type": "TextBlock", "text": text, "wrap": "true"}],
}
)
container["items"].append(row_json)
body.append(container)
def _createMessageContentJSON(self, message_content, description):
message_content_container = {
"type": "Container",
"items": [
{"type": "TextBlock", "text": message_content, "wrap": "true"},
{"type": "TextBlock", "text": description, "wrap": "true"},
],
}
return message_content_container
def _createAdaptiveCard(self, blocks):
# https://learn.microsoft.com/en-us/adaptive-cards/
# metadata for adaptive cards
adaptive_card = {
"type": CardTypes.ADAPTIVE_CARD.value,
"$schema": ADAPTIVE_CARD_SCHEMA,
"version": ADAPTIVE_CARD_VERSION,
}
# Creating a container from message_content and description of finding for adaptive card
adaptive_card["body"] = blocks
return adaptive_card
def _separate_blocks(self, finding: Finding, platform_enabled: bool):
table_blocks: List[TableBlock] = []
file_blocks: List[FileBlock] = []
description = None
message_content = self._create_message_content(finding, platform_enabled)
blocks = [MarkdownBlock(text=f"*Source:* _{self.cluster_name}_\n\n")]
        # Separate blocks into *other* blocks, TableBlocks and FileBlocks
for enrichment in finding.enrichments:
blocks.extend([block for block in enrichment.blocks if self.__is_webex_text_block(block)])
table_blocks.extend([block for block in enrichment.blocks if isinstance(block, TableBlock)])
file_blocks.extend(
add_pngs_for_all_svgs([block for block in enrichment.blocks if isinstance(block, FileBlock)])
)
if not self.webex_params.send_svg:
file_blocks = [b for b in file_blocks if not b.filename.endswith(".svg")]
# first add finding description block
if finding.description:
if table_blocks:
description = finding.description
else:
blocks.append(MarkdownBlock(finding.description))
# Convert *Other* blocks to markdown
for block in blocks:
block_text = Transformer.to_standard_markdown([block])
if len(block_text) + len(message_content) >= MAX_BLOCK_CHARS: # webex message size limit
break
message_content += block_text + "\n"
return message_content, table_blocks, file_blocks, description
def _send_files(self, files: List[FileBlock]):
# Webex allows for only one file attachment per message
# This function sends the files individually to webex
for block in files:
suffix = "." + block.filename.split(".")[1]
if suffix != SVG_SUFFIX:
with tempfile.NamedTemporaryFile(suffix=suffix, delete=True) as f:
f.write(block.contents)
f.flush()
self.client.messages.create(
roomId=self.room_id,
files=[f.name],
)
f.close() # File is deleted when closed
def _create_message_content(self, finding: Finding, platform_enabled: bool):
message_content = self.__build_webex_title(finding.title, finding.severity)
if platform_enabled:
message_content += (
f"[{INVESTIGATE_ICON} Investigate]({finding.get_investigate_uri(self.account_id, self.cluster_name)}) "
)
if finding.add_silence_url:
message_content += f"[{SILENCE_ICON} Silence]({finding.get_prometheus_silence_url(self.account_id, self.cluster_name)})"
message_content += "\n\n"
return message_content
@classmethod
def __is_webex_text_block(cls, block: BaseBlock) -> bool:
return not (isinstance(block, FileBlock) or isinstance(block, TableBlock))
@classmethod
def __build_webex_title(cls, title: str, severity: FindingSeverity) -> str:
icon = FindingSeverity.to_emoji(severity)
return f"{icon} **{severity.name} - {title}**\n\n"
|
4,497 |
deleterule
|
from typing_extensions import Literal, TypeAlias
_Key: TypeAlias = bytes | str | memoryview
ADD_CMD: Literal["TS.ADD"]
ALTER_CMD: Literal["TS.ALTER"]
CREATERULE_CMD: Literal["TS.CREATERULE"]
CREATE_CMD: Literal["TS.CREATE"]
DECRBY_CMD: Literal["TS.DECRBY"]
DELETERULE_CMD: Literal["TS.DELETERULE"]
DEL_CMD: Literal["TS.DEL"]
GET_CMD: Literal["TS.GET"]
INCRBY_CMD: Literal["TS.INCRBY"]
INFO_CMD: Literal["TS.INFO"]
MADD_CMD: Literal["TS.MADD"]
MGET_CMD: Literal["TS.MGET"]
MRANGE_CMD: Literal["TS.MRANGE"]
MREVRANGE_CMD: Literal["TS.MREVRANGE"]
QUERYINDEX_CMD: Literal["TS.QUERYINDEX"]
RANGE_CMD: Literal["TS.RANGE"]
REVRANGE_CMD: Literal["TS.REVRANGE"]
class TimeSeriesCommands:
def create(
self,
key: _Key,
retention_msecs: int | None = None,
uncompressed: bool | None = False,
labels: dict[str, str] | None = None,
chunk_size: int | None = None,
duplicate_policy: str | None = None,
): ...
def alter(
self,
key: _Key,
retention_msecs: int | None = None,
labels: dict[str, str] | None = None,
chunk_size: int | None = None,
duplicate_policy: str | None = None,
): ...
def add(
self,
key: _Key,
timestamp: int | str,
value: float,
retention_msecs: int | None = None,
uncompressed: bool | None = False,
labels: dict[str, str] | None = None,
chunk_size: int | None = None,
duplicate_policy: str | None = None,
): ...
def madd(self, ktv_tuples): ...
def incrby(
self,
key: _Key,
value: float,
timestamp: int | str | None = None,
retention_msecs: int | None = None,
uncompressed: bool | None = False,
labels: dict[str, str] | None = None,
chunk_size: int | None = None,
): ...
def decrby(
self,
key: _Key,
value: float,
timestamp: int | str | None = None,
retention_msecs: int | None = None,
uncompressed: bool | None = False,
labels: dict[str, str] | None = None,
chunk_size: int | None = None,
): ...
def delete(self, key, from_time, to_time): ...
def createrule(
self, source_key: _Key, dest_key: _Key, aggregation_type: str, bucket_size_msec: int, align_timestamp: int | None = None
): ...
def METHOD_NAME(self, source_key, dest_key): ...
def range(
self,
key: _Key,
from_time: int | str,
to_time: int | str,
count: int | None = None,
aggregation_type: str | None = None,
bucket_size_msec: int | None = 0,
filter_by_ts: list[int] | None = None,
filter_by_min_value: int | None = None,
filter_by_max_value: int | None = None,
align: int | str | None = None,
latest: bool | None = False,
bucket_timestamp: str | None = None,
empty: bool | None = False,
): ...
def revrange(
self,
key: _Key,
from_time: int | str,
to_time: int | str,
count: int | None = None,
aggregation_type: str | None = None,
bucket_size_msec: int | None = 0,
filter_by_ts: list[int] | None = None,
filter_by_min_value: int | None = None,
filter_by_max_value: int | None = None,
align: int | str | None = None,
latest: bool | None = False,
bucket_timestamp: str | None = None,
empty: bool | None = False,
): ...
def mrange(
self,
from_time: int | str,
to_time: int | str,
filters: list[str],
count: int | None = None,
aggregation_type: str | None = None,
bucket_size_msec: int | None = 0,
with_labels: bool | None = False,
filter_by_ts: list[int] | None = None,
filter_by_min_value: int | None = None,
filter_by_max_value: int | None = None,
groupby: str | None = None,
reduce: str | None = None,
select_labels: list[str] | None = None,
align: int | str | None = None,
latest: bool | None = False,
bucket_timestamp: str | None = None,
empty: bool | None = False,
): ...
def mrevrange(
self,
from_time: int | str,
to_time: int | str,
filters: list[str],
count: int | None = None,
aggregation_type: str | None = None,
bucket_size_msec: int | None = 0,
with_labels: bool | None = False,
filter_by_ts: list[int] | None = None,
filter_by_min_value: int | None = None,
filter_by_max_value: int | None = None,
groupby: str | None = None,
reduce: str | None = None,
select_labels: list[str] | None = None,
align: int | str | None = None,
latest: bool | None = False,
bucket_timestamp: str | None = None,
empty: bool | None = False,
): ...
def get(self, key: _Key, latest: bool | None = False): ...
def mget(
self,
filters: list[str],
with_labels: bool | None = False,
select_labels: list[str] | None = None,
latest: bool | None = False,
): ...
def info(self, key): ...
def queryindex(self, filters): ...
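# Hypothetical usage sketch against redis-py (illustrative comments, not part of this stub):
# r = redis.Redis()
# r.ts().create("temp:raw", labels={"sensor": "1"})
# r.ts().create("temp:avg")
# r.ts().createrule("temp:raw", "temp:avg", aggregation_type="avg", bucket_size_msec=60000)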
|
4,498 |
enable rate limit
|
import math
import time
import threading
import uuid
from pprint import pformat # noqa
from banal import hash_data
from datetime import datetime
from flask_babel import get_locale
from flask import request, Response, Blueprint
from werkzeug.exceptions import TooManyRequests
import structlog
from structlog.contextvars import clear_contextvars, bind_contextvars
from aleph import __version__
from aleph.queues import get_rate_limit
from aleph.settings import SETTINGS
from aleph.authz import Authz
from aleph.model import Role
log = structlog.get_logger(__name__)
local = threading.local()
blueprint = Blueprint("context", __name__)
class NotModified(Exception):
"""Converts to HTTP status 304."""
pass
def handle_not_modified(exc):
return Response(status=304)
def tag_request(**kwargs):
"""Store metadata for structured log output."""
for tag, value in kwargs.items():
if value is not None:
request._log_tags[tag] = value
def enable_cache(vary_user=True, vary=None):
"""Enable caching in the context of a view.
If desired, instructions on the cache parameters can be included, such as
if the data is fit for public caches (default: no, vary_user) and what
values to include in the generation of an etag.
"""
if not SETTINGS.CACHE:
return
request._http_cache = True
request._http_revalidate = vary is not None
args = sorted(set(request.args.items()))
cache_parts = [args, vary, request._app_locale]
if vary_user and request.authz.logged_in:
cache_parts.extend((request.authz.roles))
request._http_private = True
request._http_etag = hash_data(cache_parts)
if request._http_etag in request.if_none_match:
raise NotModified()
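# Hypothetical view-level usage sketch (route and helper names are illustrative):
# @blueprint.route("/api/2/statistics")
# def statistics():
#     enable_cache(vary_user=False)
#     return jsonify(compute_statistics())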
def _get_remote_ip():
forwarded_for = request.headers.getlist("X-Forwarded-For")
if len(forwarded_for):
return forwarded_for[0]
return request.remote_addr
def _get_credential_authz(credential):
if credential is None or not len(credential):
return
if " " in credential:
method, credential = credential.split(" ", 1)
if method == "Token":
return Authz.from_token(credential)
role = Role.by_api_key(credential)
if role is not None:
return Authz.from_role(role=role)
def get_authz(request):
authz = None
if "Authorization" in request.headers:
credential = request.headers.get("Authorization")
authz = _get_credential_authz(credential)
if authz is None and "api_key" in request.args:
authz = _get_credential_authz(request.args.get("api_key"))
return authz
def enable_authz(request):
authz = get_authz(request)
authz = authz or Authz.from_role(role=None)
request.authz = authz
def METHOD_NAME(request):
if request.authz.logged_in:
return
limit = SETTINGS.API_RATE_LIMIT * SETTINGS.API_RATE_WINDOW
request.rate_limit = get_rate_limit(
_get_remote_ip(), limit=limit, interval=SETTINGS.API_RATE_WINDOW, unit=60
)
if not request.rate_limit.check():
raise TooManyRequests("Rate limit exceeded.")
@blueprint.before_app_request
def setup_request():
"""Set some request attributes at the beginning of the request.
By default, caching will be disabled."""
request._begin_time = time.time()
request._app_locale = str(get_locale())
request._session_id = request.headers.get("X-Aleph-Session")
request._http_cache = False
request._http_private = False
request._http_revalidate = False
request._http_etag = None
request._log_tags = {}
request._trace_id = str(uuid.uuid4())
# First set up auth context so that we know who we are dealing with
# when we log their activity or enforce rate limits
enable_authz(request)
setup_logging_context(request)
METHOD_NAME(request)
@blueprint.after_app_request
def finalize_response(resp):
"""Post-request processing to set cache parameters."""
# Compute overall request duration:
now = time.time()
took = now - getattr(request, "_begin_time", now)
# Finalize reporting of the rate limiter:
if hasattr(request, "rate_limit") and request.rate_limit is not None:
usage = request.rate_limit.update(amount=math.ceil(took))
resp.headers["X-Rate-Limit"] = request.rate_limit.limit
resp.headers["X-Rate-Usage"] = usage
generate_request_log(resp, took)
if resp.is_streamed:
# http://wiki.nginx.org/X-accel#X-Accel-Buffering
resp.headers["X-Accel-Buffering"] = "no"
if not hasattr(request, "_http_cache") or not request._http_cache:
resp.cache_control.no_cache = True
return resp
if request.method != "GET" or resp.status_code != 200:
resp.cache_control.no_cache = True
return resp
resp.cache_control.public = True
resp.vary.add("Accept-Language")
resp.vary.add("Authorization")
if request._http_etag:
resp.set_etag(request._http_etag)
if request._http_revalidate:
resp.cache_control.must_revalidate = request._http_revalidate
else:
resp.cache_control.max_age = 3600 * 12
else:
resp.expires = -1
if request._http_private:
resp.cache_control.public = None
resp.cache_control.private = True
return resp
def setup_logging_context(request):
role_id = None
if hasattr(request, "authz"):
role_id = request.authz.id
    # Set up context variables for structured logging. The context is included
# with every log entry produced by this particular request
clear_contextvars()
bind_contextvars(
v=__version__,
method=request.method,
endpoint=request.endpoint,
referrer=request.referrer,
ip=_get_remote_ip(),
ua=str(request.user_agent),
begin_time=datetime.utcfromtimestamp(request._begin_time).isoformat(),
role_id=role_id,
session_id=getattr(request, "_session_id", None),
locale=getattr(request, "_app_locale", None),
url=request.url,
path=request.full_path,
trace_id=request._trace_id,
)
def generate_request_log(resp, took):
"""Collect data about the request for analytical purposes."""
# Only add the context info that hasn't been already set in the beginning
# of the request
payload = {
"time": datetime.utcnow().isoformat(),
"took": took,
"status": resp.status_code,
}
tags = dict(request.view_args or ())
if hasattr(request, "_log_tags"):
tags.update(request._log_tags)
for tag, value in tags.items():
if value is not None and tag not in payload:
payload[tag] = value
bind_contextvars(**payload)
# This logging statement is here to make sure we log the context of every
# request for analytics purposes
log.info("Request handled", request_logging=True)
|
4,499 |
package
|
from conans import ConanFile, CMake, tools
from conans.errors import ConanInvalidConfiguration
import os
class MsgpackConan(ConanFile):
name = "msgpack"
description = "The official C++ library for MessagePack"
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://github.com/msgpack/msgpack-c"
topics = ("conan", "msgpack", "message-pack", "serialization")
license = "BSL-1.0"
exports_sources = "CMakeLists.txt"
generators = "cmake"
settings = "os", "arch", "build_type", "compiler"
options = {
"fPIC": [True, False],
"shared": [True, False],
"c_api": [True, False],
"cpp_api": [True, False],
"with_boost": [True, False],
"header_only": [True, False]
}
default_options = {
"fPIC": True,
"shared": False,
"c_api": True,
"cpp_api": True,
"with_boost": False,
"header_only": False
}
deprecated = "msgpack-c or msgpack-cxx"
_cmake = None
@property
def _source_subfolder(self):
return "source_subfolder"
@property
def _build_subfolder(self):
return "build_subfolder"
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
# Deprecate header_only option
if self.options.header_only:
self.options.c_api = False
self.options.cpp_api = True
self.output.warn("header_only option is deprecated, prefer c_api=False and cpp_api=True")
del self.options.header_only
if not self.options.c_api and not self.options.cpp_api:
raise ConanInvalidConfiguration("You must enable at least c_api or cpp_api.")
if self.options.c_api:
if self.options.shared:
del self.options.fPIC
del self.settings.compiler.libcxx
del self.settings.compiler.cppstd
else:
del self.options.shared
del self.options.fPIC
if not self.options.cpp_api:
del self.options.with_boost
if self.options.get_safe("with_boost"):
self.options["boost"].header_only = False
self.options["boost"].without_chrono = False
self.options["boost"].without_context = False
self.options["boost"].without_system = False
self.options["boost"].without_timer = False
def requirements(self):
if self.options.get_safe("with_boost"):
self.requires("boost/1.74.0")
def package_id(self):
del self.info.options.with_boost
if not self.options.c_api:
self.info.header_only()
def source(self):
tools.get(**self.conan_data["sources"][self.version])
extracted_dir = "msgpack-c-cpp-" + self.version
os.rename(extracted_dir, self._source_subfolder)
def _configure_cmake(self):
if self._cmake:
return self._cmake
self._cmake = CMake(self)
self._cmake.definitions["MSGPACK_ENABLE_SHARED"] = self.options.shared
self._cmake.definitions["MSGPACK_ENABLE_STATIC"] = not self.options.shared
self._cmake.definitions["MSGPACK_ENABLE_CXX"] = self.options.cpp_api
self._cmake.definitions["MSGPACK_BOOST"] = self.options.get_safe("with_boost", False)
self._cmake.definitions["MSGPACK_32BIT"] = self.settings.arch == "x86"
self._cmake.definitions["MSGPACK_BUILD_EXAMPLES"] = False
self._cmake.definitions["MSGPACK_BUILD_TESTS"] = False
self._cmake.configure(build_folder=self._build_subfolder)
return self._cmake
def build(self):
if self.options.get_safe("with_boost") and \
(self.options["boost"].header_only or self.options["boost"].without_chrono or \
self.options["boost"].without_context or self.options["boost"].without_system or \
self.options["boost"].without_timer):
raise ConanInvalidConfiguration("msgpack with boost requires the following boost components: chrono, context, system and timer.")
if self.options.c_api:
cmake = self._configure_cmake()
cmake.build()
def METHOD_NAME(self):
self.copy("LICENSE_1_0.txt", dst="licenses", src=self._source_subfolder)
if self.options.c_api:
cmake = self._configure_cmake()
cmake.install()
tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
tools.rmdir(os.path.join(self.package_folder, "lib", "cmake"))
else:
self.copy("*.h", dst="include", src=os.path.join(self._source_subfolder, "include"))
self.copy("*.hpp", dst="include", src=os.path.join(self._source_subfolder, "include"))
def package_info(self):
# TODO: CMake imported targets shouldn't be namespaced (waiting implementation of https://github.com/conan-io/conan/issues/7615)
if self.options.c_api:
self.cpp_info.components["msgpackc"].names["cmake_find_package"] = "msgpackc"
self.cpp_info.components["msgpackc"].names["cmake_find_package_multi"] = "msgpackc"
self.cpp_info.components["msgpackc"].libs = tools.collect_libs(self)
if self.options.cpp_api:
self.cpp_info.components["msgpackc-cxx"].names["cmake_find_package"] = "msgpackc-cxx"
self.cpp_info.components["msgpackc-cxx"].names["cmake_find_package_multi"] = "msgpackc-cxx"
if self.options.with_boost:
self.cpp_info.components["msgpackc-cxx"].defines = ["MSGPACK_USE_BOOST"]
self.cpp_info.components["msgpackc-cxx"].requires = ["boost::boost"]
|